Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/arch_dump.c             |    6
-rw-r--r--  target-i386/arch_memory_mapping.c   |   38
-rw-r--r--  target-i386/cc_helper.c             |   10
-rw-r--r--  target-i386/cpu-qom.h               |    4
-rw-r--r--  target-i386/cpu.c                   | 1002
-rw-r--r--  target-i386/cpu.h                   |  125
-rw-r--r--  target-i386/excp_helper.c           |    4
-rw-r--r--  target-i386/fpu_helper.c            |    2
-rw-r--r--  target-i386/helper.c                |  187
-rw-r--r--  target-i386/helper.h                |   10
-rw-r--r--  target-i386/int_helper.c            |    2
-rw-r--r--  target-i386/ioport-user.c           |    2
-rw-r--r--  target-i386/kvm.c                   |  609
-rw-r--r--  target-i386/kvm_i386.h              |   22
-rw-r--r--  target-i386/machine.c               |   44
-rw-r--r--  target-i386/mem_helper.c            |   18
-rw-r--r--  target-i386/misc_helper.c           |    4
-rw-r--r--  target-i386/seg_helper.c            |    8
-rw-r--r--  target-i386/smm_helper.c            |    4
-rw-r--r--  target-i386/svm_helper.c            |   10
-rw-r--r--  target-i386/translate.c             |  521
21 files changed, 1769 insertions(+), 863 deletions(-)
diff --git a/target-i386/arch_dump.c b/target-i386/arch_dump.c
index 4240278edd..2cd2f7f09e 100644
--- a/target-i386/arch_dump.c
+++ b/target-i386/arch_dump.c
@@ -12,8 +12,8 @@
*/
#include "cpu.h"
-#include "cpu-all.h"
-#include "dump.h"
+#include "exec/cpu-all.h"
+#include "sysemu/dump.h"
#include "elf.h"
#ifdef TARGET_X86_64
@@ -403,7 +403,7 @@ int cpu_get_dump_info(ArchDumpInfo *info)
} else {
info->d_class = ELFCLASS32;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (block->offset + block->length > UINT_MAX) {
/* The memory size is greater than 4G */
info->d_class = ELFCLASS64;
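
The QLIST-to-QTAILQ switch tracks a ram_list rework elsewhere in this series:
RAMBlock's "next" link becomes a tail-queue entry, so every iterator has to
change macro families. A minimal sketch of the resulting pattern, with
RAMBlock trimmed to the two fields this hunk touches (the real definitions
live in QEMU's exec headers; ram_addr_t is simplified here):

#include <stdint.h>
#include <stdbool.h>
#include <limits.h>
#include "qemu/queue.h"              /* QTAILQ_* macros (post-reorg path) */

typedef uint64_t ram_addr_t;         /* simplified for this sketch */

typedef struct RAMBlock {
    ram_addr_t offset;
    ram_addr_t length;
    QTAILQ_ENTRY(RAMBlock) next;     /* was: QLIST_ENTRY(RAMBlock) next */
} RAMBlock;

static QTAILQ_HEAD(, RAMBlock) blocks = QTAILQ_HEAD_INITIALIZER(blocks);

/* Mirrors the loop in cpu_get_dump_info(): any block ending above 4G
 * forces the dump to use ELFCLASS64. */
static bool any_block_above_4g(void)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &blocks, next) {
        if (block->offset + block->length > UINT_MAX) {
            return true;
        }
    }
    return false;
}
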
diff --git a/target-i386/arch_memory_mapping.c b/target-i386/arch_memory_mapping.c
index 8e5a56a3a8..c6c7874474 100644
--- a/target-i386/arch_memory_mapping.c
+++ b/target-i386/arch_memory_mapping.c
@@ -12,14 +12,14 @@
*/
#include "cpu.h"
-#include "cpu-all.h"
-#include "memory_mapping.h"
+#include "exec/cpu-all.h"
+#include "sysemu/memory_mapping.h"
/* PAE Paging or IA-32e Paging */
-static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr,
+static void walk_pte(MemoryMappingList *list, hwaddr pte_start_addr,
int32_t a20_mask, target_ulong start_line_addr)
{
- target_phys_addr_t pte_addr, start_paddr;
+ hwaddr pte_addr, start_paddr;
uint64_t pte;
target_ulong start_vaddr;
int i;
@@ -46,10 +46,10 @@ static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr,
/* 32-bit Paging */
static void walk_pte2(MemoryMappingList *list,
- target_phys_addr_t pte_start_addr, int32_t a20_mask,
+ hwaddr pte_start_addr, int32_t a20_mask,
target_ulong start_line_addr)
{
- target_phys_addr_t pte_addr, start_paddr;
+ hwaddr pte_addr, start_paddr;
uint32_t pte;
target_ulong start_vaddr;
int i;
@@ -75,10 +75,10 @@ static void walk_pte2(MemoryMappingList *list,
}
/* PAE Paging or IA-32e Paging */
-static void walk_pde(MemoryMappingList *list, target_phys_addr_t pde_start_addr,
+static void walk_pde(MemoryMappingList *list, hwaddr pde_start_addr,
int32_t a20_mask, target_ulong start_line_addr)
{
- target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
+ hwaddr pde_addr, pte_start_addr, start_paddr;
uint64_t pde;
target_ulong line_addr, start_vaddr;
int i;
@@ -112,10 +112,10 @@ static void walk_pde(MemoryMappingList *list, target_phys_addr_t pde_start_addr,
/* 32-bit Paging */
static void walk_pde2(MemoryMappingList *list,
- target_phys_addr_t pde_start_addr, int32_t a20_mask,
+ hwaddr pde_start_addr, int32_t a20_mask,
bool pse)
{
- target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
+ hwaddr pde_addr, pte_start_addr, start_paddr;
uint32_t pde;
target_ulong line_addr, start_vaddr;
int i;
@@ -149,9 +149,9 @@ static void walk_pde2(MemoryMappingList *list,
/* PAE Paging */
static void walk_pdpe2(MemoryMappingList *list,
- target_phys_addr_t pdpe_start_addr, int32_t a20_mask)
+ hwaddr pdpe_start_addr, int32_t a20_mask)
{
- target_phys_addr_t pdpe_addr, pde_start_addr;
+ hwaddr pdpe_addr, pde_start_addr;
uint64_t pdpe;
target_ulong line_addr;
int i;
@@ -173,10 +173,10 @@ static void walk_pdpe2(MemoryMappingList *list,
#ifdef TARGET_X86_64
/* IA-32e Paging */
static void walk_pdpe(MemoryMappingList *list,
- target_phys_addr_t pdpe_start_addr, int32_t a20_mask,
+ hwaddr pdpe_start_addr, int32_t a20_mask,
target_ulong start_line_addr)
{
- target_phys_addr_t pdpe_addr, pde_start_addr, start_paddr;
+ hwaddr pdpe_addr, pde_start_addr, start_paddr;
uint64_t pdpe;
target_ulong line_addr, start_vaddr;
int i;
@@ -210,9 +210,9 @@ static void walk_pdpe(MemoryMappingList *list,
/* IA-32e Paging */
static void walk_pml4e(MemoryMappingList *list,
- target_phys_addr_t pml4e_start_addr, int32_t a20_mask)
+ hwaddr pml4e_start_addr, int32_t a20_mask)
{
- target_phys_addr_t pml4e_addr, pdpe_start_addr;
+ hwaddr pml4e_addr, pdpe_start_addr;
uint64_t pml4e;
target_ulong line_addr;
int i;
@@ -242,20 +242,20 @@ int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env)
if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
- target_phys_addr_t pml4e_addr;
+ hwaddr pml4e_addr;
pml4e_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
walk_pml4e(list, pml4e_addr, env->a20_mask);
} else
#endif
{
- target_phys_addr_t pdpe_addr;
+ hwaddr pdpe_addr;
pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
walk_pdpe2(list, pdpe_addr, env->a20_mask);
}
} else {
- target_phys_addr_t pde_addr;
+ hwaddr pde_addr;
bool pse;
pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
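
All the churn in this file is one mechanical rename: target_phys_addr_t
becomes hwaddr. For reference, a sketch of what the new type amounts to
(assumption: this matches the exec/hwaddr.h introduced by the same series,
where guest physical addresses are unconditionally 64-bit, independent of
the target's virtual-address width):

#include <stdint.h>
#include <inttypes.h>

typedef uint64_t hwaddr;             /* guest physical address */
#define HWADDR_PRIx PRIx64           /* printf format helper, as in QEMU */

/* target_ulong (guest virtual addresses) still varies per target, which
 * is why the walkers keep both types side by side:
 *
 *   static void walk_pte(MemoryMappingList *list, hwaddr pte_start_addr,
 *                        int32_t a20_mask, target_ulong start_line_addr);
 */
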
diff --git a/target-i386/cc_helper.c b/target-i386/cc_helper.c
index 07892f9049..9422003f24 100644
--- a/target-i386/cc_helper.c
+++ b/target-i386/cc_helper.c
@@ -353,6 +353,16 @@ void helper_sti(CPUX86State *env)
env->eflags |= IF_MASK;
}
+void helper_clac(CPUX86State *env)
+{
+ env->eflags &= ~AC_MASK;
+}
+
+void helper_stac(CPUX86State *env)
+{
+ env->eflags |= AC_MASK;
+}
+
#if 0
/* vm86plus instructions */
void helper_cli_vm(CPUX86State *env)
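
helper_clac()/helper_stac() merely toggle EFLAGS.AC; the interesting part is
the decoder hook in translate.c (changed by this series but not excerpted
here). A sketch consistent with that change — CLAC and STAC encode as
0F 01 CA and 0F 01 CB and are only legal at CPL 0 when the SMAP feature bit
is exposed; everything outside the helper calls is an assumption:

    /* inside the 0F 01 modrm dispatch of disas_insn() */
    case 0xca: /* clac */
        if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
            || s->cpl != 0) {
            goto illegal_op;
        }
        gen_helper_clac(cpu_env);
        gen_jmp_im(s->pc - s->cs_base); /* AC changes MMU behavior: end TB */
        gen_eob(s);
        break;

    case 0xcb: /* stac */
        if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
            || s->cpl != 0) {
            goto illegal_op;
        }
        gen_helper_stac(cpu_env);
        gen_jmp_im(s->pc - s->cs_base);
        gen_eob(s);
        break;
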
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index 5901140480..332916a185 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -20,9 +20,9 @@
#ifndef QEMU_I386_CPU_QOM_H
#define QEMU_I386_CPU_QOM_H
-#include "qemu/cpu.h"
+#include "qom/cpu.h"
#include "cpu.h"
-#include "error.h"
+#include "qapi/error.h"
#ifdef TARGET_X86_64
#define TYPE_X86_CPU "x86_64-cpu"
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 120a2e3d3e..78bd61e18f 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -22,17 +22,28 @@
#include <inttypes.h>
#include "cpu.h"
-#include "kvm.h"
+#include "sysemu/kvm.h"
-#include "qemu-option.h"
-#include "qemu-config.h"
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "qapi/qmp/qerror.h"
-#include "qapi/qapi-visit-core.h"
-#include "arch_init.h"
+#include "qapi/visitor.h"
+#include "sysemu/arch_init.h"
#include "hyperv.h"
#include "hw/hw.h"
+#if defined(CONFIG_KVM)
+#include <linux/kvm_para.h>
+#endif
+
+#include "sysemu/sysemu.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/xen.h"
+#include "hw/sysbus.h"
+#include "hw/apic_internal.h"
+#endif
/* feature flags taken from "Intel Processor Identification and the CPUID
* Instruction" and AMD's "CPUID Specification". In cases of disagreement
@@ -56,34 +67,43 @@ static const char *ext_feature_name[] = {
NULL, "pcid", "dca", "sse4.1|sse4_1",
"sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
"tsc-deadline", "aes", "xsave", "osxsave",
- "avx", NULL, NULL, "hypervisor",
+ "avx", "f16c", "rdrand", "hypervisor",
};
+/* Feature names that are already defined in feature_name[] but are also
+ * set in CPUID[8000_0001].EDX on AMD CPUs are deliberately absent from
+ * ext2_feature_name[]. Those bits are copied automatically into
+ * cpuid_ext2_features if and only if the CPU vendor is AMD.
+ */
static const char *ext2_feature_name[] = {
- "fpu", "vme", "de", "pse",
- "tsc", "msr", "pae", "mce",
- "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
- "mtrr", "pge", "mca", "cmov",
- "pat", "pse36", NULL, NULL /* Linux mp */,
- "nx|xd", NULL, "mmxext", "mmx",
- "fxsr", "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
+ NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
+ NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
+ NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
+ NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
+ NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
+ "nx|xd", NULL, "mmxext", NULL /* mmx */,
+ NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
NULL, "lm|i64", "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
"lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
"cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
"3dnowprefetch", "osvw", "ibs", "xop",
- "skinit", "wdt", NULL, NULL,
- "fma4", NULL, "cvt16", "nodeid_msr",
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
+ "skinit", "wdt", NULL, "lwp",
+ "fma4", "tce", NULL, "nodeid_msr",
+ NULL, "tbm", "topoext", "perfctr_core",
+ "perfctr_nb", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *kvm_feature_name[] = {
- "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
+ "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
};
static const char *svm_feature_name[] = {
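
Entries such as "sse4.1|sse4_1" and "lm|i64" list alternate spellings for
the same bit; lookup_feature() (earlier in cpu.c, outside this diff) matches
a user-supplied flag against each '|'-separated alternative. A simplified,
self-contained sketch of that matching, assuming it mirrors the real helper:

#include <stdbool.h>
#include <string.h>

/* Does `flag` match table entry `entry`, where `entry` may hold
 * '|'-separated alternatives such as "sse4.2|sse4_2"? */
static bool feature_matches(const char *flag, const char *entry)
{
    const char *p = entry;

    while (p) {
        const char *bar = strchr(p, '|');
        size_t len = bar ? (size_t)(bar - p) : strlen(p);

        if (strlen(flag) == len && strncmp(flag, p, len) == 0) {
            return true;
        }
        p = bar ? bar + 1 : NULL;
    }
    return false;
}
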
@@ -97,19 +117,64 @@ static const char *svm_feature_name[] = {
NULL, NULL, NULL, NULL,
};
+static const char *cpuid_7_0_ebx_feature_name[] = {
+ "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
+ NULL, NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+const char *get_register_name_32(unsigned int reg)
+{
+ static const char *reg_names[CPU_NB_REGS32] = {
+ [R_EAX] = "EAX",
+ [R_ECX] = "ECX",
+ [R_EDX] = "EDX",
+ [R_EBX] = "EBX",
+ [R_ESP] = "ESP",
+ [R_EBP] = "EBP",
+ [R_ESI] = "ESI",
+ [R_EDI] = "EDI",
+ };
+
+ if (reg >= CPU_NB_REGS32) {
+ return NULL;
+ }
+ return reg_names[reg];
+}
+
/* collects per-function cpuid data
*/
typedef struct model_features_t {
uint32_t *guest_feat;
uint32_t *host_feat;
- uint32_t check_feat;
const char **flag_names;
uint32_t cpuid;
- } model_features_t;
+ int reg;
+} model_features_t;
int check_cpuid = 0;
int enforce_cpuid = 0;
+#if defined(CONFIG_KVM)
+static uint32_t kvm_default_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
+ (1 << KVM_FEATURE_NOP_IO_DELAY) |
+ (1 << KVM_FEATURE_MMU_OP) |
+ (1 << KVM_FEATURE_CLOCKSOURCE2) |
+ (1 << KVM_FEATURE_ASYNC_PF) |
+ (1 << KVM_FEATURE_STEAL_TIME) |
+ (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+static const uint32_t kvm_pv_eoi_features = (0x1 << KVM_FEATURE_PV_EOI);
+#else
+static uint32_t kvm_default_features = 0;
+static const uint32_t kvm_pv_eoi_features = 0;
+#endif
+
+void enable_kvm_pv_eoi(void)
+{
+ kvm_default_features |= kvm_pv_eoi_features;
+}
+
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
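
kvm_default_features seeds every "-cpu" parse (see cpu_x86_parse_featurestr()
further down), while KVM_FEATURE_PV_EOI stays out of the defaults: keeping it
opt-in lets older machine types remain guest-visible-compatible while newer
ones enable it. A hypothetical board hook showing the intended call pattern
(the function name and placement are assumptions for this sketch):

/* Hypothetical machine-init hook; newer PC machine types would call
 * enable_kvm_pv_eoi() from their setup path. */
static void pc_machine_v1_3_init(void)
{
    enable_kvm_pv_eoi();  /* ORs KVM_FEATURE_PV_EOI into the default set */
    /* ... remaining board setup ... */
}
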
@@ -212,14 +277,17 @@ static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
uint32_t *ext2_features,
uint32_t *ext3_features,
uint32_t *kvm_features,
- uint32_t *svm_features)
+ uint32_t *svm_features,
+ uint32_t *cpuid_7_0_ebx_features)
{
if (!lookup_feature(features, flagname, NULL, feature_name) &&
!lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
!lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
!lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
!lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
- !lookup_feature(svm_features, flagname, NULL, svm_feature_name))
+ !lookup_feature(svm_features, flagname, NULL, svm_feature_name) &&
+ !lookup_feature(cpuid_7_0_ebx_features, flagname, NULL,
+ cpuid_7_0_ebx_feature_name))
fprintf(stderr, "CPU feature %s not found\n", flagname);
}
@@ -237,7 +305,6 @@ typedef struct x86_def_t {
uint32_t xlevel;
char model_id[48];
int vendor_override;
- uint32_t flags;
/* Store the results of Centaur's CPUID instructions */
uint32_t ext4_features;
uint32_t xlevel2;
@@ -256,7 +323,6 @@ typedef struct x86_def_t {
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
CPUID_PAE | CPUID_SEP | CPUID_APIC)
-#define EXT2_FEATURE_MASK 0x0183F3FF
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
@@ -269,12 +335,12 @@ typedef struct x86_def_t {
/* missing:
CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
- CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
+ CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
CPUID_EXT_HYPERVISOR)
/* missing:
CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
-#define TCG_EXT2_FEATURES ((TCG_FEATURES & EXT2_FEATURE_MASK) | \
+#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
/* missing:
@@ -282,6 +348,7 @@ typedef struct x86_def_t {
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_SVM_FEATURES 0
+#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP)
/* maintains list of cpu model definitions
*/
@@ -303,7 +370,7 @@ static x86_def_t builtin_x86_defs[] = {
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36,
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
.ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
@@ -323,7 +390,7 @@ static x86_def_t builtin_x86_defs[] = {
CPUID_PSE36 | CPUID_VME | CPUID_HT,
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
CPUID_EXT_POPCNT,
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
@@ -340,6 +407,9 @@ static x86_def_t builtin_x86_defs[] = {
{
.name = "core2duo",
.level = 10,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 6,
.model = 15,
.stepping = 11,
@@ -371,7 +441,7 @@ static x86_def_t builtin_x86_defs[] = {
/* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
/* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
/* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
@@ -384,6 +454,9 @@ static x86_def_t builtin_x86_defs[] = {
{
.name = "qemu32",
.level = 4,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 6,
.model = 3,
.stepping = 3,
@@ -394,13 +467,16 @@ static x86_def_t builtin_x86_defs[] = {
{
.name = "kvm32",
.level = 5,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 15,
.model = 6,
.stepping = 1,
.features = PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
.ext_features = CPUID_EXT_SSE3,
- .ext2_features = PPRO_FEATURES & EXT2_FEATURE_MASK,
+ .ext2_features = PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
.ext3_features = 0,
.xlevel = 0x80000008,
.model_id = "Common 32-bit KVM processor"
@@ -408,6 +484,9 @@ static x86_def_t builtin_x86_defs[] = {
{
.name = "coreduo",
.level = 10,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 6,
.model = 14,
.stepping = 8,
@@ -423,6 +502,9 @@ static x86_def_t builtin_x86_defs[] = {
{
.name = "486",
.level = 1,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 4,
.model = 0,
.stepping = 0,
@@ -432,6 +514,9 @@ static x86_def_t builtin_x86_defs[] = {
{
.name = "pentium",
.level = 1,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 5,
.model = 4,
.stepping = 3,
@@ -441,6 +526,9 @@ static x86_def_t builtin_x86_defs[] = {
{
.name = "pentium2",
.level = 2,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 6,
.model = 5,
.stepping = 2,
@@ -450,6 +538,9 @@ static x86_def_t builtin_x86_defs[] = {
{
.name = "pentium3",
.level = 2,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 6,
.model = 7,
.stepping = 3,
@@ -465,14 +556,19 @@ static x86_def_t builtin_x86_defs[] = {
.family = 6,
.model = 2,
.stepping = 3,
- .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
+ .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
+ CPUID_MCA,
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
+ CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
.xlevel = 0x80000008,
},
{
.name = "n270",
/* original is on level 10 */
.level = 5,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
.family = 6,
.model = 28,
.stepping = 2,
@@ -482,13 +578,296 @@ static x86_def_t builtin_x86_defs[] = {
/* Some CPUs got no CPUID_SEP */
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_NX,
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
+ CPUID_EXT2_NX,
.ext3_features = CPUID_EXT3_LAHF_LM,
.xlevel = 0x8000000A,
.model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
},
+ {
+ .name = "Conroe",
+ .level = 2,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 2,
+ .stepping = 3,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
+ },
+ {
+ .name = "Penryn",
+ .level = 2,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 2,
+ .stepping = 3,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
+ },
+ {
+ .name = "Nehalem",
+ .level = 2,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 2,
+ .stepping = 3,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
+ },
+ {
+ .name = "Westmere",
+ .level = 11,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 44,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
+ },
+ {
+ .name = "SandyBridge",
+ .level = 0xd,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 42,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+ CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Xeon E312xx (Sandy Bridge)",
+ },
+ {
+ .name = "Haswell",
+ .level = 0xd,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 60,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .cpuid_7_0_ebx_features = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Core Processor (Haswell)",
+ },
+ {
+ .name = "Opteron_G1",
+ .level = 5,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G2",
+ .level = 5,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_CX16 | CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
+ CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
+ CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
+ CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
+ CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
+ CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
+ CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .ext3_features = CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G3",
+ .level = 5,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
+ CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
+ CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
+ CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
+ CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
+ CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
+ CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .ext3_features = CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
+ CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G4",
+ .level = 0xd,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 21,
+ .model = 1,
+ .stepping = 2,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .ext3_features = CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Opteron 62xx class CPU",
+ },
+ {
+ .name = "Opteron_G5",
+ .level = 0xd,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 21,
+ .model = 2,
+ .stepping = 0,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
+ CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+ CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .ext3_features = CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Opteron 63xx class CPU",
+ },
};
+#ifdef CONFIG_KVM
static int cpu_x86_fill_model_id(char *str)
{
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
@@ -503,14 +882,23 @@ static int cpu_x86_fill_model_id(char *str)
}
return 0;
}
+#endif
-static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
+/* Fill an x86_def_t struct with information about the host CPU, and
+ * the CPU features supported by the host hardware + host kernel
+ *
+ * This function may be called only if KVM is enabled.
+ */
+static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def)
{
+#ifdef CONFIG_KVM
+ KVMState *s = kvm_state;
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+ assert(kvm_enabled());
+
x86_cpu_def->name = "host";
host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->level = eax;
x86_cpu_def->vendor1 = ebx;
x86_cpu_def->vendor2 = edx;
x86_cpu_def->vendor3 = ecx;
@@ -519,21 +907,24 @@ static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
x86_cpu_def->stepping = eax & 0x0F;
- x86_cpu_def->ext_features = ecx;
- x86_cpu_def->features = edx;
- if (kvm_enabled() && x86_cpu_def->level >= 7) {
- x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
+ x86_cpu_def->level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+ x86_cpu_def->features = kvm_arch_get_supported_cpuid(s, 0x1, 0, R_EDX);
+ x86_cpu_def->ext_features = kvm_arch_get_supported_cpuid(s, 0x1, 0, R_ECX);
+
+ if (x86_cpu_def->level >= 7) {
+ x86_cpu_def->cpuid_7_0_ebx_features =
+ kvm_arch_get_supported_cpuid(s, 0x7, 0, R_EBX);
} else {
x86_cpu_def->cpuid_7_0_ebx_features = 0;
}
- host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->xlevel = eax;
+ x86_cpu_def->xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+ x86_cpu_def->ext2_features =
+ kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX);
+ x86_cpu_def->ext3_features =
+ kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX);
- host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->ext2_features = edx;
- x86_cpu_def->ext3_features = ecx;
cpu_x86_fill_model_id(x86_cpu_def->model_id);
x86_cpu_def->vendor_override = 0;
@@ -542,23 +933,23 @@ static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
+ eax = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
if (eax >= 0xC0000001) {
/* Support VIA max extended level */
x86_cpu_def->xlevel2 = eax;
host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->ext4_features = edx;
+ x86_cpu_def->ext4_features =
+ kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
}
}
- /*
- * Every SVM feature requires emulation support in KVM - so we can't just
- * read the host features here. KVM might even support SVM features not
- * available on the host hardware. Just set all bits and mask out the
- * unsupported ones later.
- */
- x86_cpu_def->svm_features = -1;
+ /* Other KVM-specific feature fields: */
+ x86_cpu_def->svm_features =
+ kvm_arch_get_supported_cpuid(s, 0x8000000A, 0, R_EDX);
+ x86_cpu_def->kvm_features =
+ kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
- return 0;
+#endif /* CONFIG_KVM */
}
static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
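
The switch from raw host_cpuid() probing to kvm_arch_get_supported_cpuid()
matters because the kernel both masks bits it cannot virtualize and
advertises bits it emulates in software (x2apic and tsc-deadline are classic
examples). A short probe illustrating the difference (sketch; assumes KVM is
enabled and kvm_state is valid):

    uint32_t host_ecx, kvm_ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &host_ecx, &unused);
    kvm_ecx = kvm_arch_get_supported_cpuid(kvm_state, 1, 0, R_ECX);

    /* Set in kvm_ecx but clear in host_ecx: KVM-emulated features.
     * Set in host_ecx but clear in kvm_ecx: present on the host CPU but
     * not exposable to guests by this kernel. */
    uint32_t emulated    = kvm_ecx & ~host_ecx;
    uint32_t unexposable = host_ecx & ~kvm_ecx;
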
@@ -567,38 +958,45 @@ static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
for (i = 0; i < 32; ++i)
if (1 << i & mask) {
- fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
- " flag '%s' [0x%08x]\n",
- f->cpuid >> 16, f->cpuid & 0xffff,
- f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
+ const char *reg = get_register_name_32(f->reg);
+ assert(reg);
+ fprintf(stderr, "warning: host doesn't support requested feature: "
+ "CPUID.%02XH:%s%s%s [bit %d]\n",
+ f->cpuid, reg,
+ f->flag_names[i] ? "." : "",
+ f->flag_names[i] ? f->flag_names[i] : "", i);
break;
}
return 0;
}
/* best effort attempt to inform user requested cpu flags aren't making
- * their way to the guest. Note: ft[].check_feat ideally should be
- * specified via a guest_def field to suppress report of extraneous flags.
+ * their way to the guest.
+ *
+ * This function may be called only if KVM is enabled.
*/
-static int check_features_against_host(x86_def_t *guest_def)
+static int kvm_check_features_against_host(x86_def_t *guest_def)
{
x86_def_t host_def;
uint32_t mask;
int rv, i;
struct model_features_t ft[] = {
{&guest_def->features, &host_def.features,
- ~0, feature_name, 0x00000000},
+ feature_name, 0x00000001, R_EDX},
{&guest_def->ext_features, &host_def.ext_features,
- ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
+ ext_feature_name, 0x00000001, R_ECX},
{&guest_def->ext2_features, &host_def.ext2_features,
- ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
+ ext2_feature_name, 0x80000001, R_EDX},
{&guest_def->ext3_features, &host_def.ext3_features,
- ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};
+ ext3_feature_name, 0x80000001, R_ECX}
+ };
- cpu_x86_fill_host(&host_def);
+ assert(kvm_enabled());
+
+ kvm_cpu_fill_host(&host_def);
for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
for (mask = 1; mask; mask <<= 1)
- if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
+ if (*ft[i].guest_feat & mask &&
!(*ft[i].host_feat & mask)) {
unavailable_host_feature(&ft[i], mask);
rv = 1;
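
With the new format string, asking for AVX on a host that lacks it now
produces, for example:

    warning: host doesn't support requested feature: CPUID.01H:ECX.avx [bit 28]

(the leaf rendered as "01H", the register name from get_register_name_32(),
then the flag name and bit position), replacing the old raw-hex
"host cpuid 0000_0001 lacks requested flag" message.
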
@@ -757,13 +1155,13 @@ static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
char *value;
int i;
- value = (char *)g_malloc(12 + 1);
+ value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
for (i = 0; i < 4; i++) {
value[i ] = env->cpuid_vendor1 >> (8 * i);
value[i + 4] = env->cpuid_vendor2 >> (8 * i);
value[i + 8] = env->cpuid_vendor3 >> (8 * i);
}
- value[12] = '\0';
+ value[CPUID_VENDOR_SZ] = '\0';
return value;
}
@@ -774,7 +1172,7 @@ static void x86_cpuid_set_vendor(Object *obj, const char *value,
CPUX86State *env = &cpu->env;
int i;
- if (strlen(value) != 12) {
+ if (strlen(value) != CPUID_VENDOR_SZ) {
error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
"vendor", value);
return;
@@ -843,7 +1241,7 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
{
X86CPU *cpu = X86_CPU(obj);
const int64_t min = 0;
- const int64_t max = INT_MAX;
+ const int64_t max = INT64_MAX;
int64_t value;
visit_type_int(v, &value, name, errp);
@@ -859,41 +1257,49 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
cpu->env.tsc_khz = value / 1000;
}
-static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
+static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *name)
{
- unsigned int i;
x86_def_t *def;
- char *s = g_strdup(cpu_model);
- char *featurestr, *name = strtok(s, ",");
- /* Features to be added*/
- uint32_t plus_features = 0, plus_ext_features = 0;
- uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
- uint32_t plus_kvm_features = 0, plus_svm_features = 0;
- /* Features to be removed */
- uint32_t minus_features = 0, minus_ext_features = 0;
- uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
- uint32_t minus_kvm_features = 0, minus_svm_features = 0;
- uint32_t numvalue;
-
- for (def = x86_defs; def; def = def->next)
- if (name && !strcmp(name, def->name))
+ for (def = x86_defs; def; def = def->next) {
+ if (name && !strcmp(name, def->name)) {
break;
+ }
+ }
if (kvm_enabled() && name && strcmp(name, "host") == 0) {
- cpu_x86_fill_host(x86_cpu_def);
+ kvm_cpu_fill_host(x86_cpu_def);
} else if (!def) {
- goto error;
+ return -1;
} else {
memcpy(x86_cpu_def, def, sizeof(*def));
}
- plus_kvm_features = ~0; /* not supported bits will be filtered out later */
+ return 0;
+}
+
+/* Parse "+feature,-feature,feature=foo" CPU feature string
+ */
+static int cpu_x86_parse_featurestr(x86_def_t *x86_cpu_def, char *features)
+{
+ unsigned int i;
+ char *featurestr; /* Single 'key=value' string being parsed */
+ /* Features to be added */
+ uint32_t plus_features = 0, plus_ext_features = 0;
+ uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
+ uint32_t plus_kvm_features = kvm_default_features, plus_svm_features = 0;
+ uint32_t plus_7_0_ebx_features = 0;
+ /* Features to be removed */
+ uint32_t minus_features = 0, minus_ext_features = 0;
+ uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
+ uint32_t minus_kvm_features = 0, minus_svm_features = 0;
+ uint32_t minus_7_0_ebx_features = 0;
+ uint32_t numvalue;
add_flagname_to_bitmaps("hypervisor", &plus_features,
- &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
- &plus_kvm_features, &plus_svm_features);
+ &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
+ &plus_kvm_features, &plus_svm_features, &plus_7_0_ebx_features);
- featurestr = strtok(NULL, ",");
+ featurestr = features ? strtok(features, ",") : NULL;
while (featurestr) {
char *val;
@@ -901,12 +1307,12 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
add_flagname_to_bitmaps(featurestr + 1, &plus_features,
&plus_ext_features, &plus_ext2_features,
&plus_ext3_features, &plus_kvm_features,
- &plus_svm_features);
+ &plus_svm_features, &plus_7_0_ebx_features);
} else if (featurestr[0] == '-') {
add_flagname_to_bitmaps(featurestr + 1, &minus_features,
&minus_ext_features, &minus_ext2_features,
&minus_ext3_features, &minus_kvm_features,
- &minus_svm_features);
+ &minus_svm_features, &minus_7_0_ebx_features);
} else if ((val = strchr(featurestr, '='))) {
*val = 0; val++;
if (!strcmp(featurestr, "family")) {
@@ -1012,21 +1418,21 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
x86_cpu_def->ext3_features |= plus_ext3_features;
x86_cpu_def->kvm_features |= plus_kvm_features;
x86_cpu_def->svm_features |= plus_svm_features;
+ x86_cpu_def->cpuid_7_0_ebx_features |= plus_7_0_ebx_features;
x86_cpu_def->features &= ~minus_features;
x86_cpu_def->ext_features &= ~minus_ext_features;
x86_cpu_def->ext2_features &= ~minus_ext2_features;
x86_cpu_def->ext3_features &= ~minus_ext3_features;
x86_cpu_def->kvm_features &= ~minus_kvm_features;
x86_cpu_def->svm_features &= ~minus_svm_features;
- if (check_cpuid) {
- if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
+ x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_7_0_ebx_features;
+ if (check_cpuid && kvm_enabled()) {
+ if (kvm_check_features_against_host(x86_cpu_def) && enforce_cpuid)
goto error;
}
- g_free(s);
return 0;
error:
- g_free(s);
return -1;
}
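
The old cpu_x86_find_by_name() both looked up the model and parsed the
trailing feature string; the parsing half now lives in
cpu_x86_parse_featurestr(). In isolation, the split performed by the
rewritten cpu_x86_register() (shown further down) works like this
(error handling elided):

    x86_def_t def;
    gchar **pieces;

    memset(&def, 0, sizeof(def));
    pieces = g_strsplit("Nehalem,+aes,-popcnt,family=6", ",", 2);

    cpu_x86_find_by_name(&def, pieces[0]);      /* name: "Nehalem" */
    cpu_x86_parse_featurestr(&def, pieces[1]);  /* "+aes,-popcnt,family=6" */

    g_strfreev(pieces);
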
@@ -1060,70 +1466,28 @@ static void listflags(char *buf, int bufsize, uint32_t fbits,
}
}
-/* generate CPU information:
- * -? list model names
- * -?model list model names/IDs
- * -?dump output all model (x86_def_t) data
- * -?cpuid list all recognized cpuid flag names
- */
-void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
+/* generate CPU information. */
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
- unsigned char model = !strcmp("?model", optarg);
- unsigned char dump = !strcmp("?dump", optarg);
- unsigned char cpuid = !strcmp("?cpuid", optarg);
x86_def_t *def;
char buf[256];
- if (cpuid) {
- (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
- listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
- (*cpu_fprintf)(f, " f_edx: %s\n", buf);
- listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
- (*cpu_fprintf)(f, " f_ecx: %s\n", buf);
- listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
- (*cpu_fprintf)(f, " extf_edx: %s\n", buf);
- listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
- (*cpu_fprintf)(f, " extf_ecx: %s\n", buf);
- return;
- }
for (def = x86_defs; def; def = def->next) {
- snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
- if (model || dump) {
- (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
- } else {
- (*cpu_fprintf)(f, "x86 %16s\n", buf);
- }
- if (dump) {
- memcpy(buf, &def->vendor1, sizeof (def->vendor1));
- memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
- memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
- buf[12] = '\0';
- (*cpu_fprintf)(f,
- " family %d model %d stepping %d level %d xlevel 0x%x"
- " vendor \"%s\"\n",
- def->family, def->model, def->stepping, def->level,
- def->xlevel, buf);
- listflags(buf, sizeof (buf), def->features, feature_name, 0);
- (*cpu_fprintf)(f, " feature_edx %08x (%s)\n", def->features,
- buf);
- listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
- 0);
- (*cpu_fprintf)(f, " feature_ecx %08x (%s)\n", def->ext_features,
- buf);
- listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
- 0);
- (*cpu_fprintf)(f, " extfeature_edx %08x (%s)\n",
- def->ext2_features, buf);
- listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
- 0);
- (*cpu_fprintf)(f, " extfeature_ecx %08x (%s)\n",
- def->ext3_features, buf);
- (*cpu_fprintf)(f, "\n");
- }
+ snprintf(buf, sizeof(buf), "%s", def->name);
+ (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
}
if (kvm_enabled()) {
(*cpu_fprintf)(f, "x86 %16s\n", "[host]");
}
+ (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
+ listflags(buf, sizeof(buf), (uint32_t)~0, feature_name, 1);
+ (*cpu_fprintf)(f, " %s\n", buf);
+ listflags(buf, sizeof(buf), (uint32_t)~0, ext_feature_name, 1);
+ (*cpu_fprintf)(f, " %s\n", buf);
+ listflags(buf, sizeof(buf), (uint32_t)~0, ext2_feature_name, 1);
+ (*cpu_fprintf)(f, " %s\n", buf);
+ listflags(buf, sizeof(buf), (uint32_t)~0, ext3_feature_name, 1);
+ (*cpu_fprintf)(f, " %s\n", buf);
}
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
@@ -1147,25 +1511,60 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
return cpu_list;
}
+#ifdef CONFIG_KVM
+static void filter_features_for_kvm(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ KVMState *s = kvm_state;
+
+ env->cpuid_features &=
+ kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+ env->cpuid_ext_features &=
+ kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
+ env->cpuid_ext2_features &=
+ kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX);
+ env->cpuid_ext3_features &=
+ kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX);
+ env->cpuid_svm_features &=
+ kvm_arch_get_supported_cpuid(s, 0x8000000A, 0, R_EDX);
+ env->cpuid_7_0_ebx_features &=
+ kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX);
+ env->cpuid_kvm_features &=
+ kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
+ env->cpuid_ext4_features &=
+ kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
+}
+#endif
+
int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
{
CPUX86State *env = &cpu->env;
x86_def_t def1, *def = &def1;
Error *error = NULL;
+ char *name, *features;
+ gchar **model_pieces;
memset(def, 0, sizeof(*def));
- if (cpu_x86_find_by_name(def, cpu_model) < 0)
- return -1;
- if (def->vendor1) {
- env->cpuid_vendor1 = def->vendor1;
- env->cpuid_vendor2 = def->vendor2;
- env->cpuid_vendor3 = def->vendor3;
- } else {
- env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
- env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
- env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
+ model_pieces = g_strsplit(cpu_model, ",", 2);
+ if (!model_pieces[0]) {
+ goto error;
}
+ name = model_pieces[0];
+ features = model_pieces[1];
+
+ if (cpu_x86_find_by_name(def, name) < 0) {
+ goto error;
+ }
+
+ if (cpu_x86_parse_featurestr(def, features) < 0) {
+ goto error;
+ }
+ assert(def->vendor1);
+ env->cpuid_vendor1 = def->vendor1;
+ env->cpuid_vendor2 = def->vendor2;
+ env->cpuid_vendor3 = def->vendor3;
env->cpuid_vendor_override = def->vendor_override;
object_property_set_int(OBJECT(cpu), def->level, "level", &error);
object_property_set_int(OBJECT(cpu), def->family, "family", &error);
@@ -1179,133 +1578,26 @@ int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
env->cpuid_kvm_features = def->kvm_features;
env->cpuid_svm_features = def->svm_features;
env->cpuid_ext4_features = def->ext4_features;
- env->cpuid_7_0_ebx = def->cpuid_7_0_ebx_features;
+ env->cpuid_7_0_ebx_features = def->cpuid_7_0_ebx_features;
env->cpuid_xlevel2 = def->xlevel2;
object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
"tsc-frequency", &error);
- if (!kvm_enabled()) {
- env->cpuid_features &= TCG_FEATURES;
- env->cpuid_ext_features &= TCG_EXT_FEATURES;
- env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
-#ifdef TARGET_X86_64
- | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
-#endif
- );
- env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
- env->cpuid_svm_features &= TCG_SVM_FEATURES;
- }
+
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
- if (error_is_set(&error)) {
+ if (error) {
+ fprintf(stderr, "%s\n", error_get_pretty(error));
error_free(error);
- return -1;
+ goto error;
}
+
+ g_strfreev(model_pieces);
return 0;
+error:
+ g_strfreev(model_pieces);
+ return -1;
}
#if !defined(CONFIG_USER_ONLY)
-/* copy vendor id string to 32 bit register, nul pad as needed
- */
-static void cpyid(const char *s, uint32_t *id)
-{
- char *d = (char *)id;
- char i;
-
- for (i = sizeof (*id); i--; )
- *d++ = *s ? *s++ : '\0';
-}
-
-/* interpret radix and convert from string to arbitrary scalar,
- * otherwise flag failure
- */
-#define setscalar(pval, str, perr) \
-{ \
- char *pend; \
- unsigned long ul; \
- \
- ul = strtoul(str, &pend, 0); \
- *str && !*pend ? (*pval = ul) : (*perr = 1); \
-}
-
-/* map cpuid options to feature bits, otherwise return failure
- * (option tags in *str are delimited by whitespace)
- */
-static void setfeatures(uint32_t *pval, const char *str,
- const char **featureset, int *perr)
-{
- const char *p, *q;
-
- for (q = p = str; *p || *q; q = p) {
- while (iswhite(*p))
- q = ++p;
- while (*p && !iswhite(*p))
- ++p;
- if (!*q && !*p)
- return;
- if (!lookup_feature(pval, q, p, featureset)) {
- fprintf(stderr, "error: feature \"%.*s\" not available in set\n",
- (int)(p - q), q);
- *perr = 1;
- return;
- }
- }
-}
-
-/* map config file options to x86_def_t form
- */
-static int cpudef_setfield(const char *name, const char *str, void *opaque)
-{
- x86_def_t *def = opaque;
- int err = 0;
-
- if (!strcmp(name, "name")) {
- g_free((void *)def->name);
- def->name = g_strdup(str);
- } else if (!strcmp(name, "model_id")) {
- strncpy(def->model_id, str, sizeof (def->model_id));
- } else if (!strcmp(name, "level")) {
- setscalar(&def->level, str, &err)
- } else if (!strcmp(name, "vendor")) {
- cpyid(&str[0], &def->vendor1);
- cpyid(&str[4], &def->vendor2);
- cpyid(&str[8], &def->vendor3);
- } else if (!strcmp(name, "family")) {
- setscalar(&def->family, str, &err)
- } else if (!strcmp(name, "model")) {
- setscalar(&def->model, str, &err)
- } else if (!strcmp(name, "stepping")) {
- setscalar(&def->stepping, str, &err)
- } else if (!strcmp(name, "feature_edx")) {
- setfeatures(&def->features, str, feature_name, &err);
- } else if (!strcmp(name, "feature_ecx")) {
- setfeatures(&def->ext_features, str, ext_feature_name, &err);
- } else if (!strcmp(name, "extfeature_edx")) {
- setfeatures(&def->ext2_features, str, ext2_feature_name, &err);
- } else if (!strcmp(name, "extfeature_ecx")) {
- setfeatures(&def->ext3_features, str, ext3_feature_name, &err);
- } else if (!strcmp(name, "xlevel")) {
- setscalar(&def->xlevel, str, &err)
- } else {
- fprintf(stderr, "error: unknown option [%s = %s]\n", name, str);
- return (1);
- }
- if (err) {
- fprintf(stderr, "error: bad option value [%s = %s]\n", name, str);
- return (1);
- }
- return (0);
-}
-
-/* register config file entry as x86_def_t
- */
-static int cpudef_register(QemuOpts *opts, void *opaque)
-{
- x86_def_t *def = g_malloc0(sizeof (x86_def_t));
-
- qemu_opt_foreach(opts, cpudef_setfield, def, 1);
- def->next = x86_defs;
- x86_defs = def;
- return (0);
-}
void cpu_clear_apic_feature(CPUX86State *env)
{
@@ -1314,8 +1606,7 @@ void cpu_clear_apic_feature(CPUX86State *env)
#endif /* !CONFIG_USER_ONLY */
-/* register "cpudef" models defined in configuration file. Here we first
- * preload any built-in definitions
+/* Initialize list of CPU models, filling some non-static fields if necessary
*/
void x86_cpudef_setup(void)
{
@@ -1323,24 +1614,23 @@ void x86_cpudef_setup(void)
static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
- builtin_x86_defs[i].next = x86_defs;
- builtin_x86_defs[i].flags = 1;
+ x86_def_t *def = &builtin_x86_defs[i];
+ def->next = x86_defs;
/* Look for specific "cpudef" models that */
/* have the QEMU version in .model_id */
for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
- if (strcmp(model_with_versions[j], builtin_x86_defs[i].name) == 0) {
- pstrcpy(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), "QEMU Virtual CPU version ");
- pstrcat(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), qemu_get_version());
+ if (strcmp(model_with_versions[j], def->name) == 0) {
+ pstrcpy(def->model_id, sizeof(def->model_id),
+ "QEMU Virtual CPU version ");
+ pstrcat(def->model_id, sizeof(def->model_id),
+ qemu_get_version());
break;
}
}
- x86_defs = &builtin_x86_defs[i];
+ x86_defs = def;
}
-#if !defined(CONFIG_USER_ONLY)
- qemu_opts_foreach(qemu_find_opts("cpudef"), cpudef_register, NULL, 0);
-#endif
}
static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
@@ -1365,6 +1655,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
/* test if maximum index reached */
if (index & 0x80000000) {
if (index > env->cpuid_xlevel) {
@@ -1376,7 +1669,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
index = env->cpuid_xlevel;
}
} else {
- index = env->cpuid_xlevel;
+ /* Intel documentation states that invalid EAX input will
+ * return the same information as EAX=cpuid_level
+ * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
+ */
+ index = env->cpuid_level;
}
}
} else {
@@ -1461,7 +1758,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
/* Structured Extended Feature Flags Enumeration Leaf */
if (count == 0) {
*eax = 0; /* Maximum ECX value for sub-leaves */
- *ebx = env->cpuid_7_0_ebx; /* Feature flags */
+ *ebx = env->cpuid_7_0_ebx_features; /* Feature flags */
*ecx = 0; /* Reserved */
*edx = 0; /* Reserved */
} else {
@@ -1481,7 +1778,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
case 0xA:
/* Architectural Performance Monitoring Leaf */
if (kvm_enabled()) {
- KVMState *s = env->kvm_state;
+ KVMState *s = cs->kvm_state;
*eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
*ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
@@ -1504,7 +1801,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
}
if (kvm_enabled()) {
- KVMState *s = env->kvm_state;
+ KVMState *s = cs->kvm_state;
*eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
*ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
@@ -1586,17 +1883,17 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
break;
case 0x8000000A:
- if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
- *eax = 0x00000001; /* SVM Revision */
- *ebx = 0x00000010; /* nr of ASIDs */
- *ecx = 0;
- *edx = env->cpuid_svm_features; /* optional features */
- } else {
- *eax = 0;
- *ebx = 0;
- *ecx = 0;
- *edx = 0;
- }
+ if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
+ *eax = 0x00000001; /* SVM Revision */
+ *ebx = 0x00000010; /* nr of ASIDs */
+ *ecx = 0;
+ *edx = env->cpuid_svm_features; /* optional features */
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
break;
case 0xC0000000:
*eax = env->cpuid_xlevel2;
@@ -1640,7 +1937,7 @@ static void x86_cpu_reset(CPUState *s)
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
- log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
+ log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}
xcc->parent_reset(s);
@@ -1751,12 +2048,97 @@ static void mce_init(X86CPU *cpu)
}
}
+#define MSI_ADDR_BASE 0xfee00000
+
+#ifndef CONFIG_USER_ONLY
+static void x86_cpu_apic_init(X86CPU *cpu, Error **errp)
+{
+ static int apic_mapped;
+ CPUX86State *env = &cpu->env;
+ APICCommonState *apic;
+ const char *apic_type = "apic";
+
+ if (kvm_irqchip_in_kernel()) {
+ apic_type = "kvm-apic";
+ } else if (xen_enabled()) {
+ apic_type = "xen-apic";
+ }
+
+ env->apic_state = qdev_try_create(NULL, apic_type);
+ if (env->apic_state == NULL) {
+ error_setg(errp, "APIC device '%s' could not be created", apic_type);
+ return;
+ }
+
+ object_property_add_child(OBJECT(cpu), "apic",
+ OBJECT(env->apic_state), NULL);
+ qdev_prop_set_uint8(env->apic_state, "id", env->cpuid_apic_id);
+ /* TODO: convert to link<> */
+ apic = APIC_COMMON(env->apic_state);
+ apic->cpu = cpu;
+
+ if (qdev_init(env->apic_state)) {
+ error_setg(errp, "APIC device '%s' could not be initialized",
+ object_get_typename(OBJECT(env->apic_state)));
+ return;
+ }
+
+ /* XXX: mapping more APICs at the same memory location */
+ if (apic_mapped == 0) {
+ /* NOTE: the APIC is directly connected to the CPU - it is not
+ on the global memory bus. */
+ /* XXX: what if the base changes? */
+ sysbus_mmio_map(sysbus_from_qdev(env->apic_state), 0, MSI_ADDR_BASE);
+ apic_mapped = 1;
+ }
+}
+#endif
+
void x86_cpu_realize(Object *obj, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+
+ if (env->cpuid_7_0_ebx_features && env->cpuid_level < 7) {
+ env->cpuid_level = 7;
+ }
+
+ /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
+ * CPUID[1].EDX.
+ */
+ if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
+ env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
+ env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
+ env->cpuid_ext2_features &= ~CPUID_EXT2_AMD_ALIASES;
+ env->cpuid_ext2_features |= (env->cpuid_features
+ & CPUID_EXT2_AMD_ALIASES);
+ }
+
+ if (!kvm_enabled()) {
+ env->cpuid_features &= TCG_FEATURES;
+ env->cpuid_ext_features &= TCG_EXT_FEATURES;
+ env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
+#ifdef TARGET_X86_64
+ | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
+#endif
+ );
+ env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
+ env->cpuid_svm_features &= TCG_SVM_FEATURES;
+ } else {
+#ifdef CONFIG_KVM
+ filter_features_for_kvm(cpu);
+#endif
+ }
#ifndef CONFIG_USER_ONLY
qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
+
+ if (cpu->env.cpuid_features & CPUID_APIC || smp_cpus > 1) {
+ x86_cpu_apic_init(cpu, errp);
+ if (error_is_set(errp)) {
+ return;
+ }
+ }
#endif
mce_init(cpu);
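
x86_cpu_realize() now owns the feature fixups that used to sit in
cpu_x86_register(): raising cpuid_level when leaf-7 flags are present,
mirroring the AMD alias bits, then filtering against TCG or KVM capability.
The alias step in isolation — CPUID_EXT2_AMD_ALIASES names the CPUID[1].EDX
bits that AMD duplicates in CPUID[8000_0001].EDX, covering the same
0x0183F3FF bit set as the deleted EXT2_FEATURE_MASK:

    /* `features` is CPUID[1].EDX, `ext2` is CPUID[8000_0001].EDX.
     * On AMD the aliased bits must always track leaf 1, whatever the
     * user requested on either leaf. */
    ext2 &= ~CPUID_EXT2_AMD_ALIASES;           /* drop stale alias bits  */
    ext2 |= features & CPUID_EXT2_AMD_ALIASES; /* mirror from leaf 1     */
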
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 60f9e972bd..e56921bbe3 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -44,9 +44,9 @@
#define CPUArchState struct CPUX86State
-#include "cpu-defs.h"
+#include "exec/cpu-defs.h"
-#include "softfloat.h"
+#include "fpu/softfloat.h"
#define R_EAX 0
#define R_ECX 1
@@ -123,8 +123,8 @@
/* hidden flags - used internally by qemu to represent additional cpu
states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
- redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
- position to ease oring with eflags. */
+ redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK
+ bit positions to ease oring with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
@@ -147,10 +147,12 @@
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT 16 /* must be same as eflags */
#define HF_VM_SHIFT 17 /* must be same as eflags */
+#define HF_AC_SHIFT 18 /* must be same as eflags */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
+#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
@@ -168,10 +170,12 @@
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_RF_MASK (1 << HF_RF_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
+#define HF_AC_MASK (1 << HF_AC_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
+#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
/* hflags2 */
@@ -210,6 +214,13 @@
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1 << 10)
+#define CR4_VMXE_MASK (1 << 13)
+#define CR4_SMXE_MASK (1 << 14)
+#define CR4_FSGSBASE_MASK (1 << 16)
+#define CR4_PCIDE_MASK (1 << 17)
+#define CR4_OSXSAVE_MASK (1 << 18)
+#define CR4_SMEP_MASK (1 << 20)
+#define CR4_SMAP_MASK (1 << 21)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
@@ -284,6 +295,7 @@
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define MSR_TSC_ADJUST 0x0000003b
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_MTRRcap 0xfe
@@ -382,6 +394,7 @@
#define CPUID_PBE (1 << 31)
#define CPUID_EXT_SSE3 (1 << 0)
+#define CPUID_EXT_PCLMULQDQ (1 << 1)
#define CPUID_EXT_DTES64 (1 << 2)
#define CPUID_EXT_MONITOR (1 << 3)
#define CPUID_EXT_DSCPL (1 << 4)
@@ -391,9 +404,11 @@
#define CPUID_EXT_TM2 (1 << 8)
#define CPUID_EXT_SSSE3 (1 << 9)
#define CPUID_EXT_CID (1 << 10)
+#define CPUID_EXT_FMA (1 << 12)
#define CPUID_EXT_CX16 (1 << 13)
#define CPUID_EXT_XTPR (1 << 14)
#define CPUID_EXT_PDCM (1 << 15)
+#define CPUID_EXT_PCID (1 << 17)
#define CPUID_EXT_DCA (1 << 18)
#define CPUID_EXT_SSE41 (1 << 19)
#define CPUID_EXT_SSE42 (1 << 20)
@@ -401,14 +416,36 @@
#define CPUID_EXT_MOVBE (1 << 22)
#define CPUID_EXT_POPCNT (1 << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1 << 24)
+#define CPUID_EXT_AES (1 << 25)
#define CPUID_EXT_XSAVE (1 << 26)
#define CPUID_EXT_OSXSAVE (1 << 27)
+#define CPUID_EXT_AVX (1 << 28)
+#define CPUID_EXT_F16C (1 << 29)
+#define CPUID_EXT_RDRAND (1 << 30)
#define CPUID_EXT_HYPERVISOR (1 << 31)
+#define CPUID_EXT2_FPU (1 << 0)
+#define CPUID_EXT2_VME (1 << 1)
+#define CPUID_EXT2_DE (1 << 2)
+#define CPUID_EXT2_PSE (1 << 3)
+#define CPUID_EXT2_TSC (1 << 4)
+#define CPUID_EXT2_MSR (1 << 5)
+#define CPUID_EXT2_PAE (1 << 6)
+#define CPUID_EXT2_MCE (1 << 7)
+#define CPUID_EXT2_CX8 (1 << 8)
+#define CPUID_EXT2_APIC (1 << 9)
#define CPUID_EXT2_SYSCALL (1 << 11)
+#define CPUID_EXT2_MTRR (1 << 12)
+#define CPUID_EXT2_PGE (1 << 13)
+#define CPUID_EXT2_MCA (1 << 14)
+#define CPUID_EXT2_CMOV (1 << 15)
+#define CPUID_EXT2_PAT (1 << 16)
+#define CPUID_EXT2_PSE36 (1 << 17)
#define CPUID_EXT2_MP (1 << 19)
#define CPUID_EXT2_NX (1 << 20)
#define CPUID_EXT2_MMXEXT (1 << 22)
+#define CPUID_EXT2_MMX (1 << 23)
+#define CPUID_EXT2_FXSR (1 << 24)
#define CPUID_EXT2_FFXSR (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP (1 << 27)
@@ -416,6 +453,17 @@
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW (1 << 31)
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
+#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
+ CPUID_EXT2_DE | CPUID_EXT2_PSE | \
+ CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
+ CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
+ CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
+ CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
+ CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
+ CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
+ CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
+
#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM (1 << 2)
@@ -427,7 +475,17 @@
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW (1 << 9)
#define CPUID_EXT3_IBS (1 << 10)
+#define CPUID_EXT3_XOP (1 << 11)
#define CPUID_EXT3_SKINIT (1 << 12)
+#define CPUID_EXT3_WDT (1 << 13)
+#define CPUID_EXT3_LWP (1 << 15)
+#define CPUID_EXT3_FMA4 (1 << 16)
+#define CPUID_EXT3_TCE (1 << 17)
+#define CPUID_EXT3_NODEID (1 << 19)
+#define CPUID_EXT3_TBM (1 << 21)
+#define CPUID_EXT3_TOPOEXT (1 << 22)
+#define CPUID_EXT3_PERFCORE (1 << 23)
+#define CPUID_EXT3_PERFNB (1 << 24)
#define CPUID_SVM_NPT (1 << 0)
#define CPUID_SVM_LBRV (1 << 1)
@@ -440,6 +498,21 @@
#define CPUID_SVM_PAUSEFILTER (1 << 10)
#define CPUID_SVM_PFTHRESHOLD (1 << 12)
+#define CPUID_7_0_EBX_FSGSBASE (1 << 0)
+#define CPUID_7_0_EBX_BMI1 (1 << 3)
+#define CPUID_7_0_EBX_HLE (1 << 4)
+#define CPUID_7_0_EBX_AVX2 (1 << 5)
+#define CPUID_7_0_EBX_SMEP (1 << 7)
+#define CPUID_7_0_EBX_BMI2 (1 << 8)
+#define CPUID_7_0_EBX_ERMS (1 << 9)
+#define CPUID_7_0_EBX_INVPCID (1 << 10)
+#define CPUID_7_0_EBX_RTM (1 << 11)
+#define CPUID_7_0_EBX_RDSEED (1 << 18)
+#define CPUID_7_0_EBX_ADX (1 << 19)
+#define CPUID_7_0_EBX_SMAP (1 << 20)
+
+#define CPUID_VENDOR_SZ 12
+
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
@@ -615,7 +688,7 @@ typedef struct {
#define CPU_NB_REGS CPU_NB_REGS32
#endif
-#define NB_MMU_MODES 2
+#define NB_MMU_MODES 3
typedef enum TPRAccess {
TPR_ACCESS_READ,
@@ -699,8 +772,10 @@ typedef struct CPUX86State {
uint64_t system_time_msr;
uint64_t wall_clock_msr;
uint64_t async_pf_en_msr;
+ uint64_t pv_eoi_en_msr;
uint64_t tsc;
+ uint64_t tsc_adjust;
uint64_t tsc_deadline;
uint64_t mcg_status;
@@ -744,7 +819,7 @@ typedef struct CPUX86State {
uint32_t cpuid_xlevel2;
uint32_t cpuid_ext4_features;
/* Flags from CPUID[EAX=7,ECX=0].EBX */
- uint32_t cpuid_7_0_ebx;
+ uint32_t cpuid_7_0_ebx_features;
/* MTRRs */
uint64_t mtrr_fixed[11];
@@ -791,7 +866,7 @@ typedef struct CPUX86State {
X86CPU *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
-void x86_cpu_list (FILE *f, fprintf_function cpu_fprintf, const char *optarg);
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
void x86_cpudef_setup(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
@@ -858,9 +933,11 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
}
}
-static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
+static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
int sipi_vector)
{
+ CPUX86State *env = &cpu->env;
+
env->eip = 0;
cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
sipi_vector << 12,
@@ -946,10 +1023,6 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);
-/* used to debug */
-#define X86_DUMP_FPU 0x0001 /* dump FPU state too */
-#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
-
#define TARGET_PAGE_BITS 12
#ifdef TARGET_X86_64
@@ -975,7 +1048,7 @@ static inline CPUX86State *cpu_init(const char *cpu_model)
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
-#define cpu_list_id x86_cpu_list
+#define cpu_list x86_cpu_list
#define cpudef_setup x86_cpudef_setup
#define CPU_SAVE_VERSION 12
@@ -983,10 +1056,15 @@ static inline CPUX86State *cpu_init(const char *cpu_model)
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
-#define MMU_USER_IDX 1
+#define MMU_MODE2_SUFFIX _ksmap /* Kernel with SMAP override */
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 1
+#define MMU_KSMAP_IDX 2
static inline int cpu_mmu_index (CPUX86State *env)
{
- return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
+ return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
+ ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK))
+ ? MMU_KSMAP_IDX : MMU_KERNEL_IDX;
}
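/* Editor's sketch (not part of the patch): the index selection above,
 * restated as a truth table. Assumes AC_MASK is the EFLAGS.AC bit and
 * that each MMU_MODEn_SUFFIX generates a matching family of softmmu
 * accessors (ld*_kernel, ld*_user, ld*_ksmap).
 *
 *   CPL == 3                     -> MMU_USER_IDX   (user mappings)
 *   CPL < 3, SMAP on, EFLAGS.AC  -> MMU_KSMAP_IDX  (SMAP overridden)
 *   CPL < 3, otherwise           -> MMU_KERNEL_IDX (SMAP enforced)
 */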
#undef EAX
@@ -1041,15 +1119,17 @@ static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
}
#endif
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "svm.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/apic.h"
#endif
-static inline bool cpu_has_work(CPUX86State *env)
+static inline bool cpu_has_work(CPUState *cpu)
{
+ CPUX86State *env = &X86_CPU(cpu)->env;
+
return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
@@ -1059,7 +1139,7 @@ static inline bool cpu_has_work(CPUX86State *env)
CPU_INTERRUPT_MCE));
}
-#include "exec-all.h"
+#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
{
@@ -1072,7 +1152,7 @@ static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
*cs_base = env->segs[R_CS].base;
*pc = *cs_base + env->eip;
*flags = env->hflags |
- (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}
void do_cpu_init(X86CPU *cpu);
@@ -1081,7 +1161,7 @@ void do_cpu_sipi(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2
-void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags);
@@ -1138,4 +1218,9 @@ void do_smm_enter(CPUX86State *env1);
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
+void enable_kvm_pv_eoi(void);
+
+/* Return name of 32-bit register, from a R_* constant */
+const char *get_register_name_32(unsigned int reg);
+
#endif /* CPU_I386_H */
diff --git a/target-i386/excp_helper.c b/target-i386/excp_helper.c
index aaa5ca2090..179ea82f0f 100644
--- a/target-i386/excp_helper.c
+++ b/target-i386/excp_helper.c
@@ -18,8 +18,8 @@
*/
#include "cpu.h"
-#include "qemu-log.h"
-#include "sysemu.h"
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
#include "helper.h"
#if 0
diff --git a/target-i386/fpu_helper.c b/target-i386/fpu_helper.c
index dfc34a6253..44f3d27944 100644
--- a/target-i386/fpu_helper.c
+++ b/target-i386/fpu_helper.c
@@ -22,7 +22,7 @@
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#define FPU_RC_MASK 0xc00
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 8a5da3d7c0..dca1360962 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -18,10 +18,10 @@
*/
#include "cpu.h"
-#include "kvm.h"
+#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
-#include "sysemu.h"
-#include "monitor.h"
+#include "sysemu/sysemu.h"
+#include "monitor/monitor.h"
#endif
//#define DEBUG_MMU
@@ -284,7 +284,7 @@ void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
env->dr[6], env->dr[7]);
}
- if (flags & X86_DUMP_CCOP) {
+ if (flags & CPU_DUMP_CCOP) {
if ((unsigned)env->cc_op < CC_OP_NB)
snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
else
@@ -303,7 +303,7 @@ void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
}
}
cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
- if (flags & X86_DUMP_FPU) {
+ if (flags & CPU_DUMP_FPU) {
int fptag;
fptag = 0;
for(i = 0; i < 8; i++) {
@@ -443,17 +443,27 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
#if defined(DEBUG_MMU)
printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
- if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
- (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
+ if ((new_cr4 ^ env->cr[4]) &
+ (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
+ CR4_SMEP_MASK | CR4_SMAP_MASK)) {
tlb_flush(env, 1);
}
/* SSE handling */
- if (!(env->cpuid_features & CPUID_SSE))
+ if (!(env->cpuid_features & CPUID_SSE)) {
new_cr4 &= ~CR4_OSFXSR_MASK;
- if (new_cr4 & CR4_OSFXSR_MASK)
+ }
+ env->hflags &= ~HF_OSFXSR_MASK;
+ if (new_cr4 & CR4_OSFXSR_MASK) {
env->hflags |= HF_OSFXSR_MASK;
- else
- env->hflags &= ~HF_OSFXSR_MASK;
+ }
+
+ if (!(env->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)) {
+ new_cr4 &= ~CR4_SMAP_MASK;
+ }
+ env->hflags &= ~HF_SMAP_MASK;
+ if (new_cr4 & CR4_SMAP_MASK) {
+ env->hflags |= HF_SMAP_MASK;
+ }
env->cr[4] = new_cr4;
}
@@ -493,7 +503,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, is_write, is_user;
- target_phys_addr_t paddr;
+ hwaddr paddr;
uint32_t page_offset;
target_ulong vaddr, virt_addr;
@@ -591,17 +601,38 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* 2 MB page */
page_size = 2048 * 1024;
ptep ^= PG_NX_MASK;
- if ((ptep & PG_NX_MASK) && is_write1 == 2)
+ if ((ptep & PG_NX_MASK) && is_write1 == 2) {
goto do_fault_protect;
- if (is_user) {
- if (!(ptep & PG_USER_MASK))
+ }
+ switch (mmu_idx) {
+ case MMU_USER_IDX:
+ if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
- if (is_write && !(ptep & PG_RW_MASK))
+ }
+ if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
- } else {
+ }
+ break;
+
+ case MMU_KERNEL_IDX:
+ if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+ /* fall through */
+ case MMU_KSMAP_IDX:
+ if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(ptep & PG_RW_MASK))
+ is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
+ }
+ break;
+
+ default: /* cannot happen */
+ break;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
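/* Editor's sketch: the switch above is repeated for each page-table
 * level below; a hypothetical standalone predicate with the same
 * SMEP/SMAP semantics (assuming, as in this function, that
 * is_write1 == 2 denotes an instruction fetch):
 */
static bool smep_smap_fault_sketch(int mmu_idx, int is_write1,
                                   uint32_t cr4, uint64_t ptep)
{
    if (mmu_idx == MMU_USER_IDX || !(ptep & PG_USER_MASK)) {
        return false;            /* ring 3 access, or supervisor page */
    }
    if (is_write1 == 2) {        /* kernel instruction fetch, user page */
        return (cr4 & CR4_SMEP_MASK) != 0;
    }
    /* Kernel data access to a user page: faults under SMAP unless the
     * access runs with MMU_KSMAP_IDX (EFLAGS.AC set). */
    return mmu_idx == MMU_KERNEL_IDX && (cr4 & CR4_SMAP_MASK) != 0;
}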
@@ -635,15 +666,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
ptep ^= PG_NX_MASK;
if ((ptep & PG_NX_MASK) && is_write1 == 2)
goto do_fault_protect;
- if (is_user) {
- if (!(ptep & PG_USER_MASK))
+ switch (mmu_idx) {
+ case MMU_USER_IDX:
+ if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
- if (is_write && !(ptep & PG_RW_MASK))
+ }
+ if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
- } else {
+ }
+ break;
+
+ case MMU_KERNEL_IDX:
+ if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+ /* fall through */
+ case MMU_KSMAP_IDX:
+ if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(ptep & PG_RW_MASK))
+ is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
+ }
+ break;
+
+ default: /* cannot happen */
+ break;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
@@ -670,15 +721,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
page_size = 4096 * 1024;
- if (is_user) {
- if (!(pde & PG_USER_MASK))
+ switch (mmu_idx) {
+ case MMU_USER_IDX:
+ if (!(pde & PG_USER_MASK)) {
goto do_fault_protect;
- if (is_write && !(pde & PG_RW_MASK))
+ }
+ if (is_write && !(pde & PG_RW_MASK)) {
goto do_fault_protect;
- } else {
+ }
+ break;
+
+ case MMU_KERNEL_IDX:
+ if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+ (pde & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+ /* fall through */
+ case MMU_KSMAP_IDX:
+ if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+ (pde & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(pde & PG_RW_MASK))
+ is_write && !(pde & PG_RW_MASK)) {
goto do_fault_protect;
+ }
+ break;
+
+ default: /* cannot happen */
+ break;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
@@ -707,15 +778,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
}
/* combine pde and pte user and rw protections */
ptep = pte & pde;
- if (is_user) {
- if (!(ptep & PG_USER_MASK))
+ switch (mmu_idx) {
+ case MMU_USER_IDX:
+ if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
- if (is_write && !(ptep & PG_RW_MASK))
+ }
+ if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
- } else {
+ }
+ break;
+
+ case MMU_KERNEL_IDX:
+ if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+ /* fall through */
+ case MMU_KSMAP_IDX:
+ if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(ptep & PG_RW_MASK))
+ is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
+ }
+ break;
+
+ default: /* cannot happen */
+ break;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
@@ -762,8 +853,9 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
if (is_user)
error_code |= PG_ERROR_U_MASK;
if (is_write1 == 2 &&
- (env->efer & MSR_EFER_NXE) &&
- (env->cr[4] & CR4_PAE_MASK))
+ (((env->efer & MSR_EFER_NXE) &&
+ (env->cr[4] & CR4_PAE_MASK)) ||
+ (env->cr[4] & CR4_SMEP_MASK)))
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
@@ -777,11 +869,11 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
return 1;
}
-target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
target_ulong pde_addr, pte_addr;
uint64_t pte;
- target_phys_addr_t paddr;
+ hwaddr paddr;
uint32_t page_offset;
int page_size;
@@ -1049,10 +1141,11 @@ static void do_inject_x86_mce(void *data)
}
}
-void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags)
{
+ CPUX86State *cenv = &cpu->env;
MCEInjectionParams params = {
.mon = mon,
.env = cenv,
@@ -1084,7 +1177,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
return;
}
- run_on_cpu(cenv, do_inject_x86_mce, &params);
+ run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
if (flags & MCE_INJECT_BROADCAST) {
params.bank = 1;
params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
@@ -1096,22 +1189,19 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
continue;
}
params.env = env;
- run_on_cpu(cenv, do_inject_x86_mce, &params);
+ run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
}
}
}
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
- TranslationBlock *tb;
-
if (kvm_enabled()) {
env->tpr_access_type = access;
cpu_interrupt(env, CPU_INTERRUPT_TPR);
} else {
- tb = tb_find_pc(env->mem_io_pc);
- cpu_restore_state(tb, env, env->mem_io_pc);
+ cpu_restore_state(env, env->mem_io_pc);
apic_handle_tpr_access_report(env->apic_state, env->eip, access);
}
@@ -1151,6 +1241,7 @@ X86CPU *cpu_x86_init(const char *cpu_model)
{
X86CPU *cpu;
CPUX86State *env;
+ Error *error = NULL;
cpu = X86_CPU(object_new(TYPE_X86_CPU));
env = &cpu->env;
@@ -1161,8 +1252,12 @@ X86CPU *cpu_x86_init(const char *cpu_model)
return NULL;
}
- x86_cpu_realize(OBJECT(cpu), NULL);
-
+ x86_cpu_realize(OBJECT(cpu), &error);
+ if (error) {
+ error_free(error);
+ object_delete(OBJECT(cpu));
+ return NULL;
+ }
return cpu;
}
diff --git a/target-i386/helper.h b/target-i386/helper.h
index ab6af638e6..9ed720d0ed 100644
--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -1,7 +1,7 @@
-#include "def-helper.h"
+#include "exec/def-helper.h"
-DEF_HELPER_FLAGS_2(cc_compute_all, TCG_CALL_PURE, i32, env, int)
-DEF_HELPER_FLAGS_2(cc_compute_c, TCG_CALL_PURE, i32, env, int)
+DEF_HELPER_FLAGS_2(cc_compute_all, TCG_CALL_NO_SE, i32, env, int)
+DEF_HELPER_FLAGS_2(cc_compute_c, TCG_CALL_NO_SE, i32, env, int)
DEF_HELPER_0(lock, void)
DEF_HELPER_0(unlock, void)
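/* Editor's note: TCG_CALL_NO_SE marks a helper as having no side
 * effects, so TCG may remove a call whose results are unused; it
 * supersedes the old TCG_CALL_PURE flag in the two declarations
 * above. */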
@@ -67,6 +67,8 @@ DEF_HELPER_3(raise_interrupt, void, env, int, int)
DEF_HELPER_2(raise_exception, void, env, int)
DEF_HELPER_1(cli, void, env)
DEF_HELPER_1(sti, void, env)
+DEF_HELPER_1(clac, void, env)
+DEF_HELPER_1(stac, void, env)
DEF_HELPER_1(set_inhibit_irq, void, env)
DEF_HELPER_1(reset_inhibit_irq, void, env)
DEF_HELPER_3(boundw, void, env, tl, int)
@@ -218,4 +220,4 @@ DEF_HELPER_3(rclq, tl, env, tl, tl)
DEF_HELPER_3(rcrq, tl, env, tl, tl)
#endif
-#include "def-helper.h"
+#include "exec/def-helper.h"
diff --git a/target-i386/int_helper.c b/target-i386/int_helper.c
index f39747e806..84b812dcca 100644
--- a/target-i386/int_helper.c
+++ b/target-i386/int_helper.c
@@ -18,7 +18,7 @@
*/
#include "cpu.h"
-#include "host-utils.h"
+#include "qemu/host-utils.h"
#include "helper.h"
//#define DEBUG_MULDIV
diff --git a/target-i386/ioport-user.c b/target-i386/ioport-user.c
index 03fac22d22..f7636e0a87 100644
--- a/target-i386/ioport-user.c
+++ b/target-i386/ioport-user.c
@@ -21,7 +21,7 @@
#include "qemu.h"
#include "qemu-common.h"
-#include "ioport.h"
+#include "exec/ioport.h"
void cpu_outb(pio_addr_t addr, uint8_t val)
{
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 696b14a04a..3acff40c47 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -21,16 +21,18 @@
#include <linux/kvm_para.h>
#include "qemu-common.h"
-#include "sysemu.h"
-#include "kvm.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
#include "kvm_i386.h"
#include "cpu.h"
-#include "gdbstub.h"
-#include "host-utils.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#include "qemu/config-file.h"
#include "hw/pc.h"
#include "hw/apic.h"
-#include "ioport.h"
+#include "exec/ioport.h"
#include "hyperv.h"
+#include "hw/pci/pci.h"
//#define DEBUG_KVM
@@ -61,8 +63,10 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
static bool has_msr_star;
static bool has_msr_hsave_pa;
+static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_async_pf_en;
+static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static int lm_capable_kernel;
@@ -96,6 +100,19 @@ static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
return cpuid;
}
+/* Run the KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
+ * for all entries.
+ */
+static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
+{
+ struct kvm_cpuid2 *cpuid;
+ int max = 1;
+ while ((cpuid = try_get_cpuid(s, max)) == NULL) {
+ max *= 2;
+ }
+ return cpuid;
+}
+
struct kvm_para_features {
int cap;
int feature;
@@ -121,60 +138,98 @@ static int get_para_features(KVMState *s)
}
+/* Return the value of a specific register of a CPUID entry
+ */
+static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+{
+ uint32_t ret = 0;
+ switch (reg) {
+ case R_EAX:
+ ret = entry->eax;
+ break;
+ case R_EBX:
+ ret = entry->ebx;
+ break;
+ case R_ECX:
+ ret = entry->ecx;
+ break;
+ case R_EDX:
+ ret = entry->edx;
+ break;
+ }
+ return ret;
+}
+
+/* Find the entry matching function/index in a kvm_cpuid2 struct
+ */
+static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index)
+{
+ int i;
+ for (i = 0; i < cpuid->nent; ++i) {
+ if (cpuid->entries[i].function == function &&
+ cpuid->entries[i].index == index) {
+ return &cpuid->entries[i];
+ }
+ }
+ /* not found: */
+ return NULL;
+}
+
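/* Editor's sketch: how the three helpers above compose; a hypothetical
 * caller (not part of the patch), mirroring what
 * kvm_arch_get_supported_cpuid() below does:
 */
static uint32_t query_supported_bits(KVMState *s, uint32_t function,
                                     uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid = get_supported_cpuid(s);
    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    uint32_t bits = entry ? cpuid_entry_get_reg(entry, reg) : 0;

    g_free(cpuid);       /* get_supported_cpuid() allocates; caller frees */
    return bits;
}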
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
uint32_t index, int reg)
{
struct kvm_cpuid2 *cpuid;
- int i, max;
uint32_t ret = 0;
uint32_t cpuid_1_edx;
- int has_kvm_features = 0;
+ bool found = false;
- max = 1;
- while ((cpuid = try_get_cpuid(s, max)) == NULL) {
- max *= 2;
+ cpuid = get_supported_cpuid(s);
+
+ struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
+ if (entry) {
+ found = true;
+ ret = cpuid_entry_get_reg(entry, reg);
}
- for (i = 0; i < cpuid->nent; ++i) {
- if (cpuid->entries[i].function == function &&
- cpuid->entries[i].index == index) {
- if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
- has_kvm_features = 1;
- }
- switch (reg) {
- case R_EAX:
- ret = cpuid->entries[i].eax;
- break;
- case R_EBX:
- ret = cpuid->entries[i].ebx;
- break;
- case R_ECX:
- ret = cpuid->entries[i].ecx;
- break;
- case R_EDX:
- ret = cpuid->entries[i].edx;
- switch (function) {
- case 1:
- /* KVM before 2.6.30 misreports the following features */
- ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
- break;
- case 0x80000001:
- /* On Intel, kvm returns cpuid according to the Intel spec,
- * so add missing bits according to the AMD spec:
- */
- cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
- ret |= cpuid_1_edx & 0x183f7ff;
- break;
- }
- break;
- }
+ /* Fixups for the data returned by KVM, below */
+
+ if (function == 1 && reg == R_EDX) {
+ /* KVM before 2.6.30 misreports the following features */
+ ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
+ } else if (function == 1 && reg == R_ECX) {
+ /* We can set the hypervisor flag, even if KVM does not return it on
+ * GET_SUPPORTED_CPUID
+ */
+ ret |= CPUID_EXT_HYPERVISOR;
+ /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
+ * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
+ * and the irqchip is in the kernel.
+ */
+ if (kvm_irqchip_in_kernel() &&
+ kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
+ ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
}
+
+ /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
+ * without the in-kernel irqchip
+ */
+ if (!kvm_irqchip_in_kernel()) {
+ ret &= ~CPUID_EXT_X2APIC;
+ }
+ } else if (function == 0x80000001 && reg == R_EDX) {
+ /* On Intel, kvm returns cpuid according to the Intel spec,
+ * so add missing bits according to the AMD spec:
+ */
+ cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+ ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
}
g_free(cpuid);
/* fallback for older kernels */
- if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
+ if ((function == KVM_CPUID_FEATURES) && !found) {
ret = get_para_features(s);
}
@@ -227,8 +282,9 @@ static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
return -ENOSYS;
}
-static void kvm_mce_inject(CPUX86State *env, target_phys_addr_t paddr, int code)
+static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
+ CPUX86State *env = &cpu->env;
uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
uint64_t mcg_status = MCG_STATUS_MCIP;
@@ -240,7 +296,7 @@ static void kvm_mce_inject(CPUX86State *env, target_phys_addr_t paddr, int code)
status |= 0xc0;
mcg_status |= MCG_STATUS_RIPV;
}
- cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
+ cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
(MCM_ADDR_PHYS << 6) | 0xc,
cpu_x86_support_mca_broadcast(env) ?
MCE_INJECT_BROADCAST : 0);
@@ -252,15 +308,17 @@ static void hardware_memory_error(void)
exit(1);
}
-int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
+ X86CPU *cpu = X86_CPU(c);
+ CPUX86State *env = &cpu->env;
ram_addr_t ram_addr;
- target_phys_addr_t paddr;
+ hwaddr paddr;
if ((env->mcg_cap & MCG_SER_P) && addr
&& (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
- !kvm_physical_memory_addr_from_host(env->kvm_state, addr, &paddr)) {
+ !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!\n");
/* Hope we are lucky for AO MCE */
@@ -271,7 +329,7 @@ int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
}
}
kvm_hwpoison_page_add(ram_addr);
- kvm_mce_inject(env, paddr, code);
+ kvm_mce_inject(cpu, paddr, code);
} else {
if (code == BUS_MCEERR_AO) {
return 0;
@@ -288,18 +346,18 @@ int kvm_arch_on_sigbus(int code, void *addr)
{
if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
ram_addr_t ram_addr;
- target_phys_addr_t paddr;
+ hwaddr paddr;
/* Hope we are lucky for AO MCE */
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
- !kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr,
- &paddr)) {
+ !kvm_physical_memory_addr_from_host(CPU(first_cpu)->kvm_state,
+ addr, &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!: %p\n", addr);
return 0;
}
kvm_hwpoison_page_add(ram_addr);
- kvm_mce_inject(first_cpu, paddr, code);
+ kvm_mce_inject(x86_env_get_cpu(first_cpu), paddr, code);
} else {
if (code == BUS_MCEERR_AO) {
return 0;
@@ -312,8 +370,10 @@ int kvm_arch_on_sigbus(int code, void *addr)
return 0;
}
-static int kvm_inject_mce_oldstyle(CPUX86State *env)
+static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
+
if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
unsigned int bank, bank_num = env->mcg_cap & 0xff;
struct kvm_x86_mce mce;
@@ -337,7 +397,7 @@ static int kvm_inject_mce_oldstyle(CPUX86State *env)
mce.addr = env->mce_banks[bank * 4 + 2];
mce.misc = env->mce_banks[bank * 4 + 3];
- return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
}
return 0;
}
@@ -351,37 +411,20 @@ static void cpu_update_state(void *opaque, int running, RunState state)
}
}
-int kvm_arch_init_vcpu(CPUX86State *env)
+int kvm_arch_init_vcpu(CPUState *cs)
{
struct {
struct kvm_cpuid2 cpuid;
struct kvm_cpuid_entry2 entries[100];
} QEMU_PACKED cpuid_data;
- KVMState *s = env->kvm_state;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
uint32_t limit, i, j, cpuid_i;
uint32_t unused;
struct kvm_cpuid_entry2 *c;
uint32_t signature[3];
int r;
- env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
-
- i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
- j = env->cpuid_ext_features & CPUID_EXT_TSC_DEADLINE_TIMER;
- env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
- env->cpuid_ext_features |= i;
- if (j && kvm_irqchip_in_kernel() &&
- kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
- env->cpuid_ext_features |= CPUID_EXT_TSC_DEADLINE_TIMER;
- }
-
- env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
- 0, R_EDX);
- env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
- 0, R_ECX);
- env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(s, 0x8000000A,
- 0, R_EDX);
-
cpuid_i = 0;
/* Paravirtualization CPUIDs */
@@ -402,8 +445,7 @@ int kvm_arch_init_vcpu(CPUX86State *env)
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = KVM_CPUID_FEATURES;
- c->eax = env->cpuid_kvm_features &
- kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
+ c->eax = env->cpuid_kvm_features;
if (hyperv_enabled()) {
memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
@@ -455,6 +497,8 @@ int kvm_arch_init_vcpu(CPUX86State *env)
has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
+ has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
+
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
for (i = 0; i <= limit; i++) {
@@ -522,8 +566,6 @@ int kvm_arch_init_vcpu(CPUX86State *env)
/* Call Centaur's CPUID instructions if they are supported. */
if (env->cpuid_xlevel2 > 0) {
- env->cpuid_ext4_features &=
- kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
for (i = 0xC0000000; i <= limit; i++) {
@@ -539,12 +581,12 @@ int kvm_arch_init_vcpu(CPUX86State *env)
if (((env->cpuid_version >> 8)&0xF) >= 6
&& (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
- && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
+ && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
uint64_t mcg_cap;
int banks;
int ret;
- ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
+ ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
if (ret < 0) {
fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
return ret;
@@ -555,7 +597,7 @@ int kvm_arch_init_vcpu(CPUX86State *env)
}
mcg_cap &= MCE_CAP_DEF;
mcg_cap |= banks;
- ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
+ ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &mcg_cap);
if (ret < 0) {
fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
return ret;
@@ -567,14 +609,14 @@ int kvm_arch_init_vcpu(CPUX86State *env)
qemu_add_vm_change_state_handler(cpu_update_state, env);
cpuid_data.cpuid.padding = 0;
- r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
+ r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
if (r) {
return r;
}
- r = kvm_check_extension(env->kvm_state, KVM_CAP_TSC_CONTROL);
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
if (r && env->tsc_khz) {
- r = kvm_vcpu_ioctl(env, KVM_SET_TSC_KHZ, env->tsc_khz);
+ r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
if (r < 0) {
fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
return r;
@@ -588,9 +630,10 @@ int kvm_arch_init_vcpu(CPUX86State *env)
return 0;
}
-void kvm_arch_reset_vcpu(CPUX86State *env)
+void kvm_arch_reset_vcpu(CPUState *cs)
{
- X86CPU *cpu = x86_env_get_cpu(env);
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
env->exception_injected = -1;
env->interrupt_injected = -1;
@@ -641,6 +684,10 @@ static int kvm_get_supported_msrs(KVMState *s)
has_msr_hsave_pa = true;
continue;
}
+ if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
+ has_msr_tsc_adjust = true;
+ continue;
+ }
if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
has_msr_tsc_deadline = true;
continue;
@@ -781,13 +828,14 @@ static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
}
}
-static int kvm_getput_regs(CPUX86State *env, int set)
+static int kvm_getput_regs(X86CPU *cpu, int set)
{
+ CPUX86State *env = &cpu->env;
struct kvm_regs regs;
int ret = 0;
if (!set) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
if (ret < 0) {
return ret;
}
@@ -816,14 +864,15 @@ static int kvm_getput_regs(CPUX86State *env, int set)
kvm_getput_reg(&regs.rip, &env->eip, set);
if (set) {
- ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
}
return ret;
}
-static int kvm_put_fpu(CPUX86State *env)
+static int kvm_put_fpu(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_fpu fpu;
int i;
@@ -841,7 +890,7 @@ static int kvm_put_fpu(CPUX86State *env)
memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
fpu.mxcsr = env->mxcsr;
- return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}
#define XSAVE_FCW_FSW 0
@@ -854,14 +903,15 @@ static int kvm_put_fpu(CPUX86State *env)
#define XSAVE_XSTATE_BV 128
#define XSAVE_YMMH_SPACE 144
-static int kvm_put_xsave(CPUX86State *env)
+static int kvm_put_xsave(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_xsave* xsave = env->kvm_xsave_buf;
uint16_t cwd, swd, twd;
int i, r;
if (!kvm_has_xsave()) {
- return kvm_put_fpu(env);
+ return kvm_put_fpu(cpu);
}
memset(xsave, 0, sizeof(struct kvm_xsave));
@@ -884,12 +934,13 @@ static int kvm_put_xsave(CPUX86State *env)
*(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
sizeof env->ymmh_regs);
- r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
+ r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
return r;
}
-static int kvm_put_xcrs(CPUX86State *env)
+static int kvm_put_xcrs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_xcrs xcrs;
if (!kvm_has_xcrs()) {
@@ -900,11 +951,12 @@ static int kvm_put_xcrs(CPUX86State *env)
xcrs.flags = 0;
xcrs.xcrs[0].xcr = 0;
xcrs.xcrs[0].value = env->xcr0;
- return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}
-static int kvm_put_sregs(CPUX86State *env)
+static int kvm_put_sregs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_sregs sregs;
memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
@@ -949,7 +1001,7 @@ static int kvm_put_sregs(CPUX86State *env)
sregs.efer = env->efer;
- return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
@@ -959,8 +1011,9 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
entry->data = value;
}
-static int kvm_put_msrs(CPUX86State *env, int level)
+static int kvm_put_msrs(X86CPU *cpu, int level)
{
+ CPUX86State *env = &cpu->env;
struct {
struct kvm_msrs info;
struct kvm_msr_entry entries[100];
@@ -978,6 +1031,9 @@ static int kvm_put_msrs(CPUX86State *env, int level)
if (has_msr_hsave_pa) {
kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
}
+ if (has_msr_tsc_adjust) {
+ kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
+ }
if (has_msr_tsc_deadline) {
kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
}
@@ -1017,6 +1073,10 @@ static int kvm_put_msrs(CPUX86State *env, int level)
kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
env->async_pf_en_msr);
}
+ if (has_msr_pv_eoi_en) {
+ kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
+ env->pv_eoi_en_msr);
+ }
if (hyperv_hypercall_available()) {
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
@@ -1037,17 +1097,18 @@ static int kvm_put_msrs(CPUX86State *env, int level)
msr_data.info.nmsrs = n;
- return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
-static int kvm_get_fpu(CPUX86State *env)
+static int kvm_get_fpu(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_fpu fpu;
int i, ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
if (ret < 0) {
return ret;
}
@@ -1068,17 +1129,18 @@ static int kvm_get_fpu(CPUX86State *env)
return 0;
}
-static int kvm_get_xsave(CPUX86State *env)
+static int kvm_get_xsave(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_xsave* xsave = env->kvm_xsave_buf;
int ret, i;
uint16_t cwd, swd, twd;
if (!kvm_has_xsave()) {
- return kvm_get_fpu(env);
+ return kvm_get_fpu(cpu);
}
- ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
if (ret < 0) {
return ret;
}
@@ -1106,8 +1168,9 @@ static int kvm_get_xsave(CPUX86State *env)
return 0;
}
-static int kvm_get_xcrs(CPUX86State *env)
+static int kvm_get_xcrs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
int i, ret;
struct kvm_xcrs xcrs;
@@ -1115,7 +1178,7 @@ static int kvm_get_xcrs(CPUX86State *env)
return 0;
}
- ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
if (ret < 0) {
return ret;
}
@@ -1130,13 +1193,14 @@ static int kvm_get_xcrs(CPUX86State *env)
return 0;
}
-static int kvm_get_sregs(CPUX86State *env)
+static int kvm_get_sregs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_sregs sregs;
uint32_t hflags;
int bit, i, ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
@@ -1214,8 +1278,9 @@ static int kvm_get_sregs(CPUX86State *env)
return 0;
}
-static int kvm_get_msrs(CPUX86State *env)
+static int kvm_get_msrs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct {
struct kvm_msrs info;
struct kvm_msr_entry entries[100];
@@ -1234,6 +1299,9 @@ static int kvm_get_msrs(CPUX86State *env)
if (has_msr_hsave_pa) {
msrs[n++].index = MSR_VM_HSAVE_PA;
}
+ if (has_msr_tsc_adjust) {
+ msrs[n++].index = MSR_TSC_ADJUST;
+ }
if (has_msr_tsc_deadline) {
msrs[n++].index = MSR_IA32_TSCDEADLINE;
}
@@ -1259,6 +1327,9 @@ static int kvm_get_msrs(CPUX86State *env)
if (has_msr_async_pf_en) {
msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
}
+ if (has_msr_pv_eoi_en) {
+ msrs[n++].index = MSR_KVM_PV_EOI_EN;
+ }
if (env->mcg_cap) {
msrs[n++].index = MSR_MCG_STATUS;
@@ -1269,7 +1340,7 @@ static int kvm_get_msrs(CPUX86State *env)
}
msr_data.info.nmsrs = n;
- ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
if (ret < 0) {
return ret;
}
@@ -1308,6 +1379,9 @@ static int kvm_get_msrs(CPUX86State *env)
case MSR_IA32_TSC:
env->tsc = msrs[i].data;
break;
+ case MSR_TSC_ADJUST:
+ env->tsc_adjust = msrs[i].data;
+ break;
case MSR_IA32_TSCDEADLINE:
env->tsc_deadline = msrs[i].data;
break;
@@ -1338,25 +1412,29 @@ static int kvm_get_msrs(CPUX86State *env)
case MSR_KVM_ASYNC_PF_EN:
env->async_pf_en_msr = msrs[i].data;
break;
+ case MSR_KVM_PV_EOI_EN:
+ env->pv_eoi_en_msr = msrs[i].data;
+ break;
}
}
return 0;
}
-static int kvm_put_mp_state(CPUX86State *env)
+static int kvm_put_mp_state(X86CPU *cpu)
{
- struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
+ struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
- return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
-static int kvm_get_mp_state(CPUX86State *env)
+static int kvm_get_mp_state(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_mp_state mp_state;
int ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
if (ret < 0) {
return ret;
}
@@ -1367,14 +1445,15 @@ static int kvm_get_mp_state(CPUX86State *env)
return 0;
}
-static int kvm_get_apic(CPUX86State *env)
+static int kvm_get_apic(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
DeviceState *apic = env->apic_state;
struct kvm_lapic_state kapic;
int ret;
if (apic && kvm_irqchip_in_kernel()) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_LAPIC, &kapic);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
if (ret < 0) {
return ret;
}
@@ -1384,21 +1463,23 @@ static int kvm_get_apic(CPUX86State *env)
return 0;
}
-static int kvm_put_apic(CPUX86State *env)
+static int kvm_put_apic(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
DeviceState *apic = env->apic_state;
struct kvm_lapic_state kapic;
if (apic && kvm_irqchip_in_kernel()) {
kvm_put_apic_state(apic, &kapic);
- return kvm_vcpu_ioctl(env, KVM_SET_LAPIC, &kapic);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
}
return 0;
}
-static int kvm_put_vcpu_events(CPUX86State *env, int level)
+static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
+ CPUX86State *env = &cpu->env;
struct kvm_vcpu_events events;
if (!kvm_has_vcpu_events()) {
@@ -1428,11 +1509,12 @@ static int kvm_put_vcpu_events(CPUX86State *env, int level)
KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
}
- return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
-static int kvm_get_vcpu_events(CPUX86State *env)
+static int kvm_get_vcpu_events(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_vcpu_events events;
int ret;
@@ -1440,7 +1522,7 @@ static int kvm_get_vcpu_events(CPUX86State *env)
return 0;
}
- ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
if (ret < 0) {
return ret;
}
@@ -1466,8 +1548,9 @@ static int kvm_get_vcpu_events(CPUX86State *env)
return 0;
}
-static int kvm_guest_debug_workarounds(CPUX86State *env)
+static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
int ret = 0;
unsigned long reinject_trap = 0;
@@ -1495,8 +1578,9 @@ static int kvm_guest_debug_workarounds(CPUX86State *env)
return ret;
}
-static int kvm_put_debugregs(CPUX86State *env)
+static int kvm_put_debugregs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_debugregs dbgregs;
int i;
@@ -1511,11 +1595,12 @@ static int kvm_put_debugregs(CPUX86State *env)
dbgregs.dr7 = env->dr[7];
dbgregs.flags = 0;
- return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}
-static int kvm_get_debugregs(CPUX86State *env)
+static int kvm_get_debugregs(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_debugregs dbgregs;
int i, ret;
@@ -1523,7 +1608,7 @@ static int kvm_get_debugregs(CPUX86State *env)
return 0;
}
- ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
if (ret < 0) {
return ret;
}
@@ -1536,117 +1621,121 @@ static int kvm_get_debugregs(CPUX86State *env)
return 0;
}
-int kvm_arch_put_registers(CPUX86State *env, int level)
+int kvm_arch_put_registers(CPUState *cpu, int level)
{
+ X86CPU *x86_cpu = X86_CPU(cpu);
int ret;
- assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
+ assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
- ret = kvm_getput_regs(env, 1);
+ ret = kvm_getput_regs(x86_cpu, 1);
if (ret < 0) {
return ret;
}
- ret = kvm_put_xsave(env);
+ ret = kvm_put_xsave(x86_cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_put_xcrs(env);
+ ret = kvm_put_xcrs(x86_cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_put_sregs(env);
+ ret = kvm_put_sregs(x86_cpu);
if (ret < 0) {
return ret;
}
/* must be before kvm_put_msrs */
- ret = kvm_inject_mce_oldstyle(env);
+ ret = kvm_inject_mce_oldstyle(x86_cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_put_msrs(env, level);
+ ret = kvm_put_msrs(x86_cpu, level);
if (ret < 0) {
return ret;
}
if (level >= KVM_PUT_RESET_STATE) {
- ret = kvm_put_mp_state(env);
+ ret = kvm_put_mp_state(x86_cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_put_apic(env);
+ ret = kvm_put_apic(x86_cpu);
if (ret < 0) {
return ret;
}
}
- ret = kvm_put_vcpu_events(env, level);
+ ret = kvm_put_vcpu_events(x86_cpu, level);
if (ret < 0) {
return ret;
}
- ret = kvm_put_debugregs(env);
+ ret = kvm_put_debugregs(x86_cpu);
if (ret < 0) {
return ret;
}
/* must be last */
- ret = kvm_guest_debug_workarounds(env);
+ ret = kvm_guest_debug_workarounds(x86_cpu);
if (ret < 0) {
return ret;
}
return 0;
}
-int kvm_arch_get_registers(CPUX86State *env)
+int kvm_arch_get_registers(CPUState *cs)
{
+ X86CPU *cpu = X86_CPU(cs);
int ret;
- assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
+ assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
- ret = kvm_getput_regs(env, 0);
+ ret = kvm_getput_regs(cpu, 0);
if (ret < 0) {
return ret;
}
- ret = kvm_get_xsave(env);
+ ret = kvm_get_xsave(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_xcrs(env);
+ ret = kvm_get_xcrs(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_sregs(env);
+ ret = kvm_get_sregs(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_msrs(env);
+ ret = kvm_get_msrs(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_mp_state(env);
+ ret = kvm_get_mp_state(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_apic(env);
+ ret = kvm_get_apic(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_vcpu_events(env);
+ ret = kvm_get_vcpu_events(cpu);
if (ret < 0) {
return ret;
}
- ret = kvm_get_debugregs(env);
+ ret = kvm_get_debugregs(cpu);
if (ret < 0) {
return ret;
}
return 0;
}
-void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
int ret;
/* Inject NMI */
if (env->interrupt_request & CPU_INTERRUPT_NMI) {
env->interrupt_request &= ~CPU_INTERRUPT_NMI;
DPRINTF("injected NMI\n");
- ret = kvm_vcpu_ioctl(env, KVM_NMI);
+ ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
if (ret < 0) {
fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
strerror(-ret));
@@ -1674,7 +1763,7 @@ void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
intr.irq = irq;
DPRINTF("injected interrupt %d\n", irq);
- ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
+ ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
if (ret < 0) {
fprintf(stderr,
"KVM: injection failed, interrupt lost (%s)\n",
@@ -1698,8 +1787,11 @@ void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
}
}
-void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
if (run->if_flag) {
env->eflags |= IF_MASK;
} else {
@@ -1709,9 +1801,10 @@ void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
cpu_set_apic_base(env->apic_state, run->apic_base);
}
-int kvm_arch_process_async_events(CPUX86State *env)
+int kvm_arch_process_async_events(CPUState *cs)
{
- X86CPU *cpu = x86_env_get_cpu(env);
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
if (env->interrupt_request & CPU_INTERRUPT_MCE) {
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
@@ -1767,8 +1860,10 @@ int kvm_arch_process_async_events(CPUX86State *env)
return env->halted;
}
-static int kvm_handle_halt(CPUX86State *env)
+static int kvm_handle_halt(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
+
if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
!(env->interrupt_request & CPU_INTERRUPT_NMI)) {
@@ -1779,9 +1874,11 @@ static int kvm_handle_halt(CPUX86State *env)
return 0;
}
-static int kvm_handle_tpr_access(CPUX86State *env)
+static int kvm_handle_tpr_access(X86CPU *cpu)
{
- struct kvm_run *run = env->kvm_run;
+ CPUX86State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
apic_handle_tpr_access_report(env->apic_state, run->tpr_access.rip,
run->tpr_access.is_write ? TPR_ACCESS_WRITE
@@ -1789,8 +1886,9 @@ static int kvm_handle_tpr_access(CPUX86State *env)
return 1;
}
-int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
+ CPUX86State *env = &X86_CPU(cpu)->env;
static const uint8_t int3 = 0xcc;
if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
@@ -1800,8 +1898,9 @@ int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp
return 0;
}
-int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
+ CPUX86State *env = &X86_CPU(cpu)->env;
uint8_t int3;
if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
@@ -1895,14 +1994,16 @@ void kvm_arch_remove_all_hw_breakpoints(void)
static CPUWatchpoint hw_watchpoint;
-static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
+static int kvm_handle_debug(X86CPU *cpu,
+ struct kvm_debug_exit_arch *arch_info)
{
+ CPUX86State *env = &cpu->env;
int ret = 0;
int n;
if (arch_info->exception == 1) {
if (arch_info->dr6 & (1 << 14)) {
- if (cpu_single_env->singlestep_enabled) {
+ if (env->singlestep_enabled) {
ret = EXCP_DEBUG;
}
} else {
@@ -1914,13 +2015,13 @@ static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
break;
case 0x1:
ret = EXCP_DEBUG;
- cpu_single_env->watchpoint_hit = &hw_watchpoint;
+ env->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_WRITE;
break;
case 0x3:
ret = EXCP_DEBUG;
- cpu_single_env->watchpoint_hit = &hw_watchpoint;
+ env->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_ACCESS;
break;
@@ -1928,22 +2029,22 @@ static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
}
}
}
- } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
+ } else if (kvm_find_sw_breakpoint(CPU(cpu), arch_info->pc)) {
ret = EXCP_DEBUG;
}
if (ret == 0) {
- cpu_synchronize_state(cpu_single_env);
- assert(cpu_single_env->exception_injected == -1);
+ cpu_synchronize_state(env);
+ assert(env->exception_injected == -1);
/* pass to guest */
- cpu_single_env->exception_injected = arch_info->exception;
- cpu_single_env->has_error_code = 0;
+ env->exception_injected = arch_info->exception;
+ env->has_error_code = 0;
}
return ret;
}
-void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
const uint8_t type_code[] = {
[GDB_BREAKPOINT_HW] = 0x0,
@@ -1955,7 +2056,7 @@ void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
};
int n;
- if (kvm_sw_breakpoints_active(env)) {
+ if (kvm_sw_breakpoints_active(cpu)) {
dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
}
if (nb_hw_breakpoint > 0) {
@@ -1980,21 +2081,22 @@ static bool host_supports_vmx(void)
#define VMX_INVALID_GUEST_STATE 0x80000021
-int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
+ X86CPU *cpu = X86_CPU(cs);
uint64_t code;
int ret;
switch (run->exit_reason) {
case KVM_EXIT_HLT:
DPRINTF("handle_hlt\n");
- ret = kvm_handle_halt(env);
+ ret = kvm_handle_halt(cpu);
break;
case KVM_EXIT_SET_TPR:
ret = 0;
break;
case KVM_EXIT_TPR_ACCESS:
- ret = kvm_handle_tpr_access(env);
+ ret = kvm_handle_tpr_access(cpu);
break;
case KVM_EXIT_FAIL_ENTRY:
code = run->fail_entry.hardware_entry_failure_reason;
@@ -2020,7 +2122,7 @@ int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
break;
case KVM_EXIT_DEBUG:
DPRINTF("kvm_exit_debug\n");
- ret = kvm_handle_debug(&run->debug.arch);
+ ret = kvm_handle_debug(cpu, &run->debug.arch);
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
@@ -2031,8 +2133,11 @@ int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
return ret;
}
-bool kvm_arch_stop_on_emulation_error(CPUX86State *env)
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
kvm_cpu_synchronize_state(env);
return !(env->cr[0] & CR0_PE_MASK) ||
((env->segs[R_CS].selector & 3) != 3);
@@ -2055,3 +2160,143 @@ void kvm_arch_init_irq_routing(KVMState *s)
kvm_msi_via_irqfd_allowed = true;
kvm_gsi_routing_allowed = true;
}
+
+/* Classic KVM device assignment interface. Will remain x86 only. */
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+ uint32_t flags, uint32_t *dev_id)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .segnr = dev_addr->domain,
+ .busnr = dev_addr->bus,
+ .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
+ .flags = flags,
+ };
+ int ret;
+
+ dev_data.assigned_dev_id =
+ (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
+
+ ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ *dev_id = dev_data.assigned_dev_id;
+
+ return 0;
+}
+
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .assigned_dev_id = dev_id,
+ };
+
+ return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
+}
+
+static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
+ uint32_t irq_type, uint32_t guest_irq)
+{
+ struct kvm_assigned_irq assigned_irq = {
+ .assigned_dev_id = dev_id,
+ .guest_irq = guest_irq,
+ .flags = irq_type,
+ };
+
+ if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
+ return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
+ } else {
+ return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
+ }
+}
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
+ uint32_t guest_irq)
+{
+ uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
+ (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
+
+ return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
+}
+
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .assigned_dev_id = dev_id,
+ .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
+}
+
+static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
+ uint32_t type)
+{
+ struct kvm_assigned_irq assigned_irq = {
+ .assigned_dev_id = dev_id,
+ .flags = type,
+ };
+
+ return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
+}
+
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
+ (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
+}
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
+{
+ return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
+ KVM_DEV_IRQ_GUEST_MSI, virq);
+}
+
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
+ KVM_DEV_IRQ_HOST_MSI);
+}
+
+bool kvm_device_msix_supported(KVMState *s)
+{
+ /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
+ * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
+}
+
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+ uint32_t nr_vectors)
+{
+ struct kvm_assigned_msix_nr msix_nr = {
+ .assigned_dev_id = dev_id,
+ .entry_nr = nr_vectors,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
+}
+
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+ int virq)
+{
+ struct kvm_assigned_msix_entry msix_entry = {
+ .assigned_dev_id = dev_id,
+ .gsi = virq,
+ .entry = vector,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
+}
+
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
+ KVM_DEV_IRQ_GUEST_MSIX, 0);
+}
+
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
+ KVM_DEV_IRQ_HOST_MSIX);
+}
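
For orientation, a minimal caller-side sketch of how the classic assignment API above fits together; the device address, guest IRQ number, and function name are hypothetical, and error handling is trimmed to the first failure:

    /* Hypothetical usage of the interface added above: assign the PCI
     * device at 0000:01:00.0, route its INTx to guest IRQ 10, then
     * tear everything down again. */
    static int assign_example(KVMState *s)
    {
        PCIHostDeviceAddress dev_addr = {
            .domain = 0, .bus = 1, .slot = 0, .function = 0,
        };
        uint32_t dev_id;
        int ret;

        ret = kvm_device_pci_assign(s, &dev_addr, 0, &dev_id);
        if (ret < 0) {
            return ret;
        }
        ret = kvm_device_intx_assign(s, dev_id, false, 10);
        if (ret < 0) {
            kvm_device_pci_deassign(s, dev_id);
            return ret;
        }
        kvm_device_intx_deassign(s, dev_id, false);
        return kvm_device_pci_deassign(s, dev_id);
    }
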
diff --git a/target-i386/kvm_i386.h b/target-i386/kvm_i386.h
index b82bbf401e..4392ab4359 100644
--- a/target-i386/kvm_i386.h
+++ b/target-i386/kvm_i386.h
@@ -11,6 +11,28 @@
#ifndef QEMU_KVM_I386_H
#define QEMU_KVM_I386_H
+#include "sysemu/kvm.h"
+
bool kvm_allows_irq0_override(void);
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+ uint32_t flags, uint32_t *dev_id);
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id);
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id,
+ bool use_host_msi, uint32_t guest_irq);
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked);
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi);
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq);
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id);
+
+bool kvm_device_msix_supported(KVMState *s);
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+ uint32_t nr_vectors);
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+ int virq);
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id);
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id);
+
#endif
diff --git a/target-i386/machine.c b/target-i386/machine.c
index a8be058d2d..8354572c7b 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -4,7 +4,7 @@
#include "hw/isa.h"
#include "cpu.h"
-#include "kvm.h"
+#include "sysemu/kvm.h"
static const VMStateDescription vmstate_segment = {
.name = "segment",
@@ -279,6 +279,13 @@ static bool async_pf_msr_needed(void *opaque)
return cpu->async_pf_en_msr != 0;
}
+static bool pv_eoi_msr_needed(void *opaque)
+{
+ CPUX86State *cpu = opaque;
+
+ return cpu->pv_eoi_en_msr != 0;
+}
+
static const VMStateDescription vmstate_async_pf_msr = {
.name = "cpu/async_pf_msr",
.version_id = 1,
@@ -290,6 +297,17 @@ static const VMStateDescription vmstate_async_pf_msr = {
}
};
+static const VMStateDescription vmstate_pv_eoi_msr = {
+ .name = "cpu/async_pv_eoi_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT64(pv_eoi_en_msr, CPUX86State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool fpop_ip_dp_needed(void *opaque)
{
CPUX86State *env = opaque;
@@ -310,6 +328,24 @@ static const VMStateDescription vmstate_fpop_ip_dp = {
}
};
+static bool tsc_adjust_needed(void *opaque)
+{
+ CPUX86State *env = opaque;
+
+ return env->tsc_adjust != 0;
+}
+
+static const VMStateDescription vmstate_msr_tsc_adjust = {
+ .name = "cpu/msr_tsc_adjust",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(tsc_adjust, CPUX86State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool tscdeadline_needed(void *opaque)
{
CPUX86State *env = opaque;
@@ -454,9 +490,15 @@ static const VMStateDescription vmstate_cpu = {
.vmsd = &vmstate_async_pf_msr,
.needed = async_pf_msr_needed,
} , {
+ .vmsd = &vmstate_pv_eoi_msr,
+ .needed = pv_eoi_msr_needed,
+ } , {
.vmsd = &vmstate_fpop_ip_dp,
.needed = fpop_ip_dp_needed,
}, {
+ .vmsd = &vmstate_msr_tsc_adjust,
+ .needed = tsc_adjust_needed,
+ }, {
.vmsd = &vmstate_msr_tscdeadline,
.needed = tscdeadline_needed,
}, {
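
The pv_eoi and tsc_adjust entries above follow QEMU's optional-subsection idiom: a needed() predicate gates whether the subsection is put on the wire, so migration to an older QEMU keeps working while the state is at its default value. As a generic sketch with a hypothetical xyz_msr field:

    /* Sketch of the optional-subsection pattern; xyz_msr is a
     * hypothetical CPUX86State field, not part of this patch. */
    static bool xyz_msr_needed(void *opaque)
    {
        CPUX86State *env = opaque;

        return env->xyz_msr != 0;   /* only migrate non-default state */
    }

    static const VMStateDescription vmstate_xyz_msr = {
        .name = "cpu/xyz_msr",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(xyz_msr, CPUX86State),
            VMSTATE_END_OF_LIST()
        }
    };

The pair is then wired into vmstate_cpu's subsection list as a .vmsd/.needed entry, exactly as done for the two new MSR subsections above.
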
diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
index 7f99c7cfe3..6cf9ba076e 100644
--- a/target-i386/mem_helper.c
+++ b/target-i386/mem_helper.c
@@ -21,7 +21,7 @@
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
/* broken thread support */
@@ -114,16 +114,16 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
#define MMUSUFFIX _mmu
#define SHIFT 0
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 1
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 2
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#define SHIFT 3
-#include "softmmu_template.h"
+#include "exec/softmmu_template.h"
#endif
@@ -135,19 +135,13 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
void tlb_fill(CPUX86State *env, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
- TranslationBlock *tb;
int ret;
ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
if (ret) {
if (retaddr) {
/* now we have a real cpu fault */
- tb = tb_find_pc(retaddr);
- if (tb) {
- /* the PC is inside the translated code. It means that we have
- a virtual CPU fault */
- cpu_restore_state(tb, env, retaddr);
- }
+ cpu_restore_state(env, retaddr);
}
raise_exception_err(env, env->exception_index, env->error_code);
}
diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c
index a02037963f..db3126b79b 100644
--- a/target-i386/misc_helper.c
+++ b/target-i386/misc_helper.c
@@ -18,11 +18,11 @@
*/
#include "cpu.h"
-#include "ioport.h"
+#include "exec/ioport.h"
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
/* check if Port I/O is allowed in TSS */
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 5fff8d59c6..c2a99ee9bc 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -19,19 +19,19 @@
*/
#include "cpu.h"
-#include "qemu-log.h"
+#include "qemu/log.h"
#include "helper.h"
//#define DEBUG_PCALL
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
- log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
+ log_cpu_state_mask(CPU_LOG_PCALL, (env), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
@@ -1177,7 +1177,7 @@ static void do_interrupt_all(CPUX86State *env, int intno, int is_int,
qemu_log(" EAX=" TARGET_FMT_lx, EAX);
}
qemu_log("\n");
- log_cpu_state(env, X86_DUMP_CCOP);
+ log_cpu_state(env, CPU_DUMP_CCOP);
#if 0
{
int i;
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index 8b04eb2e00..eea2fe9782 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -47,7 +47,7 @@ void do_smm_enter(CPUX86State *env)
int i, offset;
qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
- log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
+ log_cpu_state_mask(CPU_LOG_INT, env, CPU_DUMP_CCOP);
env->hflags |= HF_SMM_MASK;
cpu_smm_update(env);
@@ -295,7 +295,7 @@ void helper_rsm(CPUX86State *env)
cpu_smm_update(env);
qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
- log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
+ log_cpu_state_mask(CPU_LOG_INT, env, CPU_DUMP_CCOP);
}
#endif /* !CONFIG_USER_ONLY */
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index 4943c37fee..3f246e9073 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -18,11 +18,11 @@
*/
#include "cpu.h"
-#include "cpu-all.h"
+#include "exec/cpu-all.h"
#include "helper.h"
#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
+#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
/* Secure Virtual Machine helpers */
@@ -85,7 +85,7 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
}
#else
-static inline void svm_save_seg(CPUX86State *env, target_phys_addr_t addr,
+static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
const SegmentCache *sc)
{
stw_phys(addr + offsetof(struct vmcb_seg, selector),
@@ -98,7 +98,7 @@ static inline void svm_save_seg(CPUX86State *env, target_phys_addr_t addr,
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
-static inline void svm_load_seg(CPUX86State *env, target_phys_addr_t addr,
+static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
SegmentCache *sc)
{
unsigned int flags;
@@ -110,7 +110,7 @@ static inline void svm_load_seg(CPUX86State *env, target_phys_addr_t addr,
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
-static inline void svm_load_seg_cache(CPUX86State *env, target_phys_addr_t addr,
+static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
int seg_reg)
{
SegmentCache sc1, *sc = &sc1;
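
The svm_save_seg()/svm_load_seg() pair above round-trips QEMU's segment flags through the VMCB's compressed 12-bit attribute encoding (the attribute bits live at flags bits 8..15 and 20..23). A standalone sketch of the transform, with hypothetical helper names:

    #include <assert.h>
    #include <stdint.h>

    /* Pack QEMU segment flags into the VMCB attrib field, mirroring
     * the shift/mask expression in svm_save_seg() above. */
    static uint16_t pack_vmcb_attrib(uint32_t flags)
    {
        return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
    }

    /* Inverse transform, mirroring svm_load_seg(). */
    static uint32_t unpack_vmcb_attrib(uint16_t attrib)
    {
        return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
    }

    int main(void)
    {
        /* e.g. a flat code segment: type/present byte 0x9b, G+D set */
        uint32_t flags = (0x9bu << 8) | (0xcu << 20);

        assert(unpack_vmcb_attrib(pack_vmcb_attrib(flags)) == flags);
        return 0;
    }
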
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 7ab2ccb190..32d21f52d0 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -24,7 +24,7 @@
#include <signal.h>
#include "cpu.h"
-#include "disas.h"
+#include "disas/disas.h"
#include "tcg-op.h"
#include "helper.h"
@@ -65,7 +65,7 @@ static TCGv cpu_tmp5;
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
-#include "gen-icount.h"
+#include "exec/gen-icount.h"
#ifdef TARGET_X86_64
static int x86_64_hregs;
@@ -107,6 +107,7 @@ typedef struct DisasContext {
int cpuid_ext_features;
int cpuid_ext2_features;
int cpuid_ext3_features;
+ int cpuid_7_0_ebx_features;
} DisasContext;
static void gen_eob(DisasContext *s);
@@ -2017,7 +2018,8 @@ static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
}
}
-static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
+static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
+ int *reg_ptr, int *offset_ptr)
{
target_long disp;
int havesib;
@@ -2043,7 +2045,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
if (base == 4) {
havesib = 1;
- code = cpu_ldub_code(cpu_single_env, s->pc++);
+ code = cpu_ldub_code(env, s->pc++);
scale = (code >> 6) & 3;
index = ((code >> 3) & 7) | REX_X(s);
base = (code & 7);
@@ -2054,7 +2056,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
case 0:
if ((base & 7) == 5) {
base = -1;
- disp = (int32_t)cpu_ldl_code(cpu_single_env, s->pc);
+ disp = (int32_t)cpu_ldl_code(env, s->pc);
s->pc += 4;
if (CODE64(s) && !havesib) {
disp += s->pc + s->rip_offset;
@@ -2064,11 +2066,11 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
}
break;
case 1:
- disp = (int8_t)cpu_ldub_code(cpu_single_env, s->pc++);
+ disp = (int8_t)cpu_ldub_code(env, s->pc++);
break;
default:
case 2:
- disp = (int32_t)cpu_ldl_code(cpu_single_env, s->pc);
+ disp = (int32_t)cpu_ldl_code(env, s->pc);
s->pc += 4;
break;
}
@@ -2131,7 +2133,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
switch (mod) {
case 0:
if (rm == 6) {
- disp = cpu_lduw_code(cpu_single_env, s->pc);
+ disp = cpu_lduw_code(env, s->pc);
s->pc += 2;
gen_op_movl_A0_im(disp);
rm = 0; /* avoid SS override */
@@ -2141,11 +2143,11 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
}
break;
case 1:
- disp = (int8_t)cpu_ldub_code(cpu_single_env, s->pc++);
+ disp = (int8_t)cpu_ldub_code(env, s->pc++);
break;
default:
case 2:
- disp = cpu_lduw_code(cpu_single_env, s->pc);
+ disp = cpu_lduw_code(env, s->pc);
s->pc += 2;
break;
}
@@ -2201,7 +2203,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
*offset_ptr = disp;
}
-static void gen_nop_modrm(DisasContext *s, int modrm)
+static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
int mod, rm, base, code;
@@ -2215,7 +2217,7 @@ static void gen_nop_modrm(DisasContext *s, int modrm)
base = rm;
if (base == 4) {
- code = cpu_ldub_code(cpu_single_env, s->pc++);
+ code = cpu_ldub_code(env, s->pc++);
base = (code & 7);
}
@@ -2275,7 +2277,8 @@ static void gen_add_A0_ds_seg(DisasContext *s)
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
OR_TMP0 */
-static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
+static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
+ int ot, int reg, int is_store)
{
int mod, rm, opreg, disp;
@@ -2292,7 +2295,7 @@ static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_s
gen_op_mov_reg_T0(ot, reg);
}
} else {
- gen_lea_modrm(s, modrm, &opreg, &disp);
+ gen_lea_modrm(env, s, modrm, &opreg, &disp);
if (is_store) {
if (reg != OR_TMP0)
gen_op_mov_TN_reg(ot, 0, reg);
@@ -2305,22 +2308,22 @@ static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_s
}
}
-static inline uint32_t insn_get(DisasContext *s, int ot)
+static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
{
uint32_t ret;
switch(ot) {
case OT_BYTE:
- ret = cpu_ldub_code(cpu_single_env, s->pc);
+ ret = cpu_ldub_code(env, s->pc);
s->pc++;
break;
case OT_WORD:
- ret = cpu_lduw_code(cpu_single_env, s->pc);
+ ret = cpu_lduw_code(env, s->pc);
s->pc += 2;
break;
default:
case OT_LONG:
- ret = cpu_ldl_code(cpu_single_env, s->pc);
+ ret = cpu_ldl_code(env, s->pc);
s->pc += 4;
break;
}
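
A side note on the (int8_t) casts applied to insn_get() results throughout the decoder below (e.g. for jcc Jb displacements): they rely on C's integer conversions to sign-extend the fetched byte. A minimal standalone illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t raw = 0xf0;          /* byte immediate as fetched */
        int32_t imm = (int8_t)raw;    /* sign-extends to -16 */

        assert(imm == -16);
        return 0;
    }
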
@@ -3166,7 +3169,8 @@ static const struct SSEOpHelper_eppi sse_op_table7[256] = {
[0x63] = SSE42_OP(pcmpistri),
};
-static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
+static void gen_sse(CPUX86State *env, DisasContext *s, int b,
+ target_ulong pc_start, int rex_r)
{
int b1, op1_offset, op2_offset, is_xmm, val, ot;
int modrm, mod, rm, reg, reg_addr, offset_addr;
@@ -3229,7 +3233,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
gen_helper_enter_mmx(cpu_env);
}
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7);
if (is_xmm)
reg |= rex_r;
@@ -3240,7 +3244,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x0e7: /* movntq */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
break;
case 0x1e7: /* movntdq */
@@ -3248,20 +3252,20 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x12b: /* movntps */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
break;
case 0x3f0: /* lddqu */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
break;
case 0x22b: /* movntss */
case 0x32b: /* movntsd */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (b1 & 1) {
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
xmm_regs[reg]));
@@ -3274,12 +3278,12 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
} else
#endif
{
- gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
@@ -3289,14 +3293,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
} else
#endif
{
- gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
@@ -3305,7 +3309,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x6f: /* movq mm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
} else {
rm = (modrm & 7);
@@ -3322,7 +3326,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x16f: /* movdqa xmm, ea */
case 0x26f: /* movdqu xmm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3332,7 +3336,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x210: /* movss xmm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_op_movl_T0_0();
@@ -3347,7 +3351,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x310: /* movsd xmm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
gen_op_movl_T0_0();
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
@@ -3361,7 +3365,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x012: /* movlps */
case 0x112: /* movlpd */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
/* movhlps */
@@ -3372,7 +3376,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x212: /* movsldup */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3388,7 +3392,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x312: /* movddup */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3401,7 +3405,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x016: /* movhps */
case 0x116: /* movhpd */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
} else {
/* movlhps */
@@ -3412,7 +3416,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x216: /* movshdup */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3433,8 +3437,8 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (b1 == 1 && reg != 0)
goto illegal_op;
- field_length = cpu_ldub_code(cpu_single_env, s->pc++) & 0x3F;
- bit_index = cpu_ldub_code(cpu_single_env, s->pc++) & 0x3F;
+ field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
+ bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
if (b1 == 1)
@@ -3452,13 +3456,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (s->dflag == 2) {
tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
- gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
- gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
}
break;
case 0x17e: /* movd ea, xmm */
@@ -3466,18 +3470,18 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (s->dflag == 2) {
tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
- gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
- gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
}
break;
case 0x27e: /* movq xmm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3488,7 +3492,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x7f: /* movq ea, mm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
} else {
rm = (modrm & 7);
@@ -3503,7 +3507,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x17f: /* movdqa ea, xmm */
case 0x27f: /* movdqu ea, xmm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3513,7 +3517,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x211: /* movss ea, xmm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_op_st_T0_A0(OT_LONG + s->mem_index);
} else {
@@ -3524,7 +3528,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x311: /* movsd ea, xmm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3535,7 +3539,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x013: /* movlps */
case 0x113: /* movlpd */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
goto illegal_op;
@@ -3544,7 +3548,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x017: /* movhps */
case 0x117: /* movhpd */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
} else {
goto illegal_op;
@@ -3559,7 +3563,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (b1 >= 2) {
goto illegal_op;
}
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (is_xmm) {
gen_op_movl_T0_im(val);
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
@@ -3609,7 +3613,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x12a: /* cvtpi2pd */
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(s->mem_index, op2_offset);
} else {
@@ -3632,7 +3636,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x22a: /* cvtsi2ss */
case 0x32a: /* cvtsi2sd */
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
if (ot == OT_LONG) {
@@ -3654,7 +3658,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x12d: /* cvtpd2pi */
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
op2_offset = offsetof(CPUX86State,xmm_t0);
gen_ldo_env_A0(s->mem_index, op2_offset);
} else {
@@ -3685,7 +3689,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x32d: /* cvtsd2si */
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if ((b >> 8) & 1) {
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
} else {
@@ -3717,8 +3721,8 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0xc4: /* pinsrw */
case 0x1c4:
s->rip_offset = 1;
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
+ val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
tcg_gen_st16_tl(cpu_T[0], cpu_env,
@@ -3734,7 +3738,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (mod != 3)
goto illegal_op;
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
rm = (modrm & 7) | REX_B(s);
@@ -3751,7 +3755,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x1d6: /* movq ea, xmm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3795,7 +3799,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
goto crc32;
case 0x038:
b = modrm;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
@@ -3816,7 +3820,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
} else {
op2_offset = offsetof(CPUX86State,xmm_t0);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
switch (b) {
case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
@@ -3851,7 +3855,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
} else {
op2_offset = offsetof(CPUX86State,mmx_t0);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, op2_offset);
}
}
@@ -3869,7 +3873,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x338: /* crc32 */
crc32:
b = modrm;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (b != 0xf0 && b != 0xf1)
@@ -3889,7 +3893,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
gen_op_mov_TN_reg(OT_LONG, 0, reg);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
cpu_T[0], tcg_const_i32(8 << ot));
@@ -3899,7 +3903,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x03a:
case 0x13a:
b = modrm;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
@@ -3918,9 +3922,9 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
rm = (modrm & 7) | REX_B(s);
if (mod != 3)
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
reg = ((modrm >> 3) & 7) | rex_r;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
switch (b) {
case 0x14: /* pextrb */
tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
@@ -4050,7 +4054,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
} else {
op2_offset = offsetof(CPUX86State,xmm_t0);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, op2_offset);
}
} else {
@@ -4059,11 +4063,11 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
} else {
op2_offset = offsetof(CPUX86State,mmx_t0);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, op2_offset);
}
}
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
s->cc_op = CC_OP_EFLAGS;
@@ -4094,7 +4098,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (is_xmm) {
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
op2_offset = offsetof(CPUX86State,xmm_t0);
if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
b == 0xc2)) {
@@ -4117,7 +4121,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
} else {
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(s->mem_index, op2_offset);
} else {
@@ -4129,7 +4133,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x0f: /* 3DNow! data insns */
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
goto illegal_op;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
sse_fn_epp = sse_op_table5[val];
if (!sse_fn_epp) {
goto illegal_op;
@@ -4140,7 +4144,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x70: /* pshufx insn */
case 0xc6: /* pshufx insn */
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
/* XXX: introduce a new table? */
@@ -4149,7 +4153,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0xc2:
/* compare insns */
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (val >= 8)
goto illegal_op;
sse_fn_epp = sse_op_table4[val][b1];
@@ -4194,7 +4198,8 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
/* convert one instruction. s->is_jmp is set if the translation must
be stopped. Return the next pc value */
-static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
+static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
+ target_ulong pc_start)
{
int b, prefixes, aflag, dflag;
int shift, ot;
@@ -4202,8 +4207,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
target_ulong next_eip, tval;
int rex_w, rex_r;
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
+ if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(pc_start);
+ }
s->pc = pc_start;
prefixes = 0;
aflag = s->code32;
@@ -4218,7 +4224,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
#endif
s->rip_offset = 0; /* for relative ip address */
next_byte:
- b = cpu_ldub_code(cpu_single_env, s->pc);
+ b = cpu_ldub_code(env, s->pc);
s->pc++;
/* check prefixes */
#ifdef TARGET_X86_64
@@ -4333,7 +4339,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x0f:
/**************************/
/* extended op code */
- b = cpu_ldub_code(cpu_single_env, s->pc++) | 0x100;
+ b = cpu_ldub_code(env, s->pc++) | 0x100;
goto reswitch;
/**************************/
@@ -4358,12 +4364,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
switch(f) {
case 0: /* OP Ev, Gv */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else if (op == OP_XORL && rm == reg) {
xor_zero:
@@ -4380,12 +4386,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T1_A0(ot + s->mem_index);
} else if (op == OP_XORL && rm == reg) {
goto xor_zero;
@@ -4395,7 +4401,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
gen_op(s, op, ot, OR_EAX);
break;
@@ -4417,7 +4423,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
@@ -4427,7 +4433,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
s->rip_offset = 1;
else
s->rip_offset = insn_const_size(ot);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else {
opreg = rm;
@@ -4438,10 +4444,10 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x80:
case 0x81:
case 0x82:
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
break;
case 0x83:
- val = (int8_t)insn_get(s, OT_BYTE);
+ val = (int8_t)insn_get(env, s, OT_BYTE);
break;
}
gen_op_movl_T1_im(val);
@@ -4466,14 +4472,14 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (op == 0)
s->rip_offset = insn_const_size(ot);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
gen_op_mov_TN_reg(ot, 0, rm);
@@ -4481,7 +4487,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
switch(op) {
case 0: /* test */
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
gen_op_testl_T0_T1_cc();
s->cc_op = CC_OP_LOGICB + ot;
@@ -4698,7 +4704,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
@@ -4717,7 +4723,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
}
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (op >= 2 && op != 3 && op != 5)
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
@@ -4810,10 +4816,10 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_TN_reg(ot, 1, reg);
gen_op_testl_T0_T1_cc();
s->cc_op = CC_OP_LOGICB + ot;
@@ -4825,7 +4831,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_mov_TN_reg(ot, 0, OR_EAX);
gen_op_movl_T1_im(val);
@@ -4875,18 +4881,18 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x69: /* imul Gv, Ev, I */
case 0x6b:
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (b == 0x69)
s->rip_offset = insn_const_size(ot);
else if (b == 0x6b)
s->rip_offset = 1;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (b == 0x69) {
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
} else if (b == 0x6b) {
- val = (int8_t)insn_get(s, OT_BYTE);
+ val = (int8_t)insn_get(env, s, OT_BYTE);
gen_op_movl_T1_im(val);
} else {
gen_op_mov_TN_reg(ot, 1, reg);
@@ -4939,7 +4945,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
@@ -4950,7 +4956,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_mov_reg_T1(ot, reg);
gen_op_mov_reg_T0(ot, rm);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_mov_TN_reg(ot, 0, reg);
gen_op_ld_T1_A0(ot + s->mem_index);
gen_op_addl_T0_T1();
@@ -4970,7 +4976,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
t0 = tcg_temp_local_new();
@@ -4982,7 +4988,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, t0, rm);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_mov_tl(a0, cpu_A0);
gen_op_ld_v(ot + s->mem_index, t0, a0);
rm = 0; /* avoid warning */
@@ -5018,7 +5024,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x1c7: /* cmpxchg8b */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if ((mod == 3) || ((modrm & 0x38) != 0x8))
goto illegal_op;
@@ -5029,7 +5035,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_jmp_im(pc_start - s->cs_base);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else
#endif
@@ -5039,7 +5045,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_jmp_im(pc_start - s->cs_base);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
}
s->cc_op = CC_OP_EFLAGS;
@@ -5080,9 +5086,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = dflag + OT_WORD;
}
if (b == 0x68)
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
else
- val = (int8_t)insn_get(s, OT_BYTE);
+ val = (int8_t)insn_get(env, s, OT_BYTE);
gen_op_movl_T0_im(val);
gen_push_T0(s);
break;
@@ -5092,7 +5098,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
ot = dflag + OT_WORD;
}
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
gen_pop_T0(s);
if (mod == 3) {
@@ -5103,7 +5109,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 1 << ot;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
s->popl_esp_hack = 0;
gen_pop_update(s);
}
@@ -5111,9 +5117,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0xc8: /* enter */
{
int level;
- val = cpu_lduw_code(cpu_single_env, s->pc);
+ val = cpu_lduw_code(env, s->pc);
s->pc += 2;
- level = cpu_ldub_code(cpu_single_env, s->pc++);
+ level = cpu_ldub_code(env, s->pc++);
gen_enter(s, val, level);
}
break;
@@ -5193,11 +5199,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
- gen_ldst_modrm(s, modrm, ot, reg, 1);
+ gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0xc6:
case 0xc7: /* mov Ev, Iv */
@@ -5205,13 +5211,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod != 3) {
s->rip_offset = insn_const_size(ot);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
}
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_movl_T0_im(val);
if (mod != 3)
gen_op_st_T0_A0(ot + s->mem_index);
@@ -5224,18 +5230,18 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = OT_WORD + dflag;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_reg_T0(ot, reg);
break;
case 0x8e: /* mov seg, Gv */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
if (reg >= 6 || reg == R_CS)
goto illegal_op;
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace */
@@ -5251,7 +5257,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x8c: /* mov Gv, seg */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (reg >= 6)
@@ -5261,7 +5267,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_WORD + dflag;
else
ot = OT_WORD;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0x1b6: /* movzbS Gv, Eb */
@@ -5274,7 +5280,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
d_ot = dflag + OT_WORD;
/* ot is the size of source */
ot = (b & 1) + OT_BYTE;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
@@ -5298,7 +5304,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
gen_op_mov_reg_T0(d_ot, reg);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (b & 8) {
gen_op_lds_T0_A0(ot + s->mem_index);
} else {
@@ -5311,7 +5317,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x8d: /* lea */
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
@@ -5320,7 +5326,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
s->override = -1;
val = s->addseg;
s->addseg = 0;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
s->addseg = val;
gen_op_mov_reg_A0(ot - OT_WORD, reg);
break;
@@ -5338,16 +5344,16 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
if (s->aflag == 2) {
- offset_addr = cpu_ldq_code(cpu_single_env, s->pc);
+ offset_addr = cpu_ldq_code(env, s->pc);
s->pc += 8;
gen_op_movq_A0_im(offset_addr);
} else
#endif
{
if (s->aflag) {
- offset_addr = insn_get(s, OT_LONG);
+ offset_addr = insn_get(env, s, OT_LONG);
} else {
- offset_addr = insn_get(s, OT_WORD);
+ offset_addr = insn_get(env, s, OT_WORD);
}
gen_op_movl_A0_im(offset_addr);
}
@@ -5385,7 +5391,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_mov_reg_T0(OT_BYTE, R_EAX);
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
- val = insn_get(s, OT_BYTE);
+ val = insn_get(env, s, OT_BYTE);
gen_op_movl_T0_im(val);
gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
break;
@@ -5394,7 +5400,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (dflag == 2) {
uint64_t tmp;
/* 64 bit case */
- tmp = cpu_ldq_code(cpu_single_env, s->pc);
+ tmp = cpu_ldq_code(env, s->pc);
s->pc += 8;
reg = (b & 7) | REX_B(s);
gen_movtl_T0_im(tmp);
@@ -5403,7 +5409,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
#endif
{
ot = dflag ? OT_LONG : OT_WORD;
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
reg = (b & 7) | REX_B(s);
gen_op_movl_T0_im(val);
gen_op_mov_reg_T0(ot, reg);
@@ -5422,7 +5428,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
@@ -5433,7 +5439,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_mov_reg_T0(ot, rm);
gen_op_mov_reg_T1(ot, reg);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_mov_TN_reg(ot, 0, reg);
/* for xchg, lock is implicit */
if (!(prefixes & PREFIX_LOCK))
@@ -5465,12 +5471,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
op = R_GS;
do_lxx:
ot = dflag ? OT_LONG : OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T1_A0(ot + s->mem_index);
gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
/* load the segment first to handle exceptions properly */
@@ -5497,7 +5503,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
@@ -5505,7 +5511,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (shift == 2) {
s->rip_offset = 1;
}
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else {
opreg = (modrm & 7) | REX_B(s);
@@ -5516,7 +5522,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_shift(s, op, ot, opreg, OR_ECX);
} else {
if (shift == 2) {
- shift = cpu_ldub_code(cpu_single_env, s->pc++);
+ shift = cpu_ldub_code(env, s->pc++);
}
gen_shifti(s, op, ot, opreg, shift);
}
@@ -5550,12 +5556,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
shift = 0;
do_shiftd:
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else {
opreg = rm;
@@ -5563,7 +5569,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_mov_TN_reg(ot, 1, reg);
if (shift) {
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
tcg_gen_movi_tl(cpu_T3, val);
} else {
tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
@@ -5580,13 +5586,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = modrm & 7;
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
if (mod != 3) {
/* memory op */
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
switch(op) {
case 0x00 ... 0x07: /* fxxxs */
case 0x10 ... 0x17: /* fixxxl */
@@ -6211,7 +6217,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
gen_op_movl_T0_im(val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
@@ -6231,7 +6237,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
gen_op_movl_T0_im(val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
@@ -6293,7 +6299,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/************************/
/* control */
case 0xc2: /* ret im */
- val = cpu_ldsw_code(cpu_single_env, s->pc);
+ val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
gen_pop_T0(s);
if (CODE64(s) && s->dflag)
@@ -6313,7 +6319,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_eob(s);
break;
case 0xca: /* lret im */
- val = cpu_ldsw_code(cpu_single_env, s->pc);
+ val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
do_lret:
if (s->pe && !s->vm86) {
@@ -6369,9 +6375,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0xe8: /* call im */
{
if (dflag)
- tval = (int32_t)insn_get(s, OT_LONG);
+ tval = (int32_t)insn_get(env, s, OT_LONG);
else
- tval = (int16_t)insn_get(s, OT_WORD);
+ tval = (int16_t)insn_get(env, s, OT_WORD);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (s->dflag == 0)
@@ -6390,8 +6396,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (CODE64(s))
goto illegal_op;
ot = dflag ? OT_LONG : OT_WORD;
- offset = insn_get(s, ot);
- selector = insn_get(s, OT_WORD);
+ offset = insn_get(env, s, ot);
+ selector = insn_get(env, s, OT_WORD);
gen_op_movl_T0_im(selector);
gen_op_movl_T1_imu(offset);
@@ -6399,9 +6405,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
goto do_lcall;
case 0xe9: /* jmp im */
if (dflag)
- tval = (int32_t)insn_get(s, OT_LONG);
+ tval = (int32_t)insn_get(env, s, OT_LONG);
else
- tval = (int16_t)insn_get(s, OT_WORD);
+ tval = (int16_t)insn_get(env, s, OT_WORD);
tval += s->pc - s->cs_base;
if (s->dflag == 0)
tval &= 0xffff;
@@ -6416,28 +6422,28 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (CODE64(s))
goto illegal_op;
ot = dflag ? OT_LONG : OT_WORD;
- offset = insn_get(s, ot);
- selector = insn_get(s, OT_WORD);
+ offset = insn_get(env, s, ot);
+ selector = insn_get(env, s, OT_WORD);
gen_op_movl_T0_im(selector);
gen_op_movl_T1_imu(offset);
}
goto do_ljmp;
case 0xeb: /* jmp Jb */
- tval = (int8_t)insn_get(s, OT_BYTE);
+ tval = (int8_t)insn_get(env, s, OT_BYTE);
tval += s->pc - s->cs_base;
if (s->dflag == 0)
tval &= 0xffff;
gen_jmp(s, tval);
break;
case 0x70 ... 0x7f: /* jcc Jb */
- tval = (int8_t)insn_get(s, OT_BYTE);
+ tval = (int8_t)insn_get(env, s, OT_BYTE);
goto do_jcc;
case 0x180 ... 0x18f: /* jcc Jv */
if (dflag) {
- tval = (int32_t)insn_get(s, OT_LONG);
+ tval = (int32_t)insn_get(env, s, OT_LONG);
} else {
- tval = (int16_t)insn_get(s, OT_WORD);
+ tval = (int16_t)insn_get(env, s, OT_WORD);
}
do_jcc:
next_eip = s->pc - s->cs_base;
@@ -6448,9 +6454,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0x190 ... 0x19f: /* setcc Gv */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
gen_setcc(s, b);
- gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
{
@@ -6458,12 +6464,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
TCGv t0;
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
t0 = tcg_temp_local_new();
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
} else {
rm = (modrm & 7) | REX_B(s);
@@ -6555,7 +6561,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
gen_pop_update(s);
s->cc_op = CC_OP_EFLAGS;
- /* abort translation because TF flag may change */
+ /* abort translation because TF/AC flag may change */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
@@ -6616,19 +6622,19 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/* bit operations */
case 0x1ba: /* bt/bts/btr/btc Gv, im */
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
op = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
s->rip_offset = 1;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
gen_op_mov_TN_reg(ot, 0, rm);
}
/* load shift */
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
gen_op_movl_T1_im(val);
if (op < 4)
goto illegal_op;
@@ -6647,13 +6653,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
op = 3;
do_btx:
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op_mov_TN_reg(OT_LONG, 1, reg);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
/* specific case: we need to add a displacement */
gen_exts(ot, cpu_T[1]);
tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
@@ -6708,9 +6714,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
TCGv t0;
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s,modrm, ot, OR_TMP0, 0);
+        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T[0]);
t0 = tcg_temp_local_new();
tcg_gen_mov_tl(t0, cpu_T[0]);
@@ -6780,7 +6786,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0xd4: /* aam */
if (CODE64(s))
goto illegal_op;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (val == 0) {
gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
} else {
@@ -6791,7 +6797,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0xd5: /* aad */
if (CODE64(s))
goto illegal_op;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
gen_helper_aad(cpu_env, tcg_const_i32(val));
s->cc_op = CC_OP_LOGICB;
break;
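
AAM and AAD take their base from an immediate byte, which is why the translator fetches one extra code byte here and why a zero immediate on AAM must raise #DE before the helper ever runs. Per the x86 manuals, the helpers' arithmetic amounts to (sketch):

    #include <stdint.h>

    static void aam(uint8_t *al, uint8_t *ah, uint8_t base) /* base != 0 */
    {
        *ah = *al / base;
        *al = *al % base;
    }

    static void aad(uint8_t *al, uint8_t *ah, uint8_t base)
    {
        *al = (uint8_t)(*al + *ah * base);
        *ah = 0;
    }
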
@@ -6825,7 +6831,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
break;
case 0xcd: /* int N */
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
@@ -6847,7 +6853,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_debug(s, pc_start - s->cs_base);
#else
/* start debug */
- tb_flush(cpu_single_env);
+ tb_flush(env);
cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
break;
@@ -6895,13 +6901,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (CODE64(s))
goto illegal_op;
ot = dflag ? OT_LONG : OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_op_mov_TN_reg(ot, 0, reg);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
if (ot == OT_WORD) {
@@ -6942,7 +6948,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
int l1, l2, l3;
- tval = (int8_t)insn_get(s, OT_BYTE);
+ tval = (int8_t)insn_get(env, s, OT_BYTE);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (s->dflag == 0)
@@ -7022,7 +7028,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0x134: /* sysenter */
         /* For Intel, SYSENTER is valid in 64-bit mode */
- if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
@@ -7035,7 +7041,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0x135: /* sysexit */
         /* For Intel, SYSEXIT is valid in 64-bit mode */
- if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
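
The SYSENTER/SYSEXIT vendor test now reads cpuid_vendor1 from the env being translated rather than from cpu_single_env, which matters once translation is no longer tied to a global CPU. CPUID vendor dwords pack four ASCII characters little-endian, so CPUID_VENDOR_INTEL_1 ("Genu" of "GenuineIntel") is 0x756e6547; a small sketch of that packing (assumes a little-endian host):

    #include <stdint.h>
    #include <string.h>

    /* illustrative helper, not from the tree */
    static uint32_t vendor_dword(const char s[4])
    {
        uint32_t v;
        memcpy(&v, s, 4);
        return v;    /* vendor_dword("Genu") == 0x756e6547 */
    }
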
@@ -7086,7 +7092,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x100:
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
@@ -7098,7 +7104,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_WORD;
if (mod == 3)
ot += s->dflag;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 2: /* lldt */
if (!s->pe || s->vm86)
@@ -7107,7 +7113,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
@@ -7121,7 +7127,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_WORD;
if (mod == 3)
ot += s->dflag;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 3: /* ltr */
if (!s->pe || s->vm86)
@@ -7130,7 +7136,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
@@ -7140,7 +7146,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 5: /* verw */
if (!s->pe || s->vm86)
goto illegal_op;
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
if (op == 4) {
@@ -7155,7 +7161,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x101:
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
rm = modrm & 7;
@@ -7164,7 +7170,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (mod == 3)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
gen_op_st_T0_A0(OT_WORD + s->mem_index);
gen_add_A0_im(s, 2);
@@ -7205,12 +7211,30 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
+ case 2: /* clac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
+ s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_clac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+ case 3: /* stac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
+ s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_stac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
default:
goto illegal_op;
}
} else { /* sidt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
gen_op_st_T0_A0(OT_WORD + s->mem_index);
gen_add_A0_im(s, 2);
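
The new clac/stac cases implement the SMAP instruction pair: both are CPL-0 only and gated on the CPUID.(EAX=7,ECX=0):EBX SMAP bit, matching the checks above, and both end the translation block because EFLAGS.AC now affects how subsequent memory accesses must be translated (the TF/AC comment updated earlier in this patch reflects the same concern). The helper bodies are not part of this hunk; a plausible sketch, assuming the usual eflags bookkeeping:

    /* sketch only -- the real helpers live elsewhere in the patch
     * series; AC_MASK is bit 18 of EFLAGS */
    static void helper_clac_sketch(CPUX86State *env)
    {
        env->eflags &= ~AC_MASK;
    }

    static void helper_stac_sketch(CPUX86State *env)
    {
        env->eflags |= AC_MASK;
    }
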
@@ -7312,7 +7336,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
gen_svm_check_intercept(s, pc_start,
op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T1_A0(OT_WORD + s->mem_index);
gen_add_A0_im(s, 2);
gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
@@ -7334,14 +7358,14 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
#else
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
#endif
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
break;
case 6: /* lmsw */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_helper_lmsw(cpu_env, cpu_T[0]);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
@@ -7355,7 +7379,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_helper_invlpg(cpu_env, cpu_A0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
@@ -7422,7 +7446,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/* d_ot is the size of destination */
d_ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
@@ -7434,7 +7458,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
gen_op_mov_reg_T0(d_ot, reg);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (d_ot == OT_QUAD) {
gen_op_lds_T0_A0(OT_LONG + s->mem_index);
} else {
@@ -7454,12 +7478,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
t1 = tcg_temp_local_new();
t2 = tcg_temp_local_new();
ot = OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = modrm & 7;
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
a0 = tcg_temp_local_new();
tcg_gen_mov_tl(a0, cpu_A0);
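
This is the ARPL path (opcode 0x63 outside 64-bit mode): t0/t1 hold the destination and source selectors and, for memory forms, a0 preserves the address so the adjusted selector can be written back. ARPL's effect per the manuals, as a standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Raise dst's RPL (low two selector bits) to at least src's;
     * the return value is what ends up in ZF. */
    static bool arpl(uint16_t *dst, uint16_t src)
    {
        if ((*dst & 3) < (src & 3)) {
            *dst = (uint16_t)((*dst & ~3) | (src & 3));
            return true;   /* ZF = 1: RPL was adjusted */
        }
        return false;      /* ZF = 0 */
    }
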
@@ -7502,9 +7526,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (!s->pe || s->vm86)
goto illegal_op;
ot = dflag ? OT_LONG : OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
t0 = tcg_temp_local_new();
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -7523,7 +7547,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x118:
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
@@ -7533,26 +7557,29 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 3: /* prefetchnt0 */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
/* nothing more to do */
break;
default: /* nop (multi byte) */
- gen_nop_modrm(s, modrm);
+ gen_nop_modrm(env, s, modrm);
break;
}
break;
case 0x119 ... 0x11f: /* nop (multi byte) */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
- gen_nop_modrm(s, modrm);
+ modrm = cpu_ldub_code(env, s->pc++);
+ gen_nop_modrm(env, s, modrm);
break;
case 0x120: /* mov reg, crN */
case 0x122: /* mov crN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
- if ((modrm & 0xc0) != 0xc0)
- goto illegal_op;
+ modrm = cpu_ldub_code(env, s->pc++);
+ /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+ * AMD documentation (24594.pdf) and testing of
+             * Intel 386 and 486 processors all show that the mod bits
+ * are assumed to be 1's, regardless of actual values.
+ */
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
@@ -7593,9 +7620,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
- if ((modrm & 0xc0) != 0xc0)
- goto illegal_op;
+ modrm = cpu_ldub_code(env, s->pc++);
+ /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+ * AMD documentation (24594.pdf) and testing of
+             * Intel 386 and 486 processors all show that the mod bits
+ * are assumed to be 1's, regardless of actual values.
+ */
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
@@ -7634,16 +7664,16 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (!(s->cpuid_features & CPUID_SSE2))
goto illegal_op;
ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
- gen_ldst_modrm(s, modrm, ot, reg, 1);
+ gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0x1ae:
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
@@ -7655,7 +7685,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
@@ -7669,7 +7699,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
@@ -7685,7 +7715,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (op == 2) {
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
@@ -7710,7 +7740,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/* clflush */
if (!(s->cpuid_features & CPUID_CLFLUSH))
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
}
break;
default:
@@ -7718,11 +7748,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
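
Case 0x1ae is the 0F AE opcode group, where the reg field of ModRM selects the sub-operation and the fence encodings additionally require mod == 3. For orientation, the decode per the x86 manuals (this table is a summary, not code from the tree):

    /* 0F AE /reg ("group 15") */
    enum group15_op {
        G15_FXSAVE  = 0,   /* memory form */
        G15_FXRSTOR = 1,   /* memory form */
        G15_LDMXCSR = 2,   /* memory form */
        G15_STMXCSR = 3,   /* memory form */
        G15_LFENCE  = 5,   /* register form (mod == 3) */
        G15_MFENCE  = 6,   /* register form (mod == 3) */
        G15_CLFLUSH = 7,   /* sfence when mod == 3 */
    };
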
case 0x10d: /* 3DNow! prefetch(w) */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
/* ignore for now */
break;
case 0x1aa: /* rsm */
@@ -7741,8 +7771,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
goto illegal_op;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
- reg = ((modrm >> 3) & 7);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
if (s->prefix & PREFIX_DATA)
ot = OT_WORD;
@@ -7751,7 +7781,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = OT_QUAD;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
gen_op_mov_reg_T0(ot, reg);
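
Beyond threading env through, the POPCNT hunk fixes a decode bug: the destination register previously dropped rex_r, so 64-bit code counting into r8-r15 would write the result to the wrong register. The operation the helper performs is an ordinary population count; as a sketch (using a compiler builtin purely for brevity):

    #include <stdint.h>

    static uint64_t popcnt_sketch(uint64_t val)
    {
        return (uint64_t)__builtin_popcountll(val); /* GCC/Clang builtin */
    }
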
@@ -7768,7 +7798,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x1c2:
case 0x1c4 ... 0x1c6:
case 0x1d0 ... 0x1fe:
- gen_sse(s, b, pc_start, rex_r);
+ gen_sse(env, s, b, pc_start, rex_r);
break;
default:
goto illegal_op;
@@ -7894,15 +7924,13 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
/* select memory access functions */
dc->mem_index = 0;
if (flags & HF_SOFTMMU_MASK) {
- if (dc->cpl == 3)
- dc->mem_index = 2 * 4;
- else
- dc->mem_index = 1 * 4;
+ dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
}
dc->cpuid_features = env->cpuid_features;
dc->cpuid_ext_features = env->cpuid_ext_features;
dc->cpuid_ext2_features = env->cpuid_ext2_features;
dc->cpuid_ext3_features = env->cpuid_ext3_features;
+ dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
#ifdef TARGET_X86_64
dc->lma = (flags >> HF_LMA_SHIFT) & 1;
dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
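
The mem_index change replaces the open-coded CPL test with a value derived from the target's MMU-index mapping. Assuming the two x86 MMU modes of this era (0 = kernel for CPL 0-2, 1 = user for CPL 3), the two forms are equivalent:

    /* assumed shape of cpu_mmu_index() for x86 at this point */
    static int cpu_mmu_index_sketch(const CPUX86State *env)
    {
        return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
    }

    /* (index + 1) << 2 gives 4 and 8 -- the same 1 * 4 and 2 * 4
     * values the removed if/else produced. */
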
@@ -7934,7 +7962,7 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
cpu_ptr0 = tcg_temp_new_ptr();
cpu_ptr1 = tcg_temp_new_ptr();
- gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
@@ -7956,21 +7984,21 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
}
}
if (search_pc) {
- j = gen_opc_ptr - gen_opc_buf;
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j)
- gen_opc_instr_start[lj++] = 0;
+ tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
- gen_opc_pc[lj] = pc_ptr;
+ tcg_ctx.gen_opc_pc[lj] = pc_ptr;
gen_opc_cc_op[lj] = dc->cc_op;
- gen_opc_instr_start[lj] = 1;
- gen_opc_icount[lj] = num_insns;
+ tcg_ctx.gen_opc_instr_start[lj] = 1;
+ tcg_ctx.gen_opc_icount[lj] = num_insns;
}
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
- pc_ptr = disas_insn(dc, pc_ptr);
+ pc_ptr = disas_insn(env, dc, pc_ptr);
num_insns++;
/* stop translation if indicated */
if (dc->is_jmp)
@@ -7987,7 +8015,7 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
break;
}
/* if too long translation, stop generation too */
- if (gen_opc_ptr >= gen_opc_end ||
+ if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
(pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
num_insns >= max_insns) {
gen_jmp_im(pc_ptr - dc->cs_base);
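
Translation of a block now checks the op buffer through tcg_ctx, but the three stop conditions are unchanged: the opcode buffer is nearly full, the guest code nears a page boundary (a TB must not span guest pages, and the 32-byte margin comfortably covers one maximal 15-byte x86 instruction), or the instruction budget is spent. As a predicate (illustrative):

    static int tb_should_stop(int op_buf_full, unsigned long bytes_done,
                              unsigned long page_size,
                              int num_insns, int max_insns)
    {
        return op_buf_full
            || bytes_done >= page_size - 32
            || num_insns >= max_insns;
    }
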
@@ -8003,13 +8031,13 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
if (tb->cflags & CF_LAST_IO)
gen_io_end();
gen_icount_end(tb, num_insns);
- *gen_opc_ptr = INDEX_op_end;
+ *tcg_ctx.gen_opc_ptr = INDEX_op_end;
     /* make sure to fill the last values */
if (search_pc) {
- j = gen_opc_ptr - gen_opc_buf;
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
lj++;
while (lj <= j)
- gen_opc_instr_start[lj++] = 0;
+ tcg_ctx.gen_opc_instr_start[lj++] = 0;
}
#ifdef DEBUG_DISAS
@@ -8023,7 +8051,7 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
else
#endif
disas_flags = !dc->code32;
- log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
+ log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
qemu_log("\n");
}
#endif
@@ -8052,16 +8080,17 @@ void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
int i;
qemu_log("RESTORE:\n");
for(i = 0;i <= pc_pos; i++) {
- if (gen_opc_instr_start[i]) {
- qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
+ if (tcg_ctx.gen_opc_instr_start[i]) {
+ qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
+ tcg_ctx.gen_opc_pc[i]);
}
}
qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
- pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
+ pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
(uint32_t)tb->cs_base);
}
#endif
- env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
+ env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
cc_op = gen_opc_cc_op[pc_pos];
if (cc_op != CC_OP_DYNAMIC)
env->cc_op = cc_op;
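
restore_state_to_opc() consumes the search_pc bookkeeping recorded above: tcg_ctx.gen_opc_pc[] holds one guest PC per TCG op, stored as a linear address, so the architectural EIP is recovered by subtracting the TB's CS base, and cc_op is written back only when it was statically known at that point. A minimal sketch of the recovery:

    #include <stdint.h>

    /* recorded PCs are linear (cs_base + eip) */
    static uint64_t recover_eip(uint64_t recorded_pc, uint64_t cs_base)
    {
        return recorded_pc - cs_base;
    }
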