author     Peter Maydell <peter.maydell@linaro.org>  2014-08-28 16:07:23 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2014-08-28 16:07:23 +0100
commit     38a01e55d268aeba68c84eea425252e7f810feaf (patch)
tree       6a19c9a35558f6e9d64e2c1a5fc21432f683c830 /target-i386
parent     795c050e379ab21b75fc2bbb30699fe8752be157 (diff)
parent     172dbc52b39c86d7569af5251cca78cb2c74c912 (diff)
Merge remote-tracking branch 'remotes/kvm/tags/for-upstream' into staging
Mostly bugfixes + Alexey's interface-based implementation of the NMI
monitor command.

# gpg: Signature made Thu 28 Aug 2014 15:07:22 BST using RSA key ID 9B4D86F2
# gpg: Good signature from "Paolo Bonzini <pbonzini@redhat.com>"
# gpg:                 aka "Paolo Bonzini <bonzini@gnu.org>"

* remotes/kvm/tags/for-upstream:
  mc146818rtc: reinitialize irq_reinject_on_ack_count on reset
  target-i386: Add "tsc_adjust" CPU feature name
  target-i386: Add "mpx" CPU feature name
  vl: process -object after other backend options
  checkpatch.pl: adjust typedef definition to QEMU coding style
  x86: Clear MTRRs on vCPU reset
  x86: kvm: Add MTRR support for kvm_get|put_msrs()
  x86: Use common variable range MTRR counts
  target-i386: Don't forbid NX bit on PAE PDEs and PTEs
  spapr: Add support for new NMI interface
  s390x: Migrate to new NMI interface
  s390x: Convert QEMUMachine to MachineClass
  cpus: Define callback for QEMU "nmi" command
  kvm: run cpu state synchronization on target vcpu thread

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu.c       14
-rw-r--r--  target-i386/cpu.h        4
-rw-r--r--  target-i386/helper.c     4
-rw-r--r--  target-i386/kvm.c      101
-rw-r--r--  target-i386/machine.c    2
5 files changed, 117 insertions, 8 deletions
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 217500c735..fa811a02d1 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -257,8 +257,8 @@ static const char *svm_feature_name[] = {
};
static const char *cpuid_7_0_ebx_feature_name[] = {
- "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
- "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
+ "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
NULL, NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
@@ -2588,6 +2588,16 @@ static void x86_cpu_reset(CPUState *s)
env->xcr0 = 1;
+ /*
+ * SDM 11.11.5 requires:
+ * - IA32_MTRR_DEF_TYPE MSR.E = 0
+ * - IA32_MTRR_PHYSMASKn.V = 0
+ * All other bits are undefined. For simplification, zero it all.
+ */
+ env->mtrr_deftype = 0;
+ memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
+ memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+
#if !defined(CONFIG_USER_ONLY)
/* We hard-wire the BSP to the first CPU. */
if (s->cpu_index == 0) {
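The x86_cpu_reset() hunk above clears the MTRR state that SDM 11.11.5 leaves
architecturally undefined apart from the enable bits. As a minimal sketch,
assuming the standard IA32_MTRR_DEF_TYPE layout (bit 11 = E, bit 10 = FE,
bits 2:0 = default memory type; the names below are illustrative, not QEMU
definitions), the bits the comment refers to are:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative IA32_MTRR_DEF_TYPE bit layout per SDM 11.11.2.1. */
#define MTRR_DEF_TYPE_E        (1ULL << 11)  /* MTRRs globally enabled      */
#define MTRR_DEF_TYPE_FE       (1ULL << 10)  /* fixed-range MTRRs enabled   */
#define MTRR_DEF_TYPE_TYPE(v)  ((v) & 0x7)   /* default memory type (3 bits) */

static bool mtrrs_enabled(uint64_t deftype)
{
    /* After the reset above deftype == 0, so E is clear and the MTRRs
     * start out disabled, as the SDM requires. */
    return (deftype & MTRR_DEF_TYPE_E) != 0;
}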
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index e634d83e86..3460b12139 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -337,6 +337,8 @@
#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
+#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
+
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
@@ -930,7 +932,7 @@ typedef struct CPUX86State {
/* MTRRs */
uint64_t mtrr_fixed[11];
uint64_t mtrr_deftype;
- MTRRVar mtrr_var[8];
+ MTRRVar mtrr_var[MSR_MTRRcap_VCNT];
/* For KVM */
uint32_t mp_state;
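The new MSR_MTRRphysIndex() macro inverts MSR_MTRRphysBase()/MSR_MTRRphysMask():
masking off bit 0 and rescaling recovers the variable-range register number
whether the address names the base or the mask MSR. A quick self-contained
check of that round trip (throwaway test harness, not part of the patch):

#include <assert.h>

/* Macros as defined in the cpu.h hunk above. */
#define MSR_MTRRphysBase(reg)   (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)   (0x200 + 2 * (reg) + 1)
#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)

int main(void)
{
    /* MSR_MTRRphysBase(3) == 0x206 and MSR_MTRRphysMask(3) == 0x207;
     * both map back to variable-range MTRR index 3. */
    assert(MSR_MTRRphysIndex(MSR_MTRRphysBase(3)) == 3);
    assert(MSR_MTRRphysIndex(MSR_MTRRphysMask(3)) == 3);
    return 0;
}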
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 47b982b437..30cb0d0143 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -615,8 +615,8 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
- rsvd_mask |= PG_HI_USER_MASK | PG_NX_MASK;
- if (pdpe & rsvd_mask) {
+ rsvd_mask |= PG_HI_USER_MASK;
+ if (pdpe & (rsvd_mask | PG_NX_MASK)) {
goto do_fault_rsvd;
}
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
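The helper.c change narrows the reserved-bit check: bit 63 (PG_NX_MASK) is
reserved in a PAE PDPTE, but in PDEs and PTEs it is the legitimate
execute-disable bit, so it must not remain in rsvd_mask for the lower levels.
A minimal sketch of the distinction (standalone helpers, not QEMU's code):

#include <stdbool.h>
#include <stdint.h>

#define PG_NX_MASK (1ULL << 63)

/* In PAE paging the PDPTE has bit 63 reserved, so NX is folded into the
 * check only at this level. */
static bool pdpte_has_reserved_bits(uint64_t pdpte, uint64_t rsvd_mask)
{
    return (pdpte & (rsvd_mask | PG_NX_MASK)) != 0;
}

/* PDEs and PTEs may legitimately set bit 63 when EFER.NXE is enabled,
 * which is why NX is deliberately left out of rsvd_mask here. */
static bool pde_pte_has_reserved_bits(uint64_t entry, uint64_t rsvd_mask)
{
    return (entry & rsvd_mask) != 0;
}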
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 097fe1188d..ddedc735ff 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -79,6 +79,7 @@ static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
+static bool has_msr_mtrr;
static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;
@@ -739,6 +740,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
}
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ has_msr_mtrr = true;
+ }
+
return 0;
}
@@ -1183,7 +1188,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
CPUX86State *env = &cpu->env;
struct {
struct kvm_msrs info;
- struct kvm_msr_entry entries[100];
+ struct kvm_msr_entry entries[150];
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
int n = 0, i;
@@ -1278,6 +1283,37 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
env->msr_hv_tsc);
}
+ if (has_msr_mtrr) {
+ kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+ for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRphysBase(i), env->mtrr_var[i].base);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
+ }
+ }
/* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
* kvm_put_msr_feature_control. */
@@ -1484,7 +1520,7 @@ static int kvm_get_msrs(X86CPU *cpu)
CPUX86State *env = &cpu->env;
struct {
struct kvm_msrs info;
- struct kvm_msr_entry entries[100];
+ struct kvm_msr_entry entries[150];
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
int ret, i, n;
@@ -1572,6 +1608,24 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_hv_tsc) {
msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
}
+ if (has_msr_mtrr) {
+ msrs[n++].index = MSR_MTRRdefType;
+ msrs[n++].index = MSR_MTRRfix64K_00000;
+ msrs[n++].index = MSR_MTRRfix16K_80000;
+ msrs[n++].index = MSR_MTRRfix16K_A0000;
+ msrs[n++].index = MSR_MTRRfix4K_C0000;
+ msrs[n++].index = MSR_MTRRfix4K_C8000;
+ msrs[n++].index = MSR_MTRRfix4K_D0000;
+ msrs[n++].index = MSR_MTRRfix4K_D8000;
+ msrs[n++].index = MSR_MTRRfix4K_E0000;
+ msrs[n++].index = MSR_MTRRfix4K_E8000;
+ msrs[n++].index = MSR_MTRRfix4K_F0000;
+ msrs[n++].index = MSR_MTRRfix4K_F8000;
+ for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+ msrs[n++].index = MSR_MTRRphysBase(i);
+ msrs[n++].index = MSR_MTRRphysMask(i);
+ }
+ }
msr_data.info.nmsrs = n;
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
@@ -1692,6 +1746,49 @@ static int kvm_get_msrs(X86CPU *cpu)
case HV_X64_MSR_REFERENCE_TSC:
env->msr_hv_tsc = msrs[i].data;
break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = msrs[i].data;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[0] = msrs[i].data;
+ break;
+ case MSR_MTRRfix16K_80000:
+ env->mtrr_fixed[1] = msrs[i].data;
+ break;
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[2] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ env->mtrr_fixed[3] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_C8000:
+ env->mtrr_fixed[4] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_D0000:
+ env->mtrr_fixed[5] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_D8000:
+ env->mtrr_fixed[6] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_E0000:
+ env->mtrr_fixed[7] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_E8000:
+ env->mtrr_fixed[8] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_F0000:
+ env->mtrr_fixed[9] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[10] = msrs[i].data;
+ break;
+ case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
+ if (index & 1) {
+ env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
+ } else {
+ env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
+ }
+ break;
}
}
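The entries[] arrays in kvm_put_msrs() and kvm_get_msrs() grow from 100 to 150
to make room for the MTRR block. With MSR_MTRRcap_VCNT equal to 8 (the value
implied by the mtrr_var[8] -> mtrr_var[MSR_MTRRcap_VCNT] change above), the
block adds 1 defType + 11 fixed-range + 2 * 8 variable-range = 28 MSR slots per
direction; a throwaway check of that arithmetic (standalone, not part of the
patch):

#include <stdio.h>

#define MSR_MTRRcap_VCNT 8   /* variable-range MTRR count assumed above */

int main(void)
{
    int mtrr_slots = 1                      /* MSR_MTRRdefType            */
                   + 11                     /* fixed-range MTRR MSRs      */
                   + 2 * MSR_MTRRcap_VCNT;  /* base + mask per var. range */
    printf("MTRR MSR slots: %d\n", mtrr_slots); /* prints 28 */
    return 0;
}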
diff --git a/target-i386/machine.c b/target-i386/machine.c
index 16d2f6a809..fb890654b1 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -677,7 +677,7 @@ VMStateDescription vmstate_x86_cpu = {
/* MTRRs */
VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
- VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, 8, 8),
+ VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
/* KVM-related states */
VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),