path: root/target-i386/kvm.c
Diffstat (limited to 'target-i386/kvm.c')
-rw-r--r--  target-i386/kvm.c  238
1 file changed, 25 insertions(+), 213 deletions(-)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index d9a6fc501b..4974fa42d2 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -28,7 +28,6 @@
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"
-#include "kvm_x86.h"
#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
@@ -193,164 +192,23 @@ static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
}
-static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
+static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
{
- return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
-}
-
-static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
-{
- struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
- int r;
-
- kmsrs->nmsrs = n;
- memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
- r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
- memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
- free(kmsrs);
- return r;
-}
-
-/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
-static int kvm_mce_in_progress(CPUState *env)
-{
- struct kvm_msr_entry msr_mcg_status = {
- .index = MSR_MCG_STATUS,
- };
- int r;
-
- r = kvm_get_msr(env, &msr_mcg_status, 1);
- if (r == -1 || r == 0) {
- fprintf(stderr, "Failed to get MCE status\n");
- return 0;
- }
- return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
-}
-
-struct kvm_x86_mce_data
-{
- CPUState *env;
- struct kvm_x86_mce *mce;
- int abort_on_error;
-};
-
-static void kvm_do_inject_x86_mce(void *_data)
-{
- struct kvm_x86_mce_data *data = _data;
- int r;
-
- /* If there is an MCE exception being processed, ignore this SRAO MCE */
- if ((data->env->mcg_cap & MCG_SER_P) &&
- !(data->mce->status & MCI_STATUS_AR)) {
- if (kvm_mce_in_progress(data->env)) {
- return;
- }
- }
-
- r = kvm_set_mce(data->env, data->mce);
- if (r < 0) {
- perror("kvm_set_mce FAILED");
- if (data->abort_on_error) {
- abort();
- }
- }
-}
-
-static void kvm_inject_x86_mce_on(CPUState *env, struct kvm_x86_mce *mce,
- int flag)
-{
- struct kvm_x86_mce_data data = {
- .env = env,
- .mce = mce,
- .abort_on_error = (flag & ABORT_ON_ERROR),
- };
-
- if (!env->mcg_cap) {
- fprintf(stderr, "MCE support is not enabled!\n");
- return;
- }
-
- run_on_cpu(env, kvm_do_inject_x86_mce, &data);
-}
+ uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
+ MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
+ uint64_t mcg_status = MCG_STATUS_MCIP;
-static void kvm_mce_broadcast_rest(CPUState *env)
-{
- struct kvm_x86_mce mce = {
- .bank = 1,
- .status = MCI_STATUS_VAL | MCI_STATUS_UC,
- .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
- .addr = 0,
- .misc = 0,
- };
- CPUState *cenv;
-
- /* Broadcast MCA signal for processor version 06H_EH and above */
- if (cpu_x86_support_mca_broadcast(env)) {
- for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
- if (cenv == env) {
- continue;
- }
- kvm_inject_x86_mce_on(cenv, &mce, ABORT_ON_ERROR);
- }
- }
-}
-
-static void kvm_mce_inj_srar_dataload(CPUState *env, target_phys_addr_t paddr)
-{
- struct kvm_x86_mce mce = {
- .bank = 9,
- .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
- | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
- | MCI_STATUS_AR | 0x134,
- .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV,
- .addr = paddr,
- .misc = (MCM_ADDR_PHYS << 6) | 0xc,
- };
- int r;
-
- r = kvm_set_mce(env, &mce);
- if (r < 0) {
- fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
- abort();
- }
- kvm_mce_broadcast_rest(env);
-}
-
-static void kvm_mce_inj_srao_memscrub(CPUState *env, target_phys_addr_t paddr)
-{
- struct kvm_x86_mce mce = {
- .bank = 9,
- .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
- | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
- | 0xc0,
- .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
- .addr = paddr,
- .misc = (MCM_ADDR_PHYS << 6) | 0xc,
- };
- int r;
-
- r = kvm_set_mce(env, &mce);
- if (r < 0) {
- fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
- abort();
+ if (code == BUS_MCEERR_AR) {
+ status |= MCI_STATUS_AR | 0x134;
+ mcg_status |= MCG_STATUS_EIPV;
+ } else {
+ status |= 0xc0;
+ mcg_status |= MCG_STATUS_RIPV;
}
- kvm_mce_broadcast_rest(env);
-}
-
-static void kvm_mce_inj_srao_memscrub2(CPUState *env, target_phys_addr_t paddr)
-{
- struct kvm_x86_mce mce = {
- .bank = 9,
- .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
- | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
- | 0xc0,
- .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
- .addr = paddr,
- .misc = (MCM_ADDR_PHYS << 6) | 0xc,
- };
-
- kvm_inject_x86_mce_on(env, &mce, ABORT_ON_ERROR);
- kvm_mce_broadcast_rest(env);
+ cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
+ (MCM_ADDR_PHYS << 6) | 0xc,
+ cpu_x86_support_mca_broadcast(env) ?
+ MCE_INJECT_BROADCAST : 0);
}
#endif /* KVM_CAP_MCE */
@@ -363,16 +221,14 @@ static void hardware_memory_error(void)
int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#ifdef KVM_CAP_MCE
- void *vaddr;
ram_addr_t ram_addr;
target_phys_addr_t paddr;
if ((env->mcg_cap & MCG_SER_P) && addr
- && (code == BUS_MCEERR_AR
- || code == BUS_MCEERR_AO)) {
- vaddr = (void *)addr;
- if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
- !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, &paddr)) {
+ && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
+ if (qemu_ram_addr_from_host(addr, &ram_addr) ||
+ !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
+ &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!\n");
/* Hope we are lucky for AO MCE */
@@ -382,20 +238,7 @@ int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
hardware_memory_error();
}
}
-
- if (code == BUS_MCEERR_AR) {
- /* Fake an Intel architectural Data Load SRAR UCR */
- kvm_mce_inj_srar_dataload(env, paddr);
- } else {
- /*
- * If there is an MCE excpetion being processed, ignore
- * this SRAO MCE
- */
- if (!kvm_mce_in_progress(env)) {
- /* Fake an Intel architectural Memory scrubbing UCR */
- kvm_mce_inj_srao_memscrub(env, paddr);
- }
- }
+ kvm_mce_inject(env, paddr, code);
} else
#endif /* KVM_CAP_MCE */
{
@@ -414,20 +257,18 @@ int kvm_arch_on_sigbus(int code, void *addr)
{
#ifdef KVM_CAP_MCE
if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
- void *vaddr;
ram_addr_t ram_addr;
target_phys_addr_t paddr;
/* Hope we are lucky for AO MCE */
- vaddr = addr;
- if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
+ if (qemu_ram_addr_from_host(addr, &ram_addr) ||
!kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
&paddr)) {
fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!: %p\n", addr);
return 0;
}
- kvm_mce_inj_srao_memscrub2(first_cpu, paddr);
+ kvm_mce_inject(first_cpu, paddr, code);
} else
#endif /* KVM_CAP_MCE */
{
@@ -442,31 +283,6 @@ int kvm_arch_on_sigbus(int code, void *addr)
return 0;
}
-void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
- uint64_t mcg_status, uint64_t addr, uint64_t misc,
- int flag)
-{
-#ifdef KVM_CAP_MCE
- struct kvm_x86_mce mce = {
- .bank = bank,
- .status = status,
- .mcg_status = mcg_status,
- .addr = addr,
- .misc = misc,
- };
-
- if (flag & MCE_BROADCAST) {
- kvm_mce_broadcast_rest(cenv);
- }
-
- kvm_inject_x86_mce_on(cenv, &mce, flag);
-#else /* !KVM_CAP_MCE*/
- if (flag & ABORT_ON_ERROR) {
- abort();
- }
-#endif /* !KVM_CAP_MCE*/
-}
-
static int kvm_inject_mce_oldstyle(CPUState *env)
{
#ifdef KVM_CAP_MCE
@@ -1053,14 +869,10 @@ static int kvm_put_msrs(CPUState *env, int level)
if (env->mcg_cap) {
int i;
- if (level == KVM_PUT_RESET_STATE) {
- kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
- } else if (level == KVM_PUT_FULL_STATE) {
- kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
- kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
- for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
- kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
- }
+ kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
+ kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
+ for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
+ kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
}
}
#endif
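
The removed kvm_mce_broadcast_rest() is not reimplemented in this file; the commit relies on the MCE_INJECT_BROADCAST flag passed to cpu_x86_inject_mce() (implemented in target-i386/helper.c) to cover the same machine-wide signalling. As a rough illustration only, the old broadcast behaviour could be expressed through the new call like the sketch below; the helper name mce_broadcast_sketch and the final flag argument of 0 (meaning "no further broadcast") are illustrative assumptions, not part of this commit.

/*
 * Sketch, not part of the commit: the behaviour kvm_mce_broadcast_rest()
 * used to provide, restated in terms of the consolidated injection call.
 * Reconstructed from the removed code above; the real broadcast handling
 * lives inside cpu_x86_inject_mce() and may differ in detail.
 */
static void mce_broadcast_sketch(CPUState *env)
{
    CPUState *cenv;

    /* Broadcast only on CPU models that signal MCA machine-wide. */
    if (!cpu_x86_support_mca_broadcast(env)) {
        return;
    }

    for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
        if (cenv == env) {
            continue; /* the originating VCPU already got the full record */
        }
        /* Other VCPUs see a dummy uncorrected error in bank 1. */
        cpu_x86_inject_mce(NULL, cenv, 1,
                           MCI_STATUS_VAL | MCI_STATUS_UC,
                           MCG_STATUS_MCIP | MCG_STATUS_RIPV,
                           0, 0, 0);
    }
}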