Diffstat (limited to 'target-s390x')
-rw-r--r--  target-s390x/cpu.h |   3
-rw-r--r--  target-s390x/kvm.c | 175
2 files changed, 100 insertions, 78 deletions
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index f332d41b94..41903a93fb 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -126,6 +126,9 @@ typedef struct CPUS390XState {
     uint64_t pfault_compare;
     uint64_t pfault_select;
 
+    uint64_t gbea;
+    uint64_t pp;
+
     CPU_COMMON
 
     /* reset does memset(0) up to here */
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 56b9af7505..a30d1bc060 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -36,6 +36,7 @@
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
+#include "trace.h"
/* #define DEBUG_KVM */
@@ -128,14 +129,42 @@ void kvm_arch_reset_vcpu(CPUState *cpu)
     }
 }
 
+static int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
+{
+    struct kvm_one_reg reg;
+    int r;
+
+    reg.id = id;
+    reg.addr = (uint64_t) source;
+    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+    if (r) {
+        trace_kvm_failed_reg_set(id, strerror(errno));
+    }
+    return r;
+}
+
+static int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
+{
+    struct kvm_one_reg reg;
+    int r;
+
+    reg.id = id;
+    reg.addr = (uint64_t) target;
+    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    if (r) {
+        trace_kvm_failed_reg_get(id, strerror(errno));
+    }
+    return r;
+}
+
+
 int kvm_arch_put_registers(CPUState *cs, int level)
 {
     S390CPU *cpu = S390_CPU(cs);
     CPUS390XState *env = &cpu->env;
-    struct kvm_one_reg reg;
     struct kvm_sregs sregs;
     struct kvm_regs regs;
-    int ret;
+    int r;
     int i;
 
     /* always save the PSW and the GPRS*/
@@ -151,9 +180,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         for (i = 0; i < 16; i++) {
             regs.gprs[i] = env->regs[i];
         }
-        ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
-        if (ret < 0) {
-            return ret;
+        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+        if (r < 0) {
+            return r;
         }
     }
 
@@ -162,47 +191,29 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         return 0;
     }
 
-    reg.id = KVM_REG_S390_CPU_TIMER;
-    reg.addr = (__u64)&(env->cputm);
-    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
-    if (ret < 0) {
-        return ret;
-    }
-
-    reg.id = KVM_REG_S390_CLOCK_COMP;
-    reg.addr = (__u64)&(env->ckc);
-    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
-    if (ret < 0) {
-        return ret;
-    }
-
-    reg.id = KVM_REG_S390_TODPR;
-    reg.addr = (__u64)&(env->todpr);
-    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
-    if (ret < 0) {
-        return ret;
-    }
+    /*
+     * These ONE_REGS are not protected by a capability. As they are only
+     * necessary for migration we just trace a possible error, but don't
+     * return with an error return code.
+     */
+    kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+    kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+    kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+    kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+    kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
 
     if (cap_async_pf) {
-        reg.id = KVM_REG_S390_PFTOKEN;
-        reg.addr = (__u64)&(env->pfault_token);
-        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
-        if (ret < 0) {
-            return ret;
+        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
+        if (r < 0) {
+            return r;
         }
-
-        reg.id = KVM_REG_S390_PFCOMPARE;
-        reg.addr = (__u64)&(env->pfault_compare);
-        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
-        if (ret < 0) {
-            return ret;
+        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
+        if (r < 0) {
+            return r;
         }
-
-        reg.id = KVM_REG_S390_PFSELECT;
-        reg.addr = (__u64)&(env->pfault_select);
-        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
-        if (ret < 0) {
-            return ret;
+        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
+        if (r < 0) {
+            return r;
         }
     }
 
@@ -220,9 +231,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
             sregs.acrs[i] = env->aregs[i];
             sregs.crs[i] = env->cregs[i];
         }
-        ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
-        if (ret < 0) {
-            return ret;
+        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
+        if (r < 0) {
+            return r;
         }
     }
 
@@ -240,7 +251,6 @@ int kvm_arch_get_registers(CPUState *cs)
 {
     S390CPU *cpu = S390_CPU(cs);
     CPUS390XState *env = &cpu->env;
-    struct kvm_one_reg reg;
     struct kvm_sregs sregs;
     struct kvm_regs regs;
     int i, r;
@@ -288,46 +298,27 @@ int kvm_arch_get_registers(CPUState *cs)
         env->psa = cs->kvm_run->s.regs.prefix;
     }
 
-    /* One Regs */
-    reg.id = KVM_REG_S390_CPU_TIMER;
-    reg.addr = (__u64)&(env->cputm);
-    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
-    if (r < 0) {
-        return r;
-    }
-
-    reg.id = KVM_REG_S390_CLOCK_COMP;
-    reg.addr = (__u64)&(env->ckc);
-    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
-    if (r < 0) {
-        return r;
-    }
-
-    reg.id = KVM_REG_S390_TODPR;
-    reg.addr = (__u64)&(env->todpr);
-    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
-    if (r < 0) {
-        return r;
-    }
+    /*
+     * These ONE_REGS are not protected by a capability. As they are only
+     * necessary for migration we just trace a possible error, but don't
+     * return with an error return code.
+     */
+    kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+    kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+    kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+    kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+    kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
 
     if (cap_async_pf) {
-        reg.id = KVM_REG_S390_PFTOKEN;
-        reg.addr = (__u64)&(env->pfault_token);
-        r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
         if (r < 0) {
             return r;
         }
-
-        reg.id = KVM_REG_S390_PFCOMPARE;
-        reg.addr = (__u64)&(env->pfault_compare);
-        r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
         if (r < 0) {
             return r;
         }
-
-        reg.id = KVM_REG_S390_PFSELECT;
-        reg.addr = (__u64)&(env->pfault_select);
-        r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
         if (r < 0) {
             return r;
         }
@@ -383,6 +374,26 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
     return 0;
 }
 
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    return -ENOSYS;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    return -ENOSYS;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+}
+
 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 {
 }
@@ -844,6 +855,11 @@ static int handle_tsch(S390CPU *cpu)
     return ret;
 }
 
+static int kvm_arch_handle_debug_exit(S390CPU *cpu)
+{
+    return -ENOSYS;
+}
+
 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 {
     S390CPU *cpu = S390_CPU(cs);
@@ -859,6 +875,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         case KVM_EXIT_S390_TSCH:
             ret = handle_tsch(cpu);
             break;
+        case KVM_EXIT_DEBUG:
+            ret = kvm_arch_handle_debug_exit(cpu);
+            break;
         default:
             fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
             break;