From 5f3e31012e334f3410e04abae7f88565df17c91a Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 28 Oct 2013 17:32:18 +0100
Subject: timers: fix stop/cont with -icount

Stop/cont commands are broken with -icount due to a deadlock.  The real
problem is that the computation of timers_state.cpu_ticks_offset makes
no sense with -icount enabled: we set it to an icount clock value in
cpu_disable_ticks, and subtract a TSC (or similar, whatever
cpu_get_real_ticks happens to return) value in cpu_enable_ticks.

The fix is simple.  timers_state.cpu_ticks_offset is only used together
with cpu_get_real_ticks, so we can use cpu_get_real_ticks in
cpu_disable_ticks.  There is no need to update cpu_ticks_prev at the
time cpu_disable_ticks is called; instead, we can do it the next time
cpu_get_ticks is called.

The change to cpu_disable_ticks is the important part of the patch.
The rest modifies the code to always check timers_state.cpu_ticks_prev,
even when the ticks are not advancing (i.e. the VM is stopped).  It also
makes a similar change to cpu_get_clock_locked, so that the code remains
similar for cpu_get_ticks and cpu_get_clock_locked.

Signed-off-by: Paolo Bonzini
Message-id: 1382977938-13844-1-git-send-email-pbonzini@redhat.com
Signed-off-by: Anthony Liguori
---
 cpus.c | 42 ++++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/cpus.c b/cpus.c
index 912938cd1b..01d128d7af 100644
--- a/cpus.c
+++ b/cpus.c
@@ -165,36 +165,38 @@ int64_t cpu_get_icount(void)
 /* Caller must hold the BQL */
 int64_t cpu_get_ticks(void)
 {
+    int64_t ticks;
+
     if (use_icount) {
         return cpu_get_icount();
     }
-    if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_ticks_offset;
-    } else {
-        int64_t ticks;
-        ticks = cpu_get_real_ticks();
-        if (timers_state.cpu_ticks_prev > ticks) {
-            /* Note: non increasing ticks may happen if the host uses
-               software suspend */
-            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        }
-        timers_state.cpu_ticks_prev = ticks;
-        return ticks + timers_state.cpu_ticks_offset;
+
+    ticks = timers_state.cpu_ticks_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += cpu_get_real_ticks();
+    }
+
+    if (timers_state.cpu_ticks_prev > ticks) {
+        /* Note: non increasing ticks may happen if the host uses
+           software suspend */
+        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        ticks = timers_state.cpu_ticks_prev;
     }
+
+    timers_state.cpu_ticks_prev = ticks;
+    return ticks;
 }
 
 static int64_t cpu_get_clock_locked(void)
 {
-    int64_t ti;
+    int64_t ticks;
 
-    if (!timers_state.cpu_ticks_enabled) {
-        ti = timers_state.cpu_clock_offset;
-    } else {
-        ti = get_clock();
-        ti += timers_state.cpu_clock_offset;
+    ticks = timers_state.cpu_clock_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += get_clock();
     }
 
-    return ti;
+    return ticks;
 }
 
 /* return the host CPU monotonic timer and handle stop/restart */
@@ -235,7 +237,7 @@ void cpu_disable_ticks(void)
     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset = cpu_get_ticks();
+        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
         timers_state.cpu_ticks_enabled = 0;
     }
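
For readers who want to experiment with the bookkeeping outside QEMU, below
is a minimal standalone C sketch of the scheme the commit message describes:
the offset accumulates elapsed real ticks across enable/disable, and the
getter only adds the live counter while ticks are enabled.  The names
real_ticks(), enable_ticks(), disable_ticks() and get_ticks() are
illustrative stand-ins, not QEMU APIs; clock_gettime(CLOCK_MONOTONIC) stands
in for the TSC-style counter that cpu_get_real_ticks() reads, and the icount
path is left out entirely.

/* sketch.c -- simplified model of the stop/cont tick accounting; not QEMU code */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t ticks_offset;   /* plays the role of timers_state.cpu_ticks_offset  */
static int64_t ticks_prev;     /* plays the role of timers_state.cpu_ticks_prev    */
static int     ticks_enabled;  /* plays the role of timers_state.cpu_ticks_enabled */

/* stand-in for cpu_get_real_ticks(): any monotonically increasing counter */
static int64_t real_ticks(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* "cont": start counting; the offset absorbs the current counter value */
static void enable_ticks(void)
{
    if (!ticks_enabled) {
        ticks_offset -= real_ticks();
        ticks_enabled = 1;
    }
}

/* "stop": freeze the value by folding the counter into the offset,
   which is what the fixed cpu_disable_ticks() now does */
static void disable_ticks(void)
{
    if (ticks_enabled) {
        ticks_offset += real_ticks();
        ticks_enabled = 0;
    }
}

/* mirrors the new cpu_get_ticks(): offset plus live counter if enabled,
   with the non-decreasing check applied in both states */
static int64_t get_ticks(void)
{
    int64_t ticks = ticks_offset;
    if (ticks_enabled) {
        ticks += real_ticks();
    }
    if (ticks_prev > ticks) {
        /* non increasing ticks, e.g. after host software suspend */
        ticks_offset += ticks_prev - ticks;
        ticks = ticks_prev;
    }
    ticks_prev = ticks;
    return ticks;
}

int main(void)
{
    enable_ticks();
    int64_t a = get_ticks();   /* advancing */
    disable_ticks();
    int64_t b = get_ticks();   /* frozen while "stopped" */
    enable_ticks();
    int64_t c = get_ticks();   /* resumes from the frozen value */
    printf("%lld %lld %lld\n", (long long)a, (long long)b, (long long)c);
    return 0;
}

Between disable_ticks() and the following enable_ticks(), get_ticks() keeps
returning the frozen value, which is the stop/cont behaviour the patch
restores for the non-icount case.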