author    Denis V. Lunev <den@openvz.org>   2016-02-24 11:53:39 +0300
committer Amit Shah <amit.shah@redhat.com>  2016-02-26 20:40:08 +0530
commit    ea6a55bcc0d144ac5086cebf7f84afa7071afe90 (patch)
tree      321162f47f00aa2900e104ce625f4307f593e001 /migration
parent    0aa6aefc9c93db1f64e3ba406ee5234da75b545b (diff)
download  qemu-ea6a55bcc0d144ac5086cebf7f84afa7071afe90.tar.gz
migration (postcopy): move bdrv_invalidate_cache_all out of coroutine context
There is a possibility to hit an assert in qcow2_get_specific_info that
s->qcow_version is undefined. This happens when a VM is starting from a
suspended state, i.e. it is processing incoming migration, and 'info block'
is called at the same time. The problem is that qcow2_invalidate_cache()
closes the image and memset()s BDRVQcowState in the middle.

The patch moves processing of bdrv_invalidate_cache_all out of coroutine
context for postcopy migration to avoid that.

This function is called with the following stack:
  process_incoming_migration_co
  qemu_loadvm_state
  qemu_loadvm_state_main
  loadvm_process_command
  loadvm_postcopy_handle_run

Signed-off-by: Denis V. Lunev <den@openvz.org>
Tested-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Juan Quintela <quintela@redhat.com>
CC: Amit Shah <amit.shah@redhat.com>
Message-Id: <1456304019-10507-3-git-send-email-den@openvz.org>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
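For context, the fix uses QEMU's usual way of deferring work out of coroutine
context: package it into a bottom half (QEMUBH) that the main loop invokes
later. Below is a minimal sketch of that pattern, not the patch itself:
MyState, my_state_bh() and my_state_schedule() are hypothetical names, and
the snippet assumes QEMU's in-tree headers and main loop rather than being a
standalone program.

/* Minimal sketch of the coroutine-to-bottom-half deferral pattern.
 * MyState and the function names are hypothetical placeholders. */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"     /* QEMUBH, qemu_bh_new(), qemu_bh_schedule() */
#include "qemu/error-report.h"  /* error_report_err() */
#include "qapi/error.h"         /* Error */
#include "block/block.h"        /* bdrv_invalidate_cache_all() */

typedef struct MyState {
    QEMUBH *bh;
} MyState;

/* Runs later from the main loop, outside any coroutine. */
static void my_state_bh(void *opaque)
{
    MyState *s = opaque;
    Error *local_err = NULL;

    bdrv_invalidate_cache_all(&local_err);   /* safe: not in a coroutine */
    if (local_err) {
        error_report_err(local_err);
    }
    qemu_bh_delete(s->bh);                   /* one-shot bottom half */
}

/* Called from coroutine context: only schedule the work, do not do it
 * inline. The opaque pointer must carry whatever state the callback
 * needs, since that is all the bottom half receives. */
static void my_state_schedule(MyState *s)
{
    s->bh = qemu_bh_new(my_state_bh, s);
    qemu_bh_schedule(s->bh);
}

The schedule-and-return shape mirrors what the hunks below do with mis->bh
and loadvm_postcopy_handle_run_bh().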
Diffstat (limited to 'migration')
-rw-r--r--  migration/savevm.c  29
1 file changed, 19 insertions, 10 deletions
diff --git a/migration/savevm.c b/migration/savevm.c
index 02e8487441..b45915612f 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1495,17 +1495,10 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
return 0;
}
-/* After all discards we can start running and asking for pages */
-static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
+static void loadvm_postcopy_handle_run_bh(void *opaque)
{
- PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
Error *local_err = NULL;
-
- trace_loadvm_postcopy_handle_run();
- if (ps != POSTCOPY_INCOMING_LISTENING) {
- error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
- return -1;
- }
+ MigrationIncomingState *mis = opaque;
/* TODO we should move all of this lot into postcopy_ram.c or a shared code
* in migration.c
@@ -1518,7 +1511,6 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
bdrv_invalidate_cache_all(&local_err);
if (local_err) {
error_report_err(local_err);
- return -1;
}
trace_loadvm_postcopy_handle_run_cpu_sync();
@@ -1534,6 +1526,23 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
runstate_set(RUN_STATE_PAUSED);
}
+ qemu_bh_delete(mis->bh);
+}
+
+/* After all discards we can start running and asking for pages */
+static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
+{
+ PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
+
+ trace_loadvm_postcopy_handle_run();
+ if (ps != POSTCOPY_INCOMING_LISTENING) {
+ error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
+ return -1;
+ }
+
+ mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);
+ qemu_bh_schedule(mis->bh);
+
/* We need to finish reading the stream from the package
* and also stop reading anything more from the stream that loaded the
* package (since it's now being read by the listener thread).