author     zhanghailiang <zhang.zhanghailiang@huawei.com>   2017-01-24 15:59:52 +0800
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>     2017-01-24 18:00:31 +0000
commit     1d2acc3162d9c7772510c973f446353fbdd1f9a8 (patch)
tree       4e80f2bb02853d3720e9fe35a26296b34f86387e /migration
parent     b67b8c3a9deacd4e1f029422f01ad8179e16d580 (diff)
download   qemu-1d2acc3162d9c7772510c973f446353fbdd1f9a8.tar.gz
migration: re-activate images when migration is cancelled after inactivating them
commit fe904ea8242cbae2d7e69c052c754b8f5f1ba1d6 fixed a case where migration
aborted QEMU because it did not regain control of the images after an error
occurred.

There are actually two more cases that can trigger the same error report:
" bdrv_co_do_pwritev: Assertion `!(bs->open_flags & 0x0800)' failed"

Case 1, code path:
migration_thread()
  migration_completion()
    bdrv_inactivate_all() ---------------------> inactivate images
    qemu_savevm_state_complete_precopy()
      socket_writev_buffer() ------------------> error because destination fails
      qemu_fflush() ---------------------------> set error on migration stream
-> qmp_migrate_cancel() -----------------------> user cancelled migration concurrently
-> migrate_set_state() ------------------------> set migrate state to CANCELLING
  migration_completion() ----------------------> go on to fail_invalidate
    if (s->state == MIGRATION_STATUS_ACTIVE) --> skip this branch, state is CANCELLING

Case 2, code path:
migration_thread()
  migration_completion()
    bdrv_inactivate_all() ---------------------> inactivate images
  migration_completion() finished
-> qmp_migrate_cancel() -----------------------> user cancelled migration concurrently
   qemu_mutex_lock_iothread();
   qemu_bh_schedule(s->cleanup_bh);

As the paths above show, qmp_migrate_cancel() can slip in whenever
migration_thread() does not hold the global lock. If that happens after
bdrv_inactivate_all() has been called, the error report above appears.

To prevent this, call bdrv_invalidate_cache_all() directly in
qmp_migrate_cancel() if the images are found to be inactive.

In addition, the bdrv_invalidate_cache_all() call in migration_completion()
is not protected by the big lock; fix that by adding the missing
qemu_mutex_lock_iothread().

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Message-Id: <1485244792-11248-1-git-send-email-zhang.zhanghailiang@huawei.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
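[Editorial note] The pattern behind the fix is easier to see in isolation: a worker
thread deactivates a resource and then drops the global lock, so a concurrent cancel
must re-activate the resource itself, under that same lock, guided by a flag that
records the deactivation. The standalone C sketch below illustrates that pattern under
simplified assumptions; MigState, do_inactivate() and do_reactivate() are hypothetical
stand-ins for MigrationState, bdrv_inactivate_all() and bdrv_invalidate_cache_all(),
not QEMU code.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for MigrationState: only what the race needs. */
    typedef struct {
        pthread_mutex_t big_lock;   /* plays the role of the global/iothread lock */
        bool block_inactive;        /* set once the images have been inactivated  */
        bool cancelled;
    } MigState;

    static void do_inactivate(MigState *s) { s->block_inactive = true;  puts("images inactivated");  }
    static void do_reactivate(MigState *s) { s->block_inactive = false; puts("images re-activated"); }

    /* Completion path: inactivate the images, then drop the lock (as
     * migration_completion() does before the cleanup bottom half runs). */
    static void *migration_worker(void *opaque)
    {
        MigState *s = opaque;

        pthread_mutex_lock(&s->big_lock);
        do_inactivate(s);                   /* bdrv_inactivate_all() analogue */
        pthread_mutex_unlock(&s->big_lock); /* window where a cancel can slip in */
        return NULL;
    }

    /* Cancel path with the patch's idea applied: if the images were already
     * inactivated, re-activate them under the lock before returning. */
    static void cancel_migration(MigState *s)
    {
        pthread_mutex_lock(&s->big_lock);
        s->cancelled = true;
        if (s->block_inactive) {
            do_reactivate(s);               /* bdrv_invalidate_cache_all() analogue */
        }
        pthread_mutex_unlock(&s->big_lock);
    }

    int main(void)
    {
        MigState s = { .big_lock = PTHREAD_MUTEX_INITIALIZER };
        pthread_t worker;

        pthread_create(&worker, NULL, migration_worker, &s);
        pthread_join(worker, NULL);
        cancel_migration(&s);               /* user cancels after completion ran (case 2) */
        printf("block_inactive=%d cancelled=%d\n", s.block_inactive, s.cancelled);
        return 0;
    }

Build with gcc -pthread. Without the block_inactive check in the cancel path the
"images" would stay deactivated, which is the analogue of the assertion failure
quoted above.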
Diffstat (limited to 'migration')
-rw-r--r--  migration/migration.c  15
1 file changed, 15 insertions, 0 deletions
diff --git a/migration/migration.c b/migration/migration.c
index 7dcb7d7a32..f8a4500cd1 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1006,6 +1006,16 @@ static void migrate_fd_cancel(MigrationState *s)
     if (s->state == MIGRATION_STATUS_CANCELLING && f) {
         qemu_file_shutdown(f);
     }
+    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
+        Error *local_err = NULL;
+
+        bdrv_invalidate_cache_all(&local_err);
+        if (local_err) {
+            error_report_err(local_err);
+        } else {
+            s->block_inactive = false;
+        }
+    }
 }
 
 void add_migration_state_change_notifier(Notifier *notify)
@@ -1745,6 +1755,7 @@ static void migration_completion(MigrationState *s, int current_active_state,
             if (ret >= 0) {
                 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
+                s->block_inactive = true;
             }
         }
         qemu_mutex_unlock_iothread();
@@ -1795,10 +1806,14 @@ fail_invalidate:
     if (s->state == MIGRATION_STATUS_ACTIVE) {
         Error *local_err = NULL;
 
+        qemu_mutex_lock_iothread();
         bdrv_invalidate_cache_all(&local_err);
         if (local_err) {
             error_report_err(local_err);
+        } else {
+            s->block_inactive = false;
         }
+        qemu_mutex_unlock_iothread();
     }
 
 fail:
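[Editorial note] For reference, the cancel path touched by the first hunk is driven by
the monitor's migrate_cancel command. A sketch of exercising it over QMP follows; the
destination URI is a placeholder, and actually landing the cancel inside the window
described in the commit message is timing-dependent, so this drives the code path
rather than deterministically reproducing the bug.

    { "execute": "migrate", "arguments": { "uri": "tcp:203.0.113.1:4444" } }
    { "execute": "migrate_cancel" }

With the patch applied, a cancel that arrives after bdrv_inactivate_all() re-activates
the images and clears s->block_inactive instead of leaving the source VM with inactive
block devices.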