author     Kevin Wolf <kwolf@redhat.com>  2017-12-06 20:24:44 +0100
committer  Kevin Wolf <kwolf@redhat.com>  2017-12-22 15:05:32 +0100
commit     1a63a907507fbbcfaee3f622907ec244b7eabda8 (patch)
tree       3d99d71c98b48bbdc326b03fe6d154980a2ce0ce /block.c
parent     44487eb973f895d68989cf931e25f309ec9807f9 (diff)
block: Keep nodes drained between reopen_queue/multiple
The bdrv_reopen*() implementation doesn't like it if the graph is changed
between queuing nodes for reopen and actually reopening them (one of the
reasons is that queuing can be recursive). So instead of draining the device
only in bdrv_reopen_multiple(), require that callers already drained all
affected nodes, and assert this in bdrv_reopen_queue().

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
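The new contract is exactly what the bdrv_reopen() change in the diff below follows: the caller opens a drained section before queuing and keeps it open until bdrv_reopen_multiple() has returned. A minimal sketch of that caller pattern (the wrapper name is illustrative and error handling is trimmed):

```c
/* Illustrative caller of the reopen API under the new contract. */
static int reopen_node_with_flags(BlockDriverState *bs, int bdrv_flags,
                                  Error **errp)
{
    BlockReopenQueue *queue;
    int ret;

    /* Keep the whole subtree drained so the graph cannot change between
     * bdrv_reopen_queue() and bdrv_reopen_multiple(); the new assertion on
     * bs->quiesce_counter checks exactly this. */
    bdrv_subtree_drained_begin(bs);

    queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
    ret = bdrv_reopen_multiple(bdrv_get_aio_context(bs), queue, errp);

    bdrv_subtree_drained_end(bs);
    return ret;
}
```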
Diffstat (limited to 'block.c')
-rw-r--r--  block.c  23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/block.c b/block.c
index 8b46ba21b1..a8da4f2b25 100644
--- a/block.c
+++ b/block.c
@@ -2766,6 +2766,7 @@ BlockDriverState *bdrv_open(const char *filename, const char *reference,
* returns a pointer to bs_queue, which is either the newly allocated
* bs_queue, or the existing bs_queue being used.
*
+ * bs must be drained between bdrv_reopen_queue() and bdrv_reopen_multiple().
*/
static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
BlockDriverState *bs,
@@ -2781,6 +2782,11 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
BdrvChild *child;
QDict *old_options, *explicit_options;
+ /* Make sure that the caller remembered to use a drained section. This is
+ * important to avoid graph changes between the recursive queuing here and
+ * bdrv_reopen_multiple(). */
+ assert(bs->quiesce_counter > 0);
+
if (bs_queue == NULL) {
bs_queue = g_new0(BlockReopenQueue, 1);
QSIMPLEQ_INIT(bs_queue);
@@ -2905,6 +2911,8 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
* If all devices prepare successfully, then the changes are committed
* to all devices.
*
+ * All affected nodes must be drained between bdrv_reopen_queue() and
+ * bdrv_reopen_multiple().
*/
int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp)
{
@@ -2914,11 +2922,8 @@ int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **er
assert(bs_queue != NULL);
- aio_context_release(ctx);
- bdrv_drain_all_begin();
- aio_context_acquire(ctx);
-
QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
+ assert(bs_entry->state.bs->quiesce_counter > 0);
if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
error_propagate(errp, local_err);
goto cleanup;
@@ -2947,8 +2952,6 @@ cleanup:
}
g_free(bs_queue);
- bdrv_drain_all_end();
-
return ret;
}
@@ -2958,12 +2961,18 @@ int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
int ret = -1;
Error *local_err = NULL;
- BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
+ BlockReopenQueue *queue;
+ bdrv_subtree_drained_begin(bs);
+
+ queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
ret = bdrv_reopen_multiple(bdrv_get_aio_context(bs), queue, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
}
+
+ bdrv_subtree_drained_end(bs);
+
return ret;
}
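
Since bdrv_reopen_queue() now asserts the drained state on every node it queues (including the ones it reaches recursively), a caller that batches several nodes into one reopen transaction has to keep each of them drained for the whole queue/reopen span. A hypothetical sketch, assuming two nodes that share an AioContext and ignoring cleanup of a partially built queue on error:

```c
/* Hypothetical caller: reopen two nodes in one transaction. Both must stay
 * drained from the first bdrv_reopen_queue() call until bdrv_reopen_multiple()
 * has returned, otherwise the new quiesce_counter assertions fire. */
static int reopen_two_nodes(BlockDriverState *bs_a, int flags_a,
                            BlockDriverState *bs_b, int flags_b,
                            Error **errp)
{
    BlockReopenQueue *queue;
    int ret;

    bdrv_subtree_drained_begin(bs_a);
    bdrv_subtree_drained_begin(bs_b);

    /* Queuing appends to the same BlockReopenQueue on each call. */
    queue = bdrv_reopen_queue(NULL, bs_a, NULL, flags_a);
    queue = bdrv_reopen_queue(queue, bs_b, NULL, flags_b);

    ret = bdrv_reopen_multiple(bdrv_get_aio_context(bs_a), queue, errp);

    bdrv_subtree_drained_end(bs_b);
    bdrv_subtree_drained_end(bs_a);
    return ret;
}
```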