path: root/block/backup.c
author    Fam Zheng <famz@redhat.com>    2016-10-13 17:58:21 -0400
committer Max Reitz <mreitz@redhat.com>  2016-10-24 17:56:07 +0200
commit    dc162c8e4f088b08575460cca35b042d58c141aa (patch)
tree      75d301502b322906f00e12571f12936e6fe8436d /block/backup.c
parent    1ba7e159787d7c96a8783584395b670c22bba4ef (diff)
download  qemu-dc162c8e4f088b08575460cca35b042d58c141aa.tar.gz
block: Hide HBitmap in block dirty bitmap interface
HBitmap is an implementation detail of the block dirty bitmap and should be hidden from users. Introduce a BdrvDirtyBitmapIter to encapsulate the underlying HBitmapIter. One small interface difference: an HBitmapIter used to be initialized in place, whereas the new BdrvDirtyBitmapIter must be dynamically allocated because its structure definition lives in block/dirty-bitmap.c. The two current users are converted as well.

Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 1476395910-8697-2-git-send-email-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
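For illustration, a minimal sketch of the new iterator pattern: the bdrv_dirty_iter_* calls are the ones introduced by this patch, while the surrounding function and process_sector() are hypothetical stand-ins for a real caller such as backup_run_incremental().

    /* Walk all dirty sectors of an already-initialized dirty bitmap. */
    static void walk_dirty_sectors(BdrvDirtyBitmap *bitmap)
    {
        BdrvDirtyBitmapIter *dbi = bdrv_dirty_iter_new(bitmap, 0);
        int64_t sector;

        /* bdrv_dirty_iter_next() returns -1 once the bitmap is exhausted. */
        while ((sector = bdrv_dirty_iter_next(dbi)) != -1) {
            process_sector(sector);
        }

        /* Unlike the old in-place HBitmapIter, the iterator is now
         * heap-allocated and must be freed explicitly. */
        bdrv_dirty_iter_free(dbi);
    }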
Diffstat (limited to 'block/backup.c')
-rw-r--r--  block/backup.c  14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 582bd0f7ee..02dbe48035 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -372,14 +372,14 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
int64_t end;
int64_t last_cluster = -1;
int64_t sectors_per_cluster = cluster_size_sectors(job);
- HBitmapIter hbi;
+ BdrvDirtyBitmapIter *dbi;
granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
clusters_per_iter = MAX((granularity / job->cluster_size), 1);
- bdrv_dirty_iter_init(job->sync_bitmap, &hbi);
+ dbi = bdrv_dirty_iter_new(job->sync_bitmap, 0);
/* Find the next dirty sector(s) */
- while ((sector = hbitmap_iter_next(&hbi)) != -1) {
+ while ((sector = bdrv_dirty_iter_next(dbi)) != -1) {
cluster = sector / sectors_per_cluster;
/* Fake progress updates for any clusters we skipped */
@@ -391,7 +391,7 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
do {
if (yield_and_check(job)) {
- return ret;
+ goto out;
}
ret = backup_do_cow(job, cluster * sectors_per_cluster,
sectors_per_cluster, &error_is_read,
@@ -399,7 +399,7 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
if ((ret < 0) &&
backup_error_action(job, error_is_read, -ret) ==
BLOCK_ERROR_ACTION_REPORT) {
- return ret;
+ goto out;
}
} while (ret < 0);
}
@@ -407,7 +407,7 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
/* If the bitmap granularity is smaller than the backup granularity,
* we need to advance the iterator pointer to the next cluster. */
if (granularity < job->cluster_size) {
- bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
+ bdrv_set_dirty_iter(dbi, cluster * sectors_per_cluster);
}
last_cluster = cluster - 1;
@@ -419,6 +419,8 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
}
+out:
+ bdrv_dirty_iter_free(dbi);
return ret;
}