author     Eric Blake <eblake@redhat.com>   2017-07-07 07:44:57 -0500
committer  Kevin Wolf <kwolf@redhat.com>    2017-07-10 13:18:07 +0200
commit     d6a644bbfef81bb6c7ab11656ad71e326f75ac77 (patch)
tree       51e6a34d23229e6b01bf0c1fb81d6782854a07e8 /block/backup.c
parent     6f8e35e2414433a56b4bd548b87b8ac2aedecb77 (diff)
block: Make bdrv_is_allocated() byte-based
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible that
byte-based values will let us be more precise about allocation at the
end of an unaligned file that can do byte-based access.

Changing the signature of the function to use int64_t *pnum ensures
that the compiler enforces that all callers are updated. For now, the
io.c layer still assert()s that all callers are sector-aligned on input
and that *pnum is sector-aligned on return to the caller, but that can
be relaxed when a later patch implements byte-based block status.
Therefore, this code adds usages like DIV_ROUND_UP(,BDRV_SECTOR_SIZE)
to callers that still want aligned values, where the call might
reasonably give non-aligned results in the future; on the other hand,
no rounding is needed for callers that should just continue to work
with byte alignment.

For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_is_allocated(). But some
code, particularly bdrv_commit(), gets a lot simpler because it no
longer has to mess with sectors; also, it is now possible to pass NULL
if the caller does not care how much of the image is allocated beyond
the initial offset. Leave comments where we can further simplify once a
later patch eliminates the need for sector-aligned requests through
bdrv_is_allocated().

For ease of review, bdrv_is_allocated_above() will be tackled
separately.

Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
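To make the new calling convention concrete, here is a minimal
standalone sketch of the byte-based pattern described above. It is
illustrative only: is_allocated(), its fake allocation boundary, and
the local cluster_size are stand-ins rather than QEMU's actual
bdrv_is_allocated() implementation; only the shape of the call (byte
offset, byte count, optional int64_t *pnum) and the DIV_ROUND_UP()
scaling mirror the patch.

    /*
     * Standalone sketch (not QEMU code) of the caller-side pattern this
     * patch introduces: byte-based offsets/counts with an int64_t *pnum
     * out-parameter that may be NULL, and DIV_ROUND_UP() for callers
     * that still want sector-aligned values.
     */
    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BDRV_SECTOR_SIZE 512
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Stand-in for bdrv_is_allocated(): reports whether the bytes
     * starting at @offset are allocated; if @pnum is non-NULL it
     * receives the length of the run that is in one uniform state. */
    static bool is_allocated(int64_t offset, int64_t bytes, int64_t *pnum)
    {
        const int64_t boundary = 1536; /* pretend [0,1536) is allocated */
        int64_t n = bytes;
        bool ret = false;

        if (offset < boundary) {
            n = (offset + bytes <= boundary) ? bytes : boundary - offset;
            ret = true;
        }
        if (pnum) {
            *pnum = n;       /* NULL means the caller does not care */
        }
        return ret;
    }

    int main(void)
    {
        const int64_t cluster_size = 4096; /* bytes, like job->cluster_size */
        int64_t i, n;

        /* Byte-based loop, mirroring the rewritten hunk in backup_run(). */
        for (i = 0; i < cluster_size; i += n) {
            bool alloced = is_allocated(i, cluster_size - i, &n);
            /* A caller that still needs whole sectors rounds up: */
            int64_t sectors = DIV_ROUND_UP(n, BDRV_SECTOR_SIZE);
            printf("run at %" PRId64 ": %" PRId64 " bytes (%" PRId64
                   " sectors), allocated=%d\n", i, n, sectors, (int)alloced);
            if (n == 0) {
                break;
            }
        }

        /* With the new signature a caller can pass NULL when only the
         * initial allocation state matters: */
        printf("offset 0 allocated: %d\n",
               (int)is_allocated(0, cluster_size, NULL));
        return 0;
    }

Compiled with any C99 compiler, this prints the two uniform runs inside
the 4 KiB cluster and demonstrates the NULL-pnum shortcut the commit
message mentions.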
Diffstat (limited to 'block/backup.c')
-rw-r--r--  block/backup.c | 17
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 2bd1d94231..b69184eac5 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -47,12 +47,6 @@ typedef struct BackupBlockJob {
QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;
-/* Size of a cluster in sectors, instead of bytes. */
-static inline int64_t cluster_size_sectors(BackupBlockJob *job)
-{
- return job->cluster_size / BDRV_SECTOR_SIZE;
-}
-
/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
int64_t start,
@@ -433,7 +427,6 @@ static void coroutine_fn backup_run(void *opaque)
BackupCompleteData *data;
BlockDriverState *bs = blk_bs(job->common.blk);
int64_t offset;
- int64_t sectors_per_cluster = cluster_size_sectors(job);
int ret = 0;
QLIST_INIT(&job->inflight_reqs);
@@ -465,12 +458,13 @@ static void coroutine_fn backup_run(void *opaque)
}
if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
- int i, n;
+ int i;
+ int64_t n;
/* Check to see if these blocks are already in the
* backing file. */
- for (i = 0; i < sectors_per_cluster;) {
+ for (i = 0; i < job->cluster_size;) {
/* bdrv_is_allocated() only returns true/false based
* on the first set of sectors it comes across that
* are all in the same state.
@@ -478,9 +472,8 @@ static void coroutine_fn backup_run(void *opaque)
* backup cluster length. We end up copying more than
* needed but at some point that is always the case. */
alloced =
- bdrv_is_allocated(bs,
- (offset >> BDRV_SECTOR_BITS) + i,
- sectors_per_cluster - i, &n);
+ bdrv_is_allocated(bs, offset + i,
+ job->cluster_size - i, &n);
i += n;
if (alloced || n == 0) {