author     Paolo Bonzini <pbonzini@redhat.com>    2013-01-22 09:03:15 +0100
committer  Kevin Wolf <kwolf@redhat.com>          2013-01-25 18:18:35 +0100
commit     884fea4e87fbc6daf4e6df618bb3cadc188dcc6b (patch)
tree       d18e2177c7f35c5be87c599a6e383993be654146 /block/mirror.c
parent     402a47411bff5e849dc880dd08ba7e6564e6e4f4 (diff)
download   qemu-884fea4e87fbc6daf4e6df618bb3cadc188dcc6b.tar.gz
mirror: support arbitrarily-sized iterations
Yet another optimization is to extend the mirroring iteration to include more
adjacent dirty blocks. This limits the number of I/O operations and makes
mirroring efficient even with a small granularity. Most of the infrastructure
is already in place; we only need to put a loop around the computation of the
origin and sector count of the iteration.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
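To make the effect concrete, here is a minimal, self-contained sketch (plain C,
not QEMU code) of the coalescing rule the commit message describes: instead of
issuing one I/O per dirty chunk, a run of adjacent dirty chunks is folded into
a single copy operation, bounded by how many chunks the buffer can hold.
NUM_CHUNKS, BUF_FREE_CHUNKS, dirty[] and coalesce() are hypothetical stand-ins
for the job's dirty bitmap and buf_free_count, not real interfaces.

/* Illustrative sketch only: coalesce runs of adjacent dirty chunks. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_CHUNKS      16  /* hypothetical device length, in chunks */
#define BUF_FREE_CHUNKS 4   /* hypothetical buffer budget per operation */

static const bool dirty[NUM_CHUNKS] = {
    0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0
};

/* Count adjacent dirty chunks starting at 'first', capped by the buffer. */
static int coalesce(int first)
{
    int n = 0;

    while (first + n < NUM_CHUNKS && dirty[first + n] && n < BUF_FREE_CHUNKS) {
        n++;
    }
    return n;
}

int main(void)
{
    int chunk = 0;

    while (chunk < NUM_CHUNKS) {
        if (!dirty[chunk]) {
            chunk++;            /* clean chunk: nothing to copy */
            continue;
        }
        /* One operation now covers a whole run of dirty chunks. */
        int n = coalesce(chunk);
        printf("copy chunks [%d, %d) in a single operation\n", chunk, chunk + n);
        chunk += n;
    }
    return 0;
}

In the patch itself the same bound shows up as the buf_free_count checks inside
the new do/while loop, which additionally stops the run at chunks that already
have I/O in flight.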
Diffstat (limited to 'block/mirror.c')
-rw-r--r--  block/mirror.c  97
1 file changed, 67 insertions, 30 deletions
diff --git a/block/mirror.c b/block/mirror.c
index 4696dc84ea..9347533b9b 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -140,7 +140,7 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
 {
     BlockDriverState *source = s->common.bs;
     int nb_sectors, sectors_per_chunk, nb_chunks;
-    int64_t end, sector_num, chunk_num, next_sector, hbitmap_next_sector;
+    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
     MirrorOp *op;
 
     s->sector_num = hbitmap_iter_next(&s->hbi);
@@ -152,45 +152,82 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
     }
 
     hbitmap_next_sector = s->sector_num;
+    sector_num = s->sector_num;
+    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
+    end = s->common.len >> BDRV_SECTOR_BITS;
 
-    /* If we have no backing file yet in the destination, and the cluster size
-     * is very large, we need to do COW ourselves. The first time a cluster is
-     * copied, copy it entirely.
+    /* Extend the QEMUIOVector to include all adjacent blocks that will
+     * be copied in this operation.
+     *
+     * We have to do this if we have no backing file yet in the destination,
+     * and the cluster size is very large. Then we need to do COW ourselves.
+     * The first time a cluster is copied, copy it entirely. Note that,
+     * because both the granularity and the cluster size are powers of two,
+     * the number of sectors to copy cannot exceed one cluster.
      *
-     * Because both the granularity and the cluster size are powers of two, the
-     * number of sectors to copy cannot exceed one cluster.
+     * We also want to extend the QEMUIOVector to include more adjacent
+     * dirty blocks if possible, to limit the number of I/O operations and
+     * run efficiently even with a small granularity.
      */
-    sector_num = s->sector_num;
-    sectors_per_chunk = nb_sectors = s->granularity >> BDRV_SECTOR_BITS;
-    chunk_num = sector_num / sectors_per_chunk;
-    if (s->cow_bitmap && !test_bit(chunk_num, s->cow_bitmap)) {
-        trace_mirror_cow(s, sector_num);
-        bdrv_round_to_clusters(s->target,
-                               sector_num, sectors_per_chunk,
-                               &sector_num, &nb_sectors);
-
-        /* The rounding may make us copy sectors before the
-         * first dirty one.
-         */
-        chunk_num = sector_num / sectors_per_chunk;
-    }
+    nb_chunks = 0;
+    nb_sectors = 0;
+    next_sector = sector_num;
+    next_chunk = sector_num / sectors_per_chunk;
 
     /* Wait for I/O to this cluster (from a previous iteration) to be done. */
-    while (test_bit(chunk_num, s->in_flight_bitmap)) {
+    while (test_bit(next_chunk, s->in_flight_bitmap)) {
         trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
         qemu_coroutine_yield();
     }
 
-    end = s->common.len >> BDRV_SECTOR_BITS;
-    nb_sectors = MIN(nb_sectors, end - sector_num);
-    nb_chunks = (nb_sectors + sectors_per_chunk - 1) / sectors_per_chunk;
-    while (s->buf_free_count < nb_chunks) {
-        trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
-        qemu_coroutine_yield();
-    }
+    do {
+        int added_sectors, added_chunks;
+
+        if (!bdrv_get_dirty(source, next_sector) ||
+            test_bit(next_chunk, s->in_flight_bitmap)) {
+            assert(nb_sectors > 0);
+            break;
+        }
+
+        added_sectors = sectors_per_chunk;
+        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
+            bdrv_round_to_clusters(s->target,
+                                   next_sector, added_sectors,
+                                   &next_sector, &added_sectors);
+
+            /* On the first iteration, the rounding may make us copy
+             * sectors before the first dirty one.
+             */
+            if (next_sector < sector_num) {
+                assert(nb_sectors == 0);
+                sector_num = next_sector;
+                next_chunk = next_sector / sectors_per_chunk;
+            }
+        }
+
+        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
+        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;
+
+        /* When doing COW, it may happen that there is not enough space for
+         * a full cluster. Wait if that is the case.
+         */
+        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
+            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
+            qemu_coroutine_yield();
+        }
+        if (s->buf_free_count < nb_chunks + added_chunks) {
+            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
+            break;
+        }
+
+        /* We have enough free space to copy these sectors. */
+        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);
 
-    /* We have enough free space to copy these sectors. */
-    bitmap_set(s->in_flight_bitmap, chunk_num, nb_chunks);
+        nb_sectors += added_sectors;
+        nb_chunks += added_chunks;
+        next_sector += added_sectors;
+        next_chunk += added_chunks;
+    } while (next_sector < end);
 
     /* Allocate a MirrorOp that is used as an AIO callback. */
     op = g_slice_new(MirrorOp);