author     Peter Maydell <peter.maydell@linaro.org>  2017-09-07 10:45:18 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2017-09-07 10:45:18 +0100
commit     8ee5f9b3ecc94e3eb7a8235f4b2c3ec9024807f6 (patch)
tree       302d6285c803b2317a41bcd1dc5ffc67c0959776 /block
parent     8c6a76cd23979f08d8acf1de97945fb48a3a684b (diff)
parent     83a8c775a8bf134eb18a719322939b74a818d750 (diff)
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging

Block layer patches

# gpg: Signature made Wed 06 Sep 2017 14:44:41 BST
# gpg:                using RSA key 0x7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6

* remotes/kevin/tags/for-upstream:
  qcow2: move qcow2_store_persistent_dirty_bitmaps() before cache flushing
  qemu-iotests: add 184 for throttle filter driver
  block: add throttle block filter driver
  block: convert ThrottleGroup to object with QOM
  block: tidy ThrottleGroupMember initializations
  block: add aio_context field in ThrottleGroupMember
  block: move ThrottleGroup membership to ThrottleGroupMember
  block: document semantics of bdrv_co_preadv|pwritev
  qcow: Check failure of bdrv_getlength() and bdrv_truncate()
  qcow: Change signature of get_cluster_offset()
  block: add default implementations for bdrv_co_get_block_status()
  block: remove bdrv_truncate callback in blkdebug
  block: remove unused bdrv_media_changed
  block: pass bdrv_* methods to bs->file by default in block filters

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
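The headline addition in this pull request is the new 'throttle' filter
driver together with the QOM-creatable throttle-group object. A minimal
usage sketch (option spellings follow QEMU's documented syntax for this
feature; the group name, limit value and image path are illustrative):

    qemu-system-x86_64 \
        -object throttle-group,id=group0,x-iops-total=1000 \
        -drive driver=throttle,throttle-group=group0,file.driver=qcow2,file.file.filename=disk.qcow2

Note the "x-" prefix: the individual limit properties are declared
experimental in this series (see THROTTLE_OPT_PREFIX in
block/throttle-groups.c below).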
Diffstat (limited to 'block')
-rw-r--r--  block/Makefile.objs      |    1
-rw-r--r--  block/blkdebug.c         |   20
-rw-r--r--  block/block-backend.c    |   62
-rw-r--r--  block/commit.c           |   12
-rw-r--r--  block/io.c               |   26
-rw-r--r--  block/mirror.c           |   12
-rw-r--r--  block/qapi.c             |    8
-rw-r--r--  block/qcow.c             |  153
-rw-r--r--  block/qcow2.c            |   16
-rw-r--r--  block/raw-format.c       |    6
-rw-r--r--  block/throttle-groups.c  |  750
-rw-r--r--  block/throttle.c         |  237
12 files changed, 966 insertions(+), 337 deletions(-)
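Several of the patches below are mechanical conversions enabled by
"block: add default implementations for bdrv_co_get_block_status()":
blkdebug, commit and mirror each drop a hand-rolled callback in favour
of the shared helpers added to block/io.c. A minimal sketch of the
resulting pattern for a hypothetical pass-through filter (the driver
name is invented; only the members relevant here are shown):

    /* A filter driver no longer needs to open-code a callback returning
     * BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID for its child; it can
     * point straight at the helper that forwards the query to bs->file. */
    static BlockDriver bdrv_passthrough_example = {
        .format_name              = "passthrough-example",
        .is_filter                = true,
        .bdrv_child_perm          = bdrv_filter_default_perms,
        .bdrv_co_get_block_status = bdrv_co_get_block_status_from_file,
    };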
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 2aaede4ae1..6eaf78a046 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -25,6 +25,7 @@ block-obj-y += accounting.o dirty-bitmap.o
block-obj-y += write-threshold.o
block-obj-y += backup.o
block-obj-$(CONFIG_REPLICATION) += replication.o
+block-obj-y += throttle.o
block-obj-y += crypto.o
diff --git a/block/blkdebug.c b/block/blkdebug.c
index 8e385acf54..46e53f2f09 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -628,16 +628,6 @@ static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
return bdrv_co_pdiscard(bs->file->bs, offset, bytes);
}
-static int64_t coroutine_fn blkdebug_co_get_block_status(
- BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
- BlockDriverState **file)
-{
- *pnum = nb_sectors;
- *file = bs->file->bs;
- return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
- (sector_num << BDRV_SECTOR_BITS);
-}
-
static void blkdebug_close(BlockDriverState *bs)
{
BDRVBlkdebugState *s = bs->opaque;
@@ -808,12 +798,6 @@ static int64_t blkdebug_getlength(BlockDriverState *bs)
return bdrv_getlength(bs->file->bs);
}
-static int blkdebug_truncate(BlockDriverState *bs, int64_t offset,
- PreallocMode prealloc, Error **errp)
-{
- return bdrv_truncate(bs->file, offset, prealloc, errp);
-}
-
static void blkdebug_refresh_filename(BlockDriverState *bs, QDict *options)
{
BDRVBlkdebugState *s = bs->opaque;
@@ -896,6 +880,7 @@ static BlockDriver bdrv_blkdebug = {
.format_name = "blkdebug",
.protocol_name = "blkdebug",
.instance_size = sizeof(BDRVBlkdebugState),
+ .is_filter = true,
.bdrv_parse_filename = blkdebug_parse_filename,
.bdrv_file_open = blkdebug_open,
@@ -904,7 +889,6 @@ static BlockDriver bdrv_blkdebug = {
.bdrv_child_perm = bdrv_filter_default_perms,
.bdrv_getlength = blkdebug_getlength,
- .bdrv_truncate = blkdebug_truncate,
.bdrv_refresh_filename = blkdebug_refresh_filename,
.bdrv_refresh_limits = blkdebug_refresh_limits,
@@ -913,7 +897,7 @@ static BlockDriver bdrv_blkdebug = {
.bdrv_co_flush_to_disk = blkdebug_co_flush,
.bdrv_co_pwrite_zeroes = blkdebug_co_pwrite_zeroes,
.bdrv_co_pdiscard = blkdebug_co_pdiscard,
- .bdrv_co_get_block_status = blkdebug_co_get_block_status,
+ .bdrv_co_get_block_status = bdrv_co_get_block_status_from_file,
.bdrv_debug_event = blkdebug_debug_event,
.bdrv_debug_breakpoint = blkdebug_debug_breakpoint,
diff --git a/block/block-backend.c b/block/block-backend.c
index 1031742401..45d9101be3 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -273,9 +273,6 @@ BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
blk->shared_perm = shared_perm;
blk_set_enable_write_cache(blk, true);
- qemu_co_mutex_init(&blk->public.throttled_reqs_lock);
- qemu_co_queue_init(&blk->public.throttled_reqs[0]);
- qemu_co_queue_init(&blk->public.throttled_reqs[1]);
block_acct_init(&blk->stats);
notifier_list_init(&blk->remove_bs_notifiers);
@@ -343,7 +340,7 @@ static void blk_delete(BlockBackend *blk)
assert(!blk->refcnt);
assert(!blk->name);
assert(!blk->dev);
- if (blk->public.throttle_state) {
+ if (blk->public.throttle_group_member.throttle_state) {
blk_io_limits_disable(blk);
}
if (blk->root) {
@@ -658,9 +655,12 @@ BlockBackend *blk_by_public(BlockBackendPublic *public)
*/
void blk_remove_bs(BlockBackend *blk)
{
+ ThrottleTimers *tt;
+
notifier_list_notify(&blk->remove_bs_notifiers, blk);
- if (blk->public.throttle_state) {
- throttle_timers_detach_aio_context(&blk->public.throttle_timers);
+ if (blk->public.throttle_group_member.throttle_state) {
+ tt = &blk->public.throttle_group_member.throttle_timers;
+ throttle_timers_detach_aio_context(tt);
}
blk_update_root_state(blk);
@@ -682,9 +682,10 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
bdrv_ref(bs);
notifier_list_notify(&blk->insert_bs_notifiers, blk);
- if (blk->public.throttle_state) {
+ if (blk->public.throttle_group_member.throttle_state) {
throttle_timers_attach_aio_context(
- &blk->public.throttle_timers, bdrv_get_aio_context(bs));
+ &blk->public.throttle_group_member.throttle_timers,
+ bdrv_get_aio_context(bs));
}
return 0;
@@ -1046,8 +1047,9 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
bdrv_inc_in_flight(bs);
/* throttling disk I/O */
- if (blk->public.throttle_state) {
- throttle_group_co_io_limits_intercept(blk, bytes, false);
+ if (blk->public.throttle_group_member.throttle_state) {
+ throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
+ bytes, false);
}
ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
@@ -1070,10 +1072,10 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
}
bdrv_inc_in_flight(bs);
-
/* throttling disk I/O */
- if (blk->public.throttle_state) {
- throttle_group_co_io_limits_intercept(blk, bytes, true);
+ if (blk->public.throttle_group_member.throttle_state) {
+ throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
+ bytes, true);
}
if (!blk->enable_write_cache) {
@@ -1742,16 +1744,14 @@ static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
BlockDriverState *bs = blk_bs(blk);
+ ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
if (bs) {
- if (blk->public.throttle_state) {
- throttle_timers_detach_aio_context(&blk->public.throttle_timers);
+ if (tgm->throttle_state) {
+ throttle_group_detach_aio_context(tgm);
+ throttle_group_attach_aio_context(tgm, new_context);
}
bdrv_set_aio_context(bs, new_context);
- if (blk->public.throttle_state) {
- throttle_timers_attach_aio_context(&blk->public.throttle_timers,
- new_context);
- }
}
}
@@ -1969,33 +1969,35 @@ int blk_commit_all(void)
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
- throttle_group_config(blk, cfg);
+ throttle_group_config(&blk->public.throttle_group_member, cfg);
}
void blk_io_limits_disable(BlockBackend *blk)
{
- assert(blk->public.throttle_state);
+ assert(blk->public.throttle_group_member.throttle_state);
bdrv_drained_begin(blk_bs(blk));
- throttle_group_unregister_blk(blk);
+ throttle_group_unregister_tgm(&blk->public.throttle_group_member);
bdrv_drained_end(blk_bs(blk));
}
/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
- assert(!blk->public.throttle_state);
- throttle_group_register_blk(blk, group);
+ assert(!blk->public.throttle_group_member.throttle_state);
+ throttle_group_register_tgm(&blk->public.throttle_group_member,
+ group, blk_get_aio_context(blk));
}
void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
/* this BB is not part of any group */
- if (!blk->public.throttle_state) {
+ if (!blk->public.throttle_group_member.throttle_state) {
return;
}
/* this BB is a part of the same group than the one we want */
- if (!g_strcmp0(throttle_group_get_name(blk), group)) {
+ if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
+ group)) {
return;
}
@@ -2017,8 +2019,8 @@ static void blk_root_drained_begin(BdrvChild *child)
/* Note that blk->root may not be accessible here yet if we are just
* attaching to a BlockDriverState that is drained. Use child instead. */
- if (atomic_fetch_inc(&blk->public.io_limits_disabled) == 0) {
- throttle_group_restart_blk(blk);
+ if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
+ throttle_group_restart_tgm(&blk->public.throttle_group_member);
}
}
@@ -2027,8 +2029,8 @@ static void blk_root_drained_end(BdrvChild *child)
BlockBackend *blk = child->opaque;
assert(blk->quiesce_counter);
- assert(blk->public.io_limits_disabled);
- atomic_dec(&blk->public.io_limits_disabled);
+ assert(blk->public.throttle_group_member.io_limits_disabled);
+ atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
if (--blk->quiesce_counter == 0) {
if (blk->dev_ops && blk->dev_ops->drained_end) {
diff --git a/block/commit.c b/block/commit.c
index c7857c3321..898d91f653 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -244,16 +244,6 @@ static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
-static int64_t coroutine_fn bdrv_commit_top_get_block_status(
- BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
- BlockDriverState **file)
-{
- *pnum = nb_sectors;
- *file = bs->backing->bs;
- return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
- (sector_num << BDRV_SECTOR_BITS);
-}
-
static void bdrv_commit_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
bdrv_refresh_filename(bs->backing->bs);
@@ -279,7 +269,7 @@ static void bdrv_commit_top_child_perm(BlockDriverState *bs, BdrvChild *c,
static BlockDriver bdrv_commit_top = {
.format_name = "commit_top",
.bdrv_co_preadv = bdrv_commit_top_preadv,
- .bdrv_co_get_block_status = bdrv_commit_top_get_block_status,
+ .bdrv_co_get_block_status = bdrv_co_get_block_status_from_backing,
.bdrv_refresh_filename = bdrv_commit_top_refresh_filename,
.bdrv_close = bdrv_commit_top_close,
.bdrv_child_perm = bdrv_commit_top_child_perm,
diff --git a/block/io.c b/block/io.c
index 26003814eb..4378ae4c7d 100644
--- a/block/io.c
+++ b/block/io.c
@@ -1714,6 +1714,32 @@ typedef struct BdrvCoGetBlockStatusData {
bool done;
} BdrvCoGetBlockStatusData;
+int64_t coroutine_fn bdrv_co_get_block_status_from_file(BlockDriverState *bs,
+ int64_t sector_num,
+ int nb_sectors,
+ int *pnum,
+ BlockDriverState **file)
+{
+ assert(bs->file && bs->file->bs);
+ *pnum = nb_sectors;
+ *file = bs->file->bs;
+ return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
+ (sector_num << BDRV_SECTOR_BITS);
+}
+
+int64_t coroutine_fn bdrv_co_get_block_status_from_backing(BlockDriverState *bs,
+ int64_t sector_num,
+ int nb_sectors,
+ int *pnum,
+ BlockDriverState **file)
+{
+ assert(bs->backing && bs->backing->bs);
+ *pnum = nb_sectors;
+ *file = bs->backing->bs;
+ return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
+ (sector_num << BDRV_SECTOR_BITS);
+}
+
/*
* Returns the allocation status of the specified sectors.
* Drivers not implementing the functionality are assumed to not support
diff --git a/block/mirror.c b/block/mirror.c
index 429751b9fe..6531652d73 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -1059,16 +1059,6 @@ static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
return bdrv_co_flush(bs->backing->bs);
}
-static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
- BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
- BlockDriverState **file)
-{
- *pnum = nb_sectors;
- *file = bs->backing->bs;
- return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
- (sector_num << BDRV_SECTOR_BITS);
-}
-
static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
{
@@ -1115,7 +1105,7 @@ static BlockDriver bdrv_mirror_top = {
.bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes,
.bdrv_co_pdiscard = bdrv_mirror_top_pdiscard,
.bdrv_co_flush = bdrv_mirror_top_flush,
- .bdrv_co_get_block_status = bdrv_mirror_top_get_block_status,
+ .bdrv_co_get_block_status = bdrv_co_get_block_status_from_backing,
.bdrv_refresh_filename = bdrv_mirror_top_refresh_filename,
.bdrv_close = bdrv_mirror_top_close,
.bdrv_child_perm = bdrv_mirror_top_child_perm,
diff --git a/block/qapi.c b/block/qapi.c
index 5f1a71f5d2..7fa2437923 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -66,10 +66,11 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
info->detect_zeroes = bs->detect_zeroes;
- if (blk && blk_get_public(blk)->throttle_state) {
+ if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {
ThrottleConfig cfg;
+ BlockBackendPublic *blkp = blk_get_public(blk);
- throttle_group_get_config(blk, &cfg);
+ throttle_group_get_config(&blkp->throttle_group_member, &cfg);
info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
@@ -117,7 +118,8 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
info->iops_size = cfg.op_size;
info->has_group = true;
- info->group = g_strdup(throttle_group_get_name(blk));
+ info->group =
+ g_strdup(throttle_group_get_name(&blkp->throttle_group_member));
}
info->write_threshold = bdrv_write_threshold_get(bs);
diff --git a/block/qcow.c b/block/qcow.c
index 63904a26ee..f450b00cfc 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -347,19 +347,22 @@ static int qcow_reopen_prepare(BDRVReopenState *state,
* 'compressed_size'. 'compressed_size' must be > 0 and <
* cluster_size
*
- * return 0 if not allocated.
+ * return 0 if not allocated, 1 if *result is assigned, and negative
+ * errno on failure.
*/
-static uint64_t get_cluster_offset(BlockDriverState *bs,
- uint64_t offset, int allocate,
- int compressed_size,
- int n_start, int n_end)
+static int get_cluster_offset(BlockDriverState *bs,
+ uint64_t offset, int allocate,
+ int compressed_size,
+ int n_start, int n_end, uint64_t *result)
{
BDRVQcowState *s = bs->opaque;
- int min_index, i, j, l1_index, l2_index;
- uint64_t l2_offset, *l2_table, cluster_offset, tmp;
+ int min_index, i, j, l1_index, l2_index, ret;
+ int64_t l2_offset;
+ uint64_t *l2_table, cluster_offset, tmp;
uint32_t min_count;
int new_l2_table;
+ *result = 0;
l1_index = offset >> (s->l2_bits + s->cluster_bits);
l2_offset = s->l1_table[l1_index];
new_l2_table = 0;
@@ -368,15 +371,20 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
return 0;
/* allocate a new l2 entry */
l2_offset = bdrv_getlength(bs->file->bs);
+ if (l2_offset < 0) {
+ return l2_offset;
+ }
/* round to cluster size */
- l2_offset = (l2_offset + s->cluster_size - 1) & ~(s->cluster_size - 1);
+ l2_offset = QEMU_ALIGN_UP(l2_offset, s->cluster_size);
/* update the L1 entry */
s->l1_table[l1_index] = l2_offset;
tmp = cpu_to_be64(l2_offset);
- if (bdrv_pwrite_sync(bs->file,
- s->l1_table_offset + l1_index * sizeof(tmp),
- &tmp, sizeof(tmp)) < 0)
- return 0;
+ ret = bdrv_pwrite_sync(bs->file,
+ s->l1_table_offset + l1_index * sizeof(tmp),
+ &tmp, sizeof(tmp));
+ if (ret < 0) {
+ return ret;
+ }
new_l2_table = 1;
}
for(i = 0; i < L2_CACHE_SIZE; i++) {
@@ -403,14 +411,17 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
l2_table = s->l2_cache + (min_index << s->l2_bits);
if (new_l2_table) {
memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
- if (bdrv_pwrite_sync(bs->file, l2_offset, l2_table,
- s->l2_size * sizeof(uint64_t)) < 0)
- return 0;
+ ret = bdrv_pwrite_sync(bs->file, l2_offset, l2_table,
+ s->l2_size * sizeof(uint64_t));
+ if (ret < 0) {
+ return ret;
+ }
} else {
- if (bdrv_pread(bs->file, l2_offset, l2_table,
- s->l2_size * sizeof(uint64_t)) !=
- s->l2_size * sizeof(uint64_t))
- return 0;
+ ret = bdrv_pread(bs->file, l2_offset, l2_table,
+ s->l2_size * sizeof(uint64_t));
+ if (ret < 0) {
+ return ret;
+ }
}
s->l2_cache_offsets[min_index] = l2_offset;
s->l2_cache_counts[min_index] = 1;
@@ -427,24 +438,36 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
/* if the cluster is already compressed, we must
decompress it in the case it is not completely
overwritten */
- if (decompress_cluster(bs, cluster_offset) < 0)
- return 0;
+ if (decompress_cluster(bs, cluster_offset) < 0) {
+ return -EIO;
+ }
cluster_offset = bdrv_getlength(bs->file->bs);
- cluster_offset = (cluster_offset + s->cluster_size - 1) &
- ~(s->cluster_size - 1);
+ if ((int64_t) cluster_offset < 0) {
+ return cluster_offset;
+ }
+ cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size);
/* write the cluster content */
- if (bdrv_pwrite(bs->file, cluster_offset, s->cluster_cache,
- s->cluster_size) !=
- s->cluster_size)
- return -1;
+ ret = bdrv_pwrite(bs->file, cluster_offset, s->cluster_cache,
+ s->cluster_size);
+ if (ret < 0) {
+ return ret;
+ }
} else {
cluster_offset = bdrv_getlength(bs->file->bs);
+ if ((int64_t) cluster_offset < 0) {
+ return cluster_offset;
+ }
if (allocate == 1) {
/* round to cluster size */
- cluster_offset = (cluster_offset + s->cluster_size - 1) &
- ~(s->cluster_size - 1);
- bdrv_truncate(bs->file, cluster_offset + s->cluster_size,
- PREALLOC_MODE_OFF, NULL);
+ cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size);
+ if (cluster_offset + s->cluster_size > INT64_MAX) {
+ return -E2BIG;
+ }
+ ret = bdrv_truncate(bs->file, cluster_offset + s->cluster_size,
+ PREALLOC_MODE_OFF, NULL);
+ if (ret < 0) {
+ return ret;
+ }
/* if encrypted, we must initialize the cluster
content which won't be written */
if (bs->encrypted &&
@@ -459,13 +482,14 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
s->cluster_data,
BDRV_SECTOR_SIZE,
NULL) < 0) {
- errno = EIO;
- return -1;
+ return -EIO;
+ }
+ ret = bdrv_pwrite(bs->file,
+ cluster_offset + i * 512,
+ s->cluster_data, 512);
+ if (ret < 0) {
+ return ret;
}
- if (bdrv_pwrite(bs->file,
- cluster_offset + i * 512,
- s->cluster_data, 512) != 512)
- return -1;
}
}
}
@@ -477,23 +501,29 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
/* update L2 table */
tmp = cpu_to_be64(cluster_offset);
l2_table[l2_index] = tmp;
- if (bdrv_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
- &tmp, sizeof(tmp)) < 0)
- return 0;
+ ret = bdrv_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
+ &tmp, sizeof(tmp));
+ if (ret < 0) {
+ return ret;
+ }
}
- return cluster_offset;
+ *result = cluster_offset;
+ return 1;
}
static int64_t coroutine_fn qcow_co_get_block_status(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file)
{
BDRVQcowState *s = bs->opaque;
- int index_in_cluster, n;
+ int index_in_cluster, n, ret;
uint64_t cluster_offset;
qemu_co_mutex_lock(&s->lock);
- cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
+ ret = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0, &cluster_offset);
qemu_co_mutex_unlock(&s->lock);
+ if (ret < 0) {
+ return ret;
+ }
index_in_cluster = sector_num & (s->cluster_sectors - 1);
n = s->cluster_sectors - index_in_cluster;
if (n > nb_sectors)
@@ -585,8 +615,11 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
while (nb_sectors != 0) {
/* prepare next request */
- cluster_offset = get_cluster_offset(bs, sector_num << 9,
- 0, 0, 0, 0);
+ ret = get_cluster_offset(bs, sector_num << 9,
+ 0, 0, 0, 0, &cluster_offset);
+ if (ret < 0) {
+ break;
+ }
index_in_cluster = sector_num & (s->cluster_sectors - 1);
n = s->cluster_sectors - index_in_cluster;
if (n > nb_sectors) {
@@ -603,7 +636,7 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
ret = bdrv_co_readv(bs->backing, sector_num, n, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
- goto fail;
+ break;
}
} else {
/* Note: in this case, no need to wait */
@@ -612,13 +645,15 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
} else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
/* add AIO support for compressed blocks ? */
if (decompress_cluster(bs, cluster_offset) < 0) {
- goto fail;
+ ret = -EIO;
+ break;
}
memcpy(buf,
s->cluster_cache + index_in_cluster * 512, 512 * n);
} else {
if ((cluster_offset & 511) != 0) {
- goto fail;
+ ret = -EIO;
+ break;
}
hd_iov.iov_base = (void *)buf;
hd_iov.iov_len = n * 512;
@@ -635,7 +670,8 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
assert(s->crypto);
if (qcrypto_block_decrypt(s->crypto, sector_num, buf,
n * BDRV_SECTOR_SIZE, NULL) < 0) {
- goto fail;
+ ret = -EIO;
+ break;
}
}
}
@@ -646,7 +682,6 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
buf += n * 512;
}
-done:
qemu_co_mutex_unlock(&s->lock);
if (qiov->niov > 1) {
@@ -655,10 +690,6 @@ done:
}
return ret;
-
-fail:
- ret = -EIO;
- goto done;
}
static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
@@ -697,9 +728,12 @@ static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
if (n > nb_sectors) {
n = nb_sectors;
}
- cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
- index_in_cluster,
- index_in_cluster + n);
+ ret = get_cluster_offset(bs, sector_num << 9, 1, 0,
+ index_in_cluster,
+ index_in_cluster + n, &cluster_offset);
+ if (ret < 0) {
+ break;
+ }
if (!cluster_offset || (cluster_offset & 511) != 0) {
ret = -EIO;
break;
@@ -995,8 +1029,11 @@ qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
goto success;
}
qemu_co_mutex_lock(&s->lock);
- cluster_offset = get_cluster_offset(bs, offset, 2, out_len, 0, 0);
+ ret = get_cluster_offset(bs, offset, 2, out_len, 0, 0, &cluster_offset);
qemu_co_mutex_unlock(&s->lock);
+ if (ret < 0) {
+ goto fail;
+ }
if (cluster_offset == 0) {
ret = -EIO;
goto fail;
diff --git a/block/qcow2.c b/block/qcow2.c
index 2ec399663e..bae5893327 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -2036,6 +2036,14 @@ static int qcow2_inactivate(BlockDriverState *bs)
int ret, result = 0;
Error *local_err = NULL;
+ qcow2_store_persistent_dirty_bitmaps(bs, &local_err);
+ if (local_err != NULL) {
+ result = -EINVAL;
+ error_report_err(local_err);
+ error_report("Persistent bitmaps are lost for node '%s'",
+ bdrv_get_device_or_node_name(bs));
+ }
+
ret = qcow2_cache_flush(bs, s->l2_table_cache);
if (ret) {
result = ret;
@@ -2050,14 +2058,6 @@ static int qcow2_inactivate(BlockDriverState *bs)
strerror(-ret));
}
- qcow2_store_persistent_dirty_bitmaps(bs, &local_err);
- if (local_err != NULL) {
- result = -EINVAL;
- error_report_err(local_err);
- error_report("Persistent bitmaps are lost for node '%s'",
- bdrv_get_device_or_node_name(bs));
- }
-
if (result == 0) {
qcow2_mark_clean(bs);
}
diff --git a/block/raw-format.c b/block/raw-format.c
index 142649ed56..ab552c0954 100644
--- a/block/raw-format.c
+++ b/block/raw-format.c
@@ -372,11 +372,6 @@ static int raw_truncate(BlockDriverState *bs, int64_t offset,
return bdrv_truncate(bs->file, offset, prealloc, errp);
}
-static int raw_media_changed(BlockDriverState *bs)
-{
- return bdrv_media_changed(bs->file->bs);
-}
-
static void raw_eject(BlockDriverState *bs, bool eject_flag)
{
bdrv_eject(bs->file->bs, eject_flag);
@@ -510,7 +505,6 @@ BlockDriver bdrv_raw = {
.bdrv_refresh_limits = &raw_refresh_limits,
.bdrv_probe_blocksizes = &raw_probe_blocksizes,
.bdrv_probe_geometry = &raw_probe_geometry,
- .bdrv_media_changed = &raw_media_changed,
.bdrv_eject = &raw_eject,
.bdrv_lock_medium = &raw_lock_medium,
.bdrv_co_ioctl = &raw_co_ioctl,
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 890bfded3f..6ba992c8d7 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -25,12 +25,20 @@
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/throttle-groups.h"
+#include "qemu/throttle-options.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "sysemu/qtest.h"
+#include "qapi/error.h"
+#include "qapi-visit.h"
+#include "qom/object.h"
+#include "qom/object_interfaces.h"
+
+static void throttle_group_obj_init(Object *obj);
+static void throttle_group_obj_complete(UserCreatable *obj, Error **errp);
/* The ThrottleGroup structure (with its ThrottleState) is shared
- * among different BlockBackends and it's independent from
+ * among different ThrottleGroupMembers and it's independent from
* AioContext, so in order to use it from different threads it needs
* its own locking.
*
@@ -40,82 +48,95 @@
* The whole ThrottleGroup structure is private and invisible to
* outside users, that only use it through its ThrottleState.
*
- * In addition to the ThrottleGroup structure, BlockBackendPublic has
+ * In addition to the ThrottleGroup structure, ThrottleGroupMember has
* fields that need to be accessed by other members of the group and
* therefore also need to be protected by this lock. Once a
- * BlockBackend is registered in a group those fields can be accessed
+ * ThrottleGroupMember is registered in a group those fields can be accessed
* by other threads any time.
*
* Again, all this is handled internally and is mostly transparent to
* the outside. The 'throttle_timers' field however has an additional
* constraint because it may be temporarily invalid (see for example
* blk_set_aio_context()). Therefore in this file a thread will
- * access some other BlockBackend's timers only after verifying that
- * that BlockBackend has throttled requests in the queue.
+ * access some other ThrottleGroupMember's timers only after verifying that
+ * that ThrottleGroupMember has throttled requests in the queue.
*/
typedef struct ThrottleGroup {
+ Object parent_obj;
+
+ /* refuse individual property change if initialization is complete */
+ bool is_initialized;
char *name; /* This is constant during the lifetime of the group */
QemuMutex lock; /* This lock protects the following four fields */
ThrottleState ts;
- QLIST_HEAD(, BlockBackendPublic) head;
- BlockBackend *tokens[2];
+ QLIST_HEAD(, ThrottleGroupMember) head;
+ ThrottleGroupMember *tokens[2];
bool any_timer_armed[2];
QEMUClockType clock_type;
- /* These two are protected by the global throttle_groups_lock */
- unsigned refcount;
+ /* This field is protected by the global QEMU mutex */
QTAILQ_ENTRY(ThrottleGroup) list;
} ThrottleGroup;
-static QemuMutex throttle_groups_lock;
+/* This is protected by the global QEMU mutex */
static QTAILQ_HEAD(, ThrottleGroup) throttle_groups =
QTAILQ_HEAD_INITIALIZER(throttle_groups);
+
+/* This function reads throttle_groups and must be called under the global
+ * mutex.
+ */
+static ThrottleGroup *throttle_group_by_name(const char *name)
+{
+ ThrottleGroup *iter;
+
+ /* Look for an existing group with that name */
+ QTAILQ_FOREACH(iter, &throttle_groups, list) {
+ if (!g_strcmp0(name, iter->name)) {
+ return iter;
+ }
+ }
+
+ return NULL;
+}
+
+/* This function reads throttle_groups and must be called under the global
+ * mutex.
+ */
+bool throttle_group_exists(const char *name)
+{
+ return throttle_group_by_name(name) != NULL;
+}
+
/* Increments the reference count of a ThrottleGroup given its name.
*
* If no ThrottleGroup is found with the given name a new one is
* created.
*
+ * This function edits throttle_groups and must be called under the global
+ * mutex.
+ *
* @name: the name of the ThrottleGroup
* @ret: the ThrottleState member of the ThrottleGroup
*/
ThrottleState *throttle_group_incref(const char *name)
{
ThrottleGroup *tg = NULL;
- ThrottleGroup *iter;
-
- qemu_mutex_lock(&throttle_groups_lock);
/* Look for an existing group with that name */
- QTAILQ_FOREACH(iter, &throttle_groups, list) {
- if (!strcmp(name, iter->name)) {
- tg = iter;
- break;
- }
- }
-
- /* Create a new one if not found */
- if (!tg) {
- tg = g_new0(ThrottleGroup, 1);
+ tg = throttle_group_by_name(name);
+
+ if (tg) {
+ object_ref(OBJECT(tg));
+ } else {
+ /* Create a new one if not found. The new ThrottleGroup
+ * object starts with a refcount of 1. */
+ tg = THROTTLE_GROUP(object_new(TYPE_THROTTLE_GROUP));
tg->name = g_strdup(name);
- tg->clock_type = QEMU_CLOCK_REALTIME;
-
- if (qtest_enabled()) {
- /* For testing block IO throttling only */
- tg->clock_type = QEMU_CLOCK_VIRTUAL;
- }
- qemu_mutex_init(&tg->lock);
- throttle_init(&tg->ts);
- QLIST_INIT(&tg->head);
-
- QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
+ throttle_group_obj_complete(USER_CREATABLE(tg), &error_abort);
}
- tg->refcount++;
-
- qemu_mutex_unlock(&throttle_groups_lock);
-
return &tg->ts;
}
@@ -124,130 +145,123 @@ ThrottleState *throttle_group_incref(const char *name)
* When the reference count reaches zero the ThrottleGroup is
* destroyed.
*
+ * This function edits throttle_groups and must be called under the global
+ * mutex.
+ *
* @ts: The ThrottleGroup to unref, given by its ThrottleState member
*/
void throttle_group_unref(ThrottleState *ts)
{
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
-
- qemu_mutex_lock(&throttle_groups_lock);
- if (--tg->refcount == 0) {
- QTAILQ_REMOVE(&throttle_groups, tg, list);
- qemu_mutex_destroy(&tg->lock);
- g_free(tg->name);
- g_free(tg);
- }
- qemu_mutex_unlock(&throttle_groups_lock);
+ object_unref(OBJECT(tg));
}
-/* Get the name from a BlockBackend's ThrottleGroup. The name (and the pointer)
+/* Get the name from a ThrottleGroupMember's group. The name (and the pointer)
* is guaranteed to remain constant during the lifetime of the group.
*
- * @blk: a BlockBackend that is member of a throttling group
+ * @tgm: a ThrottleGroupMember
* @ret: the name of the group.
*/
-const char *throttle_group_get_name(BlockBackend *blk)
+const char *throttle_group_get_name(ThrottleGroupMember *tgm)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+ ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
return tg->name;
}
-/* Return the next BlockBackend in the round-robin sequence, simulating a
- * circular list.
+/* Return the next ThrottleGroupMember in the round-robin sequence, simulating
+ * a circular list.
*
* This assumes that tg->lock is held.
*
- * @blk: the current BlockBackend
- * @ret: the next BlockBackend in the sequence
+ * @tgm: the current ThrottleGroupMember
+ * @ret: the next ThrottleGroupMember in the sequence
*/
-static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
+static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleState *ts = blkp->throttle_state;
+ ThrottleState *ts = tgm->throttle_state;
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
- BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);
+ ThrottleGroupMember *next = QLIST_NEXT(tgm, round_robin);
if (!next) {
next = QLIST_FIRST(&tg->head);
}
- return blk_by_public(next);
+ return next;
}
/*
- * Return whether a BlockBackend has pending requests.
+ * Return whether a ThrottleGroupMember has pending requests.
*
* This assumes that tg->lock is held.
*
- * @blk: the BlockBackend
- * @is_write: the type of operation (read/write)
- * @ret: whether the BlockBackend has pending requests.
+ * @tgm: the ThrottleGroupMember
+ * @is_write: the type of operation (read/write)
+ * @ret: whether the ThrottleGroupMember has pending requests.
*/
-static inline bool blk_has_pending_reqs(BlockBackend *blk,
+static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
bool is_write)
{
- const BlockBackendPublic *blkp = blk_get_public(blk);
- return blkp->pending_reqs[is_write];
+ return tgm->pending_reqs[is_write];
}
-/* Return the next BlockBackend in the round-robin sequence with pending I/O
- * requests.
+/* Return the next ThrottleGroupMember in the round-robin sequence with pending
+ * I/O requests.
*
* This assumes that tg->lock is held.
*
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
* @is_write: the type of operation (read/write)
- * @ret: the next BlockBackend with pending requests, or blk if there is
- * none.
+ * @ret: the next ThrottleGroupMember with pending requests, or tgm if
+ * there is none.
*/
-static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
+static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
+ bool is_write)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
- BlockBackend *token, *start;
+ ThrottleState *ts = tgm->throttle_state;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+ ThrottleGroupMember *token, *start;
start = token = tg->tokens[is_write];
/* get next bs round in round robin style */
- token = throttle_group_next_blk(token);
- while (token != start && !blk_has_pending_reqs(token, is_write)) {
- token = throttle_group_next_blk(token);
+ token = throttle_group_next_tgm(token);
+ while (token != start && !tgm_has_pending_reqs(token, is_write)) {
+ token = throttle_group_next_tgm(token);
}
/* If no IO are queued for scheduling on the next round robin token
- * then decide the token is the current bs because chances are
- * the current bs get the current request queued.
+ * then decide the token is the current tgm because chances are
+ * the current tgm got the current request queued.
*/
- if (token == start && !blk_has_pending_reqs(token, is_write)) {
- token = blk;
+ if (token == start && !tgm_has_pending_reqs(token, is_write)) {
+ token = tgm;
}
- /* Either we return the original BB, or one with pending requests */
- assert(token == blk || blk_has_pending_reqs(token, is_write));
+ /* Either we return the original TGM, or one with pending requests */
+ assert(token == tgm || tgm_has_pending_reqs(token, is_write));
return token;
}
-/* Check if the next I/O request for a BlockBackend needs to be throttled or
- * not. If there's no timer set in this group, set one and update the token
- * accordingly.
+/* Check if the next I/O request for a ThrottleGroupMember needs to be
+ * throttled or not. If there's no timer set in this group, set one and update
+ * the token accordingly.
*
* This assumes that tg->lock is held.
*
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
* @is_write: the type of operation (read/write)
* @ret: whether the I/O request needs to be throttled or not
*/
-static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
+static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
+ bool is_write)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleState *ts = blkp->throttle_state;
- ThrottleTimers *tt = &blkp->throttle_timers;
+ ThrottleState *ts = tgm->throttle_state;
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+ ThrottleTimers *tt = &tgm->throttle_timers;
bool must_wait;
- if (atomic_read(&blkp->io_limits_disabled)) {
+ if (atomic_read(&tgm->io_limits_disabled)) {
return false;
}
@@ -258,30 +272,29 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
must_wait = throttle_schedule_timer(ts, tt, is_write);
- /* If a timer just got armed, set blk as the current token */
+ /* If a timer just got armed, set tgm as the current token */
if (must_wait) {
- tg->tokens[is_write] = blk;
+ tg->tokens[is_write] = tgm;
tg->any_timer_armed[is_write] = true;
}
return must_wait;
}
-/* Start the next pending I/O request for a BlockBackend. Return whether
+/* Start the next pending I/O request for a ThrottleGroupMember. Return whether
* any request was actually pending.
*
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
* @is_write: the type of operation (read/write)
*/
-static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
+static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
bool is_write)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
bool ret;
- qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
- ret = qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
- qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);
+ qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
+ ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
+ qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
return ret;
}
@@ -290,19 +303,19 @@ static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
*
* This assumes that tg->lock is held.
*
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
* @is_write: the type of operation (read/write)
*/
-static void schedule_next_request(BlockBackend *blk, bool is_write)
+static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+ ThrottleState *ts = tgm->throttle_state;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
bool must_wait;
- BlockBackend *token;
+ ThrottleGroupMember *token;
/* Check if there's any pending request to schedule next */
- token = next_throttle_token(blk, is_write);
- if (!blk_has_pending_reqs(token, is_write)) {
+ token = next_throttle_token(tgm, is_write);
+ if (!tgm_has_pending_reqs(token, is_write)) {
return;
}
@@ -311,12 +324,12 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
/* If it doesn't have to wait, queue it for immediate execution */
if (!must_wait) {
- /* Give preference to requests from the current blk */
+ /* Give preference to requests from the current tgm */
if (qemu_in_coroutine() &&
- throttle_group_co_restart_queue(blk, is_write)) {
- token = blk;
+ throttle_group_co_restart_queue(tgm, is_write)) {
+ token = tgm;
} else {
- ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
+ ThrottleTimers *tt = &token->throttle_timers;
int64_t now = qemu_clock_get_ns(tg->clock_type);
timer_mod(tt->timers[is_write], now);
tg->any_timer_armed[is_write] = true;
@@ -329,90 +342,86 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
* if necessary, and schedule the next request using a round robin
* algorithm.
*
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
* @bytes: the number of bytes for this I/O
* @is_write: the type of operation (read/write)
*/
-void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
+void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
unsigned int bytes,
bool is_write)
{
bool must_wait;
- BlockBackend *token;
-
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+ ThrottleGroupMember *token;
+ ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
qemu_mutex_lock(&tg->lock);
/* First we check if this I/O has to be throttled. */
- token = next_throttle_token(blk, is_write);
+ token = next_throttle_token(tgm, is_write);
must_wait = throttle_group_schedule_timer(token, is_write);
/* Wait if there's a timer set or queued requests of this type */
- if (must_wait || blkp->pending_reqs[is_write]) {
- blkp->pending_reqs[is_write]++;
+ if (must_wait || tgm->pending_reqs[is_write]) {
+ tgm->pending_reqs[is_write]++;
qemu_mutex_unlock(&tg->lock);
- qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
- qemu_co_queue_wait(&blkp->throttled_reqs[is_write],
- &blkp->throttled_reqs_lock);
- qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);
+ qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
+ qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
+ &tgm->throttled_reqs_lock);
+ qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
qemu_mutex_lock(&tg->lock);
- blkp->pending_reqs[is_write]--;
+ tgm->pending_reqs[is_write]--;
}
/* The I/O will be executed, so do the accounting */
- throttle_account(blkp->throttle_state, is_write, bytes);
+ throttle_account(tgm->throttle_state, is_write, bytes);
/* Schedule the next request */
- schedule_next_request(blk, is_write);
+ schedule_next_request(tgm, is_write);
qemu_mutex_unlock(&tg->lock);
}
typedef struct {
- BlockBackend *blk;
+ ThrottleGroupMember *tgm;
bool is_write;
} RestartData;
static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
{
RestartData *data = opaque;
- BlockBackend *blk = data->blk;
+ ThrottleGroupMember *tgm = data->tgm;
+ ThrottleState *ts = tgm->throttle_state;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
bool is_write = data->is_write;
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
bool empty_queue;
- empty_queue = !throttle_group_co_restart_queue(blk, is_write);
+ empty_queue = !throttle_group_co_restart_queue(tgm, is_write);
/* If the request queue was empty then we have to take care of
* scheduling the next one */
if (empty_queue) {
qemu_mutex_lock(&tg->lock);
- schedule_next_request(blk, is_write);
+ schedule_next_request(tgm, is_write);
qemu_mutex_unlock(&tg->lock);
}
}
-static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
{
Coroutine *co;
RestartData rd = {
- .blk = blk,
+ .tgm = tgm,
.is_write = is_write
};
co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
- aio_co_enter(blk_get_aio_context(blk), co);
+ aio_co_enter(tgm->aio_context, co);
}
-void throttle_group_restart_blk(BlockBackend *blk)
+void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
-
- if (blkp->throttle_state) {
- throttle_group_restart_queue(blk, 0);
- throttle_group_restart_queue(blk, 1);
+ if (tgm->throttle_state) {
+ throttle_group_restart_queue(tgm, 0);
+ throttle_group_restart_queue(tgm, 1);
}
}
@@ -420,32 +429,30 @@ void throttle_group_restart_blk(BlockBackend *blk)
* to throttle_config(), but guarantees atomicity within the
* throttling group.
*
- * @blk: a BlockBackend that is a member of the group
+ * @tgm: a ThrottleGroupMember that is a member of the group
* @cfg: the configuration to set
*/
-void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
+void throttle_group_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleState *ts = blkp->throttle_state;
+ ThrottleState *ts = tgm->throttle_state;
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
qemu_mutex_lock(&tg->lock);
throttle_config(ts, tg->clock_type, cfg);
qemu_mutex_unlock(&tg->lock);
- throttle_group_restart_blk(blk);
+ throttle_group_restart_tgm(tgm);
}
/* Get the throttle configuration from a particular group. Similar to
* throttle_get_config(), but guarantees atomicity within the
* throttling group.
*
- * @blk: a BlockBackend that is a member of the group
+ * @tgm: a ThrottleGroupMember that is a member of the group
* @cfg: the configuration will be written here
*/
-void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
+void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleState *ts = blkp->throttle_state;
+ ThrottleState *ts = tgm->throttle_state;
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
qemu_mutex_lock(&tg->lock);
throttle_get_config(ts, cfg);
@@ -455,13 +462,12 @@ void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
/* ThrottleTimers callback. This wakes up a request that was waiting
* because it had been throttled.
*
- * @blk: the BlockBackend whose request had been throttled
+ * @tgm: the ThrottleGroupMember whose request had been throttled
* @is_write: the type of operation (read/write)
*/
-static void timer_cb(BlockBackend *blk, bool is_write)
+static void timer_cb(ThrottleGroupMember *tgm, bool is_write)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleState *ts = blkp->throttle_state;
+ ThrottleState *ts = tgm->throttle_state;
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
/* The timer has just been fired, so we can update the flag */
@@ -470,7 +476,7 @@ static void timer_cb(BlockBackend *blk, bool is_write)
qemu_mutex_unlock(&tg->lock);
/* Run the request that was waiting for this timer */
- throttle_group_restart_queue(blk, is_write);
+ throttle_group_restart_queue(tgm, is_write);
}
static void read_timer_cb(void *opaque)
@@ -483,85 +489,445 @@ static void write_timer_cb(void *opaque)
timer_cb(opaque, true);
}
-/* Register a BlockBackend in the throttling group, also initializing its
- * timers and updating its throttle_state pointer to point to it. If a
+/* Register a ThrottleGroupMember in the throttling group, also initializing
+ * its timers and updating its throttle_state pointer to point to it. If a
* throttling group with that name does not exist yet, it will be created.
*
- * @blk: the BlockBackend to insert
+ * This function edits throttle_groups and must be called under the global
+ * mutex.
+ *
+ * @tgm: the ThrottleGroupMember to insert
* @groupname: the name of the group
+ * @ctx: the AioContext to use
*/
-void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
+void throttle_group_register_tgm(ThrottleGroupMember *tgm,
+ const char *groupname,
+ AioContext *ctx)
{
int i;
- BlockBackendPublic *blkp = blk_get_public(blk);
ThrottleState *ts = throttle_group_incref(groupname);
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
- blkp->throttle_state = ts;
+
+ tgm->throttle_state = ts;
+ tgm->aio_context = ctx;
qemu_mutex_lock(&tg->lock);
- /* If the ThrottleGroup is new set this BlockBackend as the token */
+ /* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
for (i = 0; i < 2; i++) {
if (!tg->tokens[i]) {
- tg->tokens[i] = blk;
+ tg->tokens[i] = tgm;
}
}
- QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);
+ QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);
- throttle_timers_init(&blkp->throttle_timers,
- blk_get_aio_context(blk),
+ throttle_timers_init(&tgm->throttle_timers,
+ tgm->aio_context,
tg->clock_type,
read_timer_cb,
write_timer_cb,
- blk);
+ tgm);
+ qemu_co_mutex_init(&tgm->throttled_reqs_lock);
+ qemu_co_queue_init(&tgm->throttled_reqs[0]);
+ qemu_co_queue_init(&tgm->throttled_reqs[1]);
qemu_mutex_unlock(&tg->lock);
}
-/* Unregister a BlockBackend from its group, removing it from the list,
+/* Unregister a ThrottleGroupMember from its group, removing it from the list,
* destroying the timers and setting the throttle_state pointer to NULL.
*
- * The BlockBackend must not have pending throttled requests, so the caller has
- * to drain them first.
+ * The ThrottleGroupMember must not have pending throttled requests, so the
+ * caller has to drain them first.
*
* The group will be destroyed if it's empty after this operation.
*
- * @blk: the BlockBackend to remove
+ * @tgm: the ThrottleGroupMember to remove
*/
-void throttle_group_unregister_blk(BlockBackend *blk)
+void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
{
- BlockBackendPublic *blkp = blk_get_public(blk);
- ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+ ThrottleState *ts = tgm->throttle_state;
+ ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+ ThrottleGroupMember *token;
int i;
- assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
- assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
- assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));
+ if (!ts) {
+ /* Discard already unregistered tgm */
+ return;
+ }
+
+ assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
+ assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
+ assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
qemu_mutex_lock(&tg->lock);
for (i = 0; i < 2; i++) {
- if (tg->tokens[i] == blk) {
- BlockBackend *token = throttle_group_next_blk(blk);
- /* Take care of the case where this is the last blk in the group */
- if (token == blk) {
+ if (tg->tokens[i] == tgm) {
+ token = throttle_group_next_tgm(tgm);
+ /* Take care of the case where this is the last tgm in the group */
+ if (token == tgm) {
token = NULL;
}
tg->tokens[i] = token;
}
}
- /* remove the current blk from the list */
- QLIST_REMOVE(blkp, round_robin);
- throttle_timers_destroy(&blkp->throttle_timers);
+ /* remove the current tgm from the list */
+ QLIST_REMOVE(tgm, round_robin);
+ throttle_timers_destroy(&tgm->throttle_timers);
qemu_mutex_unlock(&tg->lock);
throttle_group_unref(&tg->ts);
- blkp->throttle_state = NULL;
+ tgm->throttle_state = NULL;
+}
+
+void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
+ AioContext *new_context)
+{
+ ThrottleTimers *tt = &tgm->throttle_timers;
+ throttle_timers_attach_aio_context(tt, new_context);
+ tgm->aio_context = new_context;
+}
+
+void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
+{
+ ThrottleTimers *tt = &tgm->throttle_timers;
+ throttle_timers_detach_aio_context(tt);
+ tgm->aio_context = NULL;
+}
+
+#undef THROTTLE_OPT_PREFIX
+#define THROTTLE_OPT_PREFIX "x-"
+
+/* Helper struct and array for QOM property setter/getter */
+typedef struct {
+ const char *name;
+ BucketType type;
+ enum {
+ AVG,
+ MAX,
+ BURST_LENGTH,
+ IOPS_SIZE,
+ } category;
+} ThrottleParamInfo;
+
+static ThrottleParamInfo properties[] = {
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL,
+ THROTTLE_OPS_TOTAL, AVG,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX,
+ THROTTLE_OPS_TOTAL, MAX,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_TOTAL_MAX_LENGTH,
+ THROTTLE_OPS_TOTAL, BURST_LENGTH,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ,
+ THROTTLE_OPS_READ, AVG,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX,
+ THROTTLE_OPS_READ, MAX,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_READ_MAX_LENGTH,
+ THROTTLE_OPS_READ, BURST_LENGTH,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE,
+ THROTTLE_OPS_WRITE, AVG,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX,
+ THROTTLE_OPS_WRITE, MAX,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_WRITE_MAX_LENGTH,
+ THROTTLE_OPS_WRITE, BURST_LENGTH,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL,
+ THROTTLE_BPS_TOTAL, AVG,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX,
+ THROTTLE_BPS_TOTAL, MAX,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_TOTAL_MAX_LENGTH,
+ THROTTLE_BPS_TOTAL, BURST_LENGTH,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ,
+ THROTTLE_BPS_READ, AVG,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX,
+ THROTTLE_BPS_READ, MAX,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_READ_MAX_LENGTH,
+ THROTTLE_BPS_READ, BURST_LENGTH,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE,
+ THROTTLE_BPS_WRITE, AVG,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX,
+ THROTTLE_BPS_WRITE, MAX,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_BPS_WRITE_MAX_LENGTH,
+ THROTTLE_BPS_WRITE, BURST_LENGTH,
+ },
+ {
+ THROTTLE_OPT_PREFIX QEMU_OPT_IOPS_SIZE,
+ 0, IOPS_SIZE,
+ }
+};
+
+/* This function edits throttle_groups and must be called under the global
+ * mutex */
+static void throttle_group_obj_init(Object *obj)
+{
+ ThrottleGroup *tg = THROTTLE_GROUP(obj);
+
+ tg->clock_type = QEMU_CLOCK_REALTIME;
+ if (qtest_enabled()) {
+ /* For testing block IO throttling only */
+ tg->clock_type = QEMU_CLOCK_VIRTUAL;
+ }
+ tg->is_initialized = false;
+ qemu_mutex_init(&tg->lock);
+ throttle_init(&tg->ts);
+ QLIST_INIT(&tg->head);
}
+/* This function edits throttle_groups and must be called under the global
+ * mutex */
+static void throttle_group_obj_complete(UserCreatable *obj, Error **errp)
+{
+ ThrottleGroup *tg = THROTTLE_GROUP(obj);
+ ThrottleConfig cfg;
+
+ /* set group name to object id if it exists */
+ if (!tg->name && tg->parent_obj.parent) {
+ tg->name = object_get_canonical_path_component(OBJECT(obj));
+ }
+ /* We must have a group name at this point */
+ assert(tg->name);
+
+ /* error if name is duplicate */
+ if (throttle_group_exists(tg->name)) {
+ error_setg(errp, "A group with this name already exists");
+ return;
+ }
+
+ /* check validity */
+ throttle_get_config(&tg->ts, &cfg);
+ if (!throttle_is_valid(&cfg, errp)) {
+ return;
+ }
+ throttle_config(&tg->ts, tg->clock_type, &cfg);
+ QTAILQ_INSERT_TAIL(&throttle_groups, tg, list);
+ tg->is_initialized = true;
+}
+
+/* This function edits throttle_groups and must be called under the global
+ * mutex */
+static void throttle_group_obj_finalize(Object *obj)
+{
+ ThrottleGroup *tg = THROTTLE_GROUP(obj);
+ if (tg->is_initialized) {
+ QTAILQ_REMOVE(&throttle_groups, tg, list);
+ }
+ qemu_mutex_destroy(&tg->lock);
+ g_free(tg->name);
+}
+
+static void throttle_group_set(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ ThrottleGroup *tg = THROTTLE_GROUP(obj);
+ ThrottleConfig *cfg;
+ ThrottleParamInfo *info = opaque;
+ Error *local_err = NULL;
+ int64_t value;
+
+ /* If we have finished initialization, don't accept individual property
+ * changes through QOM. Throttle configuration limits must be set in one
+ * transaction, as certain combinations are invalid.
+ */
+ if (tg->is_initialized) {
+ error_setg(&local_err, "Property cannot be set after initialization");
+ goto ret;
+ }
+
+ visit_type_int64(v, name, &value, &local_err);
+ if (local_err) {
+ goto ret;
+ }
+ if (value < 0) {
+ error_setg(&local_err, "Property values cannot be negative");
+ goto ret;
+ }
+
+ cfg = &tg->ts.cfg;
+ switch (info->category) {
+ case AVG:
+ cfg->buckets[info->type].avg = value;
+ break;
+ case MAX:
+ cfg->buckets[info->type].max = value;
+ break;
+ case BURST_LENGTH:
+ if (value > UINT_MAX) {
+ error_setg(&local_err, "%s value must be in the "
+ "range [0, %u]", info->name, UINT_MAX);
+ goto ret;
+ }
+ cfg->buckets[info->type].burst_length = value;
+ break;
+ case IOPS_SIZE:
+ cfg->op_size = value;
+ break;
+ }
+
+ret:
+ error_propagate(errp, local_err);
+ return;
+}
+
+static void throttle_group_get(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ ThrottleGroup *tg = THROTTLE_GROUP(obj);
+ ThrottleConfig cfg;
+ ThrottleParamInfo *info = opaque;
+ int64_t value;
+
+ throttle_get_config(&tg->ts, &cfg);
+ switch (info->category) {
+ case AVG:
+ value = cfg.buckets[info->type].avg;
+ break;
+ case MAX:
+ value = cfg.buckets[info->type].max;
+ break;
+ case BURST_LENGTH:
+ value = cfg.buckets[info->type].burst_length;
+ break;
+ case IOPS_SIZE:
+ value = cfg.op_size;
+ break;
+ }
+
+ visit_type_int64(v, name, &value, errp);
+}
+
+static void throttle_group_set_limits(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ ThrottleGroup *tg = THROTTLE_GROUP(obj);
+ ThrottleConfig cfg;
+ ThrottleLimits arg = { 0 };
+ ThrottleLimits *argp = &arg;
+ Error *local_err = NULL;
+
+ visit_type_ThrottleLimits(v, name, &argp, &local_err);
+ if (local_err) {
+ goto ret;
+ }
+ qemu_mutex_lock(&tg->lock);
+ throttle_get_config(&tg->ts, &cfg);
+ throttle_limits_to_config(argp, &cfg, &local_err);
+ if (local_err) {
+ goto unlock;
+ }
+ throttle_config(&tg->ts, tg->clock_type, &cfg);
+
+unlock:
+ qemu_mutex_unlock(&tg->lock);
+ret:
+ error_propagate(errp, local_err);
+ return;
+}
+
+static void throttle_group_get_limits(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ ThrottleGroup *tg = THROTTLE_GROUP(obj);
+ ThrottleConfig cfg;
+ ThrottleLimits arg = { 0 };
+ ThrottleLimits *argp = &arg;
+
+ qemu_mutex_lock(&tg->lock);
+ throttle_get_config(&tg->ts, &cfg);
+ qemu_mutex_unlock(&tg->lock);
+
+ throttle_config_to_limits(&cfg, argp);
+
+ visit_type_ThrottleLimits(v, name, &argp, errp);
+}
+
+static bool throttle_group_can_be_deleted(UserCreatable *uc)
+{
+ return OBJECT(uc)->ref == 1;
+}
+
+static void throttle_group_obj_class_init(ObjectClass *klass, void *class_data)
+{
+ size_t i = 0;
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
+
+ ucc->complete = throttle_group_obj_complete;
+ ucc->can_be_deleted = throttle_group_can_be_deleted;
+
+ /* individual properties */
+ for (i = 0; i < sizeof(properties) / sizeof(ThrottleParamInfo); i++) {
+ object_class_property_add(klass,
+ properties[i].name,
+ "int",
+ throttle_group_get,
+ throttle_group_set,
+ NULL, &properties[i],
+ &error_abort);
+ }
+
+ /* ThrottleLimits */
+ object_class_property_add(klass,
+ "limits", "ThrottleLimits",
+ throttle_group_get_limits,
+ throttle_group_set_limits,
+ NULL, NULL,
+ &error_abort);
+}
+
+static const TypeInfo throttle_group_info = {
+ .name = TYPE_THROTTLE_GROUP,
+ .parent = TYPE_OBJECT,
+ .class_init = throttle_group_obj_class_init,
+ .instance_size = sizeof(ThrottleGroup),
+ .instance_init = throttle_group_obj_init,
+ .instance_finalize = throttle_group_obj_finalize,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_USER_CREATABLE },
+ { }
+ },
+};
+
static void throttle_groups_init(void)
{
- qemu_mutex_init(&throttle_groups_lock);
+ type_register_static(&throttle_group_info);
}
-block_init(throttle_groups_init);
+type_init(throttle_groups_init);
diff --git a/block/throttle.c b/block/throttle.c
new file mode 100644
index 0000000000..5bca76300f
--- /dev/null
+++ b/block/throttle.c
@@ -0,0 +1,237 @@
+/*
+ * QEMU block throttling filter driver infrastructure
+ *
+ * Copyright (c) 2017 Manos Pitsidianakis
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
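+/*
+ * This driver is a filter node: it applies the I/O limits of its throttle
+ * group to its child and otherwise forwards requests unchanged.  An
+ * illustrative invocation (the individual property names such as
+ * x-iops-total depend on the definitions in throttle-groups.c) might be:
+ *
+ *   -object throttle-group,id=group0,x-iops-total=1000
+ *   -drive driver=throttle,throttle-group=group0,file.filename=disk.qcow2
+ */
+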
+#include "qemu/osdep.h"
+#include "block/throttle-groups.h"
+#include "qemu/throttle-options.h"
+#include "qapi/error.h"
+
+static QemuOptsList throttle_opts = {
+ .name = "throttle",
+ .head = QTAILQ_HEAD_INITIALIZER(throttle_opts.head),
+ .desc = {
+ {
+ .name = QEMU_OPT_THROTTLE_GROUP_NAME,
+ .type = QEMU_OPT_STRING,
+ .help = "Name of the throttle group",
+ },
+ { /* end of list */ }
+ },
+};
+
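+/* Parse the 'throttle-group' option from @options and register @tgm with
+ * the named group, which must already exist.  Returns 0 on success or
+ * -EINVAL on error. */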
+static int throttle_configure_tgm(BlockDriverState *bs,
+ ThrottleGroupMember *tgm,
+ QDict *options, Error **errp)
+{
+ int ret;
+ const char *group_name;
+ Error *local_err = NULL;
+ QemuOpts *opts = qemu_opts_create(&throttle_opts, NULL, 0, &error_abort);
+
+ qemu_opts_absorb_qdict(opts, options, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ ret = -EINVAL;
+ goto fin;
+ }
+
+ group_name = qemu_opt_get(opts, QEMU_OPT_THROTTLE_GROUP_NAME);
+ if (!group_name) {
+ error_setg(errp, "Please specify a throttle group");
+ ret = -EINVAL;
+ goto fin;
+ } else if (!throttle_group_exists(group_name)) {
+ error_setg(errp, "Throttle group '%s' does not exist", group_name);
+ ret = -EINVAL;
+ goto fin;
+ }
+
+ /* Register membership to group with name group_name */
+ throttle_group_register_tgm(tgm, group_name, bdrv_get_aio_context(bs));
+ ret = 0;
+fin:
+ qemu_opts_del(opts);
+ return ret;
+}
+
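+/* Open the filter: attach the 'file' child, advertise its write/zero
+ * flags as our own, and join the configured throttle group. */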
+static int throttle_open(BlockDriverState *bs, QDict *options,
+ int flags, Error **errp)
+{
+ ThrottleGroupMember *tgm = bs->opaque;
+
+ bs->file = bdrv_open_child(NULL, options, "file", bs,
+ &child_file, false, errp);
+ if (!bs->file) {
+ return -EINVAL;
+ }
+ bs->supported_write_flags = bs->file->bs->supported_write_flags;
+ bs->supported_zero_flags = bs->file->bs->supported_zero_flags;
+
+ return throttle_configure_tgm(bs, tgm, options, errp);
+}
+
+static void throttle_close(BlockDriverState *bs)
+{
+ ThrottleGroupMember *tgm = bs->opaque;
+ throttle_group_unregister_tgm(tgm);
+}
+
+static int64_t throttle_getlength(BlockDriverState *bs)
+{
+ return bdrv_getlength(bs->file->bs);
+}
+
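+/* Each I/O path first waits in throttle_group_co_io_limits_intercept()
+ * until the request fits within the group's limits, then forwards the
+ * request unmodified to the child node. */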
+static int coroutine_fn throttle_co_preadv(BlockDriverState *bs,
+ uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
+{
+ ThrottleGroupMember *tgm = bs->opaque;
+ throttle_group_co_io_limits_intercept(tgm, bytes, false);
+
+ return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
+}
+
+static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs,
+ uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
+{
+ ThrottleGroupMember *tgm = bs->opaque;
+ throttle_group_co_io_limits_intercept(tgm, bytes, true);
+
+ return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
+}
+
+static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs,
+ int64_t offset, int bytes,
+ BdrvRequestFlags flags)
+{
+ ThrottleGroupMember *tgm = bs->opaque;
+ throttle_group_co_io_limits_intercept(tgm, bytes, true);
+
+ return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
+}
+
+static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs,
+ int64_t offset, int bytes)
+{
+ ThrottleGroupMember *tgm = bs->opaque;
+ throttle_group_co_io_limits_intercept(tgm, bytes, true);
+
+ return bdrv_co_pdiscard(bs->file->bs, offset, bytes);
+}
+
+static int coroutine_fn throttle_co_flush(BlockDriverState *bs)
+{
+ return bdrv_co_flush(bs->file->bs);
+}
+
+static void throttle_detach_aio_context(BlockDriverState *bs)
+{
+ ThrottleGroupMember *tgm = bs->opaque;
+ throttle_group_detach_aio_context(tgm);
+}
+
+static void throttle_attach_aio_context(BlockDriverState *bs,
+ AioContext *new_context)
+{
+ ThrottleGroupMember *tgm = bs->opaque;
+ throttle_group_attach_aio_context(tgm, new_context);
+}
+
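+/* Reopen support: prepare registers a second, temporary group membership;
+ * commit swaps it in for the old one, while abort simply discards it, so
+ * a failed reopen leaves the original membership untouched. */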
+static int throttle_reopen_prepare(BDRVReopenState *reopen_state,
+ BlockReopenQueue *queue, Error **errp)
+{
+ ThrottleGroupMember *tgm;
+
+ assert(reopen_state != NULL);
+ assert(reopen_state->bs != NULL);
+
+ reopen_state->opaque = g_new0(ThrottleGroupMember, 1);
+ tgm = reopen_state->opaque;
+
+ return throttle_configure_tgm(reopen_state->bs, tgm, reopen_state->options,
+ errp);
+}
+
+static void throttle_reopen_commit(BDRVReopenState *reopen_state)
+{
+ ThrottleGroupMember *old_tgm = reopen_state->bs->opaque;
+ ThrottleGroupMember *new_tgm = reopen_state->opaque;
+
+ throttle_group_unregister_tgm(old_tgm);
+ g_free(old_tgm);
+ reopen_state->bs->opaque = new_tgm;
+ reopen_state->opaque = NULL;
+}
+
+static void throttle_reopen_abort(BDRVReopenState *reopen_state)
+{
+ ThrottleGroupMember *tgm = reopen_state->opaque;
+
+ throttle_group_unregister_tgm(tgm);
+ g_free(tgm);
+ reopen_state->opaque = NULL;
+}
+
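+/* As a filter node, delegate the first-non-filter check to the child. */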
+static bool throttle_recurse_is_first_non_filter(BlockDriverState *bs,
+ BlockDriverState *candidate)
+{
+ return bdrv_recurse_is_first_non_filter(bs->file->bs, candidate);
+}
+
+static BlockDriver bdrv_throttle = {
+ .format_name = "throttle",
+ .protocol_name = "throttle",
+ .instance_size = sizeof(ThrottleGroupMember),
+
+ .bdrv_file_open = throttle_open,
+ .bdrv_close = throttle_close,
+ .bdrv_co_flush = throttle_co_flush,
+
+ .bdrv_child_perm = bdrv_filter_default_perms,
+
+ .bdrv_getlength = throttle_getlength,
+
+ .bdrv_co_preadv = throttle_co_preadv,
+ .bdrv_co_pwritev = throttle_co_pwritev,
+
+ .bdrv_co_pwrite_zeroes = throttle_co_pwrite_zeroes,
+ .bdrv_co_pdiscard = throttle_co_pdiscard,
+
+ .bdrv_recurse_is_first_non_filter = throttle_recurse_is_first_non_filter,
+
+ .bdrv_attach_aio_context = throttle_attach_aio_context,
+ .bdrv_detach_aio_context = throttle_detach_aio_context,
+
+ .bdrv_reopen_prepare = throttle_reopen_prepare,
+ .bdrv_reopen_commit = throttle_reopen_commit,
+ .bdrv_reopen_abort = throttle_reopen_abort,
+ .bdrv_co_get_block_status = bdrv_co_get_block_status_from_file,
+
+ .is_filter = true,
+};
+
+static void bdrv_throttle_init(void)
+{
+ bdrv_register(&bdrv_throttle);
+}
+
+block_init(bdrv_throttle_init);