Diffstat (limited to 'block/cloop.c')
-rw-r--r--  block/cloop.c | 81
1 file changed, 71 insertions(+), 10 deletions(-)
diff --git a/block/cloop.c b/block/cloop.c
index b907023e10..b6ad50fbb4 100644
--- a/block/cloop.c
+++ b/block/cloop.c
@@ -26,6 +26,9 @@
 #include "qemu/module.h"
 #include <zlib.h>
 
+/* Maximum compressed block size */
+#define MAX_BLOCK_SIZE (64 * 1024 * 1024)
+
 typedef struct BDRVCloopState {
     CoMutex lock;
     uint32_t block_size;
@@ -68,6 +71,26 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
         return ret;
     }
     s->block_size = be32_to_cpu(s->block_size);
+    if (s->block_size % 512) {
+        error_setg(errp, "block_size %u must be a multiple of 512",
+                   s->block_size);
+        return -EINVAL;
+    }
+    if (s->block_size == 0) {
+        error_setg(errp, "block_size cannot be zero");
+        return -EINVAL;
+    }
+
+    /* cloop's create_compressed_fs.c warns about block sizes beyond 256 KB but
+     * we can accept more.  Prevent ridiculous values like 4 GB - 1 since we
+     * need a buffer this big.
+     */
+    if (s->block_size > MAX_BLOCK_SIZE) {
+        error_setg(errp, "block_size %u must be %u MB or less",
+                   s->block_size,
+                   MAX_BLOCK_SIZE / (1024 * 1024));
+        return -EINVAL;
+    }
 
     ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
     if (ret < 0) {
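Note on the hunk above: the three new checks reject a misaligned, zero, or oversized block_size before any buffer is sized from it. A standalone sketch of the same predicate, assuming only the checks shown above (cloop_block_size_valid is a hypothetical name, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

#define MAX_BLOCK_SIZE (64 * 1024 * 1024)

/* Hypothetical helper mirroring cloop_open()'s validation: a usable
 * cloop block size is a non-zero multiple of 512, at most 64 MB. */
static bool cloop_block_size_valid(uint32_t block_size)
{
    return block_size % 512 == 0 &&
           block_size != 0 &&
           block_size <= MAX_BLOCK_SIZE;
}

The explicit zero test is not redundant: 0 % 512 == 0, so a zero block_size passes the alignment check and has to be rejected separately.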
@@ -76,7 +99,23 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
     s->n_blocks = be32_to_cpu(s->n_blocks);
 
     /* read offsets */
-    offsets_size = s->n_blocks * sizeof(uint64_t);
+    if (s->n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
+        /* Prevent integer overflow */
+        error_setg(errp, "n_blocks %u must be %zu or less",
+                   s->n_blocks,
+                   (UINT32_MAX - 1) / sizeof(uint64_t));
+        return -EINVAL;
+    }
+    offsets_size = (s->n_blocks + 1) * sizeof(uint64_t);
+    if (offsets_size > 512 * 1024 * 1024) {
+        /* Prevent ridiculous offsets_size which causes memory allocation to
+         * fail or overflows bdrv_pread() size.  In practice the 512 MB
+         * offsets[] limit supports 16 TB images at 256 KB block size.
+         */
+        error_setg(errp, "image requires too many offsets, "
+                   "try increasing block size");
+        return -EINVAL;
+    }
     s->offsets = g_malloc(offsets_size);
 
     ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
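The arithmetic behind the two limits in this hunk: with sizeof(uint64_t) == 8, capping n_blocks at (UINT32_MAX - 1) / 8 keeps (n_blocks + 1) * 8 representable in the 32-bit offsets_size, and the 512 MB ceiling allows 512 MB / 8 = 64 Mi offsets, i.e. about 64 Mi blocks of 256 KB each, which is the 16 TB mentioned in the comment. A minimal demonstration of the wraparound the first check prevents (a sketch, not part of the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A crafted image header can claim n_blocks = 0xffffffff.  The
     * 64-bit product is 32 GB, but without the guard it would be
     * stored into the 32-bit offsets_size, truncating to 0 and
     * setting up a tiny allocation for a huge subsequent read. */
    uint32_t n_blocks = UINT32_MAX;
    uint64_t full = ((uint64_t)n_blocks + 1) * sizeof(uint64_t);
    uint32_t truncated = (uint32_t)full;

    printf("full = %" PRIu64 ", truncated = %" PRIu32 "\n",
           full, truncated);
    return 0;
}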
@@ -84,13 +123,37 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
         goto fail;
     }
 
-    for(i=0;i<s->n_blocks;i++) {
+    for (i = 0; i < s->n_blocks + 1; i++) {
+        uint64_t size;
+
         s->offsets[i] = be64_to_cpu(s->offsets[i]);
-        if (i > 0) {
-            uint32_t size = s->offsets[i] - s->offsets[i - 1];
-            if (size > max_compressed_block_size) {
-                max_compressed_block_size = size;
-            }
+        if (i == 0) {
+            continue;
+        }
+
+        if (s->offsets[i] < s->offsets[i - 1]) {
+            error_setg(errp, "offsets not monotonically increasing at "
+                       "index %u, image file is corrupt", i);
+            ret = -EINVAL;
+            goto fail;
+        }
+
+        size = s->offsets[i] - s->offsets[i - 1];
+
+        /* Compressed blocks should be smaller than the uncompressed block size
+         * but maybe compression performed poorly so the compressed block is
+         * actually bigger.  Clamp down on unrealistic values to prevent
+         * ridiculous s->compressed_block allocation.
+         */
+        if (size > 2 * MAX_BLOCK_SIZE) {
+            error_setg(errp, "invalid compressed block size at index %u, "
+                       "image file is corrupt", i);
+            ret = -EINVAL;
+            goto fail;
+        }
+
+        if (size > max_compressed_block_size) {
+            max_compressed_block_size = size;
         }
     }
 
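Each delta between consecutive offsets becomes a compressed block size, so the loop above validates both monotonicity and magnitude before the delta can drive an allocation. A standalone sketch of the same two invariants (cloop_offsets_valid is a hypothetical name, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

#define MAX_BLOCK_SIZE (64 * 1024 * 1024)

/* Hypothetical check mirroring the loop: offsets[0..n_blocks] must be
 * monotonically increasing and each delta (a compressed block size)
 * must stay within 2 * MAX_BLOCK_SIZE. */
static bool cloop_offsets_valid(const uint64_t *offsets, uint32_t n_blocks)
{
    for (uint32_t i = 1; i <= n_blocks; i++) {
        if (offsets[i] < offsets[i - 1]) {
            return false;   /* corrupt: offsets went backwards */
        }
        if (offsets[i] - offsets[i - 1] > 2 * MAX_BLOCK_SIZE) {
            return false;   /* corrupt: unrealistic compressed size */
        }
    }
    return true;
}

Widening size from uint32_t to uint64_t matters here: with decreasing offsets the unsigned subtraction wraps to a value near 2^64, which the 2 * MAX_BLOCK_SIZE test still catches, whereas the old 32-bit truncation could yield a small, plausible-looking size.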
@@ -180,9 +243,7 @@ static coroutine_fn int cloop_co_read(BlockDriverState *bs, int64_t sector_num,
 static void cloop_close(BlockDriverState *bs)
 {
     BDRVCloopState *s = bs->opaque;
-    if (s->n_blocks > 0) {
-        g_free(s->offsets);
-    }
+    g_free(s->offsets);
     g_free(s->compressed_block);
     g_free(s->uncompressed_block);
     inflateEnd(&s->zstream);
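The cloop_close() simplification relies on g_free() being NULL-safe: like free(), g_free(NULL) is a documented no-op, so the n_blocks > 0 guard added nothing. A trivial illustration (not part of the patch):

#include <glib.h>

int main(void)
{
    guint64 *offsets = NULL;

    g_free(offsets);   /* g_free(NULL) is a documented no-op */

    offsets = g_malloc(16 * sizeof(guint64));
    g_free(offsets);
    return 0;
}

After the patch, s->offsets also always holds n_blocks + 1 entries when open succeeds, so the pointer is never NULL by the time cloop_close() runs; the unconditional g_free() is correct either way.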