author     aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2009-01-22 16:59:24 +0000
committer  aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2009-01-22 16:59:24 +0000
commit     3b69e4b9ad3483bafcc3adc948703dc78e84ed33 (patch)
tree       e1fff08e9e152c580dce839151e376395ea3715f /block.c
parent     44e3ee8a2aaee5df49cd440446fee4525144255f (diff)
download   qemu-3b69e4b9ad3483bafcc3adc948703dc78e84ed33.tar.gz
Vectored block device API (Avi Kivity)
Most devices that are capable of DMA are also capable of scatter-gather. With the memory mapping API, this means that the device code needs to be able to access discontiguous host memory regions. For block devices, this translates to vectored I/O.

This patch implements an asynchronous vectored interface for the qemu block devices. At the moment all I/O is bounced and submitted through the non-vectored API; in the future we will convert block devices to natively support vectored I/O wherever possible.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6397 c046a42c-6fe2-441c-8c8c-71466251a162
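A minimal caller-side sketch (not part of this patch) of how a device with two discontiguous guest buffers might use the new interface, assuming the QEMUIOVector helpers (qemu_iovec_init/add/destroy) introduced alongside this API; ScatterReadState, scatter_read and scatter_read_cb are hypothetical names used only for illustration:

/* Hypothetical example, not part of this commit. */
typedef struct ScatterReadState {
    QEMUIOVector qiov;
} ScatterReadState;

static void scatter_read_cb(void *opaque, int ret)
{
    ScatterReadState *srs = opaque;

    /* By the time this runs, bdrv_aio_rw_vector_cb() has already copied
     * the bounce buffer back into the individual iovec elements. */
    qemu_iovec_destroy(&srs->qiov);
    qemu_free(srs);
}

static void scatter_read(BlockDriverState *bs, int64_t sector,
                         uint8_t *buf0, uint8_t *buf1)
{
    ScatterReadState *srs = qemu_mallocz(sizeof(*srs));

    qemu_iovec_init(&srs->qiov, 2);
    qemu_iovec_add(&srs->qiov, buf0, 512);   /* first 512-byte fragment */
    qemu_iovec_add(&srs->qiov, buf1, 512);   /* second, discontiguous fragment */

    /* Two sectors in one request; for now the data is bounced through
     * bdrv_aio_read() internally. */
    bdrv_aio_readv(bs, sector, &srs->qiov, 2, scatter_read_cb, srs);
}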
Diffstat (limited to 'block.c')
-rw-r--r--  block.c  68
1 file changed, 68 insertions, 0 deletions
diff --git a/block.c b/block.c
index 32503271c7..f570afcb0f 100644
--- a/block.c
+++ b/block.c
@@ -1246,6 +1246,69 @@ char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
/**************************************************************/
/* async I/Os */
+typedef struct VectorTranslationState {
+ QEMUIOVector *iov;
+ uint8_t *bounce;
+ int is_write;
+ BlockDriverAIOCB *aiocb;
+ BlockDriverAIOCB *this_aiocb;
+} VectorTranslationState;
+
+static void bdrv_aio_rw_vector_cb(void *opaque, int ret)
+{
+ VectorTranslationState *s = opaque;
+
+ if (!s->is_write) {
+ qemu_iovec_from_buffer(s->iov, s->bounce);
+ }
+ qemu_free(s->bounce);
+ s->this_aiocb->cb(s->this_aiocb->opaque, ret);
+ qemu_aio_release(s->this_aiocb);
+}
+
+static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
+ int64_t sector_num,
+ QEMUIOVector *iov,
+ int nb_sectors,
+ BlockDriverCompletionFunc *cb,
+ void *opaque,
+ int is_write)
+
+{
+ VectorTranslationState *s = qemu_mallocz(sizeof(*s));
+ BlockDriverAIOCB *aiocb = qemu_aio_get(bs, cb, opaque);
+
+ s->this_aiocb = aiocb;
+ s->iov = iov;
+ s->bounce = qemu_memalign(512, nb_sectors * 512);
+ s->is_write = is_write;
+ if (is_write) {
+ qemu_iovec_to_buffer(s->iov, s->bounce);
+ s->aiocb = bdrv_aio_write(bs, sector_num, s->bounce, nb_sectors,
+ bdrv_aio_rw_vector_cb, s);
+ } else {
+ s->aiocb = bdrv_aio_read(bs, sector_num, s->bounce, nb_sectors,
+ bdrv_aio_rw_vector_cb, s);
+ }
+ return aiocb;
+}
+
+BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
+ QEMUIOVector *iov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors,
+ cb, opaque, 0);
+}
+
+BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
+ QEMUIOVector *iov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors,
+ cb, opaque, 1);
+}
+
BlockDriverAIOCB *bdrv_aio_read(BlockDriverState *bs, int64_t sector_num,
uint8_t *buf, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque)
@@ -1294,6 +1357,11 @@ void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
BlockDriver *drv = acb->bs->drv;
+ if (acb->cb == bdrv_aio_rw_vector_cb) {
+ VectorTranslationState *s = acb->opaque;
+ acb = s->aiocb;
+ }
+
drv->bdrv_aio_cancel(acb);
}