From 3d9b49254f893f2a3739400e536de25db1cdc5f9 Mon Sep 17 00:00:00 2001
From: Michael Tokarev
Date: Sat, 10 Mar 2012 16:54:23 +0400
Subject: consolidate qemu_iovec_memset{,_skip}() into single function and use
 existing iov_memset()

This patch combines the two functions into one, and replaces the
implementation with the already existing iov_memset() from iov.c.

The new prototype of qemu_iovec_memset():

  size_t qemu_iovec_memset(qiov, size_t offset, int fillc, size_t bytes)

It is different from the former qemu_iovec_memset_skip(), and I want to
make the other functions consistent with it too: first how much to skip,
second what to fill with, and third how many bytes to fill.  It also
returns the actual number of bytes filled in, which may be less than the
requested `bytes' if the qiov is smaller than offset+bytes, in the same
way iov_memset() does.

While at it, use the utility function iov_memset() from iov.h in
posix-aio-compat.c, where a qiov was used.

Signed-off-by: Michael Tokarev
---
 block/qcow2.c | 6 +++---
 block/qed.c   | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'block')

diff --git a/block/qcow2.c b/block/qcow2.c
index c2e49cded3..fcbf95273b 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -510,7 +510,7 @@ int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
     else
         n1 = bs->total_sectors - sector_num;
 
-    qemu_iovec_memset_skip(qiov, 0, 512 * (nb_sectors - n1), 512 * n1);
+    qemu_iovec_memset(qiov, 512 * n1, 0, 512 * (nb_sectors - n1));
 
     return n1;
 }
@@ -571,7 +571,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                 }
             } else {
                 /* Note: in this case, no need to wait */
-                qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors);
+                qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
             }
             break;
 
@@ -580,7 +580,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                 ret = -EIO;
                 goto fail;
             }
-            qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors);
+            qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
             break;
 
         case QCOW2_CLUSTER_COMPRESSED:
diff --git a/block/qed.c b/block/qed.c
index 30a31f907f..40bdb5364a 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -736,7 +736,7 @@ static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
     /* Zero all sectors if reading beyond the end of the backing file */
     if (pos >= backing_length ||
         pos + qiov->size > backing_length) {
-        qemu_iovec_memset(qiov, 0, qiov->size);
+        qemu_iovec_memset(qiov, 0, 0, qiov->size);
     }
 
     /* Complete now if there are no backing file sectors to read */
@@ -1251,7 +1251,7 @@ static void qed_aio_read_data(void *opaque, int ret,
     /* Handle zero cluster and backing file reads */
     if (ret == QED_CLUSTER_ZERO) {
-        qemu_iovec_memset(&acb->cur_qiov, 0, acb->cur_qiov.size);
+        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
         qed_aio_next_io(acb, 0);
         return;
     } else if (ret != QED_CLUSTER_FOUND) {
--
cgit v1.2.1
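
For reference, the consolidated function can be a thin wrapper over
iov_memset(); a minimal sketch (illustrative, assuming the iov_memset()
signature from iov.h, not necessarily the exact code of the commit):

  /* Fill `bytes' bytes of `qiov' with `fillc', starting `offset' bytes
   * into the qiov.  Returns the number of bytes actually filled, which
   * is less than `bytes' if the qiov ends before offset + bytes. */
  size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
                           int fillc, size_t bytes)
  {
      return iov_memset(qiov->iov, qiov->niov, offset, fillc, bytes);
  }
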
From 03396148bca54c0e81ad8eecb12a136456d14c16 Mon Sep 17 00:00:00 2001
From: Michael Tokarev
Date: Thu, 7 Jun 2012 20:17:55 +0400
Subject: allow qemu_iovec_from_buffer() to specify offset from which to start
 copying

Similar to

  qemu_iovec_memset(QEMUIOVector *qiov, size_t offset, int c, size_t bytes);

the new prototype is:

  qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
                      const void *buf, size_t bytes);

The processing starts at `offset' bytes within qiov.  This way, we may
copy a bounce buffer directly into the middle of a qiov.

This is exactly the same function as iov_from_buf() from iov.c, so use
the existing implementation, and rename the function to
qemu_iovec_from_buf() to be shorter and to match the utility function.

As with the utility implementation, we now assert that the offset is
inside the actual iovec.  Nothing changes for current callers, since
the `offset' parameter is new; they all pass 0.

While at it, stop using a bounce qiov in block/qcow2.c and copy the
decrypted data directly from cluster_data, instead of recreating a
temporary qiov for that.

Signed-off-by: Michael Tokarev
---
 block/curl.c  | 6 +++---
 block/qcow.c  | 2 +-
 block/qcow2.c | 9 +++------
 block/rbd.c   | 2 +-
 4 files changed, 8 insertions(+), 11 deletions(-)

(limited to 'block')

diff --git a/block/curl.c b/block/curl.c
index bf3680ba57..e7c3634d35 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -140,8 +140,8 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
             continue;
 
         if ((s->buf_off >= acb->end)) {
-            qemu_iovec_from_buffer(acb->qiov, s->orig_buf + acb->start,
-                                   acb->end - acb->start);
+            qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start,
+                                acb->end - acb->start);
             acb->common.cb(acb->common.opaque, 0);
             qemu_aio_release(acb);
             s->acb[i] = NULL;
@@ -176,7 +176,7 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
         {
             char *buf = state->orig_buf + (start - state->buf_start);
 
-            qemu_iovec_from_buffer(acb->qiov, buf, len);
+            qemu_iovec_from_buf(acb->qiov, 0, buf, len);
             acb->common.cb(acb->common.opaque, 0);
 
             return FIND_RET_OK;
diff --git a/block/qcow.c b/block/qcow.c
index 35dff497ae..728010319f 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -540,7 +540,7 @@ done:
     qemu_co_mutex_unlock(&s->lock);
 
     if (qiov->niov > 1) {
-        qemu_iovec_from_buffer(qiov, orig_buf, qiov->size);
+        qemu_iovec_from_buf(qiov, 0, orig_buf, qiov->size);
         qemu_vfree(orig_buf);
     }
 
diff --git a/block/qcow2.c b/block/qcow2.c
index fcbf95273b..ccc599b519 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -590,7 +590,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                 goto fail;
             }
 
-            qemu_iovec_from_buffer(&hd_qiov,
+            qemu_iovec_from_buf(&hd_qiov, 0,
                 s->cluster_cache + index_in_cluster * 512,
                 512 * cur_nr_sectors);
             break;
@@ -630,11 +630,8 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
 
             if (s->crypt_method) {
                 qcow2_encrypt_sectors(s, sector_num, cluster_data,
                     cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
-                qemu_iovec_reset(&hd_qiov);
-                qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
-                    cur_nr_sectors * 512);
-                qemu_iovec_from_buffer(&hd_qiov, cluster_data,
-                    512 * cur_nr_sectors);
+                qemu_iovec_from_buf(qiov, bytes_done,
+                    cluster_data, 512 * cur_nr_sectors);
             }
             break;
 
diff --git a/block/rbd.c b/block/rbd.c
index 1280d66d3c..8bb3252bc3 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -620,7 +620,7 @@ static void rbd_aio_bh_cb(void *opaque)
     RBDAIOCB *acb = opaque;
 
     if (acb->cmd == RBD_AIO_READ) {
-        qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
     acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
--
cgit v1.2.1
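
As with the memset consolidation above, the rename makes
qemu_iovec_from_buf() a thin wrapper over iov_from_buf(); a minimal
sketch (illustrative, assuming the iov_from_buf() signature from iov.h):

  /* Copy `bytes' bytes from the linear buffer `buf' into `qiov',
   * starting `offset' bytes into the qiov.  Returns the number of
   * bytes actually copied. */
  size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
                             const void *buf, size_t bytes)
  {
      return iov_from_buf(qiov->iov, qiov->niov, offset, buf, bytes);
  }
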
From 1b093c480a32051cc856b6ab2395d8cbc3ae99da Mon Sep 17 00:00:00 2001
From: Michael Tokarev
Date: Mon, 12 Mar 2012 21:28:06 +0400
Subject: consolidate qemu_iovec_copy() and qemu_iovec_concat() and make them
 consistent

qemu_iovec_concat() is currently a wrapper around qemu_iovec_copy();
consolidate the two into a single qemu_iovec_concat(), passing an extra
"0" argument in the few places where the old variant was used.

Change the skip argument of qemu_iovec_copy() from uint64_t to size_t:
the size of a qiov is itself a size_t, so there is no way to skip
larger sizes.  Rename it to soffset, to make it clear that the offset
applies to src.

Also change the only use of uint64_t in hw/9pfs/virtio-9p.c, in
v9fs_init_qiov_from_pdu() - all its callers actually use size_t too,
not uint64_t.

One added restriction: as for all other iovec-related functions,
soffset must point inside src.

The order of arguments is already good:

  qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
                    int c, size_t bytes)

vs:

  qemu_iovec_concat(QEMUIOVector *dst,
                    QEMUIOVector *src,
                    size_t soffset, size_t sbytes)

(note that soffset comes after _src_, not dst, since it applies to src;
for memset it applies to qiov).

Note that in many places where this function is used, the previous call
is qemu_iovec_reset(), which means many callers actually want a copy
(replacing the dst content), not a concat.  So we may want to add a
wrapper like qemu_iovec_copy(), with the same arguments, which calls
qemu_iovec_reset() before _concat().

Signed-off-by: Michael Tokarev
---
 block/qcow2.c | 4 ++--
 block/qed.c   | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'block')

diff --git a/block/qcow2.c b/block/qcow2.c
index ccc599b519..8458d10c10 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -549,7 +549,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
         index_in_cluster = sector_num & (s->cluster_sectors - 1);
 
         qemu_iovec_reset(&hd_qiov);
-        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
+        qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
             cur_nr_sectors * 512);
 
         switch (ret) {
@@ -720,7 +720,7 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
         assert((cluster_offset & 511) == 0);
 
         qemu_iovec_reset(&hd_qiov);
-        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
+        qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
             cur_nr_sectors * 512);
 
         if (s->crypt_method) {
diff --git a/block/qed.c b/block/qed.c
index 40bdb5364a..847417a3d6 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -1131,7 +1131,7 @@ static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
 
     acb->cur_nclusters = qed_bytes_to_clusters(s,
             qed_offset_into_cluster(s, acb->cur_pos) + len);
-    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
 
     if (acb->flags & QED_AIOCB_ZERO) {
         /* Skip ahead if the clusters are already zero */
@@ -1177,7 +1177,7 @@ static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
 
     /* Calculate the I/O vector */
     acb->cur_cluster = offset;
-    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
 
     /* Do the actual write */
     qed_aio_write_main(acb, 0);
@@ -1247,7 +1247,7 @@ static void qed_aio_read_data(void *opaque, int ret,
         goto err;
     }
 
-    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
 
     /* Handle zero cluster and backing file reads */
     if (ret == QED_CLUSTER_ZERO) {
--
cgit v1.2.1
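
The reset-then-concat idiom mentioned above, as it appears in the
converted callers (variable names as in block/qcow2.c):

  /* Rebuild hd_qiov so it covers the current chunk of the guest qiov;
   * this is effectively a "copy", since the reset empties dst first. */
  qemu_iovec_reset(&hd_qiov);
  qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_nr_sectors * 512);
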
From d5e6b1619c516fa1e2ee4d8d20f08fcda4fb67a0 Mon Sep 17 00:00:00 2001
From: Michael Tokarev
Date: Thu, 7 Jun 2012 20:21:06 +0400
Subject: change qemu_iovec_to_buf() to match other to,from_buf functions

It now allows specifying the offset within qiov to start from, and the
number of bytes to copy.  The actual implementation is just a call to
iov_to_buf().

Signed-off-by: Michael Tokarev
---
 block/iscsi.c | 3 +--
 block/qcow.c  | 2 +-
 block/qcow2.c | 2 +-
 block/rbd.c   | 2 +-
 4 files changed, 4 insertions(+), 5 deletions(-)

(limited to 'block')

diff --git a/block/iscsi.c b/block/iscsi.c
index 22888a0845..ecb7a2211e 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -240,8 +240,7 @@ iscsi_aio_writev(BlockDriverState *bs, int64_t sector_num,
 
     /* this will allow us to get rid of 'buf' completely */
     size = nb_sectors * BDRV_SECTOR_SIZE;
     acb->buf = g_malloc(size);
-    qemu_iovec_to_buffer(acb->qiov, acb->buf);
-
+    qemu_iovec_to_buf(acb->qiov, 0, acb->buf, size);
     acb->task = malloc(sizeof(struct scsi_task));
     if (acb->task == NULL) {
diff --git a/block/qcow.c b/block/qcow.c
index 728010319f..7b5ab87d2d 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -569,7 +569,7 @@ static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
 
     if (qiov->niov > 1) {
         buf = orig_buf = qemu_blockalign(bs, qiov->size);
-        qemu_iovec_to_buffer(qiov, buf);
+        qemu_iovec_to_buf(qiov, 0, buf, qiov->size);
     } else {
         orig_buf = NULL;
         buf = (uint8_t *)qiov->iov->iov_base;
diff --git a/block/qcow2.c b/block/qcow2.c
index 8458d10c10..1b5b36cfc1 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -731,7 +731,7 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
 
             assert(hd_qiov.size <=
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
-            qemu_iovec_to_buffer(&hd_qiov, cluster_data);
+            qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);
 
             qcow2_encrypt_sectors(s, sector_num, cluster_data,
                 cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);
diff --git a/block/rbd.c b/block/rbd.c
index 8bb3252bc3..49a4787b55 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -674,7 +674,7 @@ static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
     acb->bh = NULL;
 
     if (cmd == RBD_AIO_WRITE) {
-        qemu_iovec_to_buffer(acb->qiov, acb->bounce);
+        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
     }
 
     buf = acb->bounce;
--
cgit v1.2.1
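
Continuing the pattern of the previous patches, qemu_iovec_to_buf() can
be a thin wrapper over iov_to_buf(); a minimal sketch (illustrative,
assuming the iov_to_buf() signature from iov.h):

  /* Copy `bytes' bytes out of `qiov' into the linear buffer `buf',
   * starting `offset' bytes into the qiov.  Returns the number of
   * bytes actually copied. */
  size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset,
                           void *buf, size_t bytes)
  {
      return iov_to_buf(qiov->iov, qiov->niov, offset, buf, bytes);
  }
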
From 2fc8ae1dd77fbc55146b602f703add6dc314dea4 Mon Sep 17 00:00:00 2001
From: Michael Tokarev
Date: Thu, 7 Jun 2012 20:22:46 +0400
Subject: cleanup qemu_co_sendv(), qemu_co_recvv() and friends

The same as for the non-coroutine versions in the previous patches:
rename the arguments to be more obvious, change their types from int to
size_t where appropriate, and use common code for the send and receive
paths (with one extra argument), since these are exactly the same.
Use the common iov_send_recv() directly.

qemu_co_sendv(), qemu_co_recvv(), and qemu_co_recv() are now trivial
#define's that merely add one extra argument.

Callers of qemu_co_sendv() and qemu_co_recvv() are converted to the new
argument order and the extra `iov_cnt' argument.

Signed-off-by: Michael Tokarev
---
 block/nbd.c      | 18 ++++++++++--------
 block/sheepdog.c |  6 +++---
 2 files changed, 13 insertions(+), 11 deletions(-)

(limited to 'block')

diff --git a/block/nbd.c b/block/nbd.c
index 1212614223..2bce47bf7a 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -196,7 +196,7 @@ static void nbd_restart_write(void *opaque)
 }
 
 static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
-                               struct iovec *iov, int offset)
+                               QEMUIOVector *qiov, int offset)
 {
     int rc, ret;
 
@@ -205,8 +205,9 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
                             nbd_have_request, s);
     rc = nbd_send_request(s->sock, request);
-    if (rc >= 0 && iov) {
-        ret = qemu_co_sendv(s->sock, iov, request->len, offset);
+    if (rc >= 0 && qiov) {
+        ret = qemu_co_sendv(s->sock, qiov->iov, qiov->niov,
+                            offset, request->len);
         if (ret != request->len) {
             return -EIO;
         }
@@ -220,7 +221,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
 
 static void nbd_co_receive_reply(BDRVNBDState *s, struct nbd_request *request,
                                  struct nbd_reply *reply,
-                                 struct iovec *iov, int offset)
+                                 QEMUIOVector *qiov, int offset)
 {
     int ret;
 
@@ -231,8 +232,9 @@ static void nbd_co_receive_reply(BDRVNBDState *s, struct nbd_request *request,
     if (reply->handle != request->handle) {
         reply->error = EIO;
     } else {
-        if (iov && reply->error == 0) {
-            ret = qemu_co_recvv(s->sock, iov, request->len, offset);
+        if (qiov && reply->error == 0) {
+            ret = qemu_co_recvv(s->sock, qiov->iov, qiov->niov,
+                                offset, request->len);
             if (ret != request->len) {
                 reply->error = EIO;
             }
@@ -349,7 +351,7 @@ static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
     if (ret < 0) {
         reply.error = -ret;
     } else {
-        nbd_co_receive_reply(s, &request, &reply, qiov->iov, offset);
+        nbd_co_receive_reply(s, &request, &reply, qiov, offset);
     }
     nbd_coroutine_end(s, &request);
     return -reply.error;
 }
@@ -374,7 +376,7 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
     request.len = nb_sectors * 512;
 
     nbd_coroutine_start(s, &request);
-    ret = nbd_co_send_request(s, &request, qiov->iov, offset);
+    ret = nbd_co_send_request(s, &request, qiov, offset);
     if (ret < 0) {
         reply.error = -ret;
     } else {
diff --git a/block/sheepdog.c b/block/sheepdog.c
index f46ca8fb69..2c7aecec4f 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -719,8 +719,8 @@ static void coroutine_fn aio_read_response(void *opaque)
         }
         break;
     case AIOCB_READ_UDATA:
-        ret = qemu_co_recvv(fd, acb->qiov->iov, rsp.data_length,
-                            aio_req->iov_offset);
+        ret = qemu_co_recvv(fd, acb->qiov->iov, acb->qiov->niov,
+                            aio_req->iov_offset, rsp.data_length);
         if (ret < 0) {
             error_report("failed to get the data, %s", strerror(errno));
             goto out;
@@ -992,7 +992,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     }
 
     if (wlen) {
-        ret = qemu_co_sendv(s->fd, iov, wlen, aio_req->iov_offset);
+        ret = qemu_co_sendv(s->fd, iov, niov, aio_req->iov_offset, wlen);
         if (ret < 0) {
             qemu_co_mutex_unlock(&s->lock);
             error_report("failed to send a data, %s", strerror(errno));
--
cgit v1.2.1
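
A sketch of the trivial #define wrappers this message describes (the
name of the common helper and its exact signature are assumptions for
illustration; only the shape of the macros is the point):

  /* One common path; the last argument selects send vs. receive. */
  ssize_t coroutine_fn qemu_co_sendv_recvv(int sockfd, struct iovec *iov,
                                           unsigned iov_cnt, size_t offset,
                                           size_t bytes, bool do_send);

  #define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
      qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
  #define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
      qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true)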