From d8b441a3fbfd075c48ab2a519d779d926624ed79 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 24 Jul 2015 03:38:28 -0600
Subject: xen/HVM: atomically access pointers in bufioreq handling

The number of slots per page being 511 (i.e. not a power of two) means
that the (32-bit) read and write indexes going beyond 2^32 will likely
disturb operation. The hypervisor side gets I/O req server creation
extended so we can indicate that we're using suitable atomic accesses
where needed, allowing it to atomically canonicalize both pointers when
both have gone through at least one cycle.

The Xen side counterpart (which is not a functional prereq to this
change, albeit a build one) went in already (commit b7007bc6f9).

Signed-off-by: Jan Beulich
Signed-off-by: Stefano Stabellini
---
 xen-hvm.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/xen-hvm.c b/xen-hvm.c
index 0f81f7fd9c..cbd0a79afd 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -984,19 +984,30 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req)
 
 static int handle_buffered_iopage(XenIOState *state)
 {
+    buffered_iopage_t *buf_page = state->buffered_io_page;
     buf_ioreq_t *buf_req = NULL;
     ioreq_t req;
     int qw;
 
-    if (!state->buffered_io_page) {
+    if (!buf_page) {
         return 0;
     }
 
     memset(&req, 0x00, sizeof(req));
 
-    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
-        buf_req = &state->buffered_io_page->buf_ioreq[
-            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
+    for (;;) {
+        uint32_t rdptr = buf_page->read_pointer, wrptr;
+
+        xen_rmb();
+        wrptr = buf_page->write_pointer;
+        xen_rmb();
+        if (rdptr != buf_page->read_pointer) {
+            continue;
+        }
+        if (rdptr == wrptr) {
+            break;
+        }
+        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
         req.size = 1UL << buf_req->size;
         req.count = 1;
         req.addr = buf_req->addr;
@@ -1008,15 +1019,14 @@ static int handle_buffered_iopage(XenIOState *state)
         req.data_is_ptr = 0;
         qw = (req.size == 8);
         if (qw) {
-            buf_req = &state->buffered_io_page->buf_ioreq[
-                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
+            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
+                                           IOREQ_BUFFER_SLOT_NUM];
             req.data |= ((uint64_t)buf_req->data) << 32;
         }
 
         handle_ioreq(state, &req);
 
-        xen_mb();
-        state->buffered_io_page->read_pointer += qw ? 2 : 1;
+        atomic_add(&buf_page->read_pointer, qw + 1);
     }
 
     return req.count;
--
cgit v1.2.1
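
A note on the wraparound issue described in the commit message (not part of the
patch itself): the minimal, self-contained C sketch below shows why a 511-slot
ring misbehaves once its 32-bit pointers wrap. Only the slot count is taken from
the commit message; the demo program and its names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define IOREQ_BUFFER_SLOT_NUM 511   /* slots per page, not a power of two */

int main(void)
{
    /* Consumer position one step before the 32-bit counter wraps. */
    uint32_t rdptr = UINT32_MAX;
    uint32_t slot_before = rdptr % IOREQ_BUFFER_SLOT_NUM;                 /* 31 */
    uint32_t slot_after  = (uint32_t)(rdptr + 1) % IOREQ_BUFFER_SLOT_NUM; /*  0 */

    /*
     * With a power-of-two ring the slot index would continue smoothly
     * across the wrap.  Since 2^32 is not a multiple of 511, the index
     * jumps from 31 to 0 instead of advancing to 32, so the consumer
     * would start reading the wrong slots.  That is why the hypervisor
     * canonicalizes both pointers once both have wrapped, and why QEMU
     * must access them atomically while that can happen.
     */
    printf("slot before wrap: %u\n", slot_before);
    printf("slot after wrap:  %u (a contiguous ring would give %u)\n",
           slot_after, (slot_before + 1) % IOREQ_BUFFER_SLOT_NUM);
    return 0;
}

The same possibility motivates the re-check in the patch's for (;;) loop: if the
hypervisor canonicalizes both pointers between the two reads, the locally cached
rdptr no longer matches buf_page->read_pointer and the iteration is retried.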