From a6ffc4232ab649ea91bd951f8c4f9cc598a66fd6 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Mon, 16 Oct 2017 16:43:01 +0200
Subject: kvm: simplify kvm_align_section()

Use ROUND_UP and simplify the code a bit.

Signed-off-by: David Hildenbrand
Message-Id: <20171016144302.24284-7-david@redhat.com>
Signed-off-by: Paolo Bonzini
---
 accel/kvm/kvm-all.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 2835bb3801..f290f487a5 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -197,26 +197,20 @@ static hwaddr kvm_align_section(MemoryRegionSection *section,
                                 hwaddr *start)
 {
     hwaddr size = int128_get64(section->size);
-    hwaddr delta;
-
-    *start = section->offset_within_address_space;
+    hwaddr delta, aligned;
 
     /* kvm works in page size chunks, but the function may be called
        with sub-page size and unaligned start address. Pad the start
        address to next and truncate size to previous page boundary. */
-    delta = qemu_real_host_page_size - (*start & ~qemu_real_host_page_mask);
-    delta &= ~qemu_real_host_page_mask;
-    *start += delta;
+    aligned = ROUND_UP(section->offset_within_address_space,
+                       qemu_real_host_page_size);
+    delta = aligned - section->offset_within_address_space;
+    *start = aligned;
 
     if (delta > size) {
         return 0;
     }
-    size -= delta;
-    size &= qemu_real_host_page_mask;
-    if (*start & ~qemu_real_host_page_mask) {
-        return 0;
-    }
-    return size;
+    return (size - delta) & qemu_real_host_page_mask;
 }
 
 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
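
Illustrative sketch, not part of the patch: a minimal, self-contained program showing
that the ROUND_UP form computes the same padded start address and truncated size as the
mask arithmetic it replaces. PAGE_SIZE, PAGE_MASK and the simplified ROUND_UP macro are
stand-ins for QEMU's qemu_real_host_page_size, qemu_real_host_page_mask and the ROUND_UP
macro from osdep.h; 4 KiB host pages and a uint64_t hwaddr are assumed.

/* Stand-ins for QEMU definitions; 4 KiB pages assumed. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))
/* Simplified ROUND_UP: d must be a power of two. */
#define ROUND_UP(n, d) (((n) + (d) - 1) & ~((d) - 1))

typedef uint64_t hwaddr;

/* Old form: derive the padding from the low bits, then bump *start by it. */
static hwaddr align_old(hwaddr ofs, hwaddr size, hwaddr *start)
{
    hwaddr delta;

    *start = ofs;
    delta = PAGE_SIZE - (*start & ~PAGE_MASK);
    delta &= ~PAGE_MASK;
    *start += delta;
    if (delta > size) {
        return 0;
    }
    size -= delta;
    size &= PAGE_MASK;
    if (*start & ~PAGE_MASK) {
        return 0;
    }
    return size;
}

/* New form: round *start up directly and derive the padding from it. */
static hwaddr align_new(hwaddr ofs, hwaddr size, hwaddr *start)
{
    hwaddr aligned = ROUND_UP(ofs, PAGE_SIZE);
    hwaddr delta = aligned - ofs;

    *start = aligned;
    if (delta > size) {
        return 0;
    }
    return (size - delta) & PAGE_MASK;
}

int main(void)
{
    hwaddr cases[][2] = {
        { 0x0000, 0x10000 },   /* aligned start, aligned size   */
        { 0x0123, 0x10000 },   /* unaligned start               */
        { 0x0123, 0x00800 },   /* sub-page size, padding > size */
        { 0x1000, 0x01234 },   /* aligned start, unaligned size */
    };

    for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
        hwaddr s1, s2;
        hwaddr r1 = align_old(cases[i][0], cases[i][1], &s1);
        hwaddr r2 = align_new(cases[i][0], cases[i][1], &s2);

        /* Both variants must agree on the padded start and truncated size. */
        assert(r1 == r2 && s1 == s2);
        printf("ofs=%#llx size=%#llx -> start=%#llx size=%#llx\n",
               (unsigned long long)cases[i][0], (unsigned long long)cases[i][1],
               (unsigned long long)s2, (unsigned long long)r2);
    }
    return 0;
}

For an unaligned offset such as 0x123 with size 0x10000, both variants pad the start to
0x1000 (delta 0xedd) and truncate the remaining size to 0xf000; when the padding exceeds
the section size, both return 0, which is what lets the patch drop the redundant
alignment check on *start.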