author     Anthony Liguori <aliguori@us.ibm.com>  2012-08-13 16:02:11 -0500
committer  Anthony Liguori <aliguori@us.ibm.com>  2012-08-13 16:02:11 -0500
commit     ac839ccd8c30fe5706cc43f00e056049d6e55377 (patch)
tree       d2e37ee8bd7750ce1dd01c4dd6cec21a87551682 /arch_init.c
parent     6a1f9d0c1fbf186d3d68cb2af43d047637c93072 (diff)
parent     dd051c7217eae04191169ac62f6ffb7531c8da32 (diff)
Merge remote-tracking branch 'quintela/migration-next-20120808' into staging
* quintela/migration-next-20120808:
  Restart optimization on stage3 update version
  Add XBZRLE statistics
  Add migration accounting for normal and duplicate pages
  Change total_time to total-time in MigrationStats
  Add migrate_set_cache_size command
  Add XBZRLE to ram_save_block and ram_save_live
  Add xbzrle_encode_buffer and xbzrle_decode_buffer functions
  Add uleb encoding/decoding functions
  Add cache handling functions
  Add XBZRLE documentation
  Add migrate-set-capabilities
  Add migration capabilities
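The branch being merged adds XBZRLE (XOR Based Zero Run Length Encoding) delta compression for live migration: each dirty RAM page is compared against a cached copy of its previous contents and only the changed runs are put on the wire. As a rough illustration of that encoding idea, here is a minimal self-contained sketch. The function name and the fixed-width record format are invented for this example; the real xbzrle_encode_buffer() added by the series (see "Add xbzrle_encode_buffer and xbzrle_decode_buffer functions" and "Add uleb encoding/decoding functions" above) encodes run lengths as ULEB128.

/*
 * Illustrative sketch only, not the encoder added by this series.
 * Walk the old (cached) and new copies of a page in parallel and emit
 * <zero-run length><non-zero-run length><changed bytes> records.
 * Run lengths are fixed-width 16-bit here for brevity.
 * Returns the encoded size, 0 for an unmodified page, or -1 when the
 * result would not fit in dlen (the "overflow" case that the
 * xbzrle_overflows counter below tracks).
 */
#include <stdint.h>
#include <string.h>

static int xbzrle_sketch_encode(const uint8_t *old_buf, const uint8_t *new_buf,
                                int slen, uint8_t *dst, int dlen)
{
    int i = 0, d = 0;

    while (i < slen) {
        /* run of bytes that did not change (XOR with the cached copy == 0) */
        int zrun = 0;
        while (i + zrun < slen && old_buf[i + zrun] == new_buf[i + zrun]) {
            zrun++;
        }
        i += zrun;
        if (i >= slen) {
            break;                      /* page ends in an unchanged run */
        }

        /* run of bytes that changed */
        int nzrun = 0;
        while (i + nzrun < slen && old_buf[i + nzrun] != new_buf[i + nzrun]) {
            nzrun++;
        }

        if (d + 4 + nzrun > dlen) {
            return -1;                  /* overflow: caller sends a full page */
        }
        dst[d++] = zrun & 0xff;         /* simplified little-endian lengths */
        dst[d++] = (zrun >> 8) & 0xff;
        dst[d++] = nzrun & 0xff;
        dst[d++] = (nzrun >> 8) & 0xff;
        memcpy(dst + d, new_buf + i, nzrun);
        d += nzrun;
        i += nzrun;
    }
    return d;
}

A return value of 0 corresponds to the "Skipping unmodified page" path in save_xbzrle_page() in the diff below, and -1 to the overflow path that falls back to sending the whole page.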
Diffstat (limited to 'arch_init.c')
-rw-r--r--  arch_init.c    246
1 file changed, 241 insertions, 5 deletions
diff --git a/arch_init.c b/arch_init.c
index 7b65c4888b..9b46bfcaa5 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -43,6 +43,7 @@
#include "hw/smbios.h"
#include "exec-memory.h"
#include "hw/pcspk.h"
+#include "qemu/page_cache.h"
#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
@@ -106,6 +107,7 @@ const uint32_t arch_type = QEMU_ARCH;
#define RAM_SAVE_FLAG_PAGE 0x08
#define RAM_SAVE_FLAG_EOS 0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
+#define RAM_SAVE_FLAG_XBZRLE 0x40
#ifdef __ALTIVEC__
#include <altivec.h>
@@ -173,6 +175,92 @@ static int is_dup_page(uint8_t *page)
return 1;
}
+/* struct containing the XBZRLE cache and the static buffers
+   used by the compression */
+static struct {
+ /* buffer used for XBZRLE encoding */
+ uint8_t *encoded_buf;
+ /* buffer for storing page content */
+ uint8_t *current_buf;
+ /* buffer used for XBZRLE decoding */
+ uint8_t *decoded_buf;
+ /* Cache for XBZRLE */
+ PageCache *cache;
+} XBZRLE = {
+ .encoded_buf = NULL,
+ .current_buf = NULL,
+ .decoded_buf = NULL,
+ .cache = NULL,
+};
+
+
+int64_t xbzrle_cache_resize(int64_t new_size)
+{
+ if (XBZRLE.cache != NULL) {
+ return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
+ TARGET_PAGE_SIZE;
+ }
+ return pow2floor(new_size);
+}
+
+/* accounting for migration statistics */
+typedef struct AccountingInfo {
+ uint64_t dup_pages;
+ uint64_t norm_pages;
+ uint64_t iterations;
+ uint64_t xbzrle_bytes;
+ uint64_t xbzrle_pages;
+ uint64_t xbzrle_cache_miss;
+ uint64_t xbzrle_overflows;
+} AccountingInfo;
+
+static AccountingInfo acct_info;
+
+static void acct_clear(void)
+{
+ memset(&acct_info, 0, sizeof(acct_info));
+}
+
+uint64_t dup_mig_bytes_transferred(void)
+{
+ return acct_info.dup_pages * TARGET_PAGE_SIZE;
+}
+
+uint64_t dup_mig_pages_transferred(void)
+{
+ return acct_info.dup_pages;
+}
+
+uint64_t norm_mig_bytes_transferred(void)
+{
+ return acct_info.norm_pages * TARGET_PAGE_SIZE;
+}
+
+uint64_t norm_mig_pages_transferred(void)
+{
+ return acct_info.norm_pages;
+}
+
+uint64_t xbzrle_mig_bytes_transferred(void)
+{
+ return acct_info.xbzrle_bytes;
+}
+
+uint64_t xbzrle_mig_pages_transferred(void)
+{
+ return acct_info.xbzrle_pages;
+}
+
+uint64_t xbzrle_mig_pages_cache_miss(void)
+{
+ return acct_info.xbzrle_cache_miss;
+}
+
+uint64_t xbzrle_mig_pages_overflow(void)
+{
+ return acct_info.xbzrle_overflows;
+}
+
static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
int cont, int flag)
{
@@ -185,6 +273,61 @@ static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
}
+#define ENCODING_FLAG_XBZRLE 0x1
+
+static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
+ ram_addr_t current_addr, RAMBlock *block,
+ ram_addr_t offset, int cont, bool last_stage)
+{
+ int encoded_len = 0, bytes_sent = -1;
+ uint8_t *prev_cached_page;
+
+ if (!cache_is_cached(XBZRLE.cache, current_addr)) {
+ if (!last_stage) {
+ cache_insert(XBZRLE.cache, current_addr,
+ g_memdup(current_data, TARGET_PAGE_SIZE));
+ }
+ acct_info.xbzrle_cache_miss++;
+ return -1;
+ }
+
+ prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
+
+    /* copy the page into a scratch buffer so it cannot change while we encode */
+ memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);
+
+ /* XBZRLE encoding (if there is no overflow) */
+ encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
+ TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
+ TARGET_PAGE_SIZE);
+ if (encoded_len == 0) {
+ DPRINTF("Skipping unmodified page\n");
+ return 0;
+ } else if (encoded_len == -1) {
+ DPRINTF("Overflow\n");
+ acct_info.xbzrle_overflows++;
+ /* update data in the cache */
+ memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
+ return -1;
+ }
+
+    /* update the cache so the next delta is computed against the data the destination now holds */
+ if (!last_stage) {
+ memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
+ }
+
+ /* Send XBZRLE based compressed page */
+ save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
+ qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
+ qemu_put_be16(f, encoded_len);
+ qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
+ bytes_sent = encoded_len + 1 + 2;
+ acct_info.xbzrle_pages++;
+ acct_info.xbzrle_bytes += bytes_sent;
+
+ return bytes_sent;
+}
+
static RAMBlock *last_block;
static ram_addr_t last_offset;
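For reference, the bytes that save_xbzrle_page() above puts on the wire, and that load_xbzrle() later in this diff reads back, are: the usual block header written by save_block_hdr() with RAM_SAVE_FLAG_XBZRLE set, then a one-byte sub-header (ENCODING_FLAG_XBZRLE), a big-endian 16-bit encoded length, and finally the encoded buffer itself. The "encoded_len + 1 + 2" accounting counts exactly the sub-header, the length field and the payload; the block header is left out, consistent with the other branches of ram_save_block(), which count one byte for a duplicate page and TARGET_PAGE_SIZE for a full page.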
@@ -196,12 +339,13 @@ static ram_addr_t last_offset;
* n: the number of bytes written otherwise
*/
-static int ram_save_block(QEMUFile *f)
+static int ram_save_block(QEMUFile *f, bool last_stage)
{
RAMBlock *block = last_block;
ram_addr_t offset = last_offset;
int bytes_sent = -1;
MemoryRegion *mr;
+ ram_addr_t current_addr;
if (!block)
block = QLIST_FIRST(&ram_list.blocks);
@@ -219,16 +363,31 @@ static int ram_save_block(QEMUFile *f)
p = memory_region_get_ram_ptr(mr) + offset;
if (is_dup_page(p)) {
+ acct_info.dup_pages++;
save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
qemu_put_byte(f, *p);
bytes_sent = 1;
- } else {
+ } else if (migrate_use_xbzrle()) {
+ current_addr = block->offset + offset;
+ bytes_sent = save_xbzrle_page(f, p, current_addr, block,
+ offset, cont, last_stage);
+ if (!last_stage) {
+ p = get_cached_data(XBZRLE.cache, current_addr);
+ }
+ }
+
+    /* page not sent yet (XBZRLE disabled, cache miss or overflow): send it in full */
+ if (bytes_sent == -1) {
save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
bytes_sent = TARGET_PAGE_SIZE;
+ acct_info.norm_pages++;
}
- break;
+ /* if page is unmodified, continue to the next */
+ if (bytes_sent != 0) {
+ break;
+ }
}
offset += TARGET_PAGE_SIZE;
@@ -306,6 +465,15 @@ static void sort_ram_list(void)
static void migration_end(void)
{
memory_global_dirty_log_stop();
+
+ if (migrate_use_xbzrle()) {
+ cache_fini(XBZRLE.cache);
+ g_free(XBZRLE.cache);
+ g_free(XBZRLE.encoded_buf);
+ g_free(XBZRLE.current_buf);
+ g_free(XBZRLE.decoded_buf);
+ XBZRLE.cache = NULL;
+ }
}
static void ram_migration_cancel(void *opaque)
@@ -325,6 +493,19 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
last_offset = 0;
sort_ram_list();
+ if (migrate_use_xbzrle()) {
+ XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
+ TARGET_PAGE_SIZE,
+ TARGET_PAGE_SIZE);
+ if (!XBZRLE.cache) {
+ DPRINTF("Error creating cache\n");
+ return -1;
+ }
+ XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
+ XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
+ acct_clear();
+ }
+
/* Make sure all dirty bits are set */
QLIST_FOREACH(block, &ram_list.blocks, next) {
for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
@@ -365,12 +546,13 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
while ((ret = qemu_file_rate_limit(f)) == 0) {
int bytes_sent;
- bytes_sent = ram_save_block(f);
+ bytes_sent = ram_save_block(f, false);
/* no more blocks to send */
if (bytes_sent < 0) {
break;
}
bytes_transferred += bytes_sent;
+ acct_info.iterations++;
/* we want to check in the 1st loop, just in case it was the 1st time
and we had to sync the dirty bitmap.
qemu_get_clock_ns() is a bit expensive, so we only check each some
@@ -426,7 +608,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
while (true) {
int bytes_sent;
- bytes_sent = ram_save_block(f);
+ bytes_sent = ram_save_block(f, true);
/* no more blocks to send */
if (bytes_sent < 0) {
break;
@@ -440,6 +622,47 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
return 0;
}
+static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
+{
+ int ret, rc = 0;
+ unsigned int xh_len;
+ int xh_flags;
+
+ if (!XBZRLE.decoded_buf) {
+ XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
+ }
+
+ /* extract RLE header */
+ xh_flags = qemu_get_byte(f);
+ xh_len = qemu_get_be16(f);
+
+ if (xh_flags != ENCODING_FLAG_XBZRLE) {
+ fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
+ return -1;
+ }
+
+ if (xh_len > TARGET_PAGE_SIZE) {
+ fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
+ return -1;
+ }
+ /* load data and decode */
+ qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);
+
+ /* decode RLE */
+ ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
+ TARGET_PAGE_SIZE);
+ if (ret == -1) {
+ fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
+ rc = -1;
+ } else if (ret > TARGET_PAGE_SIZE) {
+ fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
+ ret, TARGET_PAGE_SIZE);
+ abort();
+ }
+
+ return rc;
+}
+
static inline void *host_from_stream_offset(QEMUFile *f,
ram_addr_t offset,
int flags)
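On the receive side, load_xbzrle() above pulls the sub-header and the encoded buffer off the wire and lets xbzrle_decode_buffer() apply the delta in place on top of the page the destination already holds. Paired with the illustrative encoder shown after the commit message (same invented fixed-width record format, not the ULEB128 encoding the series really uses), the apply step would look roughly like this:

/*
 * Sketch of the decode/apply step for the invented record format used in
 * the encoder sketch near the top of this page (not the real
 * xbzrle_decode_buffer() this series adds).  'page' already holds the
 * previous contents, so unchanged runs are simply skipped and only the
 * changed runs are overwritten.  Returns the number of bytes produced,
 * or -1 on malformed input.
 */
#include <stdint.h>
#include <string.h>

static int xbzrle_sketch_decode(const uint8_t *src, int slen,
                                uint8_t *page, int page_len)
{
    int i = 0, d = 0;

    while (i + 4 <= slen) {
        int zrun  = src[i]     | (src[i + 1] << 8);
        int nzrun = src[i + 2] | (src[i + 3] << 8);
        i += 4;

        if (d + zrun + nzrun > page_len || i + nzrun > slen) {
            return -1;              /* record runs past the page or input */
        }
        d += zrun;                  /* unchanged bytes: keep what is there */
        memcpy(page + d, src + i, nzrun);
        d += nzrun;
        i += nzrun;
    }
    return d;
}

Because unchanged runs are only skipped, the destination page must already contain the previous contents, which is why ram_load() passes the live host pointer from host_from_stream_offset() straight to load_xbzrle().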
@@ -553,6 +776,19 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
+ } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
+ if (!migrate_use_xbzrle()) {
+ return -EINVAL;
+ }
+ void *host = host_from_stream_offset(f, addr, flags);
+ if (!host) {
+ return -EINVAL;
+ }
+
+ if (load_xbzrle(f, addr, host) < 0) {
+ ret = -EINVAL;
+ goto done;
+ }
}
error = qemu_file_get_error(f);
if (error) {
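For completeness: the "Add XBZRLE documentation" patch in this series describes how the feature is switched on from the monitor. Roughly, and subject to that documentation rather than this diff, the sequence on the source (and, for the capability, also on the destination, since the ram_load() hunk above rejects RAM_SAVE_FLAG_XBZRLE pages with -EINVAL when it is off) is:

(qemu) migrate_set_capability xbzrle on
(qemu) migrate_set_cache_size 64m
(qemu) migrate -d tcp:<dest-host>:<port>
(qemu) info migrate

The cache size should be chosen against the guest's working set; an undersized cache shows up as a growing cache-miss counter in the statistics added by this merge.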