Diffstat (limited to 'include')
-rw-r--r--  include/block/aes.h | 26
-rw-r--r--  include/block/aio.h | 240
-rw-r--r--  include/block/block.h | 448
-rw-r--r--  include/block/block_int.h | 366
-rw-r--r--  include/block/blockjob.h | 278
-rw-r--r--  include/block/coroutine.h | 211
-rw-r--r--  include/block/coroutine_int.h | 49
-rw-r--r--  include/block/nbd.h | 100
-rw-r--r--  include/block/thread-pool.h | 34
-rw-r--r--  include/bt/bt.h | 20
-rw-r--r--  include/char/char.h | 254
-rw-r--r--  include/config.h | 2
-rw-r--r--  include/disas/bfd.h | 483
-rw-r--r--  include/disas/disas.h | 43
-rw-r--r--  include/elf.h | 1308
-rw-r--r--  include/exec/address-spaces.h | 41
-rw-r--r--  include/exec/cpu-all.h | 533
-rw-r--r--  include/exec/cpu-common.h | 124
-rw-r--r--  include/exec/cpu-defs.h | 207
-rw-r--r--  include/exec/cputlb.h | 46
-rw-r--r--  include/exec/def-helper.h | 275
-rw-r--r--  include/exec/exec-all.h | 412
-rw-r--r--  include/exec/gdbstub.h | 53
-rw-r--r--  include/exec/gen-icount.h | 53
-rw-r--r--  include/exec/hwaddr.h | 24
-rw-r--r--  include/exec/ioport.h | 78
-rw-r--r--  include/exec/iorange.h | 31
-rw-r--r--  include/exec/memory-internal.h | 141
-rw-r--r--  include/exec/memory.h | 898
-rw-r--r--  include/exec/poison.h | 64
-rw-r--r--  include/exec/softmmu-semi.h | 77
-rw-r--r--  include/exec/softmmu_defs.h | 37
-rw-r--r--  include/exec/softmmu_exec.h | 163
-rw-r--r--  include/exec/softmmu_header.h | 213
-rw-r--r--  include/exec/softmmu_template.h | 354
-rw-r--r--  include/exec/spinlock.h | 49
-rw-r--r--  include/exec/user/abitypes.h | 36
-rw-r--r--  include/exec/user/thunk.h | 189
-rw-r--r--  include/fpu/softfloat.h | 641
-rw-r--r--  include/libfdt_env.h | 36
-rw-r--r--  include/migration/block.h | 23
-rw-r--r--  include/migration/migration.h | 138
-rw-r--r--  include/migration/page_cache.h (renamed from include/qemu/page_cache.h) | 0
-rw-r--r--  include/migration/qemu-file.h | 236
-rw-r--r--  include/migration/vmstate.h | 640
-rw-r--r--  include/monitor/monitor.h | 101
-rw-r--r--  include/monitor/readline.h | 55
-rw-r--r--  include/net/checksum.h | 29
-rw-r--r--  include/net/net.h | 175
-rw-r--r--  include/net/queue.h | 58
-rw-r--r--  include/net/slirp.h | 47
-rw-r--r--  include/net/tap.h | 67
-rw-r--r--  include/qapi/dealloc-visitor.h | 26
-rw-r--r--  include/qapi/error.h | 80
-rw-r--r--  include/qapi/opts-visitor.h | 31
-rw-r--r--  include/qapi/qmp-input-visitor.h | 29
-rw-r--r--  include/qapi/qmp-output-visitor.h | 28
-rw-r--r--  include/qapi/qmp/dispatch.h | 55
-rw-r--r--  include/qapi/qmp/json-lexer.h | 51
-rw-r--r--  include/qapi/qmp/json-parser.h | 24
-rw-r--r--  include/qapi/qmp/json-streamer.h | 40
-rw-r--r--  include/qapi/qmp/qbool.h | 29
-rw-r--r--  include/qapi/qmp/qdict.h | 67
-rw-r--r--  include/qapi/qmp/qerror.h | 252
-rw-r--r--  include/qapi/qmp/qfloat.h | 29
-rw-r--r--  include/qapi/qmp/qint.h | 28
-rw-r--r--  include/qapi/qmp/qjson.h | 29
-rw-r--r--  include/qapi/qmp/qlist.h | 64
-rw-r--r--  include/qapi/qmp/qobject.h | 112
-rw-r--r--  include/qapi/qmp/qstring.h | 35
-rw-r--r--  include/qapi/qmp/types.h | 25
-rw-r--r--  include/qapi/string-input-visitor.h | 25
-rw-r--r--  include/qapi/string-output-visitor.h | 26
-rw-r--r--  include/qapi/visitor-impl.h | 63
-rw-r--r--  include/qapi/visitor.h | 55
-rw-r--r--  include/qemu-common.h | 426
-rw-r--r--  include/qemu/acl.h | 74
-rw-r--r--  include/qemu/atomic.h | 67
-rw-r--r--  include/qemu/bitmap.h | 222
-rw-r--r--  include/qemu/bitops.h | 362
-rw-r--r--  include/qemu/bswap.h | 713
-rw-r--r--  include/qemu/cache-utils.h | 44
-rw-r--r--  include/qemu/compatfd.h | 44
-rw-r--r--  include/qemu/compiler.h | 58
-rw-r--r--  include/qemu/config-file.h | 30
-rw-r--r--  include/qemu/envlist.h | 22
-rw-r--r--  include/qemu/error-report.h | 43
-rw-r--r--  include/qemu/event_notifier.h | 46
-rw-r--r--  include/qemu/host-utils.h | 240
-rw-r--r--  include/qemu/int128.h | 116
-rw-r--r--  include/qemu/iov.h | 115
-rw-r--r--  include/qemu/log.h | 160
-rw-r--r--  include/qemu/main-loop.h | 306
-rw-r--r--  include/qemu/module.h | 40
-rw-r--r--  include/qemu/notify.h | 43
-rw-r--r--  include/qemu/option.h | 158
-rw-r--r--  include/qemu/option_int.h | 54
-rw-r--r--  include/qemu/osdep.h | 178
-rw-r--r--  include/qemu/queue.h | 414
-rw-r--r--  include/qemu/range.h | 29
-rw-r--r--  include/qemu/ratelimit.h | 2
-rw-r--r--  include/qemu/rng-random.h | 22
-rw-r--r--  include/qemu/rng.h | 93
-rw-r--r--  include/qemu/sockets.h | 77
-rw-r--r--  include/qemu/thread-posix.h | 28
-rw-r--r--  include/qemu/thread-win32.h | 29
-rw-r--r--  include/qemu/thread.h | 56
-rw-r--r--  include/qemu/timer.h | 310
-rw-r--r--  include/qemu/tls.h | 52
-rw-r--r--  include/qemu/typedefs.h | 61
-rw-r--r--  include/qemu/uri.h | 113
-rw-r--r--  include/qemu/xattr.h | 30
-rw-r--r--  include/qom/cpu.h (renamed from include/qemu/cpu.h) | 77
-rw-r--r--  include/qom/object.h (renamed from include/qemu/object.h) | 47
-rw-r--r--  include/qom/qom-qobject.h (renamed from include/qemu/qom-qobject.h) | 2
-rw-r--r--  include/sysemu/arch_init.h | 39
-rw-r--r--  include/sysemu/balloon.h | 29
-rw-r--r--  include/sysemu/blockdev.h | 69
-rw-r--r--  include/sysemu/cpus.h | 24
-rw-r--r--  include/sysemu/device_tree.h | 54
-rw-r--r--  include/sysemu/dma.h | 282
-rw-r--r--  include/sysemu/dump.h | 35
-rw-r--r--  include/sysemu/kvm.h | 280
-rw-r--r--  include/sysemu/memory_mapping.h | 64
-rw-r--r--  include/sysemu/os-posix.h | 51
-rw-r--r--  include/sysemu/os-win32.h | 99
-rw-r--r--  include/sysemu/qtest.h | 53
-rw-r--r--  include/sysemu/seccomp.h | 22
-rw-r--r--  include/sysemu/sysemu.h | 186
-rw-r--r--  include/sysemu/xen-mapcache.h | 56
-rw-r--r--  include/ui/console.h | 485
-rw-r--r--  include/ui/pixel_ops.h | 53
-rw-r--r--  include/ui/qemu-pixman.h | 39
-rw-r--r--  include/ui/qemu-spice.h | 83
-rw-r--r--  include/ui/spice-display.h | 134
135 files changed, 18533 insertions, 16 deletions
diff --git a/include/block/aes.h b/include/block/aes.h
new file mode 100644
index 0000000000..a0167eb7d5
--- /dev/null
+++ b/include/block/aes.h
@@ -0,0 +1,26 @@
+#ifndef QEMU_AES_H
+#define QEMU_AES_H
+
+#define AES_MAXNR 14
+#define AES_BLOCK_SIZE 16
+
+struct aes_key_st {
+ uint32_t rd_key[4 *(AES_MAXNR + 1)];
+ int rounds;
+};
+typedef struct aes_key_st AES_KEY;
+
+int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+ AES_KEY *key);
+int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+ AES_KEY *key);
+
+void AES_encrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+void AES_decrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+ const unsigned long length, const AES_KEY *key,
+ unsigned char *ivec, const int enc);
+
+#endif
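
A minimal usage sketch of the AES API declared above (not part of the patch); the function name and key/data values are placeholders, and <stdint.h> is included because the header relies on its includer for uint32_t:

#include <stdint.h>
#include <string.h>
#include "block/aes.h"

static void aes_roundtrip_example(void)
{
    static const unsigned char key_bytes[16] = { 0 };   /* all-zero 128-bit key */
    unsigned char in[AES_BLOCK_SIZE];
    unsigned char out[AES_BLOCK_SIZE];
    unsigned char back[AES_BLOCK_SIZE];
    AES_KEY enc_key, dec_key;

    memcpy(in, "block of 16 byte", AES_BLOCK_SIZE);      /* one full block */

    AES_set_encrypt_key(key_bytes, 128, &enc_key);
    AES_encrypt(in, out, &enc_key);                      /* encrypt one block */

    AES_set_decrypt_key(key_bytes, 128, &dec_key);
    AES_decrypt(out, back, &dec_key);                    /* back now equals in */
}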
diff --git a/include/block/aio.h b/include/block/aio.h
new file mode 100644
index 0000000000..0933f05878
--- /dev/null
+++ b/include/block/aio.h
@@ -0,0 +1,240 @@
+/*
+ * QEMU aio implementation
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_AIO_H
+#define QEMU_AIO_H
+
+#include "qemu-common.h"
+#include "qemu/queue.h"
+#include "qemu/event_notifier.h"
+
+typedef struct BlockDriverAIOCB BlockDriverAIOCB;
+typedef void BlockDriverCompletionFunc(void *opaque, int ret);
+
+typedef struct AIOCBInfo {
+ void (*cancel)(BlockDriverAIOCB *acb);
+ size_t aiocb_size;
+} AIOCBInfo;
+
+struct BlockDriverAIOCB {
+ const AIOCBInfo *aiocb_info;
+ BlockDriverState *bs;
+ BlockDriverCompletionFunc *cb;
+ void *opaque;
+};
+
+void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
+ BlockDriverCompletionFunc *cb, void *opaque);
+void qemu_aio_release(void *p);
+
+typedef struct AioHandler AioHandler;
+typedef void QEMUBHFunc(void *opaque);
+typedef void IOHandler(void *opaque);
+
+typedef struct AioContext {
+ GSource source;
+
+ /* The list of registered AIO handlers */
+ QLIST_HEAD(, AioHandler) aio_handlers;
+
+ /* This is a simple lock used to protect the aio_handlers list.
+ * Specifically, it's used to ensure that no callbacks are removed while
+ * we're walking and dispatching callbacks.
+ */
+ int walking_handlers;
+
+ /* Anchor of the list of Bottom Halves belonging to the context */
+ struct QEMUBH *first_bh;
+
+ /* A simple lock used to protect the first_bh list, and ensure that
+ * no callbacks are removed while we're walking and dispatching callbacks.
+ */
+ int walking_bh;
+
+ /* Used for aio_notify. */
+ EventNotifier notifier;
+} AioContext;
+
+/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
+typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
+
+/**
+ * aio_context_new: Allocate a new AioContext.
+ *
+ * An AioContext provides a mini event loop that can be waited on synchronously.
+ * It also provides bottom halves, a service to execute a piece of code
+ * as soon as possible.
+ */
+AioContext *aio_context_new(void);
+
+/**
+ * aio_context_ref:
+ * @ctx: The AioContext to operate on.
+ *
+ * Add a reference to an AioContext.
+ */
+void aio_context_ref(AioContext *ctx);
+
+/**
+ * aio_context_unref:
+ * @ctx: The AioContext to operate on.
+ *
+ * Drop a reference to an AioContext.
+ */
+void aio_context_unref(AioContext *ctx);
+
+/**
+ * aio_bh_new: Allocate a new bottom half structure.
+ *
+ * Bottom halves are lightweight callbacks whose invocation is guaranteed
+ * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
+ * is opaque and must be allocated prior to its use.
+ */
+QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+
+/**
+ * aio_notify: Force processing of pending events.
+ *
+ * Similar to signaling a condition variable, aio_notify forces
+ * aio_wait to exit, so that the next call will re-examine pending events.
+ * The caller of aio_notify will usually call aio_wait again very soon,
+ * or go through another iteration of the GLib main loop. Hence, aio_notify
+ * also has the side effect of recalculating the sets of file descriptors
+ * that the main loop waits for.
+ *
+ * Calling aio_notify is rarely necessary, because for example scheduling
+ * a bottom half calls it already.
+ */
+void aio_notify(AioContext *ctx);
+
+/**
+ * aio_bh_poll: Poll bottom halves for an AioContext.
+ *
+ * These are internal functions used by the QEMU main loop.
+ */
+int aio_bh_poll(AioContext *ctx);
+
+/**
+ * qemu_bh_schedule: Schedule a bottom half.
+ *
+ * Scheduling a bottom half interrupts the main loop and causes the
+ * execution of the callback that was passed to qemu_bh_new.
+ *
+ * Bottom halves that are scheduled from a bottom half handler are instantly
+ * invoked. This can create an infinite loop if a bottom half handler
+ * schedules itself.
+ *
+ * @bh: The bottom half to be scheduled.
+ */
+void qemu_bh_schedule(QEMUBH *bh);
+
+/**
+ * qemu_bh_cancel: Cancel execution of a bottom half.
+ *
+ * Canceling execution of a bottom half undoes the effect of calls to
+ * qemu_bh_schedule without freeing its resources yet. While cancellation
+ * itself is also wait-free and thread-safe, it can of course race with the
+ * loop that executes bottom halves unless you are holding the iothread
+ * mutex. This makes it mostly useless if you are not holding the mutex.
+ *
+ * @bh: The bottom half to be canceled.
+ */
+void qemu_bh_cancel(QEMUBH *bh);
+
+/**
+ * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
+ *
+ * Deleting a bottom half frees the memory that was allocated for it by
+ * qemu_bh_new. It also implies canceling the bottom half if it was
+ * scheduled.
+ *
+ * @bh: The bottom half to be deleted.
+ */
+void qemu_bh_delete(QEMUBH *bh);
+
+/* Return whether there are any pending callbacks from the GSource
+ * attached to the AioContext.
+ *
+ * This is used internally in the implementation of the GSource.
+ */
+bool aio_pending(AioContext *ctx);
+
+/* Make progress in completing pending AIO work. This can issue new AIO
+ * requests as a result of executing I/O completion or bottom half callbacks.
+ *
+ * If there is no pending AIO operation or completion (bottom half),
+ * return false. If there are pending bottom halves, return true.
+ *
+ * If there are no pending bottom halves, but there are pending AIO
+ * operations, it may not be possible to make any progress without
+ * blocking. If @blocking is true, this function will wait until one
+ * or more AIO events have completed, to ensure something has moved
+ * before returning.
+ *
+ * If @blocking is false, this function will also return false if the
+ * function cannot make any progress without blocking.
+ */
+bool aio_poll(AioContext *ctx, bool blocking);
+
+#ifdef CONFIG_POSIX
+/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
+typedef int (AioFlushHandler)(void *opaque);
+
+/* Register a file descriptor and associated callbacks. Behaves very similarly
+ * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
+ * be invoked when using qemu_aio_wait().
+ *
+ * Code that invokes AIO completion functions should rely on this function
+ * instead of qemu_set_fd_handler[2].
+ */
+void aio_set_fd_handler(AioContext *ctx,
+ int fd,
+ IOHandler *io_read,
+ IOHandler *io_write,
+ AioFlushHandler *io_flush,
+ void *opaque);
+#endif
+
+/* Register an event notifier and associated callbacks. Behaves very similarly
+ * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
+ * will be invoked when using qemu_aio_wait().
+ *
+ * Code that invokes AIO completion functions should rely on this function
+ * instead of event_notifier_set_handler.
+ */
+void aio_set_event_notifier(AioContext *ctx,
+ EventNotifier *notifier,
+ EventNotifierHandler *io_read,
+ AioFlushEventNotifierHandler *io_flush);
+
+/* Return a GSource that lets the main loop poll the file descriptors attached
+ * to this AioContext.
+ */
+GSource *aio_get_g_source(AioContext *ctx);
+
+/* Functions to operate on the main QEMU AioContext. */
+
+bool qemu_aio_wait(void);
+void qemu_aio_set_event_notifier(EventNotifier *notifier,
+ EventNotifierHandler *io_read,
+ AioFlushEventNotifierHandler *io_flush);
+
+#ifdef CONFIG_POSIX
+void qemu_aio_set_fd_handler(int fd,
+ IOHandler *io_read,
+ IOHandler *io_write,
+ AioFlushHandler *io_flush,
+ void *opaque);
+#endif
+
+#endif
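
The following sketch (illustrative only, not part of the patch) shows the typical lifecycle of a private AioContext and a bottom half using the functions declared above; the callback and variable names are made up:

#include "block/aio.h"

static bool bh_fired;

static void example_bh_cb(void *opaque)
{
    bh_fired = true;
}

static void run_one_bh(void)
{
    AioContext *ctx = aio_context_new();
    QEMUBH *bh = aio_bh_new(ctx, example_bh_cb, NULL);

    qemu_bh_schedule(bh);          /* scheduling also notifies the context */
    while (!bh_fired) {
        aio_poll(ctx, true);       /* blocking: dispatch BHs and AIO handlers */
    }

    qemu_bh_delete(bh);
    aio_context_unref(ctx);
}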
diff --git a/include/block/block.h b/include/block/block.h
new file mode 100644
index 0000000000..0719339231
--- /dev/null
+++ b/include/block/block.h
@@ -0,0 +1,448 @@
+#ifndef BLOCK_H
+#define BLOCK_H
+
+#include "block/aio.h"
+#include "qemu-common.h"
+#include "qemu/option.h"
+#include "block/coroutine.h"
+#include "qapi/qmp/qobject.h"
+#include "qapi-types.h"
+
+/* block.c */
+typedef struct BlockDriver BlockDriver;
+typedef struct BlockJob BlockJob;
+
+typedef struct BlockDriverInfo {
+ /* in bytes, 0 if irrelevant */
+ int cluster_size;
+ /* offset at which the VM state can be saved (0 if not possible) */
+ int64_t vm_state_offset;
+ bool is_dirty;
+} BlockDriverInfo;
+
+typedef struct BlockFragInfo {
+ uint64_t allocated_clusters;
+ uint64_t total_clusters;
+ uint64_t fragmented_clusters;
+} BlockFragInfo;
+
+typedef struct QEMUSnapshotInfo {
+ char id_str[128]; /* unique snapshot id */
+ /* the following fields are informative. They are not needed for
+ the consistency of the snapshot */
+ char name[256]; /* user chosen name */
+ uint64_t vm_state_size; /* VM state info size */
+ uint32_t date_sec; /* UTC date of the snapshot */
+ uint32_t date_nsec;
+ uint64_t vm_clock_nsec; /* VM clock relative to boot */
+} QEMUSnapshotInfo;
+
+/* Callbacks for block device models */
+typedef struct BlockDevOps {
+ /*
+ * Runs when virtual media changed (monitor commands eject, change)
+ * Argument load is true on load and false on eject.
+ * Beware: doesn't run when a host device's physical media
+ * changes. Sure would be useful if it did.
+ * Device models with removable media must implement this callback.
+ */
+ void (*change_media_cb)(void *opaque, bool load);
+ /*
+ * Runs when an eject request is issued from the monitor, the tray
+ * is closed, and the medium is locked.
+ * Device models that do not implement is_medium_locked will not need
+ * this callback. Device models that can lock the medium or tray might
+ * want to implement the callback and unlock the tray when "force" is
+ * true, even if they do not support eject requests.
+ */
+ void (*eject_request_cb)(void *opaque, bool force);
+ /*
+ * Is the virtual tray open?
+ * Device models implement this only when the device has a tray.
+ */
+ bool (*is_tray_open)(void *opaque);
+ /*
+ * Is the virtual medium locked into the device?
+ * Device models implement this only when the device has such a lock.
+ */
+ bool (*is_medium_locked)(void *opaque);
+ /*
+ * Runs when the size changed (e.g. monitor command block_resize)
+ */
+ void (*resize_cb)(void *opaque);
+} BlockDevOps;
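
As an illustrative sketch (not part of the patch), a device model with a tray might wire these callbacks up as follows; the state struct, callback bodies, and names are placeholders, and bdrv_set_dev_ops() is declared further down in this header:

typedef struct ExampleCDState {
    BlockDriverState *bs;
    bool tray_open;
} ExampleCDState;

static void example_change_media_cb(void *opaque, bool load)
{
    /* react to the media change: reread geometry, notify the guest, ... */
}

static bool example_is_tray_open(void *opaque)
{
    ExampleCDState *s = opaque;
    return s->tray_open;
}

static const BlockDevOps example_block_ops = {
    .change_media_cb = example_change_media_cb,
    .is_tray_open    = example_is_tray_open,
};

/* during device init: bdrv_set_dev_ops(s->bs, &example_block_ops, s); */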
+
+#define BDRV_O_RDWR 0x0002
+#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save writes in a snapshot */
+#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */
+#define BDRV_O_CACHE_WB 0x0040 /* use write-back caching */
+#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the thread pool */
+#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */
+#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */
+#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
+#define BDRV_O_INCOMING 0x0800 /* consistency hint for incoming migration */
+#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */
+#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */
+
+#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH)
+
+#define BDRV_SECTOR_BITS 9
+#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
+#define BDRV_SECTOR_MASK ~(BDRV_SECTOR_SIZE - 1)
+
+typedef enum {
+ BDRV_ACTION_REPORT, BDRV_ACTION_IGNORE, BDRV_ACTION_STOP
+} BlockErrorAction;
+
+typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;
+
+typedef struct BDRVReopenState {
+ BlockDriverState *bs;
+ int flags;
+ void *opaque;
+} BDRVReopenState;
+
+
+void bdrv_iostatus_enable(BlockDriverState *bs);
+void bdrv_iostatus_reset(BlockDriverState *bs);
+void bdrv_iostatus_disable(BlockDriverState *bs);
+bool bdrv_iostatus_is_enabled(const BlockDriverState *bs);
+void bdrv_iostatus_set_err(BlockDriverState *bs, int error);
+void bdrv_info_print(Monitor *mon, const QObject *data);
+void bdrv_info(Monitor *mon, QObject **ret_data);
+void bdrv_stats_print(Monitor *mon, const QObject *data);
+void bdrv_info_stats(Monitor *mon, QObject **ret_data);
+
+/* disk I/O throttling */
+void bdrv_io_limits_enable(BlockDriverState *bs);
+void bdrv_io_limits_disable(BlockDriverState *bs);
+bool bdrv_io_limits_enabled(BlockDriverState *bs);
+
+void bdrv_init(void);
+void bdrv_init_with_whitelist(void);
+BlockDriver *bdrv_find_protocol(const char *filename);
+BlockDriver *bdrv_find_format(const char *format_name);
+BlockDriver *bdrv_find_whitelisted_format(const char *format_name);
+int bdrv_create(BlockDriver *drv, const char* filename,
+ QEMUOptionParameter *options);
+int bdrv_create_file(const char* filename, QEMUOptionParameter *options);
+BlockDriverState *bdrv_new(const char *device_name);
+void bdrv_make_anon(BlockDriverState *bs);
+void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old);
+void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top);
+void bdrv_delete(BlockDriverState *bs);
+int bdrv_parse_cache_flags(const char *mode, int *flags);
+int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags);
+int bdrv_open_backing_file(BlockDriverState *bs);
+int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
+ BlockDriver *drv);
+BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
+ BlockDriverState *bs, int flags);
+int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
+int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp);
+int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
+ BlockReopenQueue *queue, Error **errp);
+void bdrv_reopen_commit(BDRVReopenState *reopen_state);
+void bdrv_reopen_abort(BDRVReopenState *reopen_state);
+void bdrv_close(BlockDriverState *bs);
+void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify);
+int bdrv_attach_dev(BlockDriverState *bs, void *dev);
+void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev);
+void bdrv_detach_dev(BlockDriverState *bs, void *dev);
+void *bdrv_get_attached_dev(BlockDriverState *bs);
+void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
+ void *opaque);
+void bdrv_dev_eject_request(BlockDriverState *bs, bool force);
+bool bdrv_dev_has_removable_media(BlockDriverState *bs);
+bool bdrv_dev_is_tray_open(BlockDriverState *bs);
+bool bdrv_dev_is_medium_locked(BlockDriverState *bs);
+int bdrv_read(BlockDriverState *bs, int64_t sector_num,
+ uint8_t *buf, int nb_sectors);
+int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
+ uint8_t *buf, int nb_sectors);
+int bdrv_write(BlockDriverState *bs, int64_t sector_num,
+ const uint8_t *buf, int nb_sectors);
+int bdrv_pread(BlockDriverState *bs, int64_t offset,
+ void *buf, int count);
+int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
+ const void *buf, int count);
+int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
+ const void *buf, int count);
+int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, QEMUIOVector *qiov);
+int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, QEMUIOVector *qiov);
+/*
+ * Efficiently zero a region of the disk image. Note that this is a regular
+ * I/O request like read or write and should have a reasonable size. This
+ * function is not suitable for zeroing the entire image in a single request
+ * because it may allocate memory for the entire region.
+ */
+int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors);
+int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, int *pnum);
+int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
+ BlockDriverState *base,
+ int64_t sector_num,
+ int nb_sectors, int *pnum);
+BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
+ const char *backing_file);
+int bdrv_get_backing_file_depth(BlockDriverState *bs);
+int bdrv_truncate(BlockDriverState *bs, int64_t offset);
+int64_t bdrv_getlength(BlockDriverState *bs);
+int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
+void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
+int bdrv_commit(BlockDriverState *bs);
+int bdrv_commit_all(void);
+int bdrv_change_backing_file(BlockDriverState *bs,
+ const char *backing_file, const char *backing_fmt);
+void bdrv_register(BlockDriver *bdrv);
+int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
+ BlockDriverState *base);
+BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
+ BlockDriverState *bs);
+BlockDriverState *bdrv_find_base(BlockDriverState *bs);
+
+
+typedef struct BdrvCheckResult {
+ int corruptions;
+ int leaks;
+ int check_errors;
+ int corruptions_fixed;
+ int leaks_fixed;
+ BlockFragInfo bfi;
+} BdrvCheckResult;
+
+typedef enum {
+ BDRV_FIX_LEAKS = 1,
+ BDRV_FIX_ERRORS = 2,
+} BdrvCheckMode;
+
+int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
+
+/* async block I/O */
+typedef void BlockDriverDirtyHandler(BlockDriverState *bs, int64_t sector,
+ int sector_num);
+BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
+ QEMUIOVector *iov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque);
+BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
+ QEMUIOVector *iov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque);
+BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
+ BlockDriverCompletionFunc *cb, void *opaque);
+BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque);
+void bdrv_aio_cancel(BlockDriverAIOCB *acb);
+
+typedef struct BlockRequest {
+ /* Fields to be filled by multiwrite caller */
+ int64_t sector;
+ int nb_sectors;
+ QEMUIOVector *qiov;
+ BlockDriverCompletionFunc *cb;
+ void *opaque;
+
+ /* Filled by multiwrite implementation */
+ int error;
+} BlockRequest;
+
+int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs,
+ int num_reqs);
+
+/* sg packet commands */
+int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
+BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
+ unsigned long int req, void *buf,
+ BlockDriverCompletionFunc *cb, void *opaque);
+
+/* Invalidate any cached metadata used by image formats */
+void bdrv_invalidate_cache(BlockDriverState *bs);
+void bdrv_invalidate_cache_all(void);
+
+void bdrv_clear_incoming_migration_all(void);
+
+/* Ensure contents are flushed to disk. */
+int bdrv_flush(BlockDriverState *bs);
+int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
+void bdrv_flush_all(void);
+void bdrv_close_all(void);
+void bdrv_drain_all(void);
+
+int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
+int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
+int bdrv_has_zero_init(BlockDriverState *bs);
+int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
+ int *pnum);
+
+void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
+ BlockdevOnError on_write_error);
+BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read);
+BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error);
+void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
+ bool is_read, int error);
+int bdrv_is_read_only(BlockDriverState *bs);
+int bdrv_is_sg(BlockDriverState *bs);
+int bdrv_enable_write_cache(BlockDriverState *bs);
+void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce);
+int bdrv_is_inserted(BlockDriverState *bs);
+int bdrv_media_changed(BlockDriverState *bs);
+void bdrv_lock_medium(BlockDriverState *bs, bool locked);
+void bdrv_eject(BlockDriverState *bs, bool eject_flag);
+const char *bdrv_get_format_name(BlockDriverState *bs);
+BlockDriverState *bdrv_find(const char *name);
+BlockDriverState *bdrv_next(BlockDriverState *bs);
+void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs),
+ void *opaque);
+int bdrv_is_encrypted(BlockDriverState *bs);
+int bdrv_key_required(BlockDriverState *bs);
+int bdrv_set_key(BlockDriverState *bs, const char *key);
+int bdrv_query_missing_keys(void);
+void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
+ void *opaque);
+const char *bdrv_get_device_name(BlockDriverState *bs);
+int bdrv_get_flags(BlockDriverState *bs);
+int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
+ const uint8_t *buf, int nb_sectors);
+int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
+
+const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
+void bdrv_get_backing_filename(BlockDriverState *bs,
+ char *filename, int filename_size);
+void bdrv_get_full_backing_filename(BlockDriverState *bs,
+ char *dest, size_t sz);
+BlockInfo *bdrv_query_info(BlockDriverState *s);
+BlockStats *bdrv_query_stats(const BlockDriverState *bs);
+int bdrv_can_snapshot(BlockDriverState *bs);
+int bdrv_is_snapshot(BlockDriverState *bs);
+BlockDriverState *bdrv_snapshots(void);
+int bdrv_snapshot_create(BlockDriverState *bs,
+ QEMUSnapshotInfo *sn_info);
+int bdrv_snapshot_goto(BlockDriverState *bs,
+ const char *snapshot_id);
+int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id);
+int bdrv_snapshot_list(BlockDriverState *bs,
+ QEMUSnapshotInfo **psn_info);
+int bdrv_snapshot_load_tmp(BlockDriverState *bs,
+ const char *snapshot_name);
+char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn);
+
+char *get_human_readable_size(char *buf, int buf_size, int64_t size);
+int path_is_absolute(const char *path);
+void path_combine(char *dest, int dest_size,
+ const char *base_path,
+ const char *filename);
+
+int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
+ int64_t pos, int size);
+
+int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
+ int64_t pos, int size);
+
+void bdrv_img_create(const char *filename, const char *fmt,
+ const char *base_filename, const char *base_fmt,
+ char *options, uint64_t img_size, int flags, Error **errp);
+
+void bdrv_set_buffer_alignment(BlockDriverState *bs, int align);
+void *qemu_blockalign(BlockDriverState *bs, size_t size);
+
+#define BDRV_SECTORS_PER_DIRTY_CHUNK 2048
+
+void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable);
+int bdrv_get_dirty(BlockDriverState *bs, int64_t sector);
+void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
+void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
+int64_t bdrv_get_next_dirty(BlockDriverState *bs, int64_t sector);
+int64_t bdrv_get_dirty_count(BlockDriverState *bs);
+
+void bdrv_enable_copy_on_read(BlockDriverState *bs);
+void bdrv_disable_copy_on_read(BlockDriverState *bs);
+
+void bdrv_set_in_use(BlockDriverState *bs, int in_use);
+int bdrv_in_use(BlockDriverState *bs);
+
+#ifdef CONFIG_LINUX_AIO
+int raw_get_aio_fd(BlockDriverState *bs);
+#else
+static inline int raw_get_aio_fd(BlockDriverState *bs)
+{
+ return -ENOTSUP;
+}
+#endif
+
+enum BlockAcctType {
+ BDRV_ACCT_READ,
+ BDRV_ACCT_WRITE,
+ BDRV_ACCT_FLUSH,
+ BDRV_MAX_IOTYPE,
+};
+
+typedef struct BlockAcctCookie {
+ int64_t bytes;
+ int64_t start_time_ns;
+ enum BlockAcctType type;
+} BlockAcctCookie;
+
+void bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
+ int64_t bytes, enum BlockAcctType type);
+void bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie);
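
A short sketch (not in the patch) of how device emulation typically brackets a request with the accounting calls above; the wrapper name is made up:

static int read_with_accounting(BlockDriverState *bs, int64_t sector_num,
                                uint8_t *buf, int nb_sectors)
{
    BlockAcctCookie cookie;
    int ret;

    bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bdrv_acct_done(bs, &cookie);
    return ret;
}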
+
+typedef enum {
+ BLKDBG_L1_UPDATE,
+
+ BLKDBG_L1_GROW_ALLOC_TABLE,
+ BLKDBG_L1_GROW_WRITE_TABLE,
+ BLKDBG_L1_GROW_ACTIVATE_TABLE,
+
+ BLKDBG_L2_LOAD,
+ BLKDBG_L2_UPDATE,
+ BLKDBG_L2_UPDATE_COMPRESSED,
+ BLKDBG_L2_ALLOC_COW_READ,
+ BLKDBG_L2_ALLOC_WRITE,
+
+ BLKDBG_READ_AIO,
+ BLKDBG_READ_BACKING_AIO,
+ BLKDBG_READ_COMPRESSED,
+
+ BLKDBG_WRITE_AIO,
+ BLKDBG_WRITE_COMPRESSED,
+
+ BLKDBG_VMSTATE_LOAD,
+ BLKDBG_VMSTATE_SAVE,
+
+ BLKDBG_COW_READ,
+ BLKDBG_COW_WRITE,
+
+ BLKDBG_REFTABLE_LOAD,
+ BLKDBG_REFTABLE_GROW,
+
+ BLKDBG_REFBLOCK_LOAD,
+ BLKDBG_REFBLOCK_UPDATE,
+ BLKDBG_REFBLOCK_UPDATE_PART,
+ BLKDBG_REFBLOCK_ALLOC,
+ BLKDBG_REFBLOCK_ALLOC_HOOKUP,
+ BLKDBG_REFBLOCK_ALLOC_WRITE,
+ BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS,
+ BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE,
+ BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE,
+
+ BLKDBG_CLUSTER_ALLOC,
+ BLKDBG_CLUSTER_ALLOC_BYTES,
+ BLKDBG_CLUSTER_FREE,
+
+ BLKDBG_EVENT_MAX,
+} BlkDebugEvent;
+
+#define BLKDBG_EVENT(bs, evt) bdrv_debug_event(bs, evt)
+void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);
+
+int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
+ const char *tag);
+int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
+bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
+
+#endif
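
A usage sketch of the core open/read/delete API declared in this header (illustrative only, error handling trimmed, names made up); passing a NULL driver asks block.c to probe the image format:

#include "block/block.h"

static int dump_first_sector(const char *filename)
{
    BlockDriverState *bs = bdrv_new("example");   /* device name is arbitrary */
    uint8_t buf[BDRV_SECTOR_SIZE];
    int ret;

    ret = bdrv_open(bs, filename, 0, NULL);       /* read-only, probed format */
    if (ret < 0) {
        bdrv_delete(bs);
        return ret;
    }

    ret = bdrv_read(bs, 0, buf, 1);               /* sector 0, one sector */
    bdrv_delete(bs);                              /* closes and frees bs */
    return ret;
}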
diff --git a/include/block/block_int.h b/include/block/block_int.h
new file mode 100644
index 0000000000..f83ffb8a08
--- /dev/null
+++ b/include/block/block_int.h
@@ -0,0 +1,366 @@
+/*
+ * QEMU System Emulator block driver
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCK_INT_H
+#define BLOCK_INT_H
+
+#include "block/block.h"
+#include "qemu/option.h"
+#include "qemu/queue.h"
+#include "block/coroutine.h"
+#include "qemu/timer.h"
+#include "qapi-types.h"
+#include "qapi/qmp/qerror.h"
+#include "monitor/monitor.h"
+
+#define BLOCK_FLAG_ENCRYPT 1
+#define BLOCK_FLAG_COMPAT6 4
+#define BLOCK_FLAG_LAZY_REFCOUNTS 8
+
+#define BLOCK_IO_LIMIT_READ 0
+#define BLOCK_IO_LIMIT_WRITE 1
+#define BLOCK_IO_LIMIT_TOTAL 2
+
+#define BLOCK_IO_SLICE_TIME 100000000
+#define NANOSECONDS_PER_SECOND 1000000000.0
+
+#define BLOCK_OPT_SIZE "size"
+#define BLOCK_OPT_ENCRYPT "encryption"
+#define BLOCK_OPT_COMPAT6 "compat6"
+#define BLOCK_OPT_BACKING_FILE "backing_file"
+#define BLOCK_OPT_BACKING_FMT "backing_fmt"
+#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
+#define BLOCK_OPT_TABLE_SIZE "table_size"
+#define BLOCK_OPT_PREALLOC "preallocation"
+#define BLOCK_OPT_SUBFMT "subformat"
+#define BLOCK_OPT_COMPAT_LEVEL "compat"
+#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
+
+typedef struct BdrvTrackedRequest BdrvTrackedRequest;
+
+typedef struct BlockIOLimit {
+ int64_t bps[3];
+ int64_t iops[3];
+} BlockIOLimit;
+
+typedef struct BlockIOBaseValue {
+ uint64_t bytes[2];
+ uint64_t ios[2];
+} BlockIOBaseValue;
+
+struct BlockDriver {
+ const char *format_name;
+ int instance_size;
+ int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
+ int (*bdrv_probe_device)(const char *filename);
+
+ /* For handling image reopen for split or non-split files */
+ int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
+ BlockReopenQueue *queue, Error **errp);
+ void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
+ void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
+
+ int (*bdrv_open)(BlockDriverState *bs, int flags);
+ int (*bdrv_file_open)(BlockDriverState *bs, const char *filename, int flags);
+ int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
+ uint8_t *buf, int nb_sectors);
+ int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
+ const uint8_t *buf, int nb_sectors);
+ void (*bdrv_close)(BlockDriverState *bs);
+ void (*bdrv_rebind)(BlockDriverState *bs);
+ int (*bdrv_create)(const char *filename, QEMUOptionParameter *options);
+ int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
+ int (*bdrv_make_empty)(BlockDriverState *bs);
+ /* aio */
+ BlockDriverAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque);
+ BlockDriverAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque);
+ BlockDriverAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
+ BlockDriverCompletionFunc *cb, void *opaque);
+ BlockDriverAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque);
+
+ int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+ int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+ /*
+ * Efficiently zero a region of the disk image. Typically an image format
+ * would use a compact metadata representation to implement this. This
+ * function pointer may be NULL and .bdrv_co_writev() will be called
+ * instead.
+ */
+ int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors);
+ int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors);
+ int coroutine_fn (*bdrv_co_is_allocated)(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, int *pnum);
+
+ /*
+ * Invalidate any cached meta-data.
+ */
+ void (*bdrv_invalidate_cache)(BlockDriverState *bs);
+
+ /*
+ * Flushes all data that was already written to the OS all the way down to
+ * the disk (for example raw-posix calls fsync()).
+ */
+ int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);
+
+ /*
+ * Flushes all internal caches to the OS. The data may still sit in a
+ * writeback cache of the host OS, but it will survive a crash of the qemu
+ * process.
+ */
+ int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);
+
+ const char *protocol_name;
+ int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);
+ int64_t (*bdrv_getlength)(BlockDriverState *bs);
+ int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);
+ int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
+ const uint8_t *buf, int nb_sectors);
+
+ int (*bdrv_snapshot_create)(BlockDriverState *bs,
+ QEMUSnapshotInfo *sn_info);
+ int (*bdrv_snapshot_goto)(BlockDriverState *bs,
+ const char *snapshot_id);
+ int (*bdrv_snapshot_delete)(BlockDriverState *bs, const char *snapshot_id);
+ int (*bdrv_snapshot_list)(BlockDriverState *bs,
+ QEMUSnapshotInfo **psn_info);
+ int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
+ const char *snapshot_name);
+ int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
+
+ int (*bdrv_save_vmstate)(BlockDriverState *bs, const uint8_t *buf,
+ int64_t pos, int size);
+ int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf,
+ int64_t pos, int size);
+
+ int (*bdrv_change_backing_file)(BlockDriverState *bs,
+ const char *backing_file, const char *backing_fmt);
+
+ /* removable device specific */
+ int (*bdrv_is_inserted)(BlockDriverState *bs);
+ int (*bdrv_media_changed)(BlockDriverState *bs);
+ void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
+ void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);
+
+ /* to control generic scsi devices */
+ int (*bdrv_ioctl)(BlockDriverState *bs, unsigned long int req, void *buf);
+ BlockDriverAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
+ unsigned long int req, void *buf,
+ BlockDriverCompletionFunc *cb, void *opaque);
+
+ /* List of options for creating images, terminated by name == NULL */
+ QEMUOptionParameter *create_options;
+
+
+ /*
+ * Returns 0 for completed check, -errno for internal errors.
+ * The check results are stored in result.
+ */
+ int (*bdrv_check)(BlockDriverState* bs, BdrvCheckResult *result,
+ BdrvCheckMode fix);
+
+ void (*bdrv_debug_event)(BlockDriverState *bs, BlkDebugEvent event);
+
+ /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */
+ int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
+ const char *tag);
+ int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
+ bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
+
+ /*
+ * Returns 1 if newly created images are guaranteed to contain only
+ * zeros, 0 otherwise.
+ */
+ int (*bdrv_has_zero_init)(BlockDriverState *bs);
+
+ QLIST_ENTRY(BlockDriver) list;
+};
+
+/*
+ * Note: the function bdrv_append() copies and swaps contents of
+ * BlockDriverStates, so if you add new fields to this struct, please
+ * inspect bdrv_append() to determine if the new fields need to be
+ * copied as well.
+ */
+struct BlockDriverState {
+ int64_t total_sectors; /* if we are reading a disk image, give its
+ size in sectors */
+ int read_only; /* if true, the media is read only */
+ int open_flags; /* flags used to open the file, re-used for re-open */
+ int encrypted; /* if true, the media is encrypted */
+ int valid_key; /* if true, a valid encryption key has been set */
+ int sg; /* if true, the device is a /dev/sg* */
+ int copy_on_read; /* if true, copy read backing sectors into image
+ note this is a reference count */
+
+ BlockDriver *drv; /* NULL means no media */
+ void *opaque;
+
+ void *dev; /* attached device model, if any */
+ /* TODO change to DeviceState when all users are qdevified */
+ const BlockDevOps *dev_ops;
+ void *dev_opaque;
+
+ char filename[1024];
+ char backing_file[1024]; /* if non-empty, the image is a diff of
+ this backing file */
+ char backing_format[16]; /* if non-zero and backing_file exists */
+ int is_temporary;
+
+ BlockDriverState *backing_hd;
+ BlockDriverState *file;
+
+ NotifierList close_notifiers;
+
+ /* number of in-flight copy-on-read requests */
+ unsigned int copy_on_read_in_flight;
+
+ /* the time for latest disk I/O */
+ int64_t slice_time;
+ int64_t slice_start;
+ int64_t slice_end;
+ BlockIOLimit io_limits;
+ BlockIOBaseValue io_base;
+ CoQueue throttled_reqs;
+ QEMUTimer *block_timer;
+ bool io_limits_enabled;
+
+ /* I/O stats (display with "info blockstats"). */
+ uint64_t nr_bytes[BDRV_MAX_IOTYPE];
+ uint64_t nr_ops[BDRV_MAX_IOTYPE];
+ uint64_t total_time_ns[BDRV_MAX_IOTYPE];
+ uint64_t wr_highest_sector;
+
+ /* Whether the disk can expand beyond total_sectors */
+ int growable;
+
+ /* the memory alignment required for the buffers handled by this driver */
+ int buffer_alignment;
+
+ /* do we need to tell the guest if we have a volatile write cache? */
+ int enable_write_cache;
+
+ /* NOTE: the following fields are only hints for real hardware
+ drivers. They are not used by the block driver */
+ BlockdevOnError on_read_error, on_write_error;
+ bool iostatus_enabled;
+ BlockDeviceIoStatus iostatus;
+ char device_name[32];
+ unsigned long *dirty_bitmap;
+ int64_t dirty_count;
+ int in_use; /* users other than guest access, eg. block migration */
+ QTAILQ_ENTRY(BlockDriverState) list;
+
+ QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
+
+ /* long-running background operation */
+ BlockJob *job;
+
+};
+
+int get_tmp_filename(char *filename, int size);
+
+void bdrv_set_io_limits(BlockDriverState *bs,
+ BlockIOLimit *io_limits);
+
+#ifdef _WIN32
+int is_windows_drive(const char *filename);
+#endif
+void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
+ enum MonitorEvent ev,
+ BlockErrorAction action, bool is_read);
+
+/**
+ * stream_start:
+ * @bs: Block device to operate on.
+ * @base: Block device that will become the new base, or %NULL to
+ * flatten the whole backing file chain onto @bs.
+ * @base_id: The file name that will be written to @bs as the new
+ * backing file if the job completes. Ignored if @base is %NULL.
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @errp: Error object.
+ *
+ * Start a streaming operation on @bs. Clusters that are unallocated
+ * in @bs, but allocated in any image between @base and @bs (both
+ * exclusive) will be written to @bs. At the end of a successful
+ * streaming job, the backing file of @bs will be changed to
+ * @base_id in the written image and to @base in the live BlockDriverState.
+ */
+void stream_start(BlockDriverState *bs, BlockDriverState *base,
+ const char *base_id, int64_t speed, BlockdevOnError on_error,
+ BlockDriverCompletionFunc *cb,
+ void *opaque, Error **errp);
+
+/**
+ * commit_start:
+ * @bs: Top Block device
+ * @base: Block device that will be written into, and become the new top
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @on_error: The action to take upon error.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @errp: Error object.
+ *
+ */
+void commit_start(BlockDriverState *bs, BlockDriverState *base,
+ BlockDriverState *top, int64_t speed,
+ BlockdevOnError on_error, BlockDriverCompletionFunc *cb,
+ void *opaque, Error **errp);
+
+/*
+ * mirror_start:
+ * @bs: Block device to operate on.
+ * @target: Block device to write to.
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @mode: Whether to collapse all images in the chain to the target.
+ * @on_source_error: The action to take upon error reading from the source.
+ * @on_target_error: The action to take upon error writing to the target.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @errp: Error object.
+ *
+ * Start a mirroring operation on @bs. Clusters that are allocated
+ * in @bs will be written to @target until the job is cancelled or
+ * manually completed. At the end of a successful mirroring job,
+ * @bs will be switched to read from @target.
+ */
+void mirror_start(BlockDriverState *bs, BlockDriverState *target,
+ int64_t speed, MirrorSyncMode mode,
+ BlockdevOnError on_source_error,
+ BlockdevOnError on_target_error,
+ BlockDriverCompletionFunc *cb,
+ void *opaque, Error **errp);
+
+#endif /* BLOCK_INT_H */
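
A schematic skeleton (not a real driver, not part of the patch) showing how a format driver fills in BlockDriver and registers itself; the names and state struct are placeholders, real drivers implement many more callbacks, and registration is normally triggered through block_init() from qemu/module.h:

typedef struct BDRVExampleState {
    uint64_t header_size;
} BDRVExampleState;

static int example_open(BlockDriverState *bs, int flags)
{
    BDRVExampleState *s = bs->opaque;
    s->header_size = 512;            /* parse the image header here */
    return 0;
}

static int coroutine_fn example_co_readv(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *qiov)
{
    /* translate guest sectors to file offsets and read via bs->file */
    return bdrv_co_readv(bs->file, sector_num, nb_sectors, qiov);
}

static BlockDriver bdrv_example = {
    .format_name   = "example",
    .instance_size = sizeof(BDRVExampleState),
    .bdrv_open     = example_open,
    .bdrv_co_readv = example_co_readv,
};

static void example_block_init(void)
{
    bdrv_register(&bdrv_example);    /* usually wrapped in block_init() */
}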
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
new file mode 100644
index 0000000000..c290d07bba
--- /dev/null
+++ b/include/block/blockjob.h
@@ -0,0 +1,278 @@
+/*
+ * Declarations for long-running block device operations
+ *
+ * Copyright (c) 2011 IBM Corp.
+ * Copyright (c) 2012 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef BLOCKJOB_H
+#define BLOCKJOB_H 1
+
+#include "block/block.h"
+
+/**
+ * BlockJobType:
+ *
+ * A class type for block job objects.
+ */
+typedef struct BlockJobType {
+ /** Derived BlockJob struct size */
+ size_t instance_size;
+
+ /** String describing the operation, part of query-block-jobs QMP API */
+ const char *job_type;
+
+ /** Optional callback for job types that support setting a speed limit */
+ void (*set_speed)(BlockJob *job, int64_t speed, Error **errp);
+
+ /** Optional callback for job types that need to forward I/O status reset */
+ void (*iostatus_reset)(BlockJob *job);
+
+ /**
+ * Optional callback for job types whose completion must be triggered
+ * manually.
+ */
+ void (*complete)(BlockJob *job, Error **errp);
+} BlockJobType;
+
+/**
+ * BlockJob:
+ *
+ * Long-running operation on a BlockDriverState.
+ */
+struct BlockJob {
+ /** The job type, including the job vtable. */
+ const BlockJobType *job_type;
+
+ /** The block device on which the job is operating. */
+ BlockDriverState *bs;
+
+ /**
+ * The coroutine that executes the job. If not NULL, it is
+ * reentered when busy is false and the job is cancelled.
+ */
+ Coroutine *co;
+
+ /**
+ * Set to true if the job should cancel itself. The flag must
+ * always be tested just before toggling the busy flag from false
+ * to true. After a job has been cancelled, it should only yield
+ * if #qemu_aio_wait will ("sooner or later") reenter the coroutine.
+ */
+ bool cancelled;
+
+ /**
+ * Set to true if the job is either paused, or will pause itself
+ * as soon as possible (if busy == true).
+ */
+ bool paused;
+
+ /**
+ * Set to false by the job while it is in a quiescent state, where
+ * no I/O is pending and the job has yielded on any condition
+ * that is not detected by #qemu_aio_wait, such as a timer.
+ */
+ bool busy;
+
+ /** Status that is published by the query-block-jobs QMP API */
+ BlockDeviceIoStatus iostatus;
+
+ /** Offset that is published by the query-block-jobs QMP API */
+ int64_t offset;
+
+ /** Length that is published by the query-block-jobs QMP API */
+ int64_t len;
+
+ /** Speed that was set with @block_job_set_speed. */
+ int64_t speed;
+
+ /** The completion function that will be called when the job completes. */
+ BlockDriverCompletionFunc *cb;
+
+ /** The opaque value that is passed to the completion function. */
+ void *opaque;
+};
+
+/**
+ * block_job_create:
+ * @job_type: The class object for the newly-created job.
+ * @bs: The block device on which the job operates.
+ * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
+ * @cb: Completion function for the job.
+ * @opaque: Opaque pointer value passed to @cb.
+ * @errp: Error object.
+ *
+ * Create a new long-running block device job and return it. The job
+ * will call @cb asynchronously when the job completes. Note that
+ * @bs may have been closed at the time the @cb it is called. If
+ * this is the case, the job may be reported as either cancelled or
+ * completed.
+ *
+ * This function is not part of the public job interface; it should be
+ * called from a wrapper that is specific to the job type.
+ */
+void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
+ int64_t speed, BlockDriverCompletionFunc *cb,
+ void *opaque, Error **errp);
+
+/**
+ * block_job_sleep_ns:
+ * @job: The job that calls the function.
+ * @clock: The clock to sleep on.
+ * @ns: How many nanoseconds to stop for.
+ *
+ * Put the job to sleep (assuming that it wasn't canceled) for @ns
+ * nanoseconds. Canceling the job will interrupt the wait immediately.
+ */
+void block_job_sleep_ns(BlockJob *job, QEMUClock *clock, int64_t ns);
+
+/**
+ * block_job_completed:
+ * @job: The job being completed.
+ * @ret: The status code.
+ *
+ * Call the completion function that was registered at creation time, and
+ * free @job.
+ */
+void block_job_completed(BlockJob *job, int ret);
+
+/**
+ * block_job_set_speed:
+ * @job: The job to set the speed for.
+ * @speed: The new value
+ * @errp: Error object.
+ *
+ * Set a rate-limiting parameter for the job; the actual meaning may
+ * vary depending on the job type.
+ */
+void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
+
+/**
+ * block_job_cancel:
+ * @job: The job to be canceled.
+ *
+ * Asynchronously cancel the specified job.
+ */
+void block_job_cancel(BlockJob *job);
+
+/**
+ * block_job_complete:
+ * @job: The job to be completed.
+ * @errp: Error object.
+ *
+ * Asynchronously complete the specified job.
+ */
+void block_job_complete(BlockJob *job, Error **errp);
+
+/**
+ * block_job_is_cancelled:
+ * @job: The job being queried.
+ *
+ * Returns whether the job is scheduled for cancellation.
+ */
+bool block_job_is_cancelled(BlockJob *job);
+
+/**
+ * block_job_query:
+ * @job: The job to get information about.
+ *
+ * Return information about a job.
+ */
+BlockJobInfo *block_job_query(BlockJob *job);
+
+/**
+ * block_job_pause:
+ * @job: The job to be paused.
+ *
+ * Asynchronously pause the specified job.
+ */
+void block_job_pause(BlockJob *job);
+
+/**
+ * block_job_resume:
+ * @job: The job to be resumed.
+ *
+ * Resume the specified job.
+ */
+void block_job_resume(BlockJob *job);
+
+/**
+ * qobject_from_block_job:
+ * @job: The job whose information is requested.
+ *
+ * Return a QDict corresponding to @job's query-block-jobs entry.
+ */
+QObject *qobject_from_block_job(BlockJob *job);
+
+/**
+ * block_job_ready:
+ * @job: The job which is now ready to complete.
+ *
+ * Send a BLOCK_JOB_READY event for the specified job.
+ */
+void block_job_ready(BlockJob *job);
+
+/**
+ * block_job_is_paused:
+ * @job: The job being queried.
+ *
+ * Returns whether the job is currently paused, or will pause
+ * as soon as it reaches a sleeping point.
+ */
+bool block_job_is_paused(BlockJob *job);
+
+/**
+ * block_job_cancel_sync:
+ * @job: The job to be canceled.
+ *
+ * Synchronously cancel the job. The completion callback is called
+ * before the function returns. The job may actually complete
+ * instead of canceling itself; the circumstances under which this
+ * happens depend on the kind of job that is active.
+ *
+ * Returns the return value from the job if the job actually completed
+ * during the call, or -ECANCELED if it was canceled.
+ */
+int block_job_cancel_sync(BlockJob *job);
+
+/**
+ * block_job_iostatus_reset:
+ * @job: The job whose I/O status should be reset.
+ *
+ * Reset I/O status on @job and on BlockDriverState objects it uses,
+ * other than job->bs.
+ */
+void block_job_iostatus_reset(BlockJob *job);
+
+/**
+ * block_job_error_action:
+ * @job: The job to signal an error for.
+ * @bs: The block device on which to set an I/O error.
+ * @on_err: The error action setting.
+ * @is_read: Whether the operation was a read.
+ * @error: The error that was reported.
+ *
+ * Report an I/O error for a block job and possibly stop the VM. Return the
+ * action that was selected based on @on_err and @error.
+ */
+BlockErrorAction block_job_error_action(BlockJob *job, BlockDriverState *bs,
+ BlockdevOnError on_err,
+ int is_read, int error);
+#endif
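
A sketch (not part of the patch) of how a job type typically glues into this API: define a BlockJobType, create the job, and drive it from a coroutine. The names are made up; rt_clock is the real-time QEMUClock from qemu/timer.h, and 100000000 ns is an arbitrary throttle interval:

typedef struct ExampleJob {
    BlockJob common;
    /* job-specific state goes here */
} ExampleJob;

static void coroutine_fn example_job_run(void *opaque)
{
    ExampleJob *s = opaque;

    while (!block_job_is_cancelled(&s->common)) {
        /* do one chunk of work, then yield so the main loop can run */
        block_job_sleep_ns(&s->common, rt_clock, 100000000);
    }
    block_job_completed(&s->common, 0);   /* calls cb and frees the job */
}

static BlockJobType example_job_type = {
    .instance_size = sizeof(ExampleJob),
    .job_type      = "example",
};

static void example_job_start(BlockDriverState *bs,
                              BlockDriverCompletionFunc *cb, void *opaque,
                              Error **errp)
{
    ExampleJob *s = block_job_create(&example_job_type, bs, 0, cb, opaque, errp);
    if (!s) {
        return;
    }
    s->common.co = qemu_coroutine_create(example_job_run);
    qemu_coroutine_enter(s->common.co, s);
}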
diff --git a/include/block/coroutine.h b/include/block/coroutine.h
new file mode 100644
index 0000000000..c31fae3f3c
--- /dev/null
+++ b/include/block/coroutine.h
@@ -0,0 +1,211 @@
+/*
+ * QEMU coroutine implementation
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ * Kevin Wolf <kwolf@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_COROUTINE_H
+#define QEMU_COROUTINE_H
+
+#include <stdbool.h>
+#include "qemu/queue.h"
+#include "qemu/timer.h"
+
+/**
+ * Coroutines are a mechanism for stack switching and can be used for
+ * cooperative userspace threading. These functions provide a simple but
+ * useful flavor of coroutines that is suitable for writing sequential code,
+ * rather than callbacks, for operations that need to give up control while
+ * waiting for events to complete.
+ *
+ * These functions are re-entrant and may be used outside the global mutex.
+ */
+
+/**
+ * Mark a function that executes in coroutine context
+ *
+ * Functions that execute in coroutine context cannot be called directly from
+ * normal functions. In the future it would be nice to enable compiler or
+ * static checker support for catching such errors. This annotation might make
+ * it possible and in the meantime it serves as documentation.
+ *
+ * For example:
+ *
+ * static void coroutine_fn foo(void) {
+ * ....
+ * }
+ */
+#define coroutine_fn
+
+typedef struct Coroutine Coroutine;
+
+/**
+ * Coroutine entry point
+ *
+ * When the coroutine is entered for the first time, opaque is passed in as an
+ * argument.
+ *
+ * When this function returns, the coroutine is destroyed automatically and
+ * execution continues in the caller who last entered the coroutine.
+ */
+typedef void coroutine_fn CoroutineEntry(void *opaque);
+
+/**
+ * Create a new coroutine
+ *
+ * Use qemu_coroutine_enter() to actually transfer control to the coroutine.
+ */
+Coroutine *qemu_coroutine_create(CoroutineEntry *entry);
+
+/**
+ * Transfer control to a coroutine
+ *
+ * The opaque argument is passed as the argument to the entry point when
+ * entering the coroutine for the first time. It is subsequently ignored.
+ */
+void qemu_coroutine_enter(Coroutine *coroutine, void *opaque);
+
+/**
+ * Transfer control back to a coroutine's caller
+ *
+ * This function does not return until the coroutine is re-entered using
+ * qemu_coroutine_enter().
+ */
+void coroutine_fn qemu_coroutine_yield(void);
+
+/**
+ * Get the currently executing coroutine
+ */
+Coroutine *coroutine_fn qemu_coroutine_self(void);
+
+/**
+ * Return whether or not currently inside a coroutine
+ *
+ * This can be used to write functions that work both when in coroutine context
+ * and when not in coroutine context. Note that such functions cannot use the
+ * coroutine_fn annotation since they work outside coroutine context.
+ */
+bool qemu_in_coroutine(void);
+
+
+
+/**
+ * CoQueues are a mechanism to queue coroutines in order to continue executing
+ * them later. They provide the fundamental primitives on which coroutine locks
+ * are built.
+ */
+typedef struct CoQueue {
+ QTAILQ_HEAD(, Coroutine) entries;
+} CoQueue;
+
+/**
+ * Initialise a CoQueue. This must be called before any other operation is used
+ * on the CoQueue.
+ */
+void qemu_co_queue_init(CoQueue *queue);
+
+/**
+ * Adds the current coroutine to the CoQueue and transfers control to the
+ * caller of the coroutine.
+ */
+void coroutine_fn qemu_co_queue_wait(CoQueue *queue);
+
+/**
+ * Adds the current coroutine to the head of the CoQueue and transfers control
+ * to the caller of the coroutine.
+ */
+void coroutine_fn qemu_co_queue_wait_insert_head(CoQueue *queue);
+
+/**
+ * Restarts the next coroutine in the CoQueue and removes it from the queue.
+ *
+ * Returns true if a coroutine was restarted, false if the queue is empty.
+ */
+bool qemu_co_queue_next(CoQueue *queue);
+
+/**
+ * Restarts all coroutines in the CoQueue and leaves the queue empty.
+ */
+void qemu_co_queue_restart_all(CoQueue *queue);
+
+/**
+ * Checks if the CoQueue is empty.
+ */
+bool qemu_co_queue_empty(CoQueue *queue);
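+
+/*
+ * A typical wait/wake pattern, as a sketch (s, s->queue and
+ * resource_available() are placeholders; the queue is assumed to have been
+ * set up with qemu_co_queue_init()):
+ *
+ *   In the coroutine that must wait:
+ *       while (!resource_available(s)) {
+ *           qemu_co_queue_wait(&s->queue);
+ *       }
+ *
+ *   Wherever the resource becomes available:
+ *       qemu_co_queue_next(&s->queue);
+ *
+ * Use qemu_co_queue_restart_all() instead of qemu_co_queue_next() when every
+ * waiter should be woken.
+ */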
+
+
+/**
+ * Provides a mutex that can be used to synchronise coroutines
+ */
+typedef struct CoMutex {
+ bool locked;
+ CoQueue queue;
+} CoMutex;
+
+/**
+ * Initialises a CoMutex. This must be called before any other operation is used
+ * on the CoMutex.
+ */
+void qemu_co_mutex_init(CoMutex *mutex);
+
+/**
+ * Locks the mutex. If the lock cannot be taken immediately, control is
+ * transferred to the caller of the current coroutine.
+ */
+void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex);
+
+/**
+ * Unlocks the mutex and schedules the next coroutine that was waiting for this
+ * lock to be run.
+ */
+void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex);
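+
+/*
+ * Typical usage from coroutine context, as a sketch (s->lock is assumed to
+ * have been initialised with qemu_co_mutex_init(); update_shared_state() is
+ * a placeholder):
+ *
+ *   qemu_co_mutex_lock(&s->lock);
+ *   update_shared_state(s);
+ *   qemu_co_mutex_unlock(&s->lock);
+ *
+ * If the mutex is already held, the lock call yields back to the caller of
+ * the current coroutine until the holder unlocks it.
+ */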
+
+typedef struct CoRwlock {
+ bool writer;
+ int reader;
+ CoQueue queue;
+} CoRwlock;
+
+/**
+ * Initialises a CoRwlock. This must be called before any other operation
+ * is used on the CoRwlock.
+ */
+void qemu_co_rwlock_init(CoRwlock *lock);
+
+/**
+ * Read locks the CoRwlock. If the lock cannot be taken immediately because
+ * of a parallel writer, control is transferred to the caller of the current
+ * coroutine.
+ */
+void qemu_co_rwlock_rdlock(CoRwlock *lock);
+
+/**
+ * Write locks the CoRwlock. If the lock cannot be taken immediately because
+ * of a parallel reader, control is transferred to the caller of the current
+ * coroutine.
+ */
+void qemu_co_rwlock_wrlock(CoRwlock *lock);
+
+/**
+ * Unlocks the read/write lock and schedules the next coroutine that was
+ * waiting for this lock to be run.
+ */
+void qemu_co_rwlock_unlock(CoRwlock *lock);
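+
+/*
+ * Sketch of the reader side (s->rwlock is assumed to have been initialised
+ * with qemu_co_rwlock_init(); read_shared_state() is a placeholder):
+ *
+ *   qemu_co_rwlock_rdlock(&s->rwlock);
+ *   read_shared_state(s);
+ *   qemu_co_rwlock_unlock(&s->rwlock);
+ *
+ * A writer uses qemu_co_rwlock_wrlock() in the same way and excludes both
+ * readers and other writers.
+ */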
+
+/**
+ * Yield the coroutine for a given duration
+ *
+ * Note this function uses timers and hence only works when a main loop is in
+ * use. See main-loop.h and do not use from qemu-tool programs.
+ */
+void coroutine_fn co_sleep_ns(QEMUClock *clock, int64_t ns);
+
+#endif /* QEMU_COROUTINE_H */
diff --git a/include/block/coroutine_int.h b/include/block/coroutine_int.h
new file mode 100644
index 0000000000..17eb71e723
--- /dev/null
+++ b/include/block/coroutine_int.h
@@ -0,0 +1,49 @@
+/*
+ * Coroutine internals
+ *
+ * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_COROUTINE_INT_H
+#define QEMU_COROUTINE_INT_H
+
+#include "qemu/queue.h"
+#include "block/coroutine.h"
+
+typedef enum {
+ COROUTINE_YIELD = 1,
+ COROUTINE_TERMINATE = 2,
+} CoroutineAction;
+
+struct Coroutine {
+ CoroutineEntry *entry;
+ void *entry_arg;
+ Coroutine *caller;
+ QSLIST_ENTRY(Coroutine) pool_next;
+ QTAILQ_ENTRY(Coroutine) co_queue_next;
+};
+
+Coroutine *qemu_coroutine_new(void);
+void qemu_coroutine_delete(Coroutine *co);
+CoroutineAction qemu_coroutine_switch(Coroutine *from, Coroutine *to,
+ CoroutineAction action);
+
+#endif
diff --git a/include/block/nbd.h b/include/block/nbd.h
new file mode 100644
index 0000000000..344f05b794
--- /dev/null
+++ b/include/block/nbd.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
+ *
+ * Network Block Device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef NBD_H
+#define NBD_H
+
+#include <sys/types.h>
+
+#include "qemu-common.h"
+
+struct nbd_request {
+ uint32_t magic;
+ uint32_t type;
+ uint64_t handle;
+ uint64_t from;
+ uint32_t len;
+} QEMU_PACKED;
+
+struct nbd_reply {
+ uint32_t magic;
+ uint32_t error;
+ uint64_t handle;
+} QEMU_PACKED;
+
+#define NBD_FLAG_HAS_FLAGS (1 << 0) /* Flags are there */
+#define NBD_FLAG_READ_ONLY (1 << 1) /* Device is read-only */
+#define NBD_FLAG_SEND_FLUSH (1 << 2) /* Send FLUSH */
+#define NBD_FLAG_SEND_FUA (1 << 3) /* Send FUA (Force Unit Access) */
+#define NBD_FLAG_ROTATIONAL (1 << 4) /* Use elevator algorithm - rotational media */
+#define NBD_FLAG_SEND_TRIM (1 << 5) /* Send TRIM (discard) */
+
+#define NBD_CMD_MASK_COMMAND 0x0000ffff
+#define NBD_CMD_FLAG_FUA (1 << 16)
+
+enum {
+ NBD_CMD_READ = 0,
+ NBD_CMD_WRITE = 1,
+ NBD_CMD_DISC = 2,
+ NBD_CMD_FLUSH = 3,
+ NBD_CMD_TRIM = 4
+};
+
+#define NBD_DEFAULT_PORT 10809
+
+#define NBD_BUFFER_SIZE (1024*1024)
+
+ssize_t nbd_wr_sync(int fd, void *buffer, size_t size, bool do_read);
+int tcp_socket_outgoing(const char *address, uint16_t port);
+int tcp_socket_incoming(const char *address, uint16_t port);
+int tcp_socket_outgoing_spec(const char *address_and_port);
+int tcp_socket_incoming_spec(const char *address_and_port);
+int unix_socket_outgoing(const char *path);
+int unix_socket_incoming(const char *path);
+
+int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags,
+ off_t *size, size_t *blocksize);
+int nbd_init(int fd, int csock, uint32_t flags, off_t size, size_t blocksize);
+ssize_t nbd_send_request(int csock, struct nbd_request *request);
+ssize_t nbd_receive_reply(int csock, struct nbd_reply *reply);
+int nbd_client(int fd);
+int nbd_disconnect(int fd);
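+
+/*
+ * Rough client-side sequence, as a sketch (error handling is omitted and the
+ * request parameters are illustrative only):
+ *
+ *   uint32_t flags;
+ *   off_t size;
+ *   size_t blocksize;
+ *   int csock = tcp_socket_outgoing_spec("localhost:10809");
+ *   nbd_receive_negotiate(csock, NULL, &flags, &size, &blocksize);
+ *
+ *   struct nbd_request request = {
+ *       .type = NBD_CMD_READ, .handle = 1, .from = 0, .len = 512,
+ *   };
+ *   nbd_send_request(csock, &request);
+ *
+ *   struct nbd_reply reply;
+ *   nbd_receive_reply(csock, &reply);
+ */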
+
+typedef struct NBDExport NBDExport;
+typedef struct NBDClient NBDClient;
+
+NBDExport *nbd_export_new(BlockDriverState *bs, off_t dev_offset,
+ off_t size, uint32_t nbdflags,
+ void (*close)(NBDExport *));
+void nbd_export_close(NBDExport *exp);
+void nbd_export_get(NBDExport *exp);
+void nbd_export_put(NBDExport *exp);
+
+BlockDriverState *nbd_export_get_blockdev(NBDExport *exp);
+
+NBDExport *nbd_export_find(const char *name);
+void nbd_export_set_name(NBDExport *exp, const char *name);
+void nbd_export_close_all(void);
+
+NBDClient *nbd_client_new(NBDExport *exp, int csock,
+ void (*close)(NBDClient *));
+void nbd_client_close(NBDClient *client);
+void nbd_client_get(NBDClient *client);
+void nbd_client_put(NBDClient *client);
+
+#endif
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
new file mode 100644
index 0000000000..200703e35f
--- /dev/null
+++ b/include/block/thread-pool.h
@@ -0,0 +1,34 @@
+/*
+ * QEMU block layer thread pool
+ *
+ * Copyright IBM, Corp. 2008
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#ifndef QEMU_THREAD_POOL_H
+#define QEMU_THREAD_POOL_H 1
+
+#include "qemu-common.h"
+#include "qemu/queue.h"
+#include "qemu/thread.h"
+#include "block/coroutine.h"
+#include "block/block_int.h"
+
+typedef int ThreadPoolFunc(void *opaque);
+
+BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg,
+ BlockDriverCompletionFunc *cb, void *opaque);
+int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg);
+void thread_pool_submit(ThreadPoolFunc *func, void *arg);
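+
+/*
+ * For example, offloading a blocking operation from a coroutine (a sketch;
+ * do_blocking_work(), perform_io() and req are placeholders):
+ *
+ *   static int do_blocking_work(void *opaque)
+ *   {
+ *       return perform_io(opaque);
+ *   }
+ *
+ *   int ret = thread_pool_submit_co(do_blocking_work, &req);
+ *
+ * The function runs in a worker thread; thread_pool_submit_co() yields the
+ * calling coroutine and re-enters it with the worker's return value once the
+ * work has completed.
+ */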
+
+#endif
diff --git a/include/bt/bt.h b/include/bt/bt.h
new file mode 100644
index 0000000000..2bc6d53cca
--- /dev/null
+++ b/include/bt/bt.h
@@ -0,0 +1,20 @@
+#ifndef BT_HOST_H
+#define BT_HOST_H
+
+/* BT HCI info */
+
+struct HCIInfo {
+ int (*bdaddr_set)(struct HCIInfo *hci, const uint8_t *bd_addr);
+ void (*cmd_send)(struct HCIInfo *hci, const uint8_t *data, int len);
+ void (*sco_send)(struct HCIInfo *hci, const uint8_t *data, int len);
+ void (*acl_send)(struct HCIInfo *hci, const uint8_t *data, int len);
+ void *opaque;
+ void (*evt_recv)(void *opaque, const uint8_t *data, int len);
+ void (*acl_recv)(void *opaque, const uint8_t *data, int len);
+};
+
+/* bt-host.c */
+struct HCIInfo *bt_host_hci(const char *id);
+struct HCIInfo *qemu_next_hci(void);
+
+#endif
diff --git a/include/char/char.h b/include/char/char.h
new file mode 100644
index 0000000000..baa5d035fd
--- /dev/null
+++ b/include/char/char.h
@@ -0,0 +1,254 @@
+#ifndef QEMU_CHAR_H
+#define QEMU_CHAR_H
+
+#include "qemu-common.h"
+#include "qemu/queue.h"
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "block/aio.h"
+#include "qapi/qmp/qobject.h"
+#include "qapi/qmp/qstring.h"
+#include "qemu/main-loop.h"
+
+/* character device */
+
+#define CHR_EVENT_BREAK 0 /* serial break char */
+#define CHR_EVENT_FOCUS 1 /* focus to this terminal (modal input needed) */
+#define CHR_EVENT_OPENED 2 /* new connection established */
+#define CHR_EVENT_MUX_IN 3 /* mux-focus was set to this terminal */
+#define CHR_EVENT_MUX_OUT 4 /* mux-focus will move on */
+#define CHR_EVENT_CLOSED 5 /* connection closed */
+
+
+#define CHR_IOCTL_SERIAL_SET_PARAMS 1
+typedef struct {
+ int speed;
+ int parity;
+ int data_bits;
+ int stop_bits;
+} QEMUSerialSetParams;
+
+#define CHR_IOCTL_SERIAL_SET_BREAK 2
+
+#define CHR_IOCTL_PP_READ_DATA 3
+#define CHR_IOCTL_PP_WRITE_DATA 4
+#define CHR_IOCTL_PP_READ_CONTROL 5
+#define CHR_IOCTL_PP_WRITE_CONTROL 6
+#define CHR_IOCTL_PP_READ_STATUS 7
+#define CHR_IOCTL_PP_EPP_READ_ADDR 8
+#define CHR_IOCTL_PP_EPP_READ 9
+#define CHR_IOCTL_PP_EPP_WRITE_ADDR 10
+#define CHR_IOCTL_PP_EPP_WRITE 11
+#define CHR_IOCTL_PP_DATA_DIR 12
+
+#define CHR_IOCTL_SERIAL_SET_TIOCM 13
+#define CHR_IOCTL_SERIAL_GET_TIOCM 14
+
+#define CHR_TIOCM_CTS 0x020
+#define CHR_TIOCM_CAR 0x040
+#define CHR_TIOCM_DSR 0x100
+#define CHR_TIOCM_RI 0x080
+#define CHR_TIOCM_DTR 0x002
+#define CHR_TIOCM_RTS 0x004
+
+typedef void IOEventHandler(void *opaque, int event);
+
+struct CharDriverState {
+ void (*init)(struct CharDriverState *s);
+ int (*chr_write)(struct CharDriverState *s, const uint8_t *buf, int len);
+ void (*chr_update_read_handler)(struct CharDriverState *s);
+ int (*chr_ioctl)(struct CharDriverState *s, int cmd, void *arg);
+ int (*get_msgfd)(struct CharDriverState *s);
+ int (*chr_add_client)(struct CharDriverState *chr, int fd);
+ IOEventHandler *chr_event;
+ IOCanReadHandler *chr_can_read;
+ IOReadHandler *chr_read;
+ void *handler_opaque;
+ void (*chr_close)(struct CharDriverState *chr);
+ void (*chr_accept_input)(struct CharDriverState *chr);
+ void (*chr_set_echo)(struct CharDriverState *chr, bool echo);
+ void (*chr_guest_open)(struct CharDriverState *chr);
+ void (*chr_guest_close)(struct CharDriverState *chr);
+ void *opaque;
+ QEMUTimer *open_timer;
+ char *label;
+ char *filename;
+ int opened;
+ int avail_connections;
+ QTAILQ_ENTRY(CharDriverState) next;
+};
+
+/**
+ * @qemu_chr_new_from_opts:
+ *
+ * Create a new character backend from a QemuOpts list.
+ *
+ * @opts see qemu-config.c for a list of valid options
+ * @init optional callback invoked on the new backend (may be NULL)
+ *
+ * Returns: a new character backend
+ */
+CharDriverState *qemu_chr_new_from_opts(QemuOpts *opts,
+ void (*init)(struct CharDriverState *s));
+
+/**
+ * @qemu_chr_new:
+ *
+ * Create a new character backend from a URI.
+ *
+ * @label the name of the backend
+ * @filename the URI
+ * @init optional callback invoked on the new backend (may be NULL)
+ *
+ * Returns: a new character backend
+ */
+CharDriverState *qemu_chr_new(const char *label, const char *filename,
+ void (*init)(struct CharDriverState *s));
+
+/**
+ * @qemu_chr_delete:
+ *
+ * Destroy a character backend.
+ */
+void qemu_chr_delete(CharDriverState *chr);
+
+/**
+ * @qemu_chr_fe_set_echo:
+ *
+ * Ask the backend to override its normal echo setting. This only really
+ * applies to the stdio backend and is used by the QMP server so that you
+ * can see what you type when entering QMP commands.
+ *
+ * @echo true to enable echo, false to disable echo
+ */
+void qemu_chr_fe_set_echo(struct CharDriverState *chr, bool echo);
+
+/**
+ * @qemu_chr_fe_open:
+ *
+ * Open a character backend. This function call is an indication that the
+ * front end is ready to begin doing I/O.
+ */
+void qemu_chr_fe_open(struct CharDriverState *chr);
+
+/**
+ * @qemu_chr_fe_close:
+ *
+ * Close a character backend. This function call indicates that the front end
+ * is no longer able to process I/O. To process I/O again, the front end will
+ * call @qemu_chr_fe_open.
+ */
+void qemu_chr_fe_close(struct CharDriverState *chr);
+
+/**
+ * @qemu_chr_fe_printf:
+ *
+ * Write to a character backend using a printf style interface.
+ *
+ * @fmt see #printf
+ */
+void qemu_chr_fe_printf(CharDriverState *s, const char *fmt, ...)
+ GCC_FMT_ATTR(2, 3);
+
+/**
+ * @qemu_chr_fe_write:
+ *
+ * Write data to a character backend from the front end. This function will
+ * send data from the front end to the back end.
+ *
+ * @buf the data
+ * @len the number of bytes to send
+ *
+ * Returns: the number of bytes consumed
+ */
+int qemu_chr_fe_write(CharDriverState *s, const uint8_t *buf, int len);
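+
+/*
+ * For example, a front end pushing a fixed message to its backend (a sketch;
+ * s->chr is a placeholder for the front end's CharDriverState pointer):
+ *
+ *   const uint8_t msg[] = "hello\r\n";
+ *   int written = qemu_chr_fe_write(s->chr, msg, sizeof(msg) - 1);
+ *
+ * The return value may be smaller than the requested length if the backend
+ * could not consume all of the data.
+ */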
+
+/**
+ * @qemu_chr_fe_ioctl:
+ *
+ * Issue a device specific ioctl to a backend.
+ *
+ * @cmd see CHR_IOCTL_*
+ * @arg the data associated with @cmd
+ *
+ * Returns: -ENOTSUP if @cmd is not supported by the backend; otherwise the
+ * return value depends on the semantics of @cmd
+ */
+int qemu_chr_fe_ioctl(CharDriverState *s, int cmd, void *arg);
+
+/**
+ * @qemu_chr_fe_get_msgfd:
+ *
+ * For backends capable of fd passing, return the latest file descriptor passed
+ * by a client.
+ *
+ * Returns: -1 if fd passing isn't supported or there is no pending file
+ * descriptor. If a file descriptor is returned, subsequent calls to
+ * this function will return -1 until a client sends a new file
+ * descriptor.
+ */
+int qemu_chr_fe_get_msgfd(CharDriverState *s);
+
+/**
+ * @qemu_chr_be_can_write:
+ *
+ * Determine how much data the front end can currently accept. This function
+ * returns the number of bytes the front end can accept. If it returns 0, the
+ * front end cannot receive data at the moment. The function must be polled
+ * to determine when data can be received.
+ *
+ * Returns: the number of bytes the front end can receive via @qemu_chr_be_write
+ */
+int qemu_chr_be_can_write(CharDriverState *s);
+
+/**
+ * @qemu_chr_be_write:
+ *
+ * Write data from the back end to the front end. Before issuing this call,
+ * the caller should call @qemu_chr_be_can_write to determine how much data
+ * the front end can currently accept.
+ *
+ * @buf the data to deliver to the front end
+ * @len the number of bytes to deliver
+ */
+void qemu_chr_be_write(CharDriverState *s, uint8_t *buf, int len);
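+
+/*
+ * A back end typically combines the two calls above like this (a sketch; buf
+ * and len describe data the back end has just received):
+ *
+ *   int avail = qemu_chr_be_can_write(s);
+ *   if (avail > 0) {
+ *       qemu_chr_be_write(s, buf, MIN(avail, len));
+ *   }
+ *
+ * Data that does not fit must be retried later, since there is no
+ * notification when the front end becomes able to receive again.
+ */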
+
+
+/**
+ * @qemu_chr_be_event:
+ *
+ * Send an event from the back end to the front end.
+ *
+ * @event the event to send
+ */
+void qemu_chr_be_event(CharDriverState *s, int event);
+
+void qemu_chr_add_handlers(CharDriverState *s,
+ IOCanReadHandler *fd_can_read,
+ IOReadHandler *fd_read,
+ IOEventHandler *fd_event,
+ void *opaque);
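+
+/*
+ * A front end usually wires up its receive path like this (a sketch;
+ * my_can_read(), my_read(), my_event(), my_state, handle_incoming() and
+ * reset_state() are placeholders):
+ *
+ *   static int my_can_read(void *opaque)
+ *   {
+ *       return MY_BUF_SIZE;
+ *   }
+ *
+ *   static void my_read(void *opaque, const uint8_t *buf, int size)
+ *   {
+ *       handle_incoming(opaque, buf, size);
+ *   }
+ *
+ *   static void my_event(void *opaque, int event)
+ *   {
+ *       if (event == CHR_EVENT_OPENED) {
+ *           reset_state(opaque);
+ *       }
+ *   }
+ *
+ *   qemu_chr_add_handlers(chr, my_can_read, my_read, my_event, my_state);
+ */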
+
+void qemu_chr_generic_open(CharDriverState *s);
+void qemu_chr_accept_input(CharDriverState *s);
+int qemu_chr_add_client(CharDriverState *s, int fd);
+void qemu_chr_info_print(Monitor *mon, const QObject *ret_data);
+void qemu_chr_info(Monitor *mon, QObject **ret_data);
+CharDriverState *qemu_chr_find(const char *name);
+
+QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename);
+
+/* add an eventfd to the qemu devices that are polled */
+CharDriverState *qemu_chr_open_eventfd(int eventfd);
+
+extern int term_escape_char;
+
+/* memory chardev */
+void qemu_chr_init_mem(CharDriverState *chr);
+void qemu_chr_close_mem(CharDriverState *chr);
+QString *qemu_chr_mem_to_qs(CharDriverState *chr);
+size_t qemu_chr_mem_osize(const CharDriverState *chr);
+
+CharDriverState *qemu_char_get_next_serial(void);
+
+#endif
diff --git a/include/config.h b/include/config.h
new file mode 100644
index 0000000000..e20f78696a
--- /dev/null
+++ b/include/config.h
@@ -0,0 +1,2 @@
+#include "config-host.h"
+#include "config-target.h"
diff --git a/include/disas/bfd.h b/include/disas/bfd.h
new file mode 100644
index 0000000000..3944b3cf7e
--- /dev/null
+++ b/include/disas/bfd.h
@@ -0,0 +1,483 @@
+/* Interface between the opcode library and its callers.
+ Written by Cygnus Support, 1993.
+
+ The opcode library (libopcodes.a) provides instruction decoders for
+ a large variety of instruction sets, callable with an identical
+ interface, for making instruction-processing programs more independent
+ of the instruction set being processed. */
+
+#ifndef DIS_ASM_H
+#define DIS_ASM_H
+
+#include "qemu-common.h"
+
+typedef void *PTR;
+typedef uint64_t bfd_vma;
+typedef int64_t bfd_signed_vma;
+typedef uint8_t bfd_byte;
+#define sprintf_vma(s,x) sprintf (s, "%0" PRIx64, x)
+#define snprintf_vma(s,ss,x) snprintf (s, ss, "%0" PRIx64, x)
+
+#define BFD64
+
+enum bfd_flavour {
+ bfd_target_unknown_flavour,
+ bfd_target_aout_flavour,
+ bfd_target_coff_flavour,
+ bfd_target_ecoff_flavour,
+ bfd_target_elf_flavour,
+ bfd_target_ieee_flavour,
+ bfd_target_nlm_flavour,
+ bfd_target_oasys_flavour,
+ bfd_target_tekhex_flavour,
+ bfd_target_srec_flavour,
+ bfd_target_ihex_flavour,
+ bfd_target_som_flavour,
+ bfd_target_os9k_flavour,
+ bfd_target_versados_flavour,
+ bfd_target_msdos_flavour,
+ bfd_target_evax_flavour
+};
+
+enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN };
+
+enum bfd_architecture
+{
+ bfd_arch_unknown, /* File arch not known */
+ bfd_arch_obscure, /* Arch known, not one of these */
+ bfd_arch_m68k, /* Motorola 68xxx */
+#define bfd_mach_m68000 1
+#define bfd_mach_m68008 2
+#define bfd_mach_m68010 3
+#define bfd_mach_m68020 4
+#define bfd_mach_m68030 5
+#define bfd_mach_m68040 6
+#define bfd_mach_m68060 7
+#define bfd_mach_cpu32 8
+#define bfd_mach_mcf5200 9
+#define bfd_mach_mcf5206e 10
+#define bfd_mach_mcf5307 11
+#define bfd_mach_mcf5407 12
+#define bfd_mach_mcf528x 13
+#define bfd_mach_mcfv4e 14
+#define bfd_mach_mcf521x 15
+#define bfd_mach_mcf5249 16
+#define bfd_mach_mcf547x 17
+#define bfd_mach_mcf548x 18
+ bfd_arch_vax, /* DEC Vax */
+ bfd_arch_i960, /* Intel 960 */
+ /* The order of the following is important.
+ lower number indicates a machine type that
+ only accepts a subset of the instructions
+ available to machines with higher numbers.
+ The exception is the "ca", which is
+ incompatible with all other machines except
+ "core". */
+
+#define bfd_mach_i960_core 1
+#define bfd_mach_i960_ka_sa 2
+#define bfd_mach_i960_kb_sb 3
+#define bfd_mach_i960_mc 4
+#define bfd_mach_i960_xa 5
+#define bfd_mach_i960_ca 6
+#define bfd_mach_i960_jx 7
+#define bfd_mach_i960_hx 8
+
+ bfd_arch_a29k, /* AMD 29000 */
+ bfd_arch_sparc, /* SPARC */
+#define bfd_mach_sparc 1
+/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */
+#define bfd_mach_sparc_sparclet 2
+#define bfd_mach_sparc_sparclite 3
+#define bfd_mach_sparc_v8plus 4
+#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_sparclite_le 6
+#define bfd_mach_sparc_v9 7
+#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */
+#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */
+/* Nonzero if MACH has the v9 instruction set. */
+#define bfd_mach_sparc_v9_p(mach) \
+ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \
+ && (mach) != bfd_mach_sparc_sparclite_le)
+ bfd_arch_mips, /* MIPS Rxxxx */
+#define bfd_mach_mips3000 3000
+#define bfd_mach_mips3900 3900
+#define bfd_mach_mips4000 4000
+#define bfd_mach_mips4010 4010
+#define bfd_mach_mips4100 4100
+#define bfd_mach_mips4300 4300
+#define bfd_mach_mips4400 4400
+#define bfd_mach_mips4600 4600
+#define bfd_mach_mips4650 4650
+#define bfd_mach_mips5000 5000
+#define bfd_mach_mips6000 6000
+#define bfd_mach_mips8000 8000
+#define bfd_mach_mips10000 10000
+#define bfd_mach_mips16 16
+ bfd_arch_i386, /* Intel 386 */
+#define bfd_mach_i386_i386 0
+#define bfd_mach_i386_i8086 1
+#define bfd_mach_i386_i386_intel_syntax 2
+#define bfd_mach_x86_64 3
+#define bfd_mach_x86_64_intel_syntax 4
+ bfd_arch_we32k, /* AT&T WE32xxx */
+ bfd_arch_tahoe, /* CCI/Harris Tahoe */
+ bfd_arch_i860, /* Intel 860 */
+ bfd_arch_romp, /* IBM ROMP PC/RT */
+ bfd_arch_alliant, /* Alliant */
+ bfd_arch_convex, /* Convex */
+ bfd_arch_m88k, /* Motorola 88xxx */
+ bfd_arch_pyramid, /* Pyramid Technology */
+ bfd_arch_h8300, /* Hitachi H8/300 */
+#define bfd_mach_h8300 1
+#define bfd_mach_h8300h 2
+#define bfd_mach_h8300s 3
+ bfd_arch_powerpc, /* PowerPC */
+#define bfd_mach_ppc 0
+#define bfd_mach_ppc64 1
+#define bfd_mach_ppc_403 403
+#define bfd_mach_ppc_403gc 4030
+#define bfd_mach_ppc_e500 500
+#define bfd_mach_ppc_505 505
+#define bfd_mach_ppc_601 601
+#define bfd_mach_ppc_602 602
+#define bfd_mach_ppc_603 603
+#define bfd_mach_ppc_ec603e 6031
+#define bfd_mach_ppc_604 604
+#define bfd_mach_ppc_620 620
+#define bfd_mach_ppc_630 630
+#define bfd_mach_ppc_750 750
+#define bfd_mach_ppc_860 860
+#define bfd_mach_ppc_a35 35
+#define bfd_mach_ppc_rs64ii 642
+#define bfd_mach_ppc_rs64iii 643
+#define bfd_mach_ppc_7400 7400
+ bfd_arch_rs6000, /* IBM RS/6000 */
+ bfd_arch_hppa, /* HP PA RISC */
+#define bfd_mach_hppa10 10
+#define bfd_mach_hppa11 11
+#define bfd_mach_hppa20 20
+#define bfd_mach_hppa20w 25
+ bfd_arch_d10v, /* Mitsubishi D10V */
+ bfd_arch_z8k, /* Zilog Z8000 */
+#define bfd_mach_z8001 1
+#define bfd_mach_z8002 2
+ bfd_arch_h8500, /* Hitachi H8/500 */
+ bfd_arch_sh, /* Hitachi SH */
+#define bfd_mach_sh 1
+#define bfd_mach_sh2 0x20
+#define bfd_mach_sh_dsp 0x2d
+#define bfd_mach_sh2a 0x2a
+#define bfd_mach_sh2a_nofpu 0x2b
+#define bfd_mach_sh2e 0x2e
+#define bfd_mach_sh3 0x30
+#define bfd_mach_sh3_nommu 0x31
+#define bfd_mach_sh3_dsp 0x3d
+#define bfd_mach_sh3e 0x3e
+#define bfd_mach_sh4 0x40
+#define bfd_mach_sh4_nofpu 0x41
+#define bfd_mach_sh4_nommu_nofpu 0x42
+#define bfd_mach_sh4a 0x4a
+#define bfd_mach_sh4a_nofpu 0x4b
+#define bfd_mach_sh4al_dsp 0x4d
+#define bfd_mach_sh5 0x50
+ bfd_arch_alpha, /* Dec Alpha */
+#define bfd_mach_alpha 1
+#define bfd_mach_alpha_ev4 0x10
+#define bfd_mach_alpha_ev5 0x20
+#define bfd_mach_alpha_ev6 0x30
+ bfd_arch_arm, /* Advanced Risc Machines ARM */
+#define bfd_mach_arm_unknown 0
+#define bfd_mach_arm_2 1
+#define bfd_mach_arm_2a 2
+#define bfd_mach_arm_3 3
+#define bfd_mach_arm_3M 4
+#define bfd_mach_arm_4 5
+#define bfd_mach_arm_4T 6
+#define bfd_mach_arm_5 7
+#define bfd_mach_arm_5T 8
+#define bfd_mach_arm_5TE 9
+#define bfd_mach_arm_XScale 10
+#define bfd_mach_arm_ep9312 11
+#define bfd_mach_arm_iWMMXt 12
+#define bfd_mach_arm_iWMMXt2 13
+ bfd_arch_ns32k, /* National Semiconductors ns32000 */
+ bfd_arch_w65, /* WDC 65816 */
+ bfd_arch_tic30, /* Texas Instruments TMS320C30 */
+ bfd_arch_v850, /* NEC V850 */
+#define bfd_mach_v850 0
+ bfd_arch_arc, /* Argonaut RISC Core */
+#define bfd_mach_arc_base 0
+ bfd_arch_m32r, /* Mitsubishi M32R/D */
+#define bfd_mach_m32r 0 /* backwards compatibility */
+ bfd_arch_mn10200, /* Matsushita MN10200 */
+ bfd_arch_mn10300, /* Matsushita MN10300 */
+ bfd_arch_cris, /* Axis CRIS */
+#define bfd_mach_cris_v0_v10 255
+#define bfd_mach_cris_v32 32
+#define bfd_mach_cris_v10_v32 1032
+ bfd_arch_microblaze, /* Xilinx MicroBlaze. */
+ bfd_arch_ia64, /* HP/Intel ia64 */
+#define bfd_mach_ia64_elf64 64
+#define bfd_mach_ia64_elf32 32
+ bfd_arch_lm32, /* Lattice Mico32 */
+#define bfd_mach_lm32 1
+ bfd_arch_last
+ };
+#define bfd_mach_s390_31 31
+#define bfd_mach_s390_64 64
+
+typedef struct symbol_cache_entry
+{
+ const char *name;
+ union
+ {
+ PTR p;
+ bfd_vma i;
+ } udata;
+} asymbol;
+
+enum dis_insn_type {
+ dis_noninsn, /* Not a valid instruction */
+ dis_nonbranch, /* Not a branch instruction */
+ dis_branch, /* Unconditional branch */
+ dis_condbranch, /* Conditional branch */
+ dis_jsr, /* Jump to subroutine */
+ dis_condjsr, /* Conditional jump to subroutine */
+ dis_dref, /* Data reference instruction */
+ dis_dref2 /* Two data references in instruction */
+};
+
+/* This struct is passed into the instruction decoding routine,
+ and is passed back out into each callback. The various fields are used
+ for conveying information from your main routine into your callbacks,
+ for passing information into the instruction decoders (such as the
+ addresses of the callback functions), or for passing information
+ back from the instruction decoders to their callers.
+
+ It must be initialized before it is first passed; this can be done
+ by hand, or using one of the initialization macros below. */
+
+typedef struct disassemble_info {
+ fprintf_function fprintf_func;
+ FILE *stream;
+ PTR application_data;
+
+ /* Target description. We could replace this with a pointer to the bfd,
+ but that would require one. There currently isn't any such requirement
+ so to avoid introducing one we record these explicitly. */
+ /* The bfd_flavour. This can be bfd_target_unknown_flavour. */
+ enum bfd_flavour flavour;
+ /* The bfd_arch value. */
+ enum bfd_architecture arch;
+ /* The bfd_mach value. */
+ unsigned long mach;
+ /* Endianness (for bi-endian cpus). Mono-endian cpus can ignore this. */
+ enum bfd_endian endian;
+
+ /* An array of pointers to symbols either at the location being disassembled
+ or at the start of the function being disassembled. The array is sorted
+ so that the first symbol is intended to be the one used. The others are
+ present for any misc. purposes. This is not set reliably, but if it is
+ not NULL, it is correct. */
+ asymbol **symbols;
+ /* Number of symbols in array. */
+ int num_symbols;
+
+ /* For use by the disassembler.
+ The top 16 bits are reserved for public use (and are documented here).
+ The bottom 16 bits are for the internal use of the disassembler. */
+ unsigned long flags;
+#define INSN_HAS_RELOC 0x80000000
+ PTR private_data;
+
+ /* Function used to get bytes to disassemble. MEMADDR is the
+ address of the stuff to be disassembled, MYADDR is the address to
+ put the bytes in, and LENGTH is the number of bytes to read.
+ INFO is a pointer to this struct.
+ Returns an errno value or 0 for success. */
+ int (*read_memory_func)
+ (bfd_vma memaddr, bfd_byte *myaddr, int length,
+ struct disassemble_info *info);
+
+ /* Function which should be called if we get an error that we can't
+ recover from. STATUS is the errno value from read_memory_func and
+ MEMADDR is the address that we were trying to read. INFO is a
+ pointer to this struct. */
+ void (*memory_error_func)
+ (int status, bfd_vma memaddr, struct disassemble_info *info);
+
+ /* Function called to print ADDR. */
+ void (*print_address_func)
+ (bfd_vma addr, struct disassemble_info *info);
+
+ /* Function called to determine if there is a symbol at the given ADDR.
+ If there is, the function returns 1, otherwise it returns 0.
+ This is used by ports which support an overlay manager where
+ the overlay number is held in the top part of an address. In
+ some circumstances we want to include the overlay number in the
+ address, (normally because there is a symbol associated with
+ that address), but sometimes we want to mask out the overlay bits. */
+ int (* symbol_at_address_func)
+ (bfd_vma addr, struct disassemble_info * info);
+
+ /* These are for buffer_read_memory. */
+ bfd_byte *buffer;
+ bfd_vma buffer_vma;
+ int buffer_length;
+
+ /* This variable may be set by the instruction decoder. It suggests
+ the number of bytes objdump should display on a single line. If
+ the instruction decoder sets this, it should always set it to
+ the same value in order to get reasonable looking output. */
+ int bytes_per_line;
+
+ /* the next two variables control the way objdump displays the raw data */
+ /* For example, if bytes_per_line is 8 and bytes_per_chunk is 4, the */
+ /* output will look like this:
+ 00: 00000000 00000000
+ with the chunks displayed according to "display_endian". */
+ int bytes_per_chunk;
+ enum bfd_endian display_endian;
+
+ /* Results from instruction decoders. Not all decoders yet support
+ this information. This info is set each time an instruction is
+ decoded, and is only valid for the last such instruction.
+
+ To determine whether this decoder supports this information, set
+ insn_info_valid to 0, decode an instruction, then check it. */
+
+ char insn_info_valid; /* Branch info has been set. */
+ char branch_delay_insns; /* How many sequential insn's will run before
+ a branch takes effect. (0 = normal) */
+ char data_size; /* Size of data reference in insn, in bytes */
+ enum dis_insn_type insn_type; /* Type of instruction */
+ bfd_vma target; /* Target address of branch or dref, if known;
+ zero if unknown. */
+ bfd_vma target2; /* Second target address for dref2 */
+
+ /* Command line options specific to the target disassembler. */
+ char * disassembler_options;
+
+} disassemble_info;
+
+
+/* Standard disassemblers. Disassemble one instruction at the given
+ target address. Return number of bytes processed. */
+typedef int (*disassembler_ftype) (bfd_vma, disassemble_info *);
+
+int print_insn_tci(bfd_vma, disassemble_info*);
+int print_insn_big_mips (bfd_vma, disassemble_info*);
+int print_insn_little_mips (bfd_vma, disassemble_info*);
+int print_insn_i386 (bfd_vma, disassemble_info*);
+int print_insn_m68k (bfd_vma, disassemble_info*);
+int print_insn_z8001 (bfd_vma, disassemble_info*);
+int print_insn_z8002 (bfd_vma, disassemble_info*);
+int print_insn_h8300 (bfd_vma, disassemble_info*);
+int print_insn_h8300h (bfd_vma, disassemble_info*);
+int print_insn_h8300s (bfd_vma, disassemble_info*);
+int print_insn_h8500 (bfd_vma, disassemble_info*);
+int print_insn_alpha (bfd_vma, disassemble_info*);
+disassembler_ftype arc_get_disassembler (int, int);
+int print_insn_arm (bfd_vma, disassemble_info*);
+int print_insn_sparc (bfd_vma, disassemble_info*);
+int print_insn_big_a29k (bfd_vma, disassemble_info*);
+int print_insn_little_a29k (bfd_vma, disassemble_info*);
+int print_insn_i960 (bfd_vma, disassemble_info*);
+int print_insn_sh (bfd_vma, disassemble_info*);
+int print_insn_shl (bfd_vma, disassemble_info*);
+int print_insn_hppa (bfd_vma, disassemble_info*);
+int print_insn_m32r (bfd_vma, disassemble_info*);
+int print_insn_m88k (bfd_vma, disassemble_info*);
+int print_insn_mn10200 (bfd_vma, disassemble_info*);
+int print_insn_mn10300 (bfd_vma, disassemble_info*);
+int print_insn_ns32k (bfd_vma, disassemble_info*);
+int print_insn_big_powerpc (bfd_vma, disassemble_info*);
+int print_insn_little_powerpc (bfd_vma, disassemble_info*);
+int print_insn_rs6000 (bfd_vma, disassemble_info*);
+int print_insn_w65 (bfd_vma, disassemble_info*);
+int print_insn_d10v (bfd_vma, disassemble_info*);
+int print_insn_v850 (bfd_vma, disassemble_info*);
+int print_insn_tic30 (bfd_vma, disassemble_info*);
+int print_insn_ppc (bfd_vma, disassemble_info*);
+int print_insn_s390 (bfd_vma, disassemble_info*);
+int print_insn_crisv32 (bfd_vma, disassemble_info*);
+int print_insn_crisv10 (bfd_vma, disassemble_info*);
+int print_insn_microblaze (bfd_vma, disassemble_info*);
+int print_insn_ia64 (bfd_vma, disassemble_info*);
+int print_insn_lm32 (bfd_vma, disassemble_info*);
+
+#if 0
+/* Fetch the disassembler for a given BFD, if that support is available. */
+disassembler_ftype disassembler(bfd *);
+#endif
+
+
+/* This block of definitions is for particular callers who read instructions
+ into a buffer before calling the instruction decoder. */
+
+/* Here is a function which callers may wish to use for read_memory_func.
+ It gets bytes from a buffer. */
+int buffer_read_memory(bfd_vma, bfd_byte *, int, struct disassemble_info *);
+
+/* This function goes with buffer_read_memory.
+ It prints a message using info->fprintf_func and info->stream. */
+void perror_memory(int, bfd_vma, struct disassemble_info *);
+
+
+/* Just print the address in hex. This is included for completeness even
+ though both GDB and objdump provide their own (to print symbolic
+ addresses). */
+void generic_print_address(bfd_vma, struct disassemble_info *);
+
+/* Always true. */
+int generic_symbol_at_address(bfd_vma, struct disassemble_info *);
+
+/* Macro to initialize a disassemble_info struct. This should be called
+ by all applications creating such a struct. */
+#define INIT_DISASSEMBLE_INFO(INFO, STREAM, FPRINTF_FUNC) \
+ (INFO).flavour = bfd_target_unknown_flavour, \
+ (INFO).arch = bfd_arch_unknown, \
+ (INFO).mach = 0, \
+ (INFO).endian = BFD_ENDIAN_UNKNOWN, \
+ INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC)
+
+/* Call this macro to initialize only the internal variables for the
+ disassembler. Architecture dependent things such as byte order, or machine
+ variant are not touched by this macro. This makes things much easier for
+ GDB which must initialize these things separately. */
+
+#define INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) \
+ (INFO).fprintf_func = (FPRINTF_FUNC), \
+ (INFO).stream = (STREAM), \
+ (INFO).symbols = NULL, \
+ (INFO).num_symbols = 0, \
+ (INFO).private_data = NULL, \
+ (INFO).buffer = NULL, \
+ (INFO).buffer_vma = 0, \
+ (INFO).buffer_length = 0, \
+ (INFO).read_memory_func = buffer_read_memory, \
+ (INFO).memory_error_func = perror_memory, \
+ (INFO).print_address_func = generic_print_address, \
+ (INFO).symbol_at_address_func = generic_symbol_at_address, \
+ (INFO).flags = 0, \
+ (INFO).bytes_per_line = 0, \
+ (INFO).bytes_per_chunk = 0, \
+ (INFO).display_endian = BFD_ENDIAN_UNKNOWN, \
+ (INFO).disassembler_options = NULL, \
+ (INFO).insn_info_valid = 0
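+
+/* A minimal caller, as a sketch (code_buf, code_addr and code_len are
+   placeholders describing the bytes to decode):
+
+       disassemble_info info;
+       INIT_DISASSEMBLE_INFO(info, stderr, fprintf);
+       info.buffer = code_buf;
+       info.buffer_vma = code_addr;
+       info.buffer_length = code_len;
+       int nbytes = print_insn_i386(code_addr, &info);
+
+   print_insn_i386() decodes one instruction at code_addr using the default
+   buffer_read_memory() callback and returns the number of bytes consumed. */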
+
+#define _(x) x
+#define ATTRIBUTE_UNUSED __attribute__((unused))
+
+/* from libbfd */
+
+bfd_vma bfd_getl64 (const bfd_byte *addr);
+bfd_vma bfd_getl32 (const bfd_byte *addr);
+bfd_vma bfd_getb32 (const bfd_byte *addr);
+bfd_vma bfd_getl16 (const bfd_byte *addr);
+bfd_vma bfd_getb16 (const bfd_byte *addr);
+typedef bool bfd_boolean;
+
+#endif /* ! defined (DIS_ASM_H) */
diff --git a/include/disas/disas.h b/include/disas/disas.h
new file mode 100644
index 0000000000..c13ca9a3a4
--- /dev/null
+++ b/include/disas/disas.h
@@ -0,0 +1,43 @@
+#ifndef _QEMU_DISAS_H
+#define _QEMU_DISAS_H
+
+#include "qemu-common.h"
+
+#ifdef NEED_CPU_H
+/* Disassemble this for me please... (debugging). */
+void disas(FILE *out, void *code, unsigned long size);
+void target_disas(FILE *out, CPUArchState *env, target_ulong code,
+ target_ulong size, int flags);
+
+void monitor_disas(Monitor *mon, CPUArchState *env,
+ target_ulong pc, int nb_insn, int is_physical, int flags);
+
+/* Look up symbol for debugging purposes. Returns "" if unknown. */
+const char *lookup_symbol(target_ulong orig_addr);
+#endif
+
+struct syminfo;
+struct elf32_sym;
+struct elf64_sym;
+
+#if defined(CONFIG_USER_ONLY)
+typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_ulong orig_addr);
+#else
+typedef const char *(*lookup_symbol_t)(struct syminfo *s, hwaddr orig_addr);
+#endif
+
+struct syminfo {
+ lookup_symbol_t lookup_symbol;
+ unsigned int disas_num_syms;
+ union {
+ struct elf32_sym *elf32;
+ struct elf64_sym *elf64;
+ } disas_symtab;
+ const char *disas_strtab;
+ struct syminfo *next;
+};
+
+/* Filled in by elfload.c. Simplistic, but will do for now. */
+extern struct syminfo *syminfos;
+
+#endif /* _QEMU_DISAS_H */
diff --git a/include/elf.h b/include/elf.h
new file mode 100644
index 0000000000..a21ea535bd
--- /dev/null
+++ b/include/elf.h
@@ -0,0 +1,1308 @@
+#ifndef _QEMU_ELF_H
+#define _QEMU_ELF_H
+
+#include <inttypes.h>
+
+/* 32-bit ELF base types. */
+typedef uint32_t Elf32_Addr;
+typedef uint16_t Elf32_Half;
+typedef uint32_t Elf32_Off;
+typedef int32_t Elf32_Sword;
+typedef uint32_t Elf32_Word;
+
+/* 64-bit ELF base types. */
+typedef uint64_t Elf64_Addr;
+typedef uint16_t Elf64_Half;
+typedef int16_t Elf64_SHalf;
+typedef uint64_t Elf64_Off;
+typedef int32_t Elf64_Sword;
+typedef uint32_t Elf64_Word;
+typedef uint64_t Elf64_Xword;
+typedef int64_t Elf64_Sxword;
+
+/* These constants are for the segment types stored in the image headers */
+#define PT_NULL 0
+#define PT_LOAD 1
+#define PT_DYNAMIC 2
+#define PT_INTERP 3
+#define PT_NOTE 4
+#define PT_SHLIB 5
+#define PT_PHDR 6
+#define PT_LOPROC 0x70000000
+#define PT_HIPROC 0x7fffffff
+#define PT_MIPS_REGINFO 0x70000000
+#define PT_MIPS_OPTIONS 0x70000001
+
+/* Flags in the e_flags field of the header */
+/* MIPS architecture level. */
+#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
+#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
+
+/* The ABI of a file. */
+#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
+#define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */
+
+#define EF_MIPS_NOREORDER 0x00000001
+#define EF_MIPS_PIC 0x00000002
+#define EF_MIPS_CPIC 0x00000004
+#define EF_MIPS_ABI2 0x00000020
+#define EF_MIPS_OPTIONS_FIRST 0x00000080
+#define EF_MIPS_32BITMODE 0x00000100
+#define EF_MIPS_ABI 0x0000f000
+#define EF_MIPS_ARCH 0xf0000000
+
+/* These constants define the different elf file types */
+#define ET_NONE 0
+#define ET_REL 1
+#define ET_EXEC 2
+#define ET_DYN 3
+#define ET_CORE 4
+#define ET_LOPROC 0xff00
+#define ET_HIPROC 0xffff
+
+/* These constants define the various ELF target machines */
+#define EM_NONE 0
+#define EM_M32 1
+#define EM_SPARC 2
+#define EM_386 3
+#define EM_68K 4
+#define EM_88K 5
+#define EM_486 6 /* Perhaps disused */
+#define EM_860 7
+
+#define EM_MIPS 8 /* MIPS R3000 (officially, big-endian only) */
+
+#define EM_MIPS_RS4_BE 10 /* MIPS R4000 big-endian */
+
+#define EM_PARISC 15 /* HPPA */
+
+#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
+
+#define EM_PPC 20 /* PowerPC */
+#define EM_PPC64 21 /* PowerPC64 */
+
+#define EM_ARM 40 /* ARM */
+
+#define EM_SH 42 /* SuperH */
+
+#define EM_SPARCV9 43 /* SPARC v9 64-bit */
+
+#define EM_IA_64 50 /* HP/Intel IA-64 */
+
+#define EM_X86_64 62 /* AMD x86-64 */
+
+#define EM_S390 22 /* IBM S/390 */
+
+#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
+
+#define EM_V850 87 /* NEC v850 */
+
+#define EM_H8_300H 47 /* Hitachi H8/300H */
+#define EM_H8S 48 /* Hitachi H8S */
+#define EM_LATTICEMICO32 138 /* LatticeMico32 */
+
+#define EM_OPENRISC 92 /* OpenCores OpenRISC */
+
+#define EM_UNICORE32 110 /* UniCore32 */
+
+/*
+ * This is an interim value that we will use until the committee comes
+ * up with a final number.
+ */
+#define EM_ALPHA 0x9026
+
+/* Bogus old v850 magic number, used by old tools. */
+#define EM_CYGNUS_V850 0x9080
+
+/*
+ * This is the old interim value for S/390 architecture
+ */
+#define EM_S390_OLD 0xA390
+
+#define EM_MICROBLAZE 189
+#define EM_MICROBLAZE_OLD 0xBAAB
+
+#define EM_XTENSA 94 /* Tensilica Xtensa */
+
+/* This is the info that is needed to parse the dynamic section of the file */
+#define DT_NULL 0
+#define DT_NEEDED 1
+#define DT_PLTRELSZ 2
+#define DT_PLTGOT 3
+#define DT_HASH 4
+#define DT_STRTAB 5
+#define DT_SYMTAB 6
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_STRSZ 10
+#define DT_SYMENT 11
+#define DT_INIT 12
+#define DT_FINI 13
+#define DT_SONAME 14
+#define DT_RPATH 15
+#define DT_SYMBOLIC 16
+#define DT_REL 17
+#define DT_RELSZ 18
+#define DT_RELENT 19
+#define DT_PLTREL 20
+#define DT_DEBUG 21
+#define DT_TEXTREL 22
+#define DT_JMPREL 23
+#define DT_BINDNOW 24
+#define DT_INIT_ARRAY 25
+#define DT_FINI_ARRAY 26
+#define DT_INIT_ARRAYSZ 27
+#define DT_FINI_ARRAYSZ 28
+#define DT_RUNPATH 29
+#define DT_FLAGS 30
+#define DT_LOOS 0x6000000d
+#define DT_HIOS 0x6ffff000
+#define DT_LOPROC 0x70000000
+#define DT_HIPROC 0x7fffffff
+
+/* DT_ entries which fall between DT_VALRNGLO and DT_VALRNDHI use
+ the d_val field of the Elf*_Dyn structure. I.e. they contain scalars. */
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_VALRNGHI 0x6ffffdff
+
+/* DT_ entries which fall between DT_ADDRRNGLO and DT_ADDRRNGHI use
+ the d_ptr field of the Elf*_Dyn structure. I.e. they contain pointers. */
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_ADDRRNGHI 0x6ffffeff
+
+#define DT_VERSYM 0x6ffffff0
+#define DT_RELACOUNT 0x6ffffff9
+#define DT_RELCOUNT 0x6ffffffa
+#define DT_FLAGS_1 0x6ffffffb
+#define DT_VERDEF 0x6ffffffc
+#define DT_VERDEFNUM 0x6ffffffd
+#define DT_VERNEED 0x6ffffffe
+#define DT_VERNEEDNUM 0x6fffffff
+
+#define DT_MIPS_RLD_VERSION 0x70000001
+#define DT_MIPS_TIME_STAMP 0x70000002
+#define DT_MIPS_ICHECKSUM 0x70000003
+#define DT_MIPS_IVERSION 0x70000004
+#define DT_MIPS_FLAGS 0x70000005
+ #define RHF_NONE 0
+ #define RHF_HARDWAY 1
+ #define RHF_NOTPOT 2
+#define DT_MIPS_BASE_ADDRESS 0x70000006
+#define DT_MIPS_CONFLICT 0x70000008
+#define DT_MIPS_LIBLIST 0x70000009
+#define DT_MIPS_LOCAL_GOTNO 0x7000000a
+#define DT_MIPS_CONFLICTNO 0x7000000b
+#define DT_MIPS_LIBLISTNO 0x70000010
+#define DT_MIPS_SYMTABNO 0x70000011
+#define DT_MIPS_UNREFEXTNO 0x70000012
+#define DT_MIPS_GOTSYM 0x70000013
+#define DT_MIPS_HIPAGENO 0x70000014
+#define DT_MIPS_RLD_MAP 0x70000016
+
+/* This info is needed when parsing the symbol table */
+#define STB_LOCAL 0
+#define STB_GLOBAL 1
+#define STB_WEAK 2
+
+#define STT_NOTYPE 0
+#define STT_OBJECT 1
+#define STT_FUNC 2
+#define STT_SECTION 3
+#define STT_FILE 4
+
+#define ELF_ST_BIND(x) ((x) >> 4)
+#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
+#define ELF_ST_INFO(bind, type) (((bind) << 4) | ((type) & 0xf))
+#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
+
+/* Symbolic values for the entries in the auxiliary table
+ put on the initial stack */
+#define AT_NULL 0 /* end of vector */
+#define AT_IGNORE 1 /* entry should be ignored */
+#define AT_EXECFD 2 /* file descriptor of program */
+#define AT_PHDR 3 /* program headers for program */
+#define AT_PHENT 4 /* size of program header entry */
+#define AT_PHNUM 5 /* number of program headers */
+#define AT_PAGESZ 6 /* system page size */
+#define AT_BASE 7 /* base address of interpreter */
+#define AT_FLAGS 8 /* flags */
+#define AT_ENTRY 9 /* entry point of program */
+#define AT_NOTELF 10 /* program is not ELF */
+#define AT_UID 11 /* real uid */
+#define AT_EUID 12 /* effective uid */
+#define AT_GID 13 /* real gid */
+#define AT_EGID 14 /* effective gid */
+#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
+#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
+#define AT_CLKTCK 17 /* frequency at which times() increments */
+#define AT_FPUCW 18 /* info about fpu initialization by kernel */
+#define AT_DCACHEBSIZE 19 /* data cache block size */
+#define AT_ICACHEBSIZE 20 /* instruction cache block size */
+#define AT_UCACHEBSIZE 21 /* unified cache block size */
+#define AT_IGNOREPPC 22 /* ppc only; entry should be ignored */
+#define AT_SECURE 23 /* boolean, was exec suid-like? */
+#define AT_BASE_PLATFORM 24 /* string identifying real platforms */
+#define AT_RANDOM 25 /* address of 16 random bytes */
+#define AT_EXECFN 31 /* filename of the executable */
+#define AT_SYSINFO 32 /* address of kernel entry point */
+#define AT_SYSINFO_EHDR 33 /* address of kernel vdso */
+#define AT_L1I_CACHESHAPE 34 /* shapes of the caches: */
+#define AT_L1D_CACHESHAPE 35 /* bits 0-3: cache associativity. */
+#define AT_L2_CACHESHAPE 36 /* bits 4-7: log2 of line size. */
+#define AT_L3_CACHESHAPE 37 /* val&~255: cache size. */
+
+typedef struct dynamic{
+ Elf32_Sword d_tag;
+ union{
+ Elf32_Sword d_val;
+ Elf32_Addr d_ptr;
+ } d_un;
+} Elf32_Dyn;
+
+typedef struct {
+ Elf64_Sxword d_tag; /* entry tag value */
+ union {
+ Elf64_Xword d_val;
+ Elf64_Addr d_ptr;
+ } d_un;
+} Elf64_Dyn;
+
+/* The following are used with relocations */
+#define ELF32_R_SYM(x) ((x) >> 8)
+#define ELF32_R_TYPE(x) ((x) & 0xff)
+
+#define ELF64_R_SYM(i) ((i) >> 32)
+#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
+#define ELF64_R_TYPE_DATA(i) (((ELF64_R_TYPE(i) >> 8) ^ 0x00800000) - 0x00800000)
+
+#define R_386_NONE 0
+#define R_386_32 1
+#define R_386_PC32 2
+#define R_386_GOT32 3
+#define R_386_PLT32 4
+#define R_386_COPY 5
+#define R_386_GLOB_DAT 6
+#define R_386_JMP_SLOT 7
+#define R_386_RELATIVE 8
+#define R_386_GOTOFF 9
+#define R_386_GOTPC 10
+#define R_386_NUM 11
+/* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. */
+#define R_386_PC8 23
+
+#define R_MIPS_NONE 0
+#define R_MIPS_16 1
+#define R_MIPS_32 2
+#define R_MIPS_REL32 3
+#define R_MIPS_26 4
+#define R_MIPS_HI16 5
+#define R_MIPS_LO16 6
+#define R_MIPS_GPREL16 7
+#define R_MIPS_LITERAL 8
+#define R_MIPS_GOT16 9
+#define R_MIPS_PC16 10
+#define R_MIPS_CALL16 11
+#define R_MIPS_GPREL32 12
+/* The remaining relocs are defined on Irix, although they are not
+ in the MIPS ELF ABI. */
+#define R_MIPS_UNUSED1 13
+#define R_MIPS_UNUSED2 14
+#define R_MIPS_UNUSED3 15
+#define R_MIPS_SHIFT5 16
+#define R_MIPS_SHIFT6 17
+#define R_MIPS_64 18
+#define R_MIPS_GOT_DISP 19
+#define R_MIPS_GOT_PAGE 20
+#define R_MIPS_GOT_OFST 21
+/*
+ * The following two relocation types are specified in the MIPS ABI
+ * conformance guide version 1.2 but not yet in the psABI.
+ */
+#define R_MIPS_GOTHI16 22
+#define R_MIPS_GOTLO16 23
+#define R_MIPS_SUB 24
+#define R_MIPS_INSERT_A 25
+#define R_MIPS_INSERT_B 26
+#define R_MIPS_DELETE 27
+#define R_MIPS_HIGHER 28
+#define R_MIPS_HIGHEST 29
+/*
+ * The following two relocation types are specified in the MIPS ABI
+ * conformance guide version 1.2 but not yet in the psABI.
+ */
+#define R_MIPS_CALLHI16 30
+#define R_MIPS_CALLLO16 31
+/*
+ * This range is reserved for vendor specific relocations.
+ */
+#define R_MIPS_LOVENDOR 100
+#define R_MIPS_HIVENDOR 127
+
+
+/* SUN SPARC specific definitions. */
+
+/* Values for Elf64_Ehdr.e_flags. */
+
+#define EF_SPARCV9_MM 3
+#define EF_SPARCV9_TSO 0
+#define EF_SPARCV9_PSO 1
+#define EF_SPARCV9_RMO 2
+#define EF_SPARC_LEDATA 0x800000 /* little endian data */
+#define EF_SPARC_EXT_MASK 0xFFFF00
+#define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */
+#define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */
+#define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */
+#define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */
+
+/*
+ * Sparc ELF relocation types
+ */
+#define R_SPARC_NONE 0
+#define R_SPARC_8 1
+#define R_SPARC_16 2
+#define R_SPARC_32 3
+#define R_SPARC_DISP8 4
+#define R_SPARC_DISP16 5
+#define R_SPARC_DISP32 6
+#define R_SPARC_WDISP30 7
+#define R_SPARC_WDISP22 8
+#define R_SPARC_HI22 9
+#define R_SPARC_22 10
+#define R_SPARC_13 11
+#define R_SPARC_LO10 12
+#define R_SPARC_GOT10 13
+#define R_SPARC_GOT13 14
+#define R_SPARC_GOT22 15
+#define R_SPARC_PC10 16
+#define R_SPARC_PC22 17
+#define R_SPARC_WPLT30 18
+#define R_SPARC_COPY 19
+#define R_SPARC_GLOB_DAT 20
+#define R_SPARC_JMP_SLOT 21
+#define R_SPARC_RELATIVE 22
+#define R_SPARC_UA32 23
+#define R_SPARC_PLT32 24
+#define R_SPARC_HIPLT22 25
+#define R_SPARC_LOPLT10 26
+#define R_SPARC_PCPLT32 27
+#define R_SPARC_PCPLT22 28
+#define R_SPARC_PCPLT10 29
+#define R_SPARC_10 30
+#define R_SPARC_11 31
+#define R_SPARC_64 32
+#define R_SPARC_OLO10 33
+#define R_SPARC_HH22 34
+#define R_SPARC_HM10 35
+#define R_SPARC_LM22 36
+#define R_SPARC_WDISP16 40
+#define R_SPARC_WDISP19 41
+#define R_SPARC_7 43
+#define R_SPARC_5 44
+#define R_SPARC_6 45
+
+/* Bits present in AT_HWCAP, primarily for Sparc32. */
+
+#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
+#define HWCAP_SPARC_STBAR 2
+#define HWCAP_SPARC_SWAP 4
+#define HWCAP_SPARC_MULDIV 8
+#define HWCAP_SPARC_V9 16
+#define HWCAP_SPARC_ULTRA3 32
+
+/*
+ * 68k ELF relocation types
+ */
+#define R_68K_NONE 0
+#define R_68K_32 1
+#define R_68K_16 2
+#define R_68K_8 3
+#define R_68K_PC32 4
+#define R_68K_PC16 5
+#define R_68K_PC8 6
+#define R_68K_GOT32 7
+#define R_68K_GOT16 8
+#define R_68K_GOT8 9
+#define R_68K_GOT32O 10
+#define R_68K_GOT16O 11
+#define R_68K_GOT8O 12
+#define R_68K_PLT32 13
+#define R_68K_PLT16 14
+#define R_68K_PLT8 15
+#define R_68K_PLT32O 16
+#define R_68K_PLT16O 17
+#define R_68K_PLT8O 18
+#define R_68K_COPY 19
+#define R_68K_GLOB_DAT 20
+#define R_68K_JMP_SLOT 21
+#define R_68K_RELATIVE 22
+
+/*
+ * Alpha ELF relocation types
+ */
+#define R_ALPHA_NONE 0 /* No reloc */
+#define R_ALPHA_REFLONG 1 /* Direct 32 bit */
+#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */
+#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */
+#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */
+#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */
+#define R_ALPHA_GPDISP 6 /* Add displacement to GP */
+#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */
+#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */
+#define R_ALPHA_SREL16 9 /* PC relative 16 bit */
+#define R_ALPHA_SREL32 10 /* PC relative 32 bit */
+#define R_ALPHA_SREL64 11 /* PC relative 64 bit */
+#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */
+#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */
+#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */
+#define R_ALPHA_COPY 24 /* Copy symbol at runtime */
+#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */
+#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
+#define R_ALPHA_RELATIVE 27 /* Adjust by program base */
+#define R_ALPHA_BRSGP 28
+#define R_ALPHA_TLSGD 29
+#define R_ALPHA_TLS_LDM 30
+#define R_ALPHA_DTPMOD64 31
+#define R_ALPHA_GOTDTPREL 32
+#define R_ALPHA_DTPREL64 33
+#define R_ALPHA_DTPRELHI 34
+#define R_ALPHA_DTPRELLO 35
+#define R_ALPHA_DTPREL16 36
+#define R_ALPHA_GOTTPREL 37
+#define R_ALPHA_TPREL64 38
+#define R_ALPHA_TPRELHI 39
+#define R_ALPHA_TPRELLO 40
+#define R_ALPHA_TPREL16 41
+
+#define SHF_ALPHA_GPREL 0x10000000
+
+
+/* PowerPC relocations defined by the ABIs */
+#define R_PPC_NONE 0
+#define R_PPC_ADDR32 1 /* 32bit absolute address */
+#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
+#define R_PPC_ADDR16 3 /* 16bit absolute address */
+#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
+#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
+#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
+#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10 /* PC relative 26 bit */
+#define R_PPC_REL14 11 /* PC relative 16 bit */
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
+/* Keep this the last entry. */
+#ifndef R_PPC_NUM
+#define R_PPC_NUM 37
+#endif
+
+/* ARM specific declarations */
+
+/* Processor specific flags for the ELF header e_flags field. */
+#define EF_ARM_RELEXEC 0x01
+#define EF_ARM_HASENTRY 0x02
+#define EF_ARM_INTERWORK 0x04
+#define EF_ARM_APCS_26 0x08
+#define EF_ARM_APCS_FLOAT 0x10
+#define EF_ARM_PIC 0x20
+#define EF_ALIGN8 0x40 /* 8-bit structure alignment is in use */
+#define EF_NEW_ABI 0x80
+#define EF_OLD_ABI 0x100
+#define EF_ARM_SOFT_FLOAT 0x200
+#define EF_ARM_VFP_FLOAT 0x400
+#define EF_ARM_MAVERICK_FLOAT 0x800
+
+/* Other constants defined in the ARM ELF spec. version B-01. */
+#define EF_ARM_SYMSARESORTED 0x04 /* NB conflicts with EF_INTERWORK */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x08 /* NB conflicts with EF_APCS26 */
+#define EF_ARM_MAPSYMSFIRST 0x10 /* NB conflicts with EF_APCS_FLOAT */
+#define EF_ARM_EABIMASK 0xFF000000
+
+/* Constants defined in AAELF. */
+#define EF_ARM_BE8 0x00800000
+#define EF_ARM_LE8 0x00400000
+
+#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK)
+#define EF_ARM_EABI_UNKNOWN 0x00000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+/* Additional symbol types for Thumb */
+#define STT_ARM_TFUNC 0xd
+
+/* ARM-specific values for sh_flags */
+#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */
+#define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined
+ in the input to a link step */
+
+/* ARM-specific program header flags */
+#define PF_ARM_SB 0x10000000 /* Segment contains the location
+ addressed by the static base */
+
+/* ARM relocs. */
+#define R_ARM_NONE 0 /* No reloc */
+#define R_ARM_PC24 1 /* PC relative 26 bit branch */
+#define R_ARM_ABS32 2 /* Direct 32 bit */
+#define R_ARM_REL32 3 /* PC relative 32 bit */
+#define R_ARM_PC13 4
+#define R_ARM_ABS16 5 /* Direct 16 bit */
+#define R_ARM_ABS12 6 /* Direct 12 bit */
+#define R_ARM_THM_ABS5 7
+#define R_ARM_ABS8 8 /* Direct 8 bit */
+#define R_ARM_SBREL32 9
+#define R_ARM_THM_PC22 10
+#define R_ARM_THM_PC8 11
+#define R_ARM_AMP_VCALL9 12
+#define R_ARM_SWI24 13
+#define R_ARM_THM_SWI8 14
+#define R_ARM_XPC25 15
+#define R_ARM_THM_XPC22 16
+#define R_ARM_COPY 20 /* Copy symbol at runtime */
+#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
+#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
+#define R_ARM_RELATIVE 23 /* Adjust by program base */
+#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
+#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
+#define R_ARM_GOT32 26 /* 32 bit GOT entry */
+#define R_ARM_PLT32 27 /* 32 bit PLT address */
+#define R_ARM_CALL 28
+#define R_ARM_JUMP24 29
+#define R_ARM_GNU_VTENTRY 100
+#define R_ARM_GNU_VTINHERIT 101
+#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
+#define R_ARM_THM_PC9 103 /* thumb conditional branch */
+#define R_ARM_RXPC25 249
+#define R_ARM_RSBREL32 250
+#define R_ARM_THM_RPC22 251
+#define R_ARM_RREL32 252
+#define R_ARM_RABS22 253
+#define R_ARM_RPC24 254
+#define R_ARM_RBASE 255
+/* Keep this the last entry. */
+#define R_ARM_NUM 256
+
+/* s390 relocations defined by the ABIs */
+#define R_390_NONE 0 /* No reloc. */
+#define R_390_8 1 /* Direct 8 bit. */
+#define R_390_12 2 /* Direct 12 bit. */
+#define R_390_16 3 /* Direct 16 bit. */
+#define R_390_32 4 /* Direct 32 bit. */
+#define R_390_PC32 5 /* PC relative 32 bit. */
+#define R_390_GOT12 6 /* 12 bit GOT offset. */
+#define R_390_GOT32 7 /* 32 bit GOT offset. */
+#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
+#define R_390_COPY 9 /* Copy symbol at runtime. */
+#define R_390_GLOB_DAT 10 /* Create GOT entry. */
+#define R_390_JMP_SLOT 11 /* Create PLT entry. */
+#define R_390_RELATIVE 12 /* Adjust by program base. */
+#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
+#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */
+#define R_390_GOT16 15 /* 16 bit GOT offset. */
+#define R_390_PC16 16 /* PC relative 16 bit. */
+#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
+#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
+#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
+#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
+#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
+#define R_390_64 22 /* Direct 64 bit. */
+#define R_390_PC64 23 /* PC relative 64 bit. */
+#define R_390_GOT64 24 /* 64 bit GOT offset. */
+#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
+#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
+#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
+#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
+#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
+#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
+#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
+#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
+#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
+#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
+#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
+#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
+#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
+#define R_390_TLS_GDCALL 38 /* Tag for function call in general
+ dynamic TLS code. */
+#define R_390_TLS_LDCALL 39 /* Tag for function call in local
+ dynamic TLS code. */
+#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
+ thread local data. */
+#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
+ block offset. */
+#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
+ thread local data in LD code. */
+#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
+ negated static TLS block offset. */
+#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
+ static TLS block. */
+#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
+ block. */
+#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
+ block. */
+#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
+#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
+#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS
+ block. */
+/* Keep this the last entry. */
+#define R_390_NUM 57
+
+/* x86-64 relocation types */
+#define R_X86_64_NONE 0 /* No reloc */
+#define R_X86_64_64 1 /* Direct 64 bit */
+#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
+#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
+#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+#define R_X86_64_COPY 5 /* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
+#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
+#define R_X86_64_RELATIVE 8 /* Adjust by program base */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
+ offset to GOT */
+#define R_X86_64_32 10 /* Direct 32 bit zero extended */
+#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
+#define R_X86_64_16 12 /* Direct 16 bit zero extended */
+#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
+#define R_X86_64_8 14 /* Direct 8 bit sign extended */
+#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
+
+#define R_X86_64_NUM 16
+
+/* Legal values for e_flags field of Elf64_Ehdr. */
+
+#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
+
+/* HPPA specific definitions. */
+
+/* Legal values for e_flags field of Elf32_Ehdr. */
+
+#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
+#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
+#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
+#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
+#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
+ prediction. */
+#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
+#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
+
+/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
+
+#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
+#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
+#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
+
+/* Additional section indices. */
+
+#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
+ symbols in ANSI C. */
+#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
+
+/* Legal values for sh_type field of Elf32_Shdr. */
+
+#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
+#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
+#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
+
+/* Legal values for sh_flags field of Elf32_Shdr. */
+
+#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
+#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
+#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
+
+/* Legal values for ST_TYPE subfield of st_info (symbol type). */
+
+#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
+
+#define STT_HP_OPAQUE (STT_LOOS + 0x1)
+#define STT_HP_STUB (STT_LOOS + 0x2)
+
+/* HPPA relocs. */
+
+#define R_PARISC_NONE 0 /* No reloc. */
+#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
+#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
+#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
+#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
+#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
+#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
+#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
+#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
+#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
+#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
+#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
+#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
+#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
+#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
+#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
+#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
+#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
+#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
+#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
+#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
+#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
+#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
+#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
+#define R_PARISC_FPTR64 64 /* 64 bits function address. */
+#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
+#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
+#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
+#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
+#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
+#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
+#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
+#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
+#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
+#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
+#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
+#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
+#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
+#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
+#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
+#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
+#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
+#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
+#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
+#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
+#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
+#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
+#define R_PARISC_PLTOFF16F 117 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
+#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
+#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
+#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
+#define R_PARISC_LORESERVE 128
+#define R_PARISC_COPY 128 /* Copy relocation. */
+#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
+#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
+#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
+#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
+#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
+#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
+#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
+#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
+#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
+#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
+#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
+#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
+#define R_PARISC_HIRESERVE 255
+
+/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
+
+#define PT_HP_TLS (PT_LOOS + 0x0)
+#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
+#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
+#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
+#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
+#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
+#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
+#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
+#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
+#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
+#define PT_HP_PARALLEL (PT_LOOS + 0x10)
+#define PT_HP_FASTBIND (PT_LOOS + 0x11)
+#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
+#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
+#define PT_HP_STACK (PT_LOOS + 0x14)
+
+#define PT_PARISC_ARCHEXT 0x70000000
+#define PT_PARISC_UNWIND 0x70000001
+
+/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
+
+#define PF_PARISC_SBP 0x08000000
+
+#define PF_HP_PAGE_SIZE 0x00100000
+#define PF_HP_FAR_SHARED 0x00200000
+#define PF_HP_NEAR_SHARED 0x00400000
+#define PF_HP_CODE 0x01000000
+#define PF_HP_MODIFY 0x02000000
+#define PF_HP_LAZYSWAP 0x04000000
+#define PF_HP_SBP 0x08000000
+
+/* IA-64 specific declarations. */
+
+/* Processor specific flags for the Ehdr e_flags field. */
+#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
+#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
+#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
+
+/* Processor specific values for the Phdr p_type field. */
+#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
+#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
+
+/* Processor specific flags for the Phdr p_flags field. */
+#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
+
+/* Processor specific values for the Shdr sh_type field. */
+#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
+#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
+
+/* Processor specific flags for the Shdr sh_flags field. */
+#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
+#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
+
+/* Processor specific values for the Dyn d_tag field. */
+#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
+#define DT_IA_64_NUM 1
+
+/* IA-64 relocations. */
+#define R_IA64_NONE 0x00 /* none */
+#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
+#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
+#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
+#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
+#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
+#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
+#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
+#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
+#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
+#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
+#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
+#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
+#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
+#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
+#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
+#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
+#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
+#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
+#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
+#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
+#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
+#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
+#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
+#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
+#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
+#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
+#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
+#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
+#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
+#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
+#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
+#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
+#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
+#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
+#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
+#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
+#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
+#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
+#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
+#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
+#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY 0x84 /* copy relocation */
+#define R_IA64_SUB 0x85 /* Addend and symbol difference */
+#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
+#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
+#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
+#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
+#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
+#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
+#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm22 */
+#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
+#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
+#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
+#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
+#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
+#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
+#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
+#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
+#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
+
+typedef struct elf32_rel {
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+} Elf32_Rel;
+
+typedef struct elf64_rel {
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+} Elf64_Rel;
+
+typedef struct elf32_rela{
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
+} Elf32_Rela;
+
+typedef struct elf64_rela {
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Sxword r_addend; /* Constant addend used to compute value */
+} Elf64_Rela;
+
+typedef struct elf32_sym{
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
+} Elf32_Sym;
+
+typedef struct elf64_sym {
+ Elf64_Word st_name; /* Symbol name, index in string tbl */
+ unsigned char st_info; /* Type and binding attributes */
+ unsigned char st_other; /* No defined meaning, 0 */
+ Elf64_Half st_shndx; /* Associated section index */
+ Elf64_Addr st_value; /* Value of the symbol */
+ Elf64_Xword st_size; /* Associated symbol size */
+} Elf64_Sym;
+
+
+#define EI_NIDENT 16
+
+/* Special value for e_phnum. This indicates that the real number of
+ program headers is too large to fit into e_phnum. Instead the real
+ value is in the field sh_info of section 0. */
+#define PN_XNUM 0xffff
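As a sketch of how a loader might honour PN_XNUM (ehdr and shdr0 are illustrative variables holding the ELF header and section header 0, using the types defined below):

    uint32_t phnum = ehdr.e_phnum;
    if (phnum == PN_XNUM) {
        phnum = shdr0.sh_info;   /* real program header count lives in section 0 */
    }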
+
+typedef struct elf32_hdr{
+ unsigned char e_ident[EI_NIDENT];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry; /* Entry point */
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
+} Elf32_Ehdr;
+
+typedef struct elf64_hdr {
+ unsigned char e_ident[16]; /* ELF "magic number" */
+ Elf64_Half e_type;
+ Elf64_Half e_machine;
+ Elf64_Word e_version;
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Word e_flags;
+ Elf64_Half e_ehsize;
+ Elf64_Half e_phentsize;
+ Elf64_Half e_phnum;
+ Elf64_Half e_shentsize;
+ Elf64_Half e_shnum;
+ Elf64_Half e_shstrndx;
+} Elf64_Ehdr;
+
+/* These constants define the permissions on sections in the program
+ header, p_flags. */
+#define PF_R 0x4
+#define PF_W 0x2
+#define PF_X 0x1
+
+typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
+} Elf32_Phdr;
+
+typedef struct elf64_phdr {
+ Elf64_Word p_type;
+ Elf64_Word p_flags;
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment, file & memory */
+} Elf64_Phdr;
+
+/* sh_type */
+#define SHT_NULL 0
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_RELA 4
+#define SHT_HASH 5
+#define SHT_DYNAMIC 6
+#define SHT_NOTE 7
+#define SHT_NOBITS 8
+#define SHT_REL 9
+#define SHT_SHLIB 10
+#define SHT_DYNSYM 11
+#define SHT_NUM 12
+#define SHT_LOPROC 0x70000000
+#define SHT_HIPROC 0x7fffffff
+#define SHT_LOUSER 0x80000000
+#define SHT_HIUSER 0xffffffff
+#define SHT_MIPS_LIST 0x70000000
+#define SHT_MIPS_CONFLICT 0x70000002
+#define SHT_MIPS_GPTAB 0x70000003
+#define SHT_MIPS_UCODE 0x70000004
+
+/* sh_flags */
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_MASKPROC 0xf0000000
+#define SHF_MIPS_GPREL 0x10000000
+
+/* special section indexes */
+#define SHN_UNDEF 0
+#define SHN_LORESERVE 0xff00
+#define SHN_LOPROC 0xff00
+#define SHN_HIPROC 0xff1f
+#define SHN_ABS 0xfff1
+#define SHN_COMMON 0xfff2
+#define SHN_HIRESERVE 0xffff
+#define SHN_MIPS_ACCOMON 0xff00
+
+typedef struct elf32_shdr {
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
+} Elf32_Shdr;
+
+typedef struct elf64_shdr {
+ Elf64_Word sh_name; /* Section name, index in string tbl */
+ Elf64_Word sh_type; /* Type of section */
+ Elf64_Xword sh_flags; /* Miscellaneous section attributes */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Size of section in bytes */
+ Elf64_Word sh_link; /* Index of another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
+} Elf64_Shdr;
+
+#define EI_MAG0 0 /* e_ident[] indexes */
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+#define EI_OSABI 7
+#define EI_PAD 8
+
+#define ELFOSABI_NONE 0 /* UNIX System V ABI */
+#define ELFOSABI_SYSV 0 /* Alias. */
+#define ELFOSABI_HPUX 1 /* HP-UX */
+#define ELFOSABI_NETBSD 2 /* NetBSD. */
+#define ELFOSABI_LINUX 3 /* Linux. */
+#define ELFOSABI_SOLARIS 6 /* Sun Solaris. */
+#define ELFOSABI_AIX 7 /* IBM AIX. */
+#define ELFOSABI_IRIX 8 /* SGI Irix. */
+#define ELFOSABI_FREEBSD 9 /* FreeBSD. */
+#define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. */
+#define ELFOSABI_MODESTO 11 /* Novell Modesto. */
+#define ELFOSABI_OPENBSD 12 /* OpenBSD. */
+#define ELFOSABI_ARM 97 /* ARM */
+#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
+
+#define ELFMAG0 0x7f /* EI_MAG */
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF"
+#define SELFMAG 4
+
+#define ELFCLASSNONE 0 /* EI_CLASS */
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+#define ELFCLASSNUM 3
+
+#define ELFDATANONE 0 /* e_ident[EI_DATA] */
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
+
+#define EV_NONE 0 /* e_version, EI_VERSION */
+#define EV_CURRENT 1
+#define EV_NUM 2
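Putting the identification constants above together, a minimal validity check for e_ident might look like the following sketch (ident_ok is a hypothetical helper; memcmp comes from <string.h>):

    static int ident_ok(const unsigned char *e_ident, int is_elf64)
    {
        return memcmp(e_ident, ELFMAG, SELFMAG) == 0
            && e_ident[EI_CLASS] == (is_elf64 ? ELFCLASS64 : ELFCLASS32)
            && (e_ident[EI_DATA] == ELFDATA2LSB ||
                e_ident[EI_DATA] == ELFDATA2MSB)
            && e_ident[EI_VERSION] == EV_CURRENT;
    }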
+
+/* Notes used in ET_CORE */
+#define NT_PRSTATUS 1
+#define NT_PRFPREG 2
+#define NT_PRPSINFO 3
+#define NT_TASKSTRUCT 4
+#define NT_AUXV 6
+#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
+
+
+/* Note header in a PT_NOTE section */
+typedef struct elf32_note {
+ Elf32_Word n_namesz; /* Name size */
+ Elf32_Word n_descsz; /* Content size */
+ Elf32_Word n_type; /* Content type */
+} Elf32_Nhdr;
+
+/* Note header in a PT_NOTE section */
+typedef struct elf64_note {
+ Elf64_Word n_namesz; /* Name size */
+ Elf64_Word n_descsz; /* Content size */
+ Elf64_Word n_type; /* Content type */
+} Elf64_Nhdr;
+
+
+/* This data structure represents a PT_LOAD segment. */
+struct elf32_fdpic_loadseg {
+ /* Core address to which the segment is mapped. */
+ Elf32_Addr addr;
+ /* VMA recorded in the program header. */
+ Elf32_Addr p_vaddr;
+ /* Size of this segment in memory. */
+ Elf32_Word p_memsz;
+};
+struct elf32_fdpic_loadmap {
+ /* Protocol version number, must be zero. */
+ Elf32_Half version;
+ /* Number of segments in this map. */
+ Elf32_Half nsegs;
+ /* The actual memory map. */
+ struct elf32_fdpic_loadseg segs[/*nsegs*/];
+};
+
+#ifdef ELF_CLASS
+#if ELF_CLASS == ELFCLASS32
+
+#define elfhdr elf32_hdr
+#define elf_phdr elf32_phdr
+#define elf_note elf32_note
+#define elf_shdr elf32_shdr
+#define elf_sym elf32_sym
+#define elf_addr_t Elf32_Off
+
+#ifdef ELF_USES_RELOCA
+# define ELF_RELOC Elf32_Rela
+#else
+# define ELF_RELOC Elf32_Rel
+#endif
+
+#else
+
+#define elfhdr elf64_hdr
+#define elf_phdr elf64_phdr
+#define elf_note elf64_note
+#define elf_shdr elf64_shdr
+#define elf_sym elf64_sym
+#define elf_addr_t Elf64_Off
+
+#ifdef ELF_USES_RELOCA
+# define ELF_RELOC Elf64_Rela
+#else
+# define ELF_RELOC Elf64_Rel
+#endif
+
+#endif /* ELF_CLASS */
+
+#ifndef ElfW
+# if ELF_CLASS == ELFCLASS32
+# define ElfW(x) Elf32_ ## x
+# define ELFW(x) ELF32_ ## x
+# else
+# define ElfW(x) Elf64_ ## x
+# define ELFW(x) ELF64_ ## x
+# endif
+#endif
+
+#endif /* ELF_CLASS */
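The ElfW() macro lets target code name ELF types without hard-coding the word width. An illustrative fragment, assuming ELF_CLASS was defined to ELFCLASS32 before this header was included:

    ElfW(Ehdr) ehdr;                 /* expands to Elf32_Ehdr */
    ElfW(Sym)  sym;                  /* expands to Elf32_Sym  */
    ElfW(Addr) entry = ehdr.e_entry; /* Elf32_Addr            */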
+
+
+#endif /* _QEMU_ELF_H */
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
new file mode 100644
index 0000000000..3d12cddeec
--- /dev/null
+++ b/include/exec/address-spaces.h
@@ -0,0 +1,41 @@
+/*
+ * Internal memory management interfaces
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef EXEC_MEMORY_H
+#define EXEC_MEMORY_H
+
+/*
+ * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
+ * you're one of them.
+ */
+
+#include "exec/memory.h"
+
+#ifndef CONFIG_USER_ONLY
+
+/* Get the root memory region. This interface should only be used temporarily
+ * until a proper bus interface is available.
+ */
+MemoryRegion *get_system_memory(void);
+
+/* Get the root I/O port region. This interface should only be used
+ * temporarily until a proper bus interface is available.
+ */
+MemoryRegion *get_system_io(void);
+
+extern AddressSpace address_space_memory;
+extern AddressSpace address_space_io;
+
+#endif
+
+#endif
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
new file mode 100644
index 0000000000..439e88deb4
--- /dev/null
+++ b/include/exec/cpu-all.h
@@ -0,0 +1,533 @@
+/*
+ * defines common to all virtual CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_ALL_H
+#define CPU_ALL_H
+
+#include "qemu-common.h"
+#include "qemu/tls.h"
+#include "exec/cpu-common.h"
+#include "qemu/thread.h"
+
+/* some important defines:
+ *
+ * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
+ * memory accesses.
+ *
+ * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
+ * otherwise little endian.
+ *
+ * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
+ *
+ * TARGET_WORDS_BIGENDIAN : same for target cpu
+ */
+
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#define BSWAP_NEEDED
+#endif
+
+#ifdef BSWAP_NEEDED
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ return bswap16(s);
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ return bswap32(s);
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ return bswap64(s);
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+ *s = bswap16(*s);
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+ *s = bswap32(*s);
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+ *s = bswap64(*s);
+}
+
+#else
+
+static inline uint16_t tswap16(uint16_t s)
+{
+ return s;
+}
+
+static inline uint32_t tswap32(uint32_t s)
+{
+ return s;
+}
+
+static inline uint64_t tswap64(uint64_t s)
+{
+ return s;
+}
+
+static inline void tswap16s(uint16_t *s)
+{
+}
+
+static inline void tswap32s(uint32_t *s)
+{
+}
+
+static inline void tswap64s(uint64_t *s)
+{
+}
+
+#endif
+
+#if TARGET_LONG_SIZE == 4
+#define tswapl(s) tswap32(s)
+#define tswapls(s) tswap32s((uint32_t *)(s))
+#define bswaptls(s) bswap32s(s)
+#else
+#define tswapl(s) tswap64(s)
+#define tswapls(s) tswap64s((uint64_t *)(s))
+#define bswaptls(s) bswap64s(s)
+#endif
+
+/* CPU memory access without any memory or io remapping */
+
+/*
+ * the generic syntax for the memory accesses is:
+ *
+ * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
+ *
+ * store: st{type}{size}{endian}_{access_type}(ptr, val)
+ *
+ * type is:
+ * (empty): integer access
+ * f : float access
+ *
+ * sign is:
+ * (empty): for floats or 32 bit size
+ * u : unsigned
+ * s : signed
+ *
+ * size is:
+ * b: 8 bits
+ * w: 16 bits
+ * l: 32 bits
+ * q: 64 bits
+ *
+ * endian is:
+ * (empty): target cpu endianness or 8 bit access
+ * r : reversed target cpu endianness (not implemented yet)
+ * be : big endian (not implemented yet)
+ * le : little endian (not implemented yet)
+ *
+ * access_type is:
+ * raw : host memory access
+ * user : user mode access using soft MMU
+ * kernel : kernel mode access using soft MMU
+ */
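To make the naming scheme concrete, here are a few decoded examples using accessors that are defined (directly or through the macros below) in this header; the pointer p and value d are illustrative:

    int v8  = ldub_raw(p);    /* integer, unsigned,  8 bit, host access       */
    int v16 = ldsw_kernel(p); /* integer, signed,   16 bit, "kernel" access   */
    stfq_raw(p, d);           /* float,             64 bit, host access       */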
+
+/* target-endianness CPU memory access functions */
+#if defined(TARGET_WORDS_BIGENDIAN)
+#define lduw_p(p) lduw_be_p(p)
+#define ldsw_p(p) ldsw_be_p(p)
+#define ldl_p(p) ldl_be_p(p)
+#define ldq_p(p) ldq_be_p(p)
+#define ldfl_p(p) ldfl_be_p(p)
+#define ldfq_p(p) ldfq_be_p(p)
+#define stw_p(p, v) stw_be_p(p, v)
+#define stl_p(p, v) stl_be_p(p, v)
+#define stq_p(p, v) stq_be_p(p, v)
+#define stfl_p(p, v) stfl_be_p(p, v)
+#define stfq_p(p, v) stfq_be_p(p, v)
+#else
+#define lduw_p(p) lduw_le_p(p)
+#define ldsw_p(p) ldsw_le_p(p)
+#define ldl_p(p) ldl_le_p(p)
+#define ldq_p(p) ldq_le_p(p)
+#define ldfl_p(p) ldfl_le_p(p)
+#define ldfq_p(p) ldfq_le_p(p)
+#define stw_p(p, v) stw_le_p(p, v)
+#define stl_p(p, v) stl_le_p(p, v)
+#define stq_p(p, v) stq_le_p(p, v)
+#define stfl_p(p, v) stfl_le_p(p, v)
+#define stfq_p(p, v) stfq_le_p(p, v)
+#endif
+
+/* MMU memory access macros */
+
+#if defined(CONFIG_USER_ONLY)
+#include <assert.h>
+#include "exec/user/abitypes.h"
+
+/* On some host systems the guest address space is reserved on the host.
+ * This allows the guest address space to be offset to a convenient location.
+ */
+#if defined(CONFIG_USE_GUEST_BASE)
+extern unsigned long guest_base;
+extern int have_guest_base;
+extern unsigned long reserved_va;
+#define GUEST_BASE guest_base
+#define RESERVED_VA reserved_va
+#else
+#define GUEST_BASE 0ul
+#define RESERVED_VA 0ul
+#endif
+
+/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
+#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
+
+#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
+#define h2g_valid(x) 1
+#else
+#define h2g_valid(x) ({ \
+ unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+ (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
+ (!RESERVED_VA || (__guest < RESERVED_VA)); \
+})
+#endif
+
+#define h2g(x) ({ \
+ unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+ /* Check if given address fits target address space */ \
+ assert(h2g_valid(x)); \
+ (abi_ulong)__ret; \
+})
+
+#define saddr(x) g2h(x)
+#define laddr(x) g2h(x)
+
+#else /* !CONFIG_USER_ONLY */
+/* NOTE: we use double casts if pointers and target_ulong have
+ different sizes */
+#define saddr(x) (uint8_t *)(intptr_t)(x)
+#define laddr(x) (uint8_t *)(intptr_t)(x)
+#endif
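In user-mode builds the g2h()/h2g() pair above is the canonical way to move between guest and host views of the same memory. A small illustrative fragment (guest_addr is a hypothetical target_ulong):

    void *host_ptr = g2h(guest_addr);        /* guest address -> host pointer */
    if (h2g_valid(host_ptr)) {
        abi_ulong back = h2g(host_ptr);      /* host -> guest, asserts if invalid */
    }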
+
+#define ldub_raw(p) ldub_p(laddr((p)))
+#define ldsb_raw(p) ldsb_p(laddr((p)))
+#define lduw_raw(p) lduw_p(laddr((p)))
+#define ldsw_raw(p) ldsw_p(laddr((p)))
+#define ldl_raw(p) ldl_p(laddr((p)))
+#define ldq_raw(p) ldq_p(laddr((p)))
+#define ldfl_raw(p) ldfl_p(laddr((p)))
+#define ldfq_raw(p) ldfq_p(laddr((p)))
+#define stb_raw(p, v) stb_p(saddr((p)), v)
+#define stw_raw(p, v) stw_p(saddr((p)), v)
+#define stl_raw(p, v) stl_p(saddr((p)), v)
+#define stq_raw(p, v) stq_p(saddr((p)), v)
+#define stfl_raw(p, v) stfl_p(saddr((p)), v)
+#define stfq_raw(p, v) stfq_p(saddr((p)), v)
+
+
+#if defined(CONFIG_USER_ONLY)
+
+/* if user mode, no other memory access functions */
+#define ldub(p) ldub_raw(p)
+#define ldsb(p) ldsb_raw(p)
+#define lduw(p) lduw_raw(p)
+#define ldsw(p) ldsw_raw(p)
+#define ldl(p) ldl_raw(p)
+#define ldq(p) ldq_raw(p)
+#define ldfl(p) ldfl_raw(p)
+#define ldfq(p) ldfq_raw(p)
+#define stb(p, v) stb_raw(p, v)
+#define stw(p, v) stw_raw(p, v)
+#define stl(p, v) stl_raw(p, v)
+#define stq(p, v) stq_raw(p, v)
+#define stfl(p, v) stfl_raw(p, v)
+#define stfq(p, v) stfq_raw(p, v)
+
+#define cpu_ldub_code(env1, p) ldub_raw(p)
+#define cpu_ldsb_code(env1, p) ldsb_raw(p)
+#define cpu_lduw_code(env1, p) lduw_raw(p)
+#define cpu_ldsw_code(env1, p) ldsw_raw(p)
+#define cpu_ldl_code(env1, p) ldl_raw(p)
+#define cpu_ldq_code(env1, p) ldq_raw(p)
+
+#define cpu_ldub_data(env, addr) ldub_raw(addr)
+#define cpu_lduw_data(env, addr) lduw_raw(addr)
+#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
+#define cpu_ldl_data(env, addr) ldl_raw(addr)
+#define cpu_ldq_data(env, addr) ldq_raw(addr)
+
+#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
+#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
+
+#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
+#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
+#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
+#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
+#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
+
+#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
+#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
+#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
+#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
+
+#define ldub_kernel(p) ldub_raw(p)
+#define ldsb_kernel(p) ldsb_raw(p)
+#define lduw_kernel(p) lduw_raw(p)
+#define ldsw_kernel(p) ldsw_raw(p)
+#define ldl_kernel(p) ldl_raw(p)
+#define ldq_kernel(p) ldq_raw(p)
+#define ldfl_kernel(p) ldfl_raw(p)
+#define ldfq_kernel(p) ldfq_raw(p)
+#define stb_kernel(p, v) stb_raw(p, v)
+#define stw_kernel(p, v) stw_raw(p, v)
+#define stl_kernel(p, v) stl_raw(p, v)
+#define stq_kernel(p, v) stq_raw(p, v)
+#define stfl_kernel(p, v) stfl_raw(p, v)
+#define stfq_kernel(p, v) stfq_raw(p, v)
+
+#endif /* defined(CONFIG_USER_ONLY) */
+
+/* page related stuff */
+
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
+#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+
+/* ??? These should be the larger of uintptr_t and target_ulong. */
+extern uintptr_t qemu_real_host_page_size;
+extern uintptr_t qemu_host_page_size;
+extern uintptr_t qemu_host_page_mask;
+
+#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
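As a worked example of the rounding these macros perform, assuming TARGET_PAGE_BITS is 12 (4 KiB pages; the real value is per-target):

    /* TARGET_PAGE_SIZE == 0x1000, TARGET_PAGE_MASK == ~0xfff           */
    /* TARGET_PAGE_ALIGN(0x12345) == 0x13000  (rounds up to a page)     */
    /* 0x12345 & TARGET_PAGE_MASK == 0x12000  (rounds down to a page)   */
    /* HOST_PAGE_ALIGN() does the same using the run-time host values.  */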
+
+/* same as PROT_xxx */
+#define PAGE_READ 0x0001
+#define PAGE_WRITE 0x0002
+#define PAGE_EXEC 0x0004
+#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID 0x0008
+/* original state of the write flag (used when tracking self-modifying
+   code) */
+#define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away. */
+#define PAGE_RESERVED 0x0020
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+void page_dump(FILE *f);
+
+typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
+ abi_ulong, unsigned long);
+int walk_memory_regions(void *, walk_memory_regions_fn);
+
+int page_get_flags(target_ulong address);
+void page_set_flags(target_ulong start, target_ulong end, int flags);
+int page_check_range(target_ulong start, target_ulong len, int flags);
+#endif
+
+CPUArchState *cpu_copy(CPUArchState *env);
+CPUArchState *qemu_get_cpu(int cpu);
+
+#define CPU_DUMP_CODE 0x00010000
+#define CPU_DUMP_FPU 0x00020000 /* dump FPU register state, not just integer */
+/* dump info about TCG QEMU's condition code optimization state */
+#define CPU_DUMP_CCOP 0x00040000
+
+void cpu_dump_state(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+void cpu_dump_statistics(CPUArchState *env, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+
+void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
+ GCC_FMT_ATTR(2, 3);
+extern CPUArchState *first_cpu;
+DECLARE_TLS(CPUArchState *,cpu_single_env);
+#define cpu_single_env tls_var(cpu_single_env)
+
+/* Flags for use in ENV->INTERRUPT_PENDING.
+
+ The numbers assigned here are non-sequential in order to preserve
+ binary compatibility with the vmstate dump. Bit 0 (0x0001) was
+ previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
+ the vmstate dump. */
+
+/* External hardware interrupt pending. This is typically used for
+ interrupts from devices. */
+#define CPU_INTERRUPT_HARD 0x0002
+
+/* Exit the current TB. This is typically used when some system-level device
+   makes some change to the memory mapping, e.g. the A20 line changing. */
+#define CPU_INTERRUPT_EXITTB 0x0004
+
+/* Halt the CPU. */
+#define CPU_INTERRUPT_HALT 0x0020
+
+/* Debug event pending. */
+#define CPU_INTERRUPT_DEBUG 0x0080
+
+/* Several target-specific external hardware interrupts. Each target/cpu.h
+ should define proper names based on these defines. */
+#define CPU_INTERRUPT_TGT_EXT_0 0x0008
+#define CPU_INTERRUPT_TGT_EXT_1 0x0010
+#define CPU_INTERRUPT_TGT_EXT_2 0x0040
+#define CPU_INTERRUPT_TGT_EXT_3 0x0200
+#define CPU_INTERRUPT_TGT_EXT_4 0x1000
+
+/* Several target-specific internal interrupts. These differ from the
+ preceding target-specific interrupts in that they are intended to
+ originate from within the cpu itself, typically in response to some
+ instruction being executed. These, therefore, are not masked while
+ single-stepping within the debugger. */
+#define CPU_INTERRUPT_TGT_INT_0 0x0100
+#define CPU_INTERRUPT_TGT_INT_1 0x0400
+#define CPU_INTERRUPT_TGT_INT_2 0x0800
+#define CPU_INTERRUPT_TGT_INT_3 0x2000
+
+/* First unused bit: 0x4000. */
+
+/* The set of all bits that should be masked when single-stepping. */
+#define CPU_INTERRUPT_SSTEP_MASK \
+ (CPU_INTERRUPT_HARD \
+ | CPU_INTERRUPT_TGT_EXT_0 \
+ | CPU_INTERRUPT_TGT_EXT_1 \
+ | CPU_INTERRUPT_TGT_EXT_2 \
+ | CPU_INTERRUPT_TGT_EXT_3 \
+ | CPU_INTERRUPT_TGT_EXT_4)
+
+#ifndef CONFIG_USER_ONLY
+typedef void (*CPUInterruptHandler)(CPUArchState *, int);
+
+extern CPUInterruptHandler cpu_interrupt_handler;
+
+static inline void cpu_interrupt(CPUArchState *s, int mask)
+{
+ cpu_interrupt_handler(s, mask);
+}
+#else /* USER_ONLY */
+void cpu_interrupt(CPUArchState *env, int mask);
+#endif /* USER_ONLY */
+
+void cpu_reset_interrupt(CPUArchState *env, int mask);
+
+void cpu_exit(CPUArchState *s);
+
+/* Breakpoint/watchpoint flags */
+#define BP_MEM_READ 0x01
+#define BP_MEM_WRITE 0x02
+#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
+#define BP_STOP_BEFORE_ACCESS 0x04
+#define BP_WATCHPOINT_HIT 0x08
+#define BP_GDB 0x10
+#define BP_CPU 0x20
+
+int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
+ CPUBreakpoint **breakpoint);
+int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
+void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
+void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
+int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
+ int flags, CPUWatchpoint **watchpoint);
+int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
+ target_ulong len, int flags);
+void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
+void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
+
+#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
+#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
+#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
+
+void cpu_single_step(CPUArchState *env, int enabled);
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* Return the physical page corresponding to a virtual one. Use it
+ only for debugging because no protection checks are done. Return -1
+ if no page found. */
+hwaddr cpu_get_phys_page_debug(CPUArchState *env, target_ulong addr);
+
+/* memory API */
+
+extern int phys_ram_fd;
+extern ram_addr_t ram_size;
+
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC_MASK (1 << 0)
+
+typedef struct RAMBlock {
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ ram_addr_t offset;
+ ram_addr_t length;
+ uint32_t flags;
+ char idstr[256];
+ /* Reads can take either the iothread or the ramlist lock.
+ * Writes must take both locks.
+ */
+ QTAILQ_ENTRY(RAMBlock) next;
+#if defined(__linux__) && !defined(TARGET_S390X)
+ int fd;
+#endif
+} RAMBlock;
+
+typedef struct RAMList {
+ QemuMutex mutex;
+ /* Protected by the iothread lock. */
+ uint8_t *phys_dirty;
+ RAMBlock *mru_block;
+ /* Protected by the ramlist lock. */
+ QTAILQ_HEAD(, RAMBlock) blocks;
+ uint32_t version;
+} RAMList;
+extern RAMList ram_list;
+
+extern const char *mem_path;
+extern int mem_prealloc;
+
+/* Flags stored in the low bits of the TLB virtual address. These are
+ defined so that fast path ram access is all zeros. */
+/* Zero if TLB entry is valid. */
+#define TLB_INVALID_MASK (1 << 3)
+/* Set if TLB entry references a clean RAM page. The iotlb entry will
+ contain the page physical address. */
+#define TLB_NOTDIRTY (1 << 4)
+/* Set if TLB entry is an IO callback. */
+#define TLB_MMIO (1 << 5)
+
+void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
+ram_addr_t last_ram_offset(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
+#endif /* !CONFIG_USER_ONLY */
+
+int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
+ uint8_t *buf, int len, int is_write);
+
+#endif /* CPU_ALL_H */
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
new file mode 100644
index 0000000000..2e5f11f47f
--- /dev/null
+++ b/include/exec/cpu-common.h
@@ -0,0 +1,124 @@
+#ifndef CPU_COMMON_H
+#define CPU_COMMON_H 1
+
+/* CPU interfaces that are target independent. */
+
+#include "exec/hwaddr.h"
+
+#ifndef NEED_CPU_H
+#include "exec/poison.h"
+#endif
+
+#include "qemu/bswap.h"
+#include "qemu/queue.h"
+
+/**
+ * CPUListState:
+ * @cpu_fprintf: Print function.
+ * @file: File to print to using @cpu_fprintf.
+ *
+ * State commonly used for iterating over CPU models.
+ */
+typedef struct CPUListState {
+ fprintf_function cpu_fprintf;
+ FILE *file;
+} CPUListState;
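A minimal sketch of how this state is meant to be used when listing CPU models; the function name is hypothetical and relies only on the two fields above:

    static void list_one_cpu_model(CPUListState *s, const char *name)
    {
        /* print through the caller-supplied stream and print function */
        (*s->cpu_fprintf)(s->file, "  %s\n", name);
    }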
+
+#if !defined(CONFIG_USER_ONLY)
+
+enum device_endian {
+ DEVICE_NATIVE_ENDIAN,
+ DEVICE_BIG_ENDIAN,
+ DEVICE_LITTLE_ENDIAN,
+};
+
+/* address in the RAM (different from a physical address) */
+#if defined(CONFIG_XEN_BACKEND)
+typedef uint64_t ram_addr_t;
+# define RAM_ADDR_MAX UINT64_MAX
+# define RAM_ADDR_FMT "%" PRIx64
+#else
+typedef uintptr_t ram_addr_t;
+# define RAM_ADDR_MAX UINTPTR_MAX
+# define RAM_ADDR_FMT "%" PRIxPTR
+#endif
+
+/* memory API */
+
+typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
+typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
+
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+/* This should only be used for ram local to a device. */
+void *qemu_get_ram_ptr(ram_addr_t addr);
+void qemu_put_ram_ptr(void *addr);
+/* This should not be used by devices. */
+int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
+void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
+
+void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
+ int len, int is_write);
+static inline void cpu_physical_memory_read(hwaddr addr,
+ void *buf, int len)
+{
+ cpu_physical_memory_rw(addr, buf, len, 0);
+}
+static inline void cpu_physical_memory_write(hwaddr addr,
+ const void *buf, int len)
+{
+ cpu_physical_memory_rw(addr, (void *)buf, len, 1);
+}
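For example, a 32-bit little-endian value can be read out of guest physical memory through the generic wrapper plus a byte-swap helper from qemu/bswap.h; addr is an illustrative hwaddr (the ld*_phys accessors declared below do the same in a single call):

    uint32_t val;
    cpu_physical_memory_read(addr, &val, sizeof(val));
    val = le32_to_cpu(val);   /* convert from guest little-endian */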
+void *cpu_physical_memory_map(hwaddr addr,
+ hwaddr *plen,
+ int is_write);
+void cpu_physical_memory_unmap(void *buffer, hwaddr len,
+ int is_write, hwaddr access_len);
+void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+
+bool cpu_physical_memory_is_io(hwaddr phys_addr);
+
+/* Coalesced MMIO regions are areas where write operations can be reordered.
+ * This usually implies that write operations are side-effect free. This allows
+ * batching which can make a major impact on performance when using
+ * virtualization.
+ */
+void qemu_flush_coalesced_mmio_buffer(void);
+
+uint32_t ldub_phys(hwaddr addr);
+uint32_t lduw_le_phys(hwaddr addr);
+uint32_t lduw_be_phys(hwaddr addr);
+uint32_t ldl_le_phys(hwaddr addr);
+uint32_t ldl_be_phys(hwaddr addr);
+uint64_t ldq_le_phys(hwaddr addr);
+uint64_t ldq_be_phys(hwaddr addr);
+void stb_phys(hwaddr addr, uint32_t val);
+void stw_le_phys(hwaddr addr, uint32_t val);
+void stw_be_phys(hwaddr addr, uint32_t val);
+void stl_le_phys(hwaddr addr, uint32_t val);
+void stl_be_phys(hwaddr addr, uint32_t val);
+void stq_le_phys(hwaddr addr, uint64_t val);
+void stq_be_phys(hwaddr addr, uint64_t val);
+
+#ifdef NEED_CPU_H
+uint32_t lduw_phys(hwaddr addr);
+uint32_t ldl_phys(hwaddr addr);
+uint64_t ldq_phys(hwaddr addr);
+void stl_phys_notdirty(hwaddr addr, uint32_t val);
+void stq_phys_notdirty(hwaddr addr, uint64_t val);
+void stw_phys(hwaddr addr, uint32_t val);
+void stl_phys(hwaddr addr, uint32_t val);
+void stq_phys(hwaddr addr, uint64_t val);
+#endif
+
+void cpu_physical_memory_write_rom(hwaddr addr,
+ const uint8_t *buf, int len);
+
+extern struct MemoryRegion io_mem_ram;
+extern struct MemoryRegion io_mem_rom;
+extern struct MemoryRegion io_mem_unassigned;
+extern struct MemoryRegion io_mem_notdirty;
+
+#endif
+
+#endif /* !CPU_COMMON_H */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
new file mode 100644
index 0000000000..b22b4c6255
--- /dev/null
+++ b/include/exec/cpu-defs.h
@@ -0,0 +1,207 @@
+/*
+ * common defines for all CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPU_DEFS_H
+#define CPU_DEFS_H
+
+#ifndef NEED_CPU_H
+#error cpu.h included from common code
+#endif
+
+#include "config.h"
+#include <setjmp.h>
+#include <inttypes.h>
+#include <signal.h>
+#include "qemu/osdep.h"
+#include "qemu/queue.h"
+#include "exec/hwaddr.h"
+
+#ifndef TARGET_LONG_BITS
+#error TARGET_LONG_BITS must be defined before including this header
+#endif
+
+#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+typedef int16_t target_short __attribute__ ((aligned(TARGET_SHORT_ALIGNMENT)));
+typedef uint16_t target_ushort __attribute__((aligned(TARGET_SHORT_ALIGNMENT)));
+typedef int32_t target_int __attribute__((aligned(TARGET_INT_ALIGNMENT)));
+typedef uint32_t target_uint __attribute__((aligned(TARGET_INT_ALIGNMENT)));
+typedef int64_t target_llong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
+typedef uint64_t target_ullong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
+/* target_ulong is the type of a virtual address */
+#if TARGET_LONG_SIZE == 4
+typedef int32_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+typedef uint32_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+#define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
+#elif TARGET_LONG_SIZE == 8
+typedef int64_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+typedef uint64_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+#define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
+#else
+#error TARGET_LONG_SIZE undefined
+#endif
+
+#define EXCP_INTERRUPT 0x10000 /* async interruption */
+#define EXCP_HLT 0x10001 /* hlt instruction reached */
+#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
+/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
+ addresses on the same page. The top bits are the same. This allows
+ TLB invalidation to quickly clear a subset of the hash table. */
+#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
+#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
+#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
+#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
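A sketch of how a jump-cache hash can be composed from these constants so that only the low TB_JMP_PAGE_BITS vary within a page; the function name is illustrative (the real helper of this shape lives in exec-all.h) and it additionally needs the target's TARGET_PAGE_BITS:

    static inline unsigned int tb_jmp_cache_hash_sketch(target_ulong pc)
    {
        target_ulong tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
        return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
                | (tmp & TB_JMP_ADDR_MASK));
    }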
+
+#if !defined(CONFIG_USER_ONLY)
+#define CPU_TLB_BITS 8
+#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
+
+#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
+#define CPU_TLB_ENTRY_BITS 4
+#else
+#define CPU_TLB_ENTRY_BITS 5
+#endif
+
+typedef struct CPUTLBEntry {
+ /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+ bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
+ go directly to ram.
+ bit 3 : indicates that the entry is invalid
+ bit 2..0 : zero
+ */
+ target_ulong addr_read;
+ target_ulong addr_write;
+ target_ulong addr_code;
+ /* Addend to virtual address to get host address. IO accesses
+ use the corresponding iotlb value. */
+ uintptr_t addend;
+ /* padding to get a power of two size */
+ uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+ (sizeof(target_ulong) * 3 +
+ ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
+ sizeof(uintptr_t))];
+} CPUTLBEntry;
+
+extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
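As a worked check of the padding arithmetic above: on a 64-bit host with a 64-bit target (CPU_TLB_ENTRY_BITS == 5) the three addresses take 3 * 8 = 24 bytes, the alignment filler (-24) & 7 is 0, and addend adds 8, so dummy[] is empty and the entry is exactly 32 bytes; on a 64-bit host with a 32-bit target the sum is 3 * 4 + 4 + 8 = 24, leaving dummy[8] to pad the entry to 32. In both cases sizeof(CPUTLBEntry) equals 1 << CPU_TLB_ENTRY_BITS, which is what the negative-array-size trick above enforces at compile time.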
+
+#define CPU_COMMON_TLB \
+ /* The meaning of the MMU modes is defined in the target code. */ \
+ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ target_ulong tlb_flush_addr; \
+ target_ulong tlb_flush_mask;
+
+#else
+
+#define CPU_COMMON_TLB
+
+#endif
+
+
+#ifdef HOST_WORDS_BIGENDIAN
+typedef struct icount_decr_u16 {
+ uint16_t high;
+ uint16_t low;
+} icount_decr_u16;
+#else
+typedef struct icount_decr_u16 {
+ uint16_t low;
+ uint16_t high;
+} icount_decr_u16;
+#endif
+
+struct qemu_work_item;
+
+typedef struct CPUBreakpoint {
+ target_ulong pc;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUBreakpoint) entry;
+} CPUBreakpoint;
+
+typedef struct CPUWatchpoint {
+ target_ulong vaddr;
+ target_ulong len_mask;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUWatchpoint) entry;
+} CPUWatchpoint;
+
+#define CPU_TEMP_BUF_NLONGS 128
+#define CPU_COMMON \
+ struct TranslationBlock *current_tb; /* currently executing TB */ \
+ /* soft mmu support */ \
+ /* in order to avoid passing too many arguments to the MMIO \
+ helpers, we store some rarely used information in the CPU \
+       context. */                                                         \
+ uintptr_t mem_io_pc; /* host pc at which the memory was \
+ accessed */ \
+ target_ulong mem_io_vaddr; /* target virtual addr at which the \
+ memory was accessed */ \
+ uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
+ uint32_t interrupt_request; \
+ volatile sig_atomic_t exit_request; \
+ CPU_COMMON_TLB \
+ struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
+ /* buffer for temporaries in the code generator */ \
+ long temp_buf[CPU_TEMP_BUF_NLONGS]; \
+ \
+ int64_t icount_extra; /* Instructions until next timer event. */ \
+ /* Number of cycles left, with interrupt flag in high bit. \
+ This allows a single read-compare-cbranch-write sequence to test \
+ for both decrementer underflow and exceptions. */ \
+ union { \
+ uint32_t u32; \
+ icount_decr_u16 u16; \
+ } icount_decr; \
+ uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
+ \
+ /* from this point: preserved by CPU reset */ \
+ /* ice debug support */ \
+ QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
+ int singlestep_enabled; \
+ \
+ QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
+ CPUWatchpoint *watchpoint_hit; \
+ \
+ struct GDBRegisterState *gdb_regs; \
+ \
+ /* Core interrupt code */ \
+ jmp_buf jmp_env; \
+ int exception_index; \
+ \
+ CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
+ int cpu_index; /* CPU index (informative) */ \
+ uint32_t host_tid; /* host thread ID */ \
+    int numa_node; /* NUMA node this CPU belongs to */                     \
+ int nr_cores; /* number of cores within this CPU package */ \
+ int nr_threads;/* number of threads within this CPU */ \
+    int running; /* Nonzero if cpu is currently running (usermode). */     \
+ /* user data */ \
+ void *opaque; \
+ \
+ const char *cpu_model_str;
+
+#endif
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
new file mode 100644
index 0000000000..733c885a1f
--- /dev/null
+++ b/include/exec/cputlb.h
@@ -0,0 +1,46 @@
+/*
+ * Common CPU TLB handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef CPUTLB_H
+#define CPUTLB_H
+
+#if !defined(CONFIG_USER_ONLY)
+/* cputlb.c */
+void tlb_protect_code(ram_addr_t ram_addr);
+void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
+ target_ulong vaddr);
+void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
+ uintptr_t length);
+MemoryRegionSection *phys_page_find(struct AddressSpaceDispatch *d,
+ hwaddr index);
+void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length);
+void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
+extern int tlb_flush_count;
+
+/* exec.c */
+void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr);
+hwaddr memory_region_section_get_iotlb(CPUArchState *env,
+ MemoryRegionSection *section,
+ target_ulong vaddr,
+ hwaddr paddr,
+ int prot,
+ target_ulong *address);
+bool memory_region_is_unassigned(MemoryRegion *mr);
+
+#endif
+#endif
diff --git a/include/exec/def-helper.h b/include/exec/def-helper.h
new file mode 100644
index 0000000000..022a9ceb6a
--- /dev/null
+++ b/include/exec/def-helper.h
@@ -0,0 +1,275 @@
+/* Helper file for declaring TCG helper functions.
+ Should be included at the start and end of target-foo/helper.h.
+
+ Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
+ functions. Names should be specified without the helper_ prefix, and
+ the return and argument types specified. 3 basic types are understood
+ (i32, i64 and ptr). Additional aliases are provided for convenience and
+ to match the types used by the C helper implementation.
+
+ The target helper.h should be included in all files that use/define
+ helper functions. This will ensure that function prototypes are
+ consistent. In addition it should be included two extra times for
+ helper.c, defining:
+ GEN_HELPER 1 to produce op generation functions (gen_helper_*)
+ GEN_HELPER 2 to do runtime registration of helper functions.
+ (A usage sketch follows this header.)
+ */
+
+#ifndef DEF_HELPER_H
+#define DEF_HELPER_H 1
+
+#define HELPER(name) glue(helper_, name)
+
+#define GET_TCGV_i32 GET_TCGV_I32
+#define GET_TCGV_i64 GET_TCGV_I64
+#define GET_TCGV_ptr GET_TCGV_PTR
+
+/* Some types that make sense in C, but not for TCG. */
+#define dh_alias_i32 i32
+#define dh_alias_s32 i32
+#define dh_alias_int i32
+#define dh_alias_i64 i64
+#define dh_alias_s64 i64
+#define dh_alias_f32 i32
+#define dh_alias_f64 i64
+#if TARGET_LONG_BITS == 32
+#define dh_alias_tl i32
+#else
+#define dh_alias_tl i64
+#endif
+#define dh_alias_ptr ptr
+#define dh_alias_void void
+#define dh_alias_noreturn noreturn
+#define dh_alias_env ptr
+#define dh_alias(t) glue(dh_alias_, t)
+
+#define dh_ctype_i32 uint32_t
+#define dh_ctype_s32 int32_t
+#define dh_ctype_int int
+#define dh_ctype_i64 uint64_t
+#define dh_ctype_s64 int64_t
+#define dh_ctype_f32 float32
+#define dh_ctype_f64 float64
+#define dh_ctype_tl target_ulong
+#define dh_ctype_ptr void *
+#define dh_ctype_void void
+#define dh_ctype_noreturn void QEMU_NORETURN
+#define dh_ctype_env CPUArchState *
+#define dh_ctype(t) dh_ctype_##t
+
+/* We can't use glue() here because it falls foul of C preprocessor
+ recursive expansion rules. */
+#define dh_retvar_decl0_void void
+#define dh_retvar_decl0_noreturn void
+#define dh_retvar_decl0_i32 TCGv_i32 retval
+#define dh_retvar_decl0_i64 TCGv_i64 retval
+#define dh_retvar_decl0_ptr TCGv_ptr retval
+#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t))
+
+#define dh_retvar_decl_void
+#define dh_retvar_decl_noreturn
+#define dh_retvar_decl_i32 TCGv_i32 retval,
+#define dh_retvar_decl_i64 TCGv_i64 retval,
+#define dh_retvar_decl_ptr TCGv_ptr retval,
+#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
+
+#define dh_retvar_void TCG_CALL_DUMMY_ARG
+#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG
+#define dh_retvar_i32 GET_TCGV_i32(retval)
+#define dh_retvar_i64 GET_TCGV_i64(retval)
+#define dh_retvar_ptr GET_TCGV_ptr(retval)
+#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
+
+#define dh_is_64bit_void 0
+#define dh_is_64bit_noreturn 0
+#define dh_is_64bit_i32 0
+#define dh_is_64bit_i64 1
+#define dh_is_64bit_ptr (TCG_TARGET_REG_BITS == 64)
+#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))
+
+#define dh_is_signed_void 0
+#define dh_is_signed_noreturn 0
+#define dh_is_signed_i32 0
+#define dh_is_signed_s32 1
+#define dh_is_signed_i64 0
+#define dh_is_signed_s64 1
+#define dh_is_signed_f32 0
+#define dh_is_signed_f64 0
+#define dh_is_signed_tl 0
+#define dh_is_signed_int 1
+/* ??? This is highly specific to the host cpu. There are even special
+ extension instructions that may be required, e.g. ia64's addp4. But
+ for now we don't support any 64-bit targets with 32-bit pointers. */
+#define dh_is_signed_ptr 0
+#define dh_is_signed_env dh_is_signed_ptr
+#define dh_is_signed(t) dh_is_signed_##t
+
+#define dh_sizemask(t, n) \
+ sizemask |= dh_is_64bit(t) << (n*2); \
+ sizemask |= dh_is_signed(t) << (n*2+1)
+
+#define dh_arg(t, n) \
+ args[n - 1] = glue(GET_TCGV_, dh_alias(t))(glue(arg, n)); \
+ dh_sizemask(t, n)
+
+#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)
+
+
+#define DEF_HELPER_0(name, ret) \
+ DEF_HELPER_FLAGS_0(name, 0, ret)
+#define DEF_HELPER_1(name, ret, t1) \
+ DEF_HELPER_FLAGS_1(name, 0, ret, t1)
+#define DEF_HELPER_2(name, ret, t1, t2) \
+ DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2)
+#define DEF_HELPER_3(name, ret, t1, t2, t3) \
+ DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3)
+#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
+ DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
+#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
+ DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
+
+/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
+
+#endif /* DEF_HELPER_H */
+
+#ifndef GEN_HELPER
+/* Function prototypes. */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+dh_ctype(ret) HELPER(name) (void);
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1));
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4));
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5));
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == 1
+/* Gen functions. */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
+{ \
+ int sizemask; \
+ sizemask = dh_is_64bit(ret); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 0, NULL); \
+}
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1)) \
+{ \
+ TCGArg args[1]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 1, args); \
+}
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+ dh_arg_decl(t2, 2)) \
+{ \
+ TCGArg args[2]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ dh_arg(t2, 2); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 2, args); \
+}
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+ dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
+{ \
+ TCGArg args[3]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ dh_arg(t2, 2); \
+ dh_arg(t3, 3); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 3, args); \
+}
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) dh_arg_decl(t1, 1), \
+ dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
+{ \
+ TCGArg args[4]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ dh_arg(t2, 2); \
+ dh_arg(t3, 3); \
+ dh_arg(t4, 4); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 4, args); \
+}
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
+{ \
+ TCGArg args[5]; \
+ int sizemask = 0; \
+ dh_sizemask(ret, 0); \
+ dh_arg(t1, 1); \
+ dh_arg(t2, 2); \
+ dh_arg(t3, 3); \
+ dh_arg(t4, 4); \
+ dh_arg(t5, 5); \
+ tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 5, args); \
+}
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == 2
+/* Register helpers. */
+
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
+tcg_register_helper(HELPER(name), #name);
+
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
+DEF_HELPER_FLAGS_0(name, flags, ret)
+
+#undef GEN_HELPER
+#define GEN_HELPER -1
+
+#elif GEN_HELPER == -1
+/* Undefine macros. */
+
+#undef DEF_HELPER_FLAGS_0
+#undef DEF_HELPER_FLAGS_1
+#undef DEF_HELPER_FLAGS_2
+#undef DEF_HELPER_FLAGS_3
+#undef DEF_HELPER_FLAGS_4
+#undef DEF_HELPER_FLAGS_5
+#undef GEN_HELPER
+
+#endif
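A sketch of the inclusion passes described in the comment at the top of this header. Everything named foo_* is hypothetical; only DEF_HELPER_*, HELPER() and the GEN_HELPER passes come from def-helper.h:

/* target-foo/helper.h */
#include "exec/def-helper.h"

DEF_HELPER_2(foo_div, i32, i32, i32)      /* uint32_t helper_foo_div(uint32_t, uint32_t) */
DEF_HELPER_3(foo_mac, i64, env, i32, i32) /* 'env' becomes a CPUArchState * argument */

#include "exec/def-helper.h"

/* target-foo/helper.c: plain C implementations; the prototype pass above
   keeps these signatures consistent with the declarations */
uint32_t helper_foo_div(uint32_t num, uint32_t den)
{
    return den ? num / den : 0;
}

/* target-foo/translate.c: one extra inclusion per GEN_HELPER value */
#define GEN_HELPER 1                      /* generate gen_helper_foo_div(), ... */
#include "helper.h"

static void emit_div(TCGv_i32 dest, TCGv_i32 num, TCGv_i32 den)
{
    gen_helper_foo_div(dest, num, den);   /* expands to a tcg_gen_helperN() call */
}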
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
new file mode 100644
index 0000000000..46dca74fda
--- /dev/null
+++ b/include/exec/exec-all.h
@@ -0,0 +1,412 @@
+/*
+ * internal execution defines for qemu
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _EXEC_ALL_H_
+#define _EXEC_ALL_H_
+
+#include "qemu-common.h"
+
+/* Allow seeing translation results - the slowdown should be negligible, so we leave it enabled */
+#define DEBUG_DISAS
+
+/* Page tracking code uses ram addresses in system mode, and virtual
+ addresses in userspace mode. Define tb_page_addr_t to be an appropriate
+ type. */
+#if defined(CONFIG_USER_ONLY)
+typedef abi_ulong tb_page_addr_t;
+#else
+typedef ram_addr_t tb_page_addr_t;
+#endif
+
+/* is_jmp field values */
+#define DISAS_NEXT 0 /* next instruction can be analyzed */
+#define DISAS_JUMP 1 /* only pc was modified dynamically */
+#define DISAS_UPDATE 2 /* cpu state was modified dynamically */
+#define DISAS_TB_JUMP 3 /* only pc was modified statically */
+
+struct TranslationBlock;
+typedef struct TranslationBlock TranslationBlock;
+
+/* XXX: make safe guess about sizes */
+#define MAX_OP_PER_INSTR 208
+
+#if HOST_LONG_BITS == 32
+#define MAX_OPC_PARAM_PER_ARG 2
+#else
+#define MAX_OPC_PARAM_PER_ARG 1
+#endif
+#define MAX_OPC_PARAM_IARGS 5
+#define MAX_OPC_PARAM_OARGS 1
+#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
+
+/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
+ * and up to 4 + N parameters on 64-bit archs
+ * (N = number of input arguments + output arguments). */
+#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
+#define OPC_BUF_SIZE 640
+#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
+
+/* Maximum size a TCG op can expand to. This is complicated because a
+ single op may require several host instructions and register reloads.
+ For now take a wild guess at 192 bytes, which should allow at least
+ a couple of fixup instructions per argument. */
+#define TCG_MAX_OP_SIZE 192
+
+#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
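/* Worked example of the formula above, using the values defined in this
 * header: MAX_OPC_PARAM_ARGS = 5 + 1 = 6, so a 32-bit host
 * (MAX_OPC_PARAM_PER_ARG == 2) gets MAX_OPC_PARAM = 4 + 2 * 6 = 16 and
 * OPPARAM_BUF_SIZE = 640 * 16 = 10240, while a 64-bit host gets
 * MAX_OPC_PARAM = 4 + 6 = 10 and OPPARAM_BUF_SIZE = 6400. */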
+
+#include "qemu/log.h"
+
+void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
+void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
+void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
+ int pc_pos);
+
+void cpu_gen_init(void);
+int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
+ int *gen_code_size_ptr);
+bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);
+
+void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
+void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
+TranslationBlock *tb_gen_code(CPUArchState *env,
+ target_ulong pc, target_ulong cs_base, int flags,
+ int cflags);
+void cpu_exec_init(CPUArchState *env);
+void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
+int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
+ int is_cpu_write_access);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
+ int is_cpu_write_access);
+#if !defined(CONFIG_USER_ONLY)
+/* cputlb.c */
+void tlb_flush_page(CPUArchState *env, target_ulong addr);
+void tlb_flush(CPUArchState *env, int flush_global);
+void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+ hwaddr paddr, int prot,
+ int mmu_idx, target_ulong size);
+void tb_invalidate_phys_addr(hwaddr addr);
+#else
+static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
+{
+}
+
+static inline void tlb_flush(CPUArchState *env, int flush_global)
+{
+}
+#endif
+
+#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
+
+#define CODE_GEN_PHYS_HASH_BITS 15
+#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
+
+/* estimated block size for TB allocation */
+/* XXX: use a per code average code fragment size and modulate it
+ according to the host CPU */
+#if defined(CONFIG_SOFTMMU)
+#define CODE_GEN_AVG_BLOCK_SIZE 128
+#else
+#define CODE_GEN_AVG_BLOCK_SIZE 64
+#endif
+
+#if defined(__arm__) || defined(_ARCH_PPC) \
+ || defined(__x86_64__) || defined(__i386__) \
+ || defined(__sparc__) \
+ || defined(CONFIG_TCG_INTERPRETER)
+#define USE_DIRECT_JUMP
+#endif
+
+struct TranslationBlock {
+ target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
+ target_ulong cs_base; /* CS base for this block */
+ uint64_t flags; /* flags defining in which context the code was generated */
+ uint16_t size; /* size of target code for this block (1 <=
+ size <= TARGET_PAGE_SIZE) */
+ uint16_t cflags; /* compile flags */
+#define CF_COUNT_MASK 0x7fff
+#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */
+
+ uint8_t *tc_ptr; /* pointer to the translated code */
+ /* next matching tb for physical address. */
+ struct TranslationBlock *phys_hash_next;
+ /* first and second physical page containing code. The lower bit
+ of the pointer tells the index in page_next[] */
+ struct TranslationBlock *page_next[2];
+ tb_page_addr_t page_addr[2];
+
+ /* the following data are used to directly call another TB from
+ the code of this one. */
+ uint16_t tb_next_offset[2]; /* offset of original jump target */
+#ifdef USE_DIRECT_JUMP
+ uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
+#else
+ uintptr_t tb_next[2]; /* address of jump generated code */
+#endif
+ /* list of TBs jumping to this one. This is a circular list using
+ the two least significant bits of the pointers to tell what is
+ the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
+ jmp_first */
+ struct TranslationBlock *jmp_next[2];
+ struct TranslationBlock *jmp_first;
+ uint32_t icount;
+};
+
+static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
+{
+ target_ulong tmp;
+ tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+ return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
+}
+
+static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+{
+ target_ulong tmp;
+ tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
+ return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
+ | (tmp & TB_JMP_ADDR_MASK));
+}
+
+static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
+{
+ return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
+}
+
+void tb_free(TranslationBlock *tb);
+void tb_flush(CPUArchState *env);
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+
+extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
+
+#if defined(USE_DIRECT_JUMP)
+
+#if defined(CONFIG_TCG_INTERPRETER)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+ /* patch the branch destination */
+ *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+ /* no need to flush icache explicitly */
+}
+#elif defined(_ARCH_PPC)
+void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
+#define tb_set_jmp_target1 ppc_tb_set_jmp_target
+#elif defined(__i386__) || defined(__x86_64__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+ /* patch the branch destination */
+ *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+ /* no need to flush icache explicitly */
+}
+#elif defined(__arm__)
+static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
+{
+#if !QEMU_GNUC_PREREQ(4, 1)
+ register unsigned long _beg __asm ("a1");
+ register unsigned long _end __asm ("a2");
+ register unsigned long _flg __asm ("a3");
+#endif
+
+ /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
+ *(uint32_t *)jmp_addr =
+ (*(uint32_t *)jmp_addr & ~0xffffff)
+ | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
+
+#if QEMU_GNUC_PREREQ(4, 1)
+ __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
+#else
+ /* flush icache */
+ _beg = jmp_addr;
+ _end = jmp_addr + 4;
+ _flg = 0;
+ __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+#endif
+}
+#elif defined(__sparc__)
+void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
+#else
+#error tb_set_jmp_target1 is missing
+#endif
+
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+ int n, uintptr_t addr)
+{
+ uint16_t offset = tb->tb_jmp_offset[n];
+ tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
+}
+
+#else
+
+/* set the jump target */
+static inline void tb_set_jmp_target(TranslationBlock *tb,
+ int n, uintptr_t addr)
+{
+ tb->tb_next[n] = addr;
+}
+
+#endif
+
+static inline void tb_add_jump(TranslationBlock *tb, int n,
+ TranslationBlock *tb_next)
+{
+ /* NOTE: this test is only needed for thread safety */
+ if (!tb->jmp_next[n]) {
+ /* patch the native jump address */
+ tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
+
+ /* add in TB jmp circular list */
+ tb->jmp_next[n] = tb_next->jmp_first;
+ tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
+ }
+}
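/* Illustration of the encoding used above: starting from a fresh B whose
 * jmp_first carries the list-head marker 2 in its low bits (as described in
 * the TranslationBlock comments), tb_add_jump(A, 0, B) followed by
 * tb_add_jump(C, 1, B) leaves
 *   B->jmp_first   == (TranslationBlock *)((uintptr_t)C | 1)
 *   C->jmp_next[1] == (TranslationBlock *)((uintptr_t)A | 0)
 *   A->jmp_next[0] == B's original jmp_first (low bits 2, end of the walk)
 * so the low two bits of each pointer select which jmp_next[] slot to follow
 * when walking the list of TBs that jump to B. */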
+
+#include "exec/spinlock.h"
+
+extern spinlock_t tb_lock;
+
+extern int tb_invalidated_flag;
+
+/* The return address may point to the start of the next instruction.
+ Subtracting one gets us the call instruction itself. */
+#if defined(CONFIG_TCG_INTERPRETER)
+/* Softmmu, Alpha, MIPS, SH4 and SPARC user mode emulations call GETPC().
+ For all others, GETPC remains undefined (which makes TCI a little faster). */
+# if defined(CONFIG_SOFTMMU) || \
+ defined(TARGET_ALPHA) || defined(TARGET_MIPS) || \
+ defined(TARGET_SH4) || defined(TARGET_SPARC)
+extern uintptr_t tci_tb_ptr;
+# define GETPC() tci_tb_ptr
+# endif
+#elif defined(__s390__) && !defined(__s390x__)
+# define GETPC() \
+ (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
+#elif defined(__arm__)
+/* Thumb return addresses have the low bit set, so we need to subtract two.
+ This is still safe in ARM mode because instructions are 4 bytes. */
+# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
+#else
+# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
+#endif
+
+#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
+/* The qemu_ld/st optimization splits code generation into a fast and a slow
+ path, so an MMU helper called from the slow path needs special handling to
+ recover the fast path's pc without any additional argument.
+ It uses a tricky solution which embeds the fast path pc into the slow path.
+
+ Code flow in slow path:
+ (1) pre-process
+ (2) call MMU helper
+ (3) jump to (5)
+ (4) fast path information (implementation specific)
+ (5) post-process (e.g. stack adjust)
+ (6) jump to the code following the fast path
+ */
+# if defined(__i386__) || defined(__x86_64__)
+/* To avoid confusing the disassembler, a long jmp is used to embed the fast
+ path pc, so that its destination is the code following the fast path, even
+ though this jmp is never executed.
+
+ call MMU helper
+ jmp POST_PROC (2byte) <- GETRA()
+ jmp NEXT_CODE (5byte)
+ POST_PROCESS ... <- GETRA() + 7
+ */
+# define GETRA() ((uintptr_t)__builtin_return_address(0))
+# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
+ *(int32_t *)((void *)GETRA() + 3) - 1))
+# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
+# define GETRA() ((uintptr_t)__builtin_return_address(0))
+# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
+# else
+# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
+# endif
+bool is_tcg_gen_code(uintptr_t pc_ptr);
+# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
+#else
+# define GETPC_EXT() GETPC()
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+
+struct MemoryRegion *iotlb_to_region(hwaddr index);
+uint64_t io_mem_read(struct MemoryRegion *mr, hwaddr addr,
+ unsigned size);
+void io_mem_write(struct MemoryRegion *mr, hwaddr addr,
+ uint64_t value, unsigned size);
+
+void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
+ uintptr_t retaddr);
+
+#include "exec/softmmu_defs.h"
+
+#define ACCESS_TYPE (NB_MMU_MODES + 1)
+#define MEMSUFFIX _code
+
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
+{
+ return addr;
+}
+#else
+/* cputlb.c */
+tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
+#endif
+
+typedef void (CPUDebugExcpHandler)(CPUArchState *env);
+
+void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
+
+/* vl.c */
+extern int singlestep;
+
+/* cpu-exec.c */
+extern volatile sig_atomic_t exit_request;
+
+/* Deterministic execution requires that IO only be performed on the last
+ instruction of a TB so that interrupts take effect immediately. */
+static inline int can_do_io(CPUArchState *env)
+{
+ if (!use_icount) {
+ return 1;
+ }
+ /* If not executing code then assume we are ok. */
+ if (!env->current_tb) {
+ return 1;
+ }
+ return env->can_do_io != 0;
+}
+
+#endif
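A minimal sketch of the GETPC() pattern for helpers that may raise a guest exception after being called from generated code; the helper name, the alignment check and the exception number are invented, while cpu_restore_state(), cpu_loop_exit() and exception_index are the pieces declared above and in CPU_COMMON:

#include "cpu.h"                   /* the target's CPUArchState */
#include "exec/exec-all.h"

void helper_foo_check_align(CPUArchState *env, target_ulong addr)
{
    if (addr & 3) {
        /* Resolve the host return address back to the faulting guest
           instruction so the exception sees consistent CPU state. */
        cpu_restore_state(env, GETPC());
        env->exception_index = 1;  /* hypothetical target exception number */
        cpu_loop_exit(env);        /* longjmp back into the execution loop */
    }
}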
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
new file mode 100644
index 0000000000..668de66000
--- /dev/null
+++ b/include/exec/gdbstub.h
@@ -0,0 +1,53 @@
+#ifndef GDBSTUB_H
+#define GDBSTUB_H
+
+#define DEFAULT_GDBSTUB_PORT "1234"
+
+/* GDB breakpoint/watchpoint types */
+#define GDB_BREAKPOINT_SW 0
+#define GDB_BREAKPOINT_HW 1
+#define GDB_WATCHPOINT_WRITE 2
+#define GDB_WATCHPOINT_READ 3
+#define GDB_WATCHPOINT_ACCESS 4
+
+#ifdef NEED_CPU_H
+typedef void (*gdb_syscall_complete_cb)(CPUArchState *env,
+ target_ulong ret, target_ulong err);
+
+void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+int use_gdb_syscalls(void);
+void gdb_set_stop_cpu(CPUArchState *env);
+void gdb_exit(CPUArchState *, int);
+#ifdef CONFIG_USER_ONLY
+int gdb_queuesig (void);
+int gdb_handlesig (CPUArchState *, int);
+void gdb_signalled(CPUArchState *, int);
+void gdbserver_fork(CPUArchState *);
+#endif
+/* Get or set a register. Returns the size of the register. */
+typedef int (*gdb_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
+void gdb_register_coprocessor(CPUArchState *env,
+ gdb_reg_cb get_reg, gdb_reg_cb set_reg,
+ int num_regs, const char *xml, int g_pos);
+
+static inline int cpu_index(CPUArchState *env)
+{
+#if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
+ return env->host_tid;
+#else
+ return env->cpu_index + 1;
+#endif
+}
+
+#endif
+
+#ifdef CONFIG_USER_ONLY
+int gdbserver_start(int);
+#else
+int gdbserver_start(const char *port);
+#endif
+
+/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
+extern const char *const xml_builtin[][2];
+
+#endif
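A sketch of exposing extra registers to the stub with gdb_register_coprocessor(); the foo_* names and the 16-register bank described by a hypothetical foo-fp.xml feature file are illustrative only:

#include <string.h>
#include "cpu.h"
#include "exec/gdbstub.h"

static int foo_fpu_gdb_get_reg(CPUArchState *env, uint8_t *buf, int reg)
{
    memset(buf, 0, 4);   /* a real target would copy FP register 'reg' here */
    return 4;            /* bytes written */
}

static int foo_fpu_gdb_set_reg(CPUArchState *env, uint8_t *buf, int reg)
{
    return 4;            /* bytes consumed from buf */
}

static void foo_cpu_gdb_init(CPUArchState *env)
{
    gdb_register_coprocessor(env, foo_fpu_gdb_get_reg, foo_fpu_gdb_set_reg,
                             16, "foo-fp.xml", 0);
}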
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
new file mode 100644
index 0000000000..8043b3ba26
--- /dev/null
+++ b/include/exec/gen-icount.h
@@ -0,0 +1,53 @@
+#ifndef GEN_ICOUNT_H
+#define GEN_ICOUNT_H 1
+
+#include "qemu/timer.h"
+
+/* Helpers for instruction counting code generation. */
+
+static TCGArg *icount_arg;
+static int icount_label;
+
+static inline void gen_icount_start(void)
+{
+ TCGv_i32 count;
+
+ if (!use_icount)
+ return;
+
+ icount_label = gen_new_label();
+ count = tcg_temp_local_new_i32();
+ tcg_gen_ld_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u32));
+ /* This is a horrid hack to allow fixing up the value later. */
+ icount_arg = tcg_ctx.gen_opparam_ptr + 1;
+ tcg_gen_subi_i32(count, count, 0xdeadbeef);
+
+ tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
+ tcg_gen_st16_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u16.low));
+ tcg_temp_free_i32(count);
+}
+
+static void gen_icount_end(TranslationBlock *tb, int num_insns)
+{
+ if (use_icount) {
+ *icount_arg = num_insns;
+ gen_set_label(icount_label);
+ tcg_gen_exit_tb((tcg_target_long)tb + 2);
+ }
+}
+
+static inline void gen_io_start(void)
+{
+ TCGv_i32 tmp = tcg_const_i32(1);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void gen_io_end(void)
+{
+ TCGv_i32 tmp = tcg_const_i32(0);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
+ tcg_temp_free_i32(tmp);
+}
+
+#endif
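These helpers are meant to bracket a target's translation loop. A hedged sketch of the calling pattern, with foo_decode_insn() and foo_block_ended() standing in for the target's decoder:

static void foo_decode_insn(CPUArchState *env);   /* hypothetical: emits TCG ops */
static bool foo_block_ended(CPUArchState *env);   /* hypothetical */

static void foo_gen_block(CPUArchState *env, TranslationBlock *tb)
{
    int num_insns = 0;
    int max_insns = tb->cflags & CF_COUNT_MASK;

    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();             /* emit the icount decrement and check */
    do {
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();         /* the block's last insn may perform I/O */
        }
        foo_decode_insn(env);
        num_insns++;
    } while (num_insns < max_insns && !foo_block_ended(env));
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    gen_icount_end(tb, num_insns);  /* patch in the real instruction count */
    tb->icount = num_insns;
}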
diff --git a/include/exec/hwaddr.h b/include/exec/hwaddr.h
new file mode 100644
index 0000000000..251cf9216f
--- /dev/null
+++ b/include/exec/hwaddr.h
@@ -0,0 +1,24 @@
+/* Define hwaddr if it exists. */
+
+#ifndef HWADDR_H
+#define HWADDR_H
+
+#ifndef CONFIG_USER_ONLY
+
+#define HWADDR_BITS 64
+/* hwaddr is the type of a physical address (its size can
+ be different from 'target_ulong'). */
+
+typedef uint64_t hwaddr;
+#define HWADDR_MAX UINT64_MAX
+#define TARGET_FMT_plx "%016" PRIx64
+#define HWADDR_PRId PRId64
+#define HWADDR_PRIi PRIi64
+#define HWADDR_PRIo PRIo64
+#define HWADDR_PRIu PRIu64
+#define HWADDR_PRIx PRIx64
+#define HWADDR_PRIX PRIX64
+
+#endif
+
+#endif
diff --git a/include/exec/ioport.h b/include/exec/ioport.h
new file mode 100644
index 0000000000..fc28350a3c
--- /dev/null
+++ b/include/exec/ioport.h
@@ -0,0 +1,78 @@
+/*
+ * defines ioport related functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************************
+ * IO ports API
+ */
+
+#ifndef IOPORT_H
+#define IOPORT_H
+
+#include "qemu-common.h"
+#include "exec/iorange.h"
+
+typedef uint32_t pio_addr_t;
+#define FMT_pioaddr PRIx32
+
+#define MAX_IOPORTS (64 * 1024)
+#define IOPORTS_MASK (MAX_IOPORTS - 1)
+
+/* These should really be in isa.h, but are here to make pc.h happy. */
+typedef void (IOPortWriteFunc)(void *opaque, uint32_t address, uint32_t data);
+typedef uint32_t (IOPortReadFunc)(void *opaque, uint32_t address);
+typedef void (IOPortDestructor)(void *opaque);
+
+void ioport_register(IORange *iorange);
+int register_ioport_read(pio_addr_t start, int length, int size,
+ IOPortReadFunc *func, void *opaque);
+int register_ioport_write(pio_addr_t start, int length, int size,
+ IOPortWriteFunc *func, void *opaque);
+void isa_unassign_ioport(pio_addr_t start, int length);
+bool isa_is_ioport_assigned(pio_addr_t start);
+
+void cpu_outb(pio_addr_t addr, uint8_t val);
+void cpu_outw(pio_addr_t addr, uint16_t val);
+void cpu_outl(pio_addr_t addr, uint32_t val);
+uint8_t cpu_inb(pio_addr_t addr);
+uint16_t cpu_inw(pio_addr_t addr);
+uint32_t cpu_inl(pio_addr_t addr);
+
+struct MemoryRegion;
+struct MemoryRegionPortio;
+
+typedef struct PortioList {
+ const struct MemoryRegionPortio *ports;
+ struct MemoryRegion *address_space;
+ unsigned nr;
+ struct MemoryRegion **regions;
+ struct MemoryRegion **aliases;
+ void *opaque;
+ const char *name;
+} PortioList;
+
+void portio_list_init(PortioList *piolist,
+ const struct MemoryRegionPortio *callbacks,
+ void *opaque, const char *name);
+void portio_list_destroy(PortioList *piolist);
+void portio_list_add(PortioList *piolist,
+ struct MemoryRegion *address_space,
+ uint32_t addr);
+void portio_list_del(PortioList *piolist);
+
+#endif /* IOPORT_H */
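A sketch of the PortioList API for an ISA-style device; all foo_* names, the register layout and the 0x3f0 base port are invented, and MemoryRegionPortio itself is defined in exec/memory.h:

#include "exec/memory.h"
#include "exec/ioport.h"

typedef struct FooDevState {
    PortioList piolist;
    uint8_t regs[4];
} FooDevState;

static uint32_t foo_io_read(void *opaque, uint32_t addr)
{
    FooDevState *s = opaque;
    return s->regs[addr & 3];
}

static void foo_io_write(void *opaque, uint32_t addr, uint32_t val)
{
    FooDevState *s = opaque;
    s->regs[addr & 3] = val;
}

static const MemoryRegionPortio foo_portio[] = {
    { 0, 4, 1, .read = foo_io_read, .write = foo_io_write }, /* four byte-wide ports */
    PORTIO_END_OF_LIST(),
};

static void foo_map_io(FooDevState *s, struct MemoryRegion *isa_io_space)
{
    portio_list_init(&s->piolist, foo_portio, s, "foo");
    portio_list_add(&s->piolist, isa_io_space, 0x3f0); /* base I/O port */
}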
diff --git a/include/exec/iorange.h b/include/exec/iorange.h
new file mode 100644
index 0000000000..cd980a8312
--- /dev/null
+++ b/include/exec/iorange.h
@@ -0,0 +1,31 @@
+#ifndef IORANGE_H
+#define IORANGE_H
+
+#include <stdint.h>
+
+typedef struct IORange IORange;
+typedef struct IORangeOps IORangeOps;
+
+struct IORangeOps {
+ void (*read)(IORange *iorange, uint64_t offset, unsigned width,
+ uint64_t *data);
+ void (*write)(IORange *iorange, uint64_t offset, unsigned width,
+ uint64_t data);
+ void (*destructor)(IORange *iorange);
+};
+
+struct IORange {
+ const IORangeOps *ops;
+ uint64_t base;
+ uint64_t len;
+};
+
+static inline void iorange_init(IORange *iorange, const IORangeOps *ops,
+ uint64_t base, uint64_t len)
+{
+ iorange->ops = ops;
+ iorange->base = base;
+ iorange->len = len;
+}
+
+#endif
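A minimal sketch of an IORange client: supply read/write callbacks through IORangeOps and bind them to a base and length with iorange_init(). The foo_* names and the 0x1000/0x100 range are illustrative:

static void foo_range_read(IORange *iorange, uint64_t offset, unsigned width,
                           uint64_t *data)
{
    *data = 0;   /* a real device would decode offset and width here */
}

static void foo_range_write(IORange *iorange, uint64_t offset, unsigned width,
                            uint64_t data)
{
    /* a real device would update its state here */
}

static const IORangeOps foo_range_ops = {
    .read = foo_range_read,
    .write = foo_range_write,
};

static IORange foo_range;

static void foo_range_setup(void)
{
    iorange_init(&foo_range, &foo_range_ops, 0x1000, 0x100); /* base, len */
}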
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
new file mode 100644
index 0000000000..1da240039d
--- /dev/null
+++ b/include/exec/memory-internal.h
@@ -0,0 +1,141 @@
+/*
+ * Declarations for obsolete exec.c functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY. Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef MEMORY_INTERNAL_H
+#define MEMORY_INTERNAL_H
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/xen.h"
+
+typedef struct PhysPageEntry PhysPageEntry;
+
+struct PhysPageEntry {
+ uint16_t is_leaf : 1;
+ /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
+ uint16_t ptr : 15;
+};
+
+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+
+struct AddressSpaceDispatch {
+ /* This is a multi-level map on the physical address space.
+ * The bottom level has pointers to MemoryRegionSections.
+ */
+ PhysPageEntry phys_map;
+ MemoryListener listener;
+};
+
+void address_space_init_dispatch(AddressSpace *as);
+void address_space_destroy_dispatch(AddressSpace *as);
+
+ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+ MemoryRegion *mr);
+ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
+void qemu_ram_free(ram_addr_t addr);
+void qemu_ram_free_from_ptr(ram_addr_t addr);
+
+struct MemoryRegion;
+struct MemoryRegionSection;
+
+void qemu_register_coalesced_mmio(hwaddr addr, ram_addr_t size);
+void qemu_unregister_coalesced_mmio(hwaddr addr, ram_addr_t size);
+
+#define VGA_DIRTY_FLAG 0x01
+#define CODE_DIRTY_FLAG 0x02
+#define MIGRATION_DIRTY_FLAG 0x08
+
+static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
+{
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
+}
+
+/* read dirty bit (return 0 or 1) */
+static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
+{
+ return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
+}
+
+static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
+ ram_addr_t length,
+ int dirty_flags)
+{
+ int ret = 0;
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
+ }
+ return ret;
+}
+
+static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+ int dirty_flags)
+{
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+}
+
+static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
+{
+ cpu_physical_memory_set_dirty_flags(addr, 0xff);
+}
+
+static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
+ int dirty_flags)
+{
+ int mask = ~dirty_flags;
+
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
+}
+
+static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
+ ram_addr_t length,
+ int dirty_flags)
+{
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
+ }
+ xen_modified_memory(addr, length);
+}
+
+static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
+ ram_addr_t length,
+ int dirty_flags)
+{
+ ram_addr_t addr, end;
+
+ end = TARGET_PAGE_ALIGN(start + length);
+ start &= TARGET_PAGE_MASK;
+ for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
+ cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
+ }
+}
+
+void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
+ int dirty_flags);
+
+extern const IORangeOps memory_region_iorange_ops;
+
+#endif
+
+#endif
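The helpers above manipulate one flag byte per target page in ram_list.phys_dirty. Since this header is private to exec.c and memory.c, the fragment below only illustrates how those bits combine and is not a suggested external API; foo_dirty_demo() is invented:

#include <assert.h>

static void foo_dirty_demo(ram_addr_t page)
{
    cpu_physical_memory_set_dirty(page);         /* flag byte becomes 0xff */
    assert(cpu_physical_memory_is_dirty(page));  /* dirty for every client */

    /* Clear only the migration bit; the VGA and CODE bits stay set. */
    cpu_physical_memory_clear_dirty_flags(page, MIGRATION_DIRTY_FLAG);
    assert(!(cpu_physical_memory_get_dirty_flags(page) & MIGRATION_DIRTY_FLAG));
    assert(cpu_physical_memory_get_dirty(page, TARGET_PAGE_SIZE,
                                         VGA_DIRTY_FLAG));
}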
diff --git a/include/exec/memory.h b/include/exec/memory.h
new file mode 100644
index 0000000000..2322732dce
--- /dev/null
+++ b/include/exec/memory.h
@@ -0,0 +1,898 @@
+/*
+ * Physical memory management API
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_H
+#define MEMORY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "qemu-common.h"
+#include "exec/cpu-common.h"
+#include "exec/hwaddr.h"
+#include "qemu/queue.h"
+#include "exec/iorange.h"
+#include "exec/ioport.h"
+#include "qemu/int128.h"
+
+typedef struct MemoryRegionOps MemoryRegionOps;
+typedef struct MemoryRegion MemoryRegion;
+typedef struct MemoryRegionPortio MemoryRegionPortio;
+typedef struct MemoryRegionMmio MemoryRegionMmio;
+
+/* Must match *_DIRTY_FLAGS in cpu-all.h. To be replaced with dynamic
+ * registration.
+ */
+#define DIRTY_MEMORY_VGA 0
+#define DIRTY_MEMORY_CODE 1
+#define DIRTY_MEMORY_MIGRATION 3
+
+struct MemoryRegionMmio {
+ CPUReadMemoryFunc *read[3];
+ CPUWriteMemoryFunc *write[3];
+};
+
+/* Internal use; thunks between old-style IORange and MemoryRegions. */
+typedef struct MemoryRegionIORange MemoryRegionIORange;
+struct MemoryRegionIORange {
+ IORange iorange;
+ MemoryRegion *mr;
+ hwaddr offset;
+};
+
+/*
+ * Memory region callbacks
+ */
+struct MemoryRegionOps {
+ /* Read from the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ uint64_t (*read)(void *opaque,
+ hwaddr addr,
+ unsigned size);
+ /* Write to the memory region. @addr is relative to @mr; @size is
+ * in bytes. */
+ void (*write)(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned size);
+
+ enum device_endian endianness;
+ /* Guest-visible constraints: */
+ struct {
+ /* If nonzero, specify bounds on access sizes beyond which a machine
+ * check is thrown.
+ */
+ unsigned min_access_size;
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise unaligned
+ * accesses throw machine checks.
+ */
+ bool unaligned;
+ /*
+ * If present, and returns #false, the transaction is not accepted
+ * by the device (and results in machine dependent behaviour such
+ * as a machine check exception).
+ */
+ bool (*accepts)(void *opaque, hwaddr addr,
+ unsigned size, bool is_write);
+ } valid;
+ /* Internal implementation constraints: */
+ struct {
+ /* If nonzero, specifies the minimum size implemented. Smaller sizes
+ * will be rounded upwards and a partial result will be returned.
+ */
+ unsigned min_access_size;
+ /* If nonzero, specifies the maximum size implemented. Larger sizes
+ * will be done as a series of accesses with smaller sizes.
+ */
+ unsigned max_access_size;
+ /* If true, unaligned accesses are supported. Otherwise all accesses
+ * are converted to (possibly multiple) naturally aligned accesses.
+ */
+ bool unaligned;
+ } impl;
+
+ /* If .read and .write are not present, old_portio may be used for
+ * backwards compatibility with old portio registration
+ */
+ const MemoryRegionPortio *old_portio;
+ /* If .read and .write are not present, old_mmio may be used for
+ * backwards compatibility with old mmio registration
+ */
+ const MemoryRegionMmio old_mmio;
+};
+
+typedef struct CoalescedMemoryRange CoalescedMemoryRange;
+typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
+
+struct MemoryRegion {
+ /* All fields are private - violators will be prosecuted */
+ const MemoryRegionOps *ops;
+ void *opaque;
+ MemoryRegion *parent;
+ Int128 size;
+ hwaddr addr;
+ void (*destructor)(MemoryRegion *mr);
+ ram_addr_t ram_addr;
+ bool subpage;
+ bool terminates;
+ bool readable;
+ bool ram;
+ bool readonly; /* For RAM regions */
+ bool enabled;
+ bool rom_device;
+ bool warning_printed; /* For reservations */
+ bool flush_coalesced_mmio;
+ MemoryRegion *alias;
+ hwaddr alias_offset;
+ unsigned priority;
+ bool may_overlap;
+ QTAILQ_HEAD(subregions, MemoryRegion) subregions;
+ QTAILQ_ENTRY(MemoryRegion) subregions_link;
+ QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
+ const char *name;
+ uint8_t dirty_log_mask;
+ unsigned ioeventfd_nb;
+ MemoryRegionIoeventfd *ioeventfds;
+};
+
+struct MemoryRegionPortio {
+ uint32_t offset;
+ uint32_t len;
+ unsigned size;
+ IOPortReadFunc *read;
+ IOPortWriteFunc *write;
+};
+
+#define PORTIO_END_OF_LIST() { }
+
+typedef struct AddressSpace AddressSpace;
+
+/**
+ * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ */
+struct AddressSpace {
+ /* All fields are private. */
+ const char *name;
+ MemoryRegion *root;
+ struct FlatView *current_map;
+ int ioeventfd_nb;
+ struct MemoryRegionIoeventfd *ioeventfds;
+ struct AddressSpaceDispatch *dispatch;
+ QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+};
+
+typedef struct MemoryRegionSection MemoryRegionSection;
+
+/**
+ * MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @address_space: the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ * relative to the region's address space
+ * @readonly: writes to this section are ignored
+ */
+struct MemoryRegionSection {
+ MemoryRegion *mr;
+ AddressSpace *address_space;
+ hwaddr offset_within_region;
+ uint64_t size;
+ hwaddr offset_within_address_space;
+ bool readonly;
+};
+
+typedef struct MemoryListener MemoryListener;
+
+/**
+ * MemoryListener: callbacks structure for updates to the physical memory map
+ *
+ * Allows a component to adjust to changes in the guest-visible memory map.
+ * Use with memory_listener_register() and memory_listener_unregister().
+ */
+struct MemoryListener {
+ void (*begin)(MemoryListener *listener);
+ void (*commit)(MemoryListener *listener);
+ void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_global_start)(MemoryListener *listener);
+ void (*log_global_stop)(MemoryListener *listener);
+ void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+ void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
+ bool match_data, uint64_t data, EventNotifier *e);
+ void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+ hwaddr addr, hwaddr len);
+ /* Lower = earlier (during add), later (during del) */
+ unsigned priority;
+ AddressSpace *address_space_filter;
+ QTAILQ_ENTRY(MemoryListener) link;
+};
+
+/**
+ * memory_region_init: Initialize a memory region
+ *
+ * The region typically acts as a container for other memory regions. Use
+ * memory_region_add_subregion() to add subregions.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region; any subregions beyond this size will be clipped
+ */
+void memory_region_init(MemoryRegion *mr,
+ const char *name,
+ uint64_t size);
+/**
+ * memory_region_init_io: Initialize an I/O memory region.
+ *
+ * Accesses into the region will cause the callbacks in @ops to be called.
+ * If @size is nonzero, subregions will be clipped to @size.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @ops: a structure containing read and write callbacks to be used when
+ * I/O is performed on the region.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_io(MemoryRegion *mr,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size);
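/* Illustrative sketch of the contract documented above, with all foo_*
 * names, the register block and the 0x100 region size invented: the device
 * backs a small array of 32-bit registers and accepts 4-byte accesses only. */
static uint64_t foo_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    uint32_t *regs = opaque;
    return regs[addr >> 2];
}

static void foo_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    uint32_t *regs = opaque;
    regs[addr >> 2] = data;
}

static const MemoryRegionOps foo_mmio_ops = {
    .read = foo_mmio_read,
    .write = foo_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void foo_init_mmio(MemoryRegion *mr, uint32_t *regs)
{
    memory_region_init_io(mr, &foo_mmio_ops, regs, "foo-mmio", 0x100);
}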
+
+/**
+ * memory_region_init_ram: Initialize RAM memory region. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: the name of the region.
+ * @size: size of the region.
+ */
+void memory_region_init_ram(MemoryRegion *mr,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_ram_ptr: Initialize RAM memory region from a
+ * user-provided pointer. Accesses into the
+ * region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ */
+void memory_region_init_ram_ptr(MemoryRegion *mr,
+ const char *name,
+ uint64_t size,
+ void *ptr);
+
+/**
+ * memory_region_init_alias: Initialize a memory region that aliases all or a
+ * part of another memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @name: used for debugging; not visible to the user or ABI
+ * @orig: the region to be referenced; @mr will be equivalent to
+ * @orig between @offset and @offset + @size - 1.
+ * @offset: start of the section in @orig to be referenced.
+ * @size: size of the region.
+ */
+void memory_region_init_alias(MemoryRegion *mr,
+ const char *name,
+ MemoryRegion *orig,
+ hwaddr offset,
+ uint64_t size);
+
+/**
+ * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
+ * handled via callbacks.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @ops: callbacks for write access handling.
+ * @name: the name of the region.
+ * @size: size of the region.
+ */
+void memory_region_init_rom_device(MemoryRegion *mr,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size);
+
+/**
+ * memory_region_init_reservation: Initialize a memory region that reserves
+ * I/O space.
+ *
+ * A reservation region primarily serves debugging purposes. It claims I/O
+ * space that is not supposed to be handled by QEMU itself. Any access via
+ * the memory API will cause an abort().
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_reservation(MemoryRegion *mr,
+ const char *name,
+ uint64_t size);
+/**
+ * memory_region_destroy: Destroy a memory region and reclaim all resources.
+ *
+ * @mr: the region to be destroyed. May not currently be a subregion
+ * (see memory_region_add_subregion()) or referenced in an alias
+ * (see memory_region_init_alias()).
+ */
+void memory_region_destroy(MemoryRegion *mr);
+
+/**
+ * memory_region_size: get a memory region's size.
+ *
+ * @mr: the memory region being queried.
+ */
+uint64_t memory_region_size(MemoryRegion *mr);
+
+/**
+ * memory_region_is_ram: check whether a memory region is random access
+ *
+ * Returns %true if the memory region is random access.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_ram(MemoryRegion *mr);
+
+/**
+ * memory_region_is_romd: check whether a memory region is ROMD
+ *
+ * Returns %true if the memory region is ROMD and currently set to allow
+ * direct reads.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_romd(MemoryRegion *mr)
+{
+ return mr->rom_device && mr->readable;
+}
+
+/**
+ * memory_region_name: get a memory region's name
+ *
+ * Returns the string that was used to initialize the memory region.
+ *
+ * @mr: the memory region being queried
+ */
+const char *memory_region_name(MemoryRegion *mr);
+
+/**
+ * memory_region_is_logging: return whether a memory region is logging writes
+ *
+ * Returns %true if the memory region is logging writes
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_logging(MemoryRegion *mr);
+
+/**
+ * memory_region_is_rom: check whether a memory region is ROM
+ *
+ * Returns %true if the memory region is read-only memory.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_rom(MemoryRegion *mr);
+
+/**
+ * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
+ *
+ * Returns a host pointer to a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
+ * care.
+ *
+ * @mr: the memory region being queried.
+ */
+void *memory_region_get_ram_ptr(MemoryRegion *mr);
+
+/**
+ * memory_region_set_log: Turn dirty logging on or off for a region.
+ *
+ * Turns dirty logging on or off for a specified client (display, migration).
+ * Only meaningful for RAM regions.
+ *
+ * @mr: the memory region being updated.
+ * @log: whether dirty logging is to be enabled or disabled.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
+
+/**
+ * memory_region_get_dirty: Check whether a range of bytes is dirty
+ * for a specified client.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client. Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+
+/**
+ * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
+ *
+ * Marks a range of bytes as dirty, after it has been dirtied outside
+ * guest code.
+ *
+ * @mr: the memory region being dirtied.
+ * @addr: the address (relative to the start of the region) being dirtied.
+ * @size: size of the range being dirtied.
+ */
+void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size);
+
+/**
+ * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
+ * for a specified client, and clear it if so.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client. Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+/**
+ * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
+ * any external TLBs (e.g. kvm)
+ *
+ * Flushes dirty information from accelerators such as kvm and vhost-net
+ * and makes it available to users of the memory API.
+ *
+ * @mr: the region being flushed.
+ */
+void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
+
+/**
+ * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
+ * client.
+ *
+ * Marks a range of pages as no longer dirty.
+ *
+ * @mr: the region being updated.
+ * @addr: the start of the subrange being cleaned.
+ * @size: the size of the subrange being cleaned.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ * %DIRTY_MEMORY_VGA.
+ */
+void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
+ hwaddr size, unsigned client);
+
+/**
+ * memory_region_set_readonly: Turn a memory region read-only (or read-write)
+ *
+ * Allows a memory region to be marked as read-only (turning it into a ROM).
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @readonly: whether the region is to be ROM or RAM.
+ */
+void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
+
+/**
+ * memory_region_rom_device_set_readable: enable/disable ROM readability
+ *
+ * Allows a ROM device (initialized with memory_region_init_rom_device()) to
+ * be marked as readable (default) or not readable. When it is readable,
+ * the device is mapped to guest memory. When not readable, reads are
+ * forwarded to the #MemoryRegion.read function.
+ *
+ * @mr: the memory region to be updated
+ * @readable: whether reads are satisfied directly (%true) or via callbacks
+ * (%false)
+ */
+void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);
+
+/**
+ * memory_region_set_coalescing: Enable memory coalescing for the region.
+ *
+ * Enables writes to the region to be queued for later processing. MMIO ->write
+ * callbacks may be delayed until a non-coalesced MMIO is issued.
+ * Only useful for IO regions. Roughly similar to write-combining hardware.
+ *
+ * @mr: the memory region to be write coalesced
+ */
+void memory_region_set_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
+ * a region.
+ *
+ * Like memory_region_set_coalescing(), but works on a sub-range of a region.
+ * Multiple calls can be issued to coalesce disjoint ranges.
+ *
+ * @mr: the memory region to be updated.
+ * @offset: the start of the range within the region to be coalesced.
+ * @size: the size of the subrange to be coalesced.
+ */
+void memory_region_add_coalescing(MemoryRegion *mr,
+ hwaddr offset,
+ uint64_t size);
+
+/**
+ * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
+ *
+ * Disables any coalescing caused by memory_region_set_coalescing() or
+ * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
+ * hardware.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
+ * accesses.
+ *
+ * Ensures that pending coalesced MMIO requests are flushed before the memory
+ * region is accessed. This property is automatically enabled for all regions
+ * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
+ * accesses.
+ *
+ * Clear the automatic coalesced MMIO flushing enabled via
+ * memory_region_set_flush_coalesced. Note that this service has no effect on
+ * memory regions that have MMIO coalescing enabled for themselves. For them,
+ * automatic flushing will stop once coalescing is disabled.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_add_eventfd: Request an eventfd to be triggered when a word
+ * is written to a location.
+ *
+ * Marks a word in an IO region (initialized with memory_region_init_io())
+ * as a trigger for an eventfd event. The I/O callback will not be called.
+ * The caller must be prepared to handle failure (that is, take the required
+ * action if the callback _is_ called).
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
+ **/
+void memory_region_add_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
+/**
+ * memory_region_del_eventfd: Cancel an eventfd.
+ *
+ * Cancels an eventfd trigger requested by a previous
+ * memory_region_add_eventfd() call.
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
+ */
+void memory_region_del_eventfd(MemoryRegion *mr,
+ hwaddr addr,
+ unsigned size,
+ bool match_data,
+ uint64_t data,
+ EventNotifier *e);
+
+/**
+ * memory_region_add_subregion: Add a subregion to a container.
+ *
+ * Adds a subregion at @offset. The subregion may not overlap with other
+ * subregions (except for those explicitly marked as overlapping). A region
+ * may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ */
+void memory_region_add_subregion(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion);
+/**
+ * memory_region_add_subregion_overlap: Add a subregion to a container
+ * with overlap.
+ *
+ * Adds a subregion at @offset. The subregion may overlap with other
+ * subregions. Conflicts are resolved by having a higher @priority hide a
+ * lower @priority. Subregions without priority are taken as @priority 0.
+ * A region may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ * initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ * @priority: used for resolving overlaps; highest priority wins.
+ */
+void memory_region_add_subregion_overlap(MemoryRegion *mr,
+ hwaddr offset,
+ MemoryRegion *subregion,
+ unsigned priority);
+
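+/*
+ * Usage sketch (illustrative only; 'system', 'ram' and 'mmio' stand for
+ * regions initialized elsewhere):
+ *
+ *     memory_region_add_subregion(system, 0x00000000, ram);
+ *     // the MMIO window is layered above the RAM: priority 1 beats the
+ *     // default priority 0 wherever the two regions overlap
+ *     memory_region_add_subregion_overlap(system, 0xfee00000, mmio, 1);
+ */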
+/**
+ * memory_region_get_ram_addr: Get the ram address associated with a memory
+ * region
+ *
+ * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
+ * code is being reworked.
+ */
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
+
+/**
+ * memory_region_del_subregion: Remove a subregion.
+ *
+ * Removes a subregion from its container.
+ *
+ * @mr: the container to be updated.
+ * @subregion: the region being removed; must be a current subregion of @mr.
+ */
+void memory_region_del_subregion(MemoryRegion *mr,
+ MemoryRegion *subregion);
+
+/**
+ * memory_region_set_enabled: dynamically enable or disable a region
+ *
+ * Enables or disables a memory region. A disabled memory region
+ * ignores all accesses to itself and its subregions. It does not
+ * obscure sibling subregions with lower priority - it simply behaves as
+ * if it was removed from the hierarchy.
+ *
+ * Regions default to being enabled.
+ *
+ * @mr: the region to be updated
+ * @enabled: whether to enable or disable the region
+ */
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
+
+/**
+ * memory_region_set_address: dynamically update the address of a region
+ *
+ * Dynamically updates the address of a region, relative to its parent.
+ * May be used on regions that are currently part of a memory hierarchy.
+ *
+ * @mr: the region to be updated
+ * @addr: new address, relative to parent region
+ */
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
+
+/**
+ * memory_region_set_alias_offset: dynamically update a memory alias's offset
+ *
+ * Dynamically updates the offset into the target region that an alias points
+ * to, as if the fourth argument to memory_region_init_alias() has changed.
+ *
+ * @mr: the #MemoryRegion to be updated; should be an alias.
+ * @offset: the new offset into the target memory region
+ */
+void memory_region_set_alias_offset(MemoryRegion *mr,
+ hwaddr offset);
+
+/**
+ * memory_region_find: locate a MemoryRegion in an address space
+ *
+ * Locates the first #MemoryRegion within an address space given by
+ * @address_space that overlaps the range given by @addr and @size.
+ *
+ * Returns a #MemoryRegionSection that describes a contiguous overlap.
+ * It will have the following characteristics:
+ * .@offset_within_address_space >= @addr
+ * .@offset_within_address_space + .@size <= @addr + @size
+ * .@size = 0 iff no overlap was found
+ * .@mr is non-%NULL iff an overlap was found
+ *
+ * @address_space: a top-level (i.e. parentless) region that contains
+ * the region to be found
+ * @addr: start of the area within @address_space to be searched
+ * @size: size of the area to be searched
+ */
+MemoryRegionSection memory_region_find(MemoryRegion *address_space,
+ hwaddr addr, uint64_t size);
+
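+/*
+ * Usage sketch (illustrative only; 'system' is a top-level region):
+ *
+ *     MemoryRegionSection sec = memory_region_find(system, addr, 4);
+ *
+ *     if (!sec.mr) {
+ *         // nothing is mapped at 'addr'
+ *     } else {
+ *         // sec.offset_within_region is the start of the overlap inside
+ *         // sec.mr, and sec.size is the length of the overlap
+ *     }
+ */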
+/**
+ * memory_region_section_addr: get offset within MemoryRegionSection
+ *
+ * Returns offset within MemoryRegionSection
+ *
+ * @section: the memory region section being queried
+ * @addr: address in address space
+ */
+static inline hwaddr
+memory_region_section_addr(MemoryRegionSection *section,
+ hwaddr addr)
+{
+ addr -= section->offset_within_address_space;
+ addr += section->offset_within_region;
+ return addr;
+}
+
+/**
+ * memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
+ *
+ * Synchronizes the dirty page log for an entire address space.
+ * @address_space: a top-level (i.e. parentless) region that contains the
+ * memory being synchronized
+ */
+void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);
+
+/**
+ * memory_region_transaction_begin: Start a transaction.
+ *
+ * During a transaction, changes will be accumulated and made visible
+ * only when the transaction ends (is committed).
+ */
+void memory_region_transaction_begin(void);
+
+/**
+ * memory_region_transaction_commit: Commit a transaction and make changes
+ * visible to the guest.
+ */
+void memory_region_transaction_commit(void);
+
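+/*
+ * Usage sketch (illustrative only; 'bar' and 'new_addr' stand for an
+ * existing subregion and its new offset): batching updates so the guest
+ * never observes the intermediate state.
+ *
+ *     memory_region_transaction_begin();
+ *     memory_region_set_address(bar, new_addr);
+ *     memory_region_set_enabled(bar, true);
+ *     memory_region_transaction_commit();
+ */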
+/**
+ * memory_listener_register: register callbacks to be called when memory
+ * sections are mapped or unmapped into an address
+ * space
+ *
+ * @listener: an object containing the callbacks to be called
+ * @filter: if non-%NULL, only regions in this address space will be observed
+ */
+void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
+
+/**
+ * memory_listener_unregister: undo the effect of memory_listener_register()
+ *
+ * @listener: an object containing the callbacks to be removed
+ */
+void memory_listener_unregister(MemoryListener *listener);
+
+/**
+ * memory_global_dirty_log_start: begin dirty logging for all regions
+ */
+void memory_global_dirty_log_start(void);
+
+/**
+ * memory_global_dirty_log_stop: end dirty logging for all regions
+ */
+void memory_global_dirty_log_stop(void);
+
+void mtree_info(fprintf_function mon_printf, void *f);
+
+/**
+ * address_space_init: initializes an address space
+ *
+ * @as: an uninitialized #AddressSpace
+ * @root: a #MemoryRegion that routes addresses for the address space
+ */
+void address_space_init(AddressSpace *as, MemoryRegion *root);
+
+
+/**
+ * address_space_destroy: destroy an address space
+ *
+ * Releases all resources associated with an address space. After an address
+ * space is destroyed, its root memory region (given by address_space_init())
+ * may be destroyed as well.
+ *
+ * @as: address space to be destroyed
+ */
+void address_space_destroy(AddressSpace *as);
+
+/**
+ * address_space_rw: read from or write to an address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data to be transferred, in bytes
+ * @is_write: indicates the transfer direction
+ */
+void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
+ int len, bool is_write);
+
+/**
+ * address_space_write: write to address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data to be transferred, in bytes
+ */
+void address_space_write(AddressSpace *as, hwaddr addr,
+ const uint8_t *buf, int len);
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data to be transferred, in bytes
+ */
+void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
+
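+/*
+ * Usage sketch (illustrative only; address_space_memory is the global
+ * system-memory AddressSpace declared in "exec/address-spaces.h", and
+ * GUEST_ADDR is a guest physical address chosen by the caller):
+ *
+ *     uint32_t val = 0x12345678;
+ *
+ *     address_space_write(&address_space_memory, GUEST_ADDR,
+ *                         (const uint8_t *)&val, sizeof(val));
+ *     address_space_read(&address_space_memory, GUEST_ADDR,
+ *                        (uint8_t *)&val, sizeof(val));
+ */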
+/* address_space_map: map a physical memory region into a host virtual address
+ *
+ * May map a subset of the requested range, given by and returned in @plen.
+ * May return %NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @plen: pointer to length of buffer; updated on return
+ * @is_write: indicates the transfer direction
+ */
+void *address_space_map(AddressSpace *as, hwaddr addr,
+ hwaddr *plen, bool is_write);
+
+/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
+ *
+ * Will also mark the memory as dirty if @is_write == %true. @access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer returned by address_space_map()
+ * @len: buffer length as returned by address_space_map()
+ * @access_len: amount of data actually transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
+ int is_write, hwaddr access_len);
+
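+/*
+ * Usage sketch (illustrative only): zero-filling guest memory through a
+ * temporary host mapping; 'as', 'addr' and 'size' come from the caller.
+ *
+ *     hwaddr len = size;
+ *     void *ptr = address_space_map(as, addr, &len, true);
+ *
+ *     if (ptr) {
+ *         memset(ptr, 0, len);           // only 'len' bytes were mapped
+ *         address_space_unmap(as, ptr, len, true, len);
+ *     }
+ */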
+
+#endif
+
+#endif
diff --git a/include/exec/poison.h b/include/exec/poison.h
new file mode 100644
index 0000000000..7d7b23b1fc
--- /dev/null
+++ b/include/exec/poison.h
@@ -0,0 +1,64 @@
+/* Poison identifiers that should not be used when building
+ target independent device code. */
+
+#ifndef HW_POISON_H
+#define HW_POISON_H
+#ifdef __GNUC__
+
+#pragma GCC poison TARGET_I386
+#pragma GCC poison TARGET_X86_64
+#pragma GCC poison TARGET_ALPHA
+#pragma GCC poison TARGET_ARM
+#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_LM32
+#pragma GCC poison TARGET_M68K
+#pragma GCC poison TARGET_MIPS
+#pragma GCC poison TARGET_MIPS64
+#pragma GCC poison TARGET_OPENRISC
+#pragma GCC poison TARGET_PPC
+#pragma GCC poison TARGET_PPCEMB
+#pragma GCC poison TARGET_PPC64
+#pragma GCC poison TARGET_ABI32
+#pragma GCC poison TARGET_SH4
+#pragma GCC poison TARGET_SPARC
+#pragma GCC poison TARGET_SPARC64
+
+#pragma GCC poison TARGET_WORDS_BIGENDIAN
+#pragma GCC poison BSWAP_NEEDED
+
+#pragma GCC poison TARGET_LONG_BITS
+#pragma GCC poison TARGET_FMT_lx
+#pragma GCC poison TARGET_FMT_ld
+
+#pragma GCC poison TARGET_PAGE_SIZE
+#pragma GCC poison TARGET_PAGE_MASK
+#pragma GCC poison TARGET_PAGE_BITS
+#pragma GCC poison TARGET_PAGE_ALIGN
+
+#pragma GCC poison CPUArchState
+#pragma GCC poison env
+
+#pragma GCC poison lduw_phys
+#pragma GCC poison ldl_phys
+#pragma GCC poison ldq_phys
+#pragma GCC poison stl_phys_notdirty
+#pragma GCC poison stq_phys_notdirty
+#pragma GCC poison stw_phys
+#pragma GCC poison stl_phys
+#pragma GCC poison stq_phys
+
+#pragma GCC poison CPU_INTERRUPT_HARD
+#pragma GCC poison CPU_INTERRUPT_EXITTB
+#pragma GCC poison CPU_INTERRUPT_HALT
+#pragma GCC poison CPU_INTERRUPT_DEBUG
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
+#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
+#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
+
+#endif
+#endif
diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h
new file mode 100644
index 0000000000..93798b9614
--- /dev/null
+++ b/include/exec/softmmu-semi.h
@@ -0,0 +1,77 @@
+/*
+ * Helper routines to provide target memory access for semihosting
+ * syscalls in system emulation mode.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+#ifndef SOFTMMU_SEMI_H
+#define SOFTMMU_SEMI_H 1
+
+static inline uint32_t softmmu_tget32(CPUArchState *env, uint32_t addr)
+{
+ uint32_t val;
+
+ cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 0);
+ return tswap32(val);
+}
+static inline uint32_t softmmu_tget8(CPUArchState *env, uint32_t addr)
+{
+ uint8_t val;
+
+ cpu_memory_rw_debug(env, addr, &val, 1, 0);
+ return val;
+}
+
+#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
+#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
+#define get_user_ual(arg, p) get_user_u32(arg, p)
+
+static inline void softmmu_tput32(CPUArchState *env, uint32_t addr, uint32_t val)
+{
+ val = tswap32(val);
+ cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 1);
+}
+#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
+#define put_user_ual(arg, p) put_user_u32(arg, p)
+
+static void *softmmu_lock_user(CPUArchState *env, uint32_t addr, uint32_t len,
+ int copy)
+{
+ uint8_t *p;
+ /* TODO: Make this something that isn't fixed size. */
+ p = malloc(len);
+    if (p && copy) {
+        cpu_memory_rw_debug(env, addr, p, len, 0);
+    }
+ return p;
+}
+#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
+static char *softmmu_lock_user_string(CPUArchState *env, uint32_t addr)
+{
+ char *p;
+ char *s;
+ uint8_t c;
+ /* TODO: Make this something that isn't fixed size. */
+ s = p = malloc(1024);
+ if (!s) {
+ return NULL;
+ }
+ do {
+ cpu_memory_rw_debug(env, addr, &c, 1, 0);
+ addr++;
+ *(p++) = c;
+ } while (c);
+ return s;
+}
+#define lock_user_string(p) softmmu_lock_user_string(env, p)
+static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
+ target_ulong len)
+{
+    if (len) {
+        cpu_memory_rw_debug(env, addr, p, len, 1);
+    }
+ free(p);
+}
+#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
+
+#endif
diff --git a/include/exec/softmmu_defs.h b/include/exec/softmmu_defs.h
new file mode 100644
index 0000000000..1f25e33ce4
--- /dev/null
+++ b/include/exec/softmmu_defs.h
@@ -0,0 +1,37 @@
+/*
+ * Software MMU support
+ *
+ * Declare helpers used by TCG for qemu_ld/st ops.
+ *
+ * Used by softmmu_exec.h, TCG targets and exec-all.h.
+ *
+ */
+#ifndef SOFTMMU_DEFS_H
+#define SOFTMMU_DEFS_H
+
+uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ int mmu_idx);
+uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ int mmu_idx);
+uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx);
+uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx);
+
+uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stb_cmmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                     int mmu_idx);
+uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stw_cmmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ int mmu_idx);
+uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stl_cmmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx);
+uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
+void helper_stq_cmmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx);
+#endif
diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h
new file mode 100644
index 0000000000..3e4e886a30
--- /dev/null
+++ b/include/exec/softmmu_exec.h
@@ -0,0 +1,163 @@
+/*
+ * Software MMU support
+ *
+ * Generate inline load/store functions for all MMU modes (typically
+ * at least _user and _kernel) as well as _data versions, for all data
+ * sizes.
+ *
+ * Used by target op helpers.
+ *
+ * MMU mode suffixes are defined in target cpu.h.
+ */
+
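+/*
+ * Illustration: for a target whose cpu.h defines MMU_MODE0_SUFFIX as
+ * _kernel and MMU_MODE1_SUFFIX as _user, the includes below generate,
+ * among others:
+ *
+ *     uint32_t cpu_ldl_kernel(CPUArchState *env, target_ulong ptr);
+ *     void     cpu_stl_user(CPUArchState *env, target_ulong ptr, uint32_t v);
+ *     int      cpu_ldsw_data(CPUArchState *env, target_ulong ptr);
+ *
+ * plus the unsuffixed ldl()/stl() aliases at the end of this file, which
+ * map to the _data variants.
+ */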
+/* XXX: find something cleaner.
+ * Furthermore, this is false for 64-bit targets.
+ */
+#define ldul_user ldl_user
+#define ldul_kernel ldl_kernel
+#define ldul_hypv ldl_hypv
+#define ldul_executive ldl_executive
+#define ldul_supervisor ldl_supervisor
+
+#include "exec/softmmu_defs.h"
+
+#define ACCESS_TYPE 0
+#define MEMSUFFIX MMU_MODE0_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#define ACCESS_TYPE 1
+#define MEMSUFFIX MMU_MODE1_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#if (NB_MMU_MODES >= 3)
+
+#define ACCESS_TYPE 2
+#define MEMSUFFIX MMU_MODE2_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 3) */
+
+#if (NB_MMU_MODES >= 4)
+
+#define ACCESS_TYPE 3
+#define MEMSUFFIX MMU_MODE3_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 4) */
+
+#if (NB_MMU_MODES >= 5)
+
+#define ACCESS_TYPE 4
+#define MEMSUFFIX MMU_MODE4_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 5) */
+
+#if (NB_MMU_MODES >= 6)
+
+#define ACCESS_TYPE 5
+#define MEMSUFFIX MMU_MODE5_SUFFIX
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#endif /* (NB_MMU_MODES >= 6) */
+
+#if (NB_MMU_MODES > 6)
+#error "NB_MMU_MODES > 6 is not supported for now"
+#endif /* (NB_MMU_MODES > 6) */
+
+/* these accesses are slower; they must be as rare as possible */
+#define ACCESS_TYPE (NB_MMU_MODES)
+#define MEMSUFFIX _data
+#define DATA_SIZE 1
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "exec/softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "exec/softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#define ldub(p) ldub_data(p)
+#define ldsb(p) ldsb_data(p)
+#define lduw(p) lduw_data(p)
+#define ldsw(p) ldsw_data(p)
+#define ldl(p) ldl_data(p)
+#define ldq(p) ldq_data(p)
+
+#define stb(p, v) stb_data(p, v)
+#define stw(p, v) stw_data(p, v)
+#define stl(p, v) stl_data(p, v)
+#define stq(p, v) stq_data(p, v)
diff --git a/include/exec/softmmu_header.h b/include/exec/softmmu_header.h
new file mode 100644
index 0000000000..d8d9c81b05
--- /dev/null
+++ b/include/exec/softmmu_header.h
@@ -0,0 +1,213 @@
+/*
+ * Software MMU support
+ *
+ * Generate inline load/store functions for one MMU mode and data
+ * size.
+ *
+ * Generate a store function as well as signed and unsigned loads. For
+ * 32 and 64 bit cases, also generate floating point functions with
+ * the same size.
+ *
+ * Not used directly but included from softmmu_exec.h and exec-all.h.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
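+/*
+ * Illustration: this header is only included with the controlling macros
+ * already defined, e.g. from softmmu_exec.h:
+ *
+ *     #define ACCESS_TYPE 0
+ *     #define MEMSUFFIX MMU_MODE0_SUFFIX
+ *     #define DATA_SIZE 4
+ *     #include "exec/softmmu_header.h"
+ */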
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define USUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define USUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define USUFFIX uw
+#define DATA_TYPE uint16_t
+#define DATA_STYPE int16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define USUFFIX ub
+#define DATA_TYPE uint8_t
+#define DATA_STYPE int8_t
+#else
+#error unsupported data size
+#endif
+
+#if ACCESS_TYPE < (NB_MMU_MODES)
+
+#define CPU_MMU_INDEX ACCESS_TYPE
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == (NB_MMU_MODES)
+
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == (NB_MMU_MODES + 1)
+
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define MMUSUFFIX _cmmu
+
+#else
+#error invalid ACCESS_TYPE
+#endif
+
+#if DATA_SIZE == 8
+#define RES_TYPE uint64_t
+#else
+#define RES_TYPE uint32_t
+#endif
+
+#if ACCESS_TYPE == (NB_MMU_MODES + 1)
+#define ADDR_READ addr_code
+#else
+#define ADDR_READ addr_read
+#endif
+
+/* generic load/store macros */
+
+static inline RES_TYPE
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ int page_index;
+ RES_TYPE res;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
+ }
+ return res;
+}
+
+#if DATA_SIZE <= 2
+static inline int
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ int res, page_index;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
+ MMUSUFFIX)(env, addr, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
+ }
+ return res;
+}
+#endif
+
+#if ACCESS_TYPE != (NB_MMU_MODES + 1)
+
+/* generic store macro */
+
+static inline void
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+ RES_TYPE v)
+{
+ int page_index;
+ target_ulong addr;
+ int mmu_idx;
+
+ addr = ptr;
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = CPU_MMU_INDEX;
+ if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
+ (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
+ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+ } else {
+ uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ glue(glue(st, SUFFIX), _raw)(hostaddr, v);
+ }
+}
+
+#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
+
+#if ACCESS_TYPE != (NB_MMU_MODES + 1)
+
+#if DATA_SIZE == 8
+static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr)
+{
+ union {
+ float64 d;
+ uint64_t i;
+ } u;
+ u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr);
+ return u.d;
+}
+
+static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr, float64 v)
+{
+ union {
+ float64 d;
+ uint64_t i;
+ } u;
+ u.d = v;
+ glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i);
+}
+#endif /* DATA_SIZE == 8 */
+
+#if DATA_SIZE == 4
+static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr);
+ return u.f;
+}
+
+static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
+ target_ulong ptr, float32 v)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.f = v;
+ glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i);
+}
+#endif /* DATA_SIZE == 4 */
+
+#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
+
+#undef RES_TYPE
+#undef DATA_TYPE
+#undef DATA_STYPE
+#undef SUFFIX
+#undef USUFFIX
+#undef DATA_SIZE
+#undef CPU_MMU_INDEX
+#undef MMUSUFFIX
+#undef ADDR_READ
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
new file mode 100644
index 0000000000..b219191abd
--- /dev/null
+++ b/include/exec/softmmu_template.h
@@ -0,0 +1,354 @@
+/*
+ * Software MMU support
+ *
+ * Generate helpers used by TCG for qemu_ld/st ops and code load
+ * functions.
+ *
+ * Included from target op helpers and exec.c.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/timer.h"
+#include "exec/memory.h"
+
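+/* Illustration: a target op helper typically instantiates this template
+ * once per access size, for example:
+ *
+ *     #define MMUSUFFIX _mmu
+ *     #define SHIFT 2                    // 4-byte ld/st helpers
+ *     #include "exec/softmmu_template.h"
+ */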
+#define DATA_SIZE (1 << SHIFT)
+
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define USUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define USUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define USUFFIX uw
+#define DATA_TYPE uint16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define USUFFIX ub
+#define DATA_TYPE uint8_t
+#else
+#error unsupported data size
+#endif
+
+#ifdef SOFTMMU_CODE_ACCESS
+#define READ_ACCESS_TYPE 2
+#define ADDR_READ addr_code
+#else
+#define READ_ACCESS_TYPE 0
+#define ADDR_READ addr_read
+#endif
+
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr,
+ int mmu_idx,
+ uintptr_t retaddr);
+static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
+ hwaddr physaddr,
+ target_ulong addr,
+ uintptr_t retaddr)
+{
+ DATA_TYPE res;
+ MemoryRegion *mr = iotlb_to_region(physaddr);
+
+ physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+ env->mem_io_pc = retaddr;
+ if (mr != &io_mem_ram && mr != &io_mem_rom
+ && mr != &io_mem_unassigned
+ && mr != &io_mem_notdirty
+ && !can_do_io(env)) {
+ cpu_io_recompile(env, retaddr);
+ }
+
+ env->mem_io_vaddr = addr;
+#if SHIFT <= 2
+ res = io_mem_read(mr, physaddr, 1 << SHIFT);
+#else
+#ifdef TARGET_WORDS_BIGENDIAN
+ res = io_mem_read(mr, physaddr, 4) << 32;
+ res |= io_mem_read(mr, physaddr + 4, 4);
+#else
+ res = io_mem_read(mr, physaddr, 4);
+ res |= io_mem_read(mr, physaddr + 4, 4) << 32;
+#endif
+#endif /* SHIFT > 2 */
+ return res;
+}
+
+/* handle all cases except unaligned access which span two pages */
+DATA_TYPE
+glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
+ int mmu_idx)
+{
+ DATA_TYPE res;
+ int index;
+ target_ulong tlb_addr;
+ hwaddr ioaddr;
+ uintptr_t retaddr;
+
+    /* test if there is a match for an unaligned or IO access */
+    /* XXX: could be done more efficiently with a memory macro,
+       in a non-portable way */
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ retaddr = GETPC_EXT();
+ ioaddr = env->iotlb[mmu_idx][index];
+ res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ /* slow unaligned access (it spans two pages or IO) */
+ do_unaligned_access:
+ retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+#endif
+ res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
+ mmu_idx, retaddr);
+ } else {
+ /* unaligned/aligned access in the same page */
+ uintptr_t addend;
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ retaddr = GETPC_EXT();
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ }
+#endif
+ addend = env->tlb_table[mmu_idx][index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
+ (addr + addend));
+ }
+ } else {
+ /* the page is not in the TLB : fill it */
+ retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+#endif
+ tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ goto redo;
+ }
+ return res;
+}
+
+/* handle all unaligned cases */
+static DATA_TYPE
+glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ DATA_TYPE res, res1, res2;
+ int index, shift;
+ hwaddr ioaddr;
+ target_ulong tlb_addr, addr1, addr2;
+
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+ if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ ioaddr = env->iotlb[mmu_idx][index];
+ res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+ } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ do_unaligned_access:
+ /* slow unaligned access (it spans two pages) */
+ addr1 = addr & ~(DATA_SIZE - 1);
+ addr2 = addr1 + DATA_SIZE;
+ res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
+ mmu_idx, retaddr);
+ res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
+ mmu_idx, retaddr);
+ shift = (addr & (DATA_SIZE - 1)) * 8;
+#ifdef TARGET_WORDS_BIGENDIAN
+ res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
+#else
+ res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
+#endif
+ res = (DATA_TYPE)res;
+ } else {
+ /* unaligned/aligned access in the same page */
+ uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
+ (addr + addend));
+ }
+ } else {
+ /* the page is not in the TLB : fill it */
+ tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ goto redo;
+ }
+ return res;
+}
+
+#ifndef SOFTMMU_CODE_ACCESS
+
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr,
+ DATA_TYPE val,
+ int mmu_idx,
+ uintptr_t retaddr);
+
+static inline void glue(io_write, SUFFIX)(CPUArchState *env,
+ hwaddr physaddr,
+ DATA_TYPE val,
+ target_ulong addr,
+ uintptr_t retaddr)
+{
+ MemoryRegion *mr = iotlb_to_region(physaddr);
+
+ physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+ if (mr != &io_mem_ram && mr != &io_mem_rom
+ && mr != &io_mem_unassigned
+ && mr != &io_mem_notdirty
+ && !can_do_io(env)) {
+ cpu_io_recompile(env, retaddr);
+ }
+
+ env->mem_io_vaddr = addr;
+ env->mem_io_pc = retaddr;
+#if SHIFT <= 2
+ io_mem_write(mr, physaddr, val, 1 << SHIFT);
+#else
+#ifdef TARGET_WORDS_BIGENDIAN
+ io_mem_write(mr, physaddr, (val >> 32), 4);
+ io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
+#else
+ io_mem_write(mr, physaddr, (uint32_t)val, 4);
+ io_mem_write(mr, physaddr + 4, val >> 32, 4);
+#endif
+#endif /* SHIFT > 2 */
+}
+
+void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr, DATA_TYPE val,
+ int mmu_idx)
+{
+ hwaddr ioaddr;
+ target_ulong tlb_addr;
+ uintptr_t retaddr;
+ int index;
+
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ retaddr = GETPC_EXT();
+ ioaddr = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+ } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ do_unaligned_access:
+ retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+ glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
+ mmu_idx, retaddr);
+ } else {
+ /* aligned/unaligned access in the same page */
+ uintptr_t addend;
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ retaddr = GETPC_EXT();
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+ }
+#endif
+ addend = env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
+ (addr + addend), val);
+ }
+ } else {
+ /* the page is not in the TLB : fill it */
+ retaddr = GETPC_EXT();
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+ tlb_fill(env, addr, 1, mmu_idx, retaddr);
+ goto redo;
+ }
+}
+
+/* handles all unaligned cases */
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+ target_ulong addr,
+ DATA_TYPE val,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ hwaddr ioaddr;
+ target_ulong tlb_addr;
+ int index, i;
+
+ index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ redo:
+ tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ ioaddr = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+ } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ do_unaligned_access:
+ /* XXX: not efficient, but simple */
+ /* Note: relies on the fact that tlb_fill() does not remove the
+ * previous page from the TLB cache. */
+ for(i = DATA_SIZE - 1; i >= 0; i--) {
+#ifdef TARGET_WORDS_BIGENDIAN
+ glue(slow_stb, MMUSUFFIX)(env, addr + i,
+ val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
+ mmu_idx, retaddr);
+#else
+ glue(slow_stb, MMUSUFFIX)(env, addr + i,
+ val >> (i * 8),
+ mmu_idx, retaddr);
+#endif
+ }
+ } else {
+ /* aligned/unaligned access in the same page */
+ uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
+ (addr + addend), val);
+ }
+ } else {
+ /* the page is not in the TLB : fill it */
+ tlb_fill(env, addr, 1, mmu_idx, retaddr);
+ goto redo;
+ }
+}
+
+#endif /* !defined(SOFTMMU_CODE_ACCESS) */
+
+#undef READ_ACCESS_TYPE
+#undef SHIFT
+#undef DATA_TYPE
+#undef SUFFIX
+#undef USUFFIX
+#undef DATA_SIZE
+#undef ADDR_READ
diff --git a/include/exec/spinlock.h b/include/exec/spinlock.h
new file mode 100644
index 0000000000..a72edda1d2
--- /dev/null
+++ b/include/exec/spinlock.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+/* configure guarantees us that we have pthreads on any host except
+ * mingw32, which doesn't support any of the user-only targets.
+ * So we can simply assume we have pthread mutexes here.
+ */
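+/*
+ * Usage sketch (illustrative only):
+ *
+ *     static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+ *
+ *     spin_lock(&lock);
+ *     // ... touch state shared between emulation threads ...
+ *     spin_unlock(&lock);
+ */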
+#if defined(CONFIG_USER_ONLY)
+
+#include <pthread.h>
+#define spin_lock pthread_mutex_lock
+#define spin_unlock pthread_mutex_unlock
+#define spinlock_t pthread_mutex_t
+#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
+
+#else
+
+/* Empty implementations, on the theory that system mode emulation
+ * is single-threaded. This means that these functions should only
+ * be used from code run in the TCG cpu thread, and cannot protect
+ * data structures which might also be accessed from the IO thread
+ * or from signal handlers.
+ */
+typedef int spinlock_t;
+#define SPIN_LOCK_UNLOCKED 0
+
+static inline void spin_lock(spinlock_t *lock)
+{
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+}
+
+#endif
diff --git a/include/exec/user/abitypes.h b/include/exec/user/abitypes.h
new file mode 100644
index 0000000000..fe7f6624f9
--- /dev/null
+++ b/include/exec/user/abitypes.h
@@ -0,0 +1,36 @@
+#ifndef QEMU_TYPES_H
+#define QEMU_TYPES_H
+#include "cpu.h"
+
+#ifdef TARGET_ABI32
+typedef uint32_t abi_ulong;
+typedef int32_t abi_long;
+#define TARGET_ABI_FMT_lx "%08x"
+#define TARGET_ABI_FMT_ld "%d"
+#define TARGET_ABI_FMT_lu "%u"
+#define TARGET_ABI_BITS 32
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+ return tswap32(v);
+}
+
+#else
+typedef target_ulong abi_ulong;
+typedef target_long abi_long;
+#define TARGET_ABI_FMT_lx TARGET_FMT_lx
+#define TARGET_ABI_FMT_ld TARGET_FMT_ld
+#define TARGET_ABI_FMT_lu TARGET_FMT_lu
+#define TARGET_ABI_BITS TARGET_LONG_BITS
+/* for consistency, define ABI32 too */
+#if TARGET_ABI_BITS == 32
+#define TARGET_ABI32 1
+#endif
+
+static inline abi_ulong tswapal(abi_ulong v)
+{
+ return tswapl(v);
+}
+
+#endif
+#endif
diff --git a/include/exec/user/thunk.h b/include/exec/user/thunk.h
new file mode 100644
index 0000000000..87025c3b04
--- /dev/null
+++ b/include/exec/user/thunk.h
@@ -0,0 +1,189 @@
+/*
+ * Generic thunking code to convert data between host and target CPU
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef THUNK_H
+#define THUNK_H
+
+#include <inttypes.h>
+#include "cpu.h"
+
+/* types enums definitions */
+
+typedef enum argtype {
+ TYPE_NULL,
+ TYPE_CHAR,
+ TYPE_SHORT,
+ TYPE_INT,
+ TYPE_LONG,
+ TYPE_ULONG,
+    TYPE_PTRVOID, /* pointer to unknown data */
+ TYPE_LONGLONG,
+ TYPE_ULONGLONG,
+ TYPE_PTR,
+ TYPE_ARRAY,
+ TYPE_STRUCT,
+ TYPE_OLDDEVT,
+} argtype;
+
+#define MK_PTR(type) TYPE_PTR, type
+#define MK_ARRAY(type, size) TYPE_ARRAY, size, type
+#define MK_STRUCT(id) TYPE_STRUCT, id
+
+#define THUNK_TARGET 0
+#define THUNK_HOST 1
+
+typedef struct {
+ /* standard struct handling */
+ const argtype *field_types;
+ int nb_fields;
+ int *field_offsets[2];
+ /* special handling */
+ void (*convert[2])(void *dst, const void *src);
+ int size[2];
+ int align[2];
+ const char *name;
+} StructEntry;
+
+/* Translation table for bitmasks... */
+typedef struct bitmask_transtbl {
+ unsigned int x86_mask;
+ unsigned int x86_bits;
+ unsigned int alpha_mask;
+ unsigned int alpha_bits;
+} bitmask_transtbl;
+
+void thunk_register_struct(int id, const char *name, const argtype *types);
+void thunk_register_struct_direct(int id, const char *name,
+ const StructEntry *se1);
+const argtype *thunk_convert(void *dst, const void *src,
+ const argtype *type_ptr, int to_host);
+#ifndef NO_THUNK_TYPE_SIZE
+
+extern StructEntry struct_entries[];
+
+int thunk_type_size_array(const argtype *type_ptr, int is_host);
+int thunk_type_align_array(const argtype *type_ptr, int is_host);
+
+static inline int thunk_type_size(const argtype *type_ptr, int is_host)
+{
+ int type, size;
+ const StructEntry *se;
+
+ type = *type_ptr;
+ switch(type) {
+ case TYPE_CHAR:
+ return 1;
+ case TYPE_SHORT:
+ return 2;
+ case TYPE_INT:
+ return 4;
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ return 8;
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_PTR:
+ if (is_host) {
+ return sizeof(void *);
+ } else {
+ return TARGET_ABI_BITS / 8;
+ }
+ break;
+ case TYPE_OLDDEVT:
+ if (is_host) {
+#if defined(HOST_X86_64)
+ return 8;
+#elif defined(HOST_ALPHA) || defined(HOST_IA64) || defined(HOST_MIPS) || \
+ defined(HOST_PARISC) || defined(HOST_SPARC64)
+ return 4;
+#elif defined(HOST_PPC)
+ return sizeof(void *);
+#else
+ return 2;
+#endif
+ } else {
+#if defined(TARGET_X86_64)
+ return 8;
+#elif defined(TARGET_ALPHA) || defined(TARGET_IA64) || defined(TARGET_MIPS) || \
+ defined(TARGET_PARISC) || defined(TARGET_SPARC64)
+ return 4;
+#elif defined(TARGET_PPC)
+ return TARGET_ABI_BITS / 8;
+#else
+ return 2;
+#endif
+ }
+ break;
+ case TYPE_ARRAY:
+ size = type_ptr[1];
+ return size * thunk_type_size_array(type_ptr + 2, is_host);
+ case TYPE_STRUCT:
+ se = struct_entries + type_ptr[1];
+ return se->size[is_host];
+ default:
+ return -1;
+ }
+}
+
+static inline int thunk_type_align(const argtype *type_ptr, int is_host)
+{
+ int type;
+ const StructEntry *se;
+
+ type = *type_ptr;
+ switch(type) {
+ case TYPE_CHAR:
+ return 1;
+ case TYPE_SHORT:
+ return 2;
+ case TYPE_INT:
+ return 4;
+ case TYPE_LONGLONG:
+ case TYPE_ULONGLONG:
+ return 8;
+ case TYPE_LONG:
+ case TYPE_ULONG:
+ case TYPE_PTRVOID:
+ case TYPE_PTR:
+ if (is_host) {
+ return sizeof(void *);
+ } else {
+ return TARGET_ABI_BITS / 8;
+ }
+ break;
+ case TYPE_OLDDEVT:
+ return thunk_type_size(type_ptr, is_host);
+ case TYPE_ARRAY:
+ return thunk_type_align_array(type_ptr + 2, is_host);
+ case TYPE_STRUCT:
+ se = struct_entries + type_ptr[1];
+ return se->align[is_host];
+ default:
+ return -1;
+ }
+}
+
+#endif /* NO_THUNK_TYPE_SIZE */
+
+unsigned int target_to_host_bitmask(unsigned int x86_mask,
+ const bitmask_transtbl * trans_tbl);
+unsigned int host_to_target_bitmask(unsigned int alpha_mask,
+ const bitmask_transtbl * trans_tbl);
+
+#endif
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
new file mode 100644
index 0000000000..f3927e2419
--- /dev/null
+++ b/include/fpu/softfloat.h
@@ -0,0 +1,641 @@
+/*
+ * QEMU float support
+ *
+ * Derived from SoftFloat.
+ */
+
+/*============================================================================
+
+This C header file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic
+Package, Release 2b.
+
+Written by John R. Hauser. This work was made possible in part by the
+International Computer Science Institute, located at Suite 600, 1947 Center
+Street, Berkeley, California 94704. Funding was partially provided by the
+National Science Foundation under grant MIP-9311980. The original version
+of this code was written as part of a project to build a fixed-point vector
+processor in collaboration with the University of California at Berkeley,
+overseen by Profs. Nelson Morgan and John Wawrzynek. More information
+is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
+arithmetic/SoftFloat.html'.
+
+THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
+been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
+RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
+AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
+COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
+EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
+INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
+OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
+
+Derivative works are acceptable, even for commercial purposes, so long as
+(1) the source code for the derivative work includes prominent notice that
+the work is derivative, and (2) the source code includes prominent notice with
+these four paragraphs for those parts of this code that are retained.
+
+=============================================================================*/
+
+#ifndef SOFTFLOAT_H
+#define SOFTFLOAT_H
+
+#if defined(CONFIG_SOLARIS) && defined(CONFIG_NEEDS_LIBSUNMATH)
+#include <sunmath.h>
+#endif
+
+#include <inttypes.h>
+#include "config-host.h"
+#include "qemu/osdep.h"
+
+/*----------------------------------------------------------------------------
+| Each of the following `typedef's defines the most convenient type that holds
+| integers of at least as many bits as specified. For example, `uint8' should
+| be the most convenient type that can hold unsigned integers of as many as
+| 8 bits. The `flag' type must be able to hold either a 0 or 1. For most
+| implementations of C, `flag', `uint8', and `int8' should all be `typedef'ed
+| to the same as `int'.
+*----------------------------------------------------------------------------*/
+typedef uint8_t flag;
+typedef uint8_t uint8;
+typedef int8_t int8;
+typedef unsigned int uint32;
+typedef signed int int32;
+typedef uint64_t uint64;
+typedef int64_t int64;
+
+#define LIT64( a ) a##LL
+#define INLINE static inline
+
+#define STATUS_PARAM , float_status *status
+#define STATUS(field) status->field
+#define STATUS_VAR , status
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE floating-point ordering relations
+*----------------------------------------------------------------------------*/
+enum {
+ float_relation_less = -1,
+ float_relation_equal = 0,
+ float_relation_greater = 1,
+ float_relation_unordered = 2
+};
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE floating-point types.
+*----------------------------------------------------------------------------*/
+/* Use structures for soft-float types. This prevents accidentally mixing
+ them with native int/float types. A sufficiently clever compiler and
+ sane ABI should be able to see though these structs. However
+   sane ABI should be able to see through these structs.  However
+//#define USE_SOFTFLOAT_STRUCT_TYPES
+#ifdef USE_SOFTFLOAT_STRUCT_TYPES
+typedef struct {
+ uint16_t v;
+} float16;
+#define float16_val(x) (((float16)(x)).v)
+#define make_float16(x) __extension__ ({ float16 f16_val = {x}; f16_val; })
+#define const_float16(x) { x }
+typedef struct {
+ uint32_t v;
+} float32;
+/* The cast ensures an error if the wrong type is passed. */
+#define float32_val(x) (((float32)(x)).v)
+#define make_float32(x) __extension__ ({ float32 f32_val = {x}; f32_val; })
+#define const_float32(x) { x }
+typedef struct {
+ uint64_t v;
+} float64;
+#define float64_val(x) (((float64)(x)).v)
+#define make_float64(x) __extension__ ({ float64 f64_val = {x}; f64_val; })
+#define const_float64(x) { x }
+#else
+typedef uint16_t float16;
+typedef uint32_t float32;
+typedef uint64_t float64;
+#define float16_val(x) (x)
+#define float32_val(x) (x)
+#define float64_val(x) (x)
+#define make_float16(x) (x)
+#define make_float32(x) (x)
+#define make_float64(x) (x)
+#define const_float16(x) (x)
+#define const_float32(x) (x)
+#define const_float64(x) (x)
+#endif
+typedef struct {
+ uint64_t low;
+ uint16_t high;
+} floatx80;
+#define make_floatx80(exp, mant) ((floatx80) { mant, exp })
+#define make_floatx80_init(exp, mant) { .low = mant, .high = exp }
+typedef struct {
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t high, low;
+#else
+ uint64_t low, high;
+#endif
+} float128;
+#define make_float128(high_, low_) ((float128) { .high = high_, .low = low_ })
+#define make_float128_init(high_, low_) { .high = high_, .low = low_ }
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE floating-point underflow tininess-detection mode.
+*----------------------------------------------------------------------------*/
+enum {
+ float_tininess_after_rounding = 0,
+ float_tininess_before_rounding = 1
+};
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE floating-point rounding mode.
+*----------------------------------------------------------------------------*/
+enum {
+ float_round_nearest_even = 0,
+ float_round_down = 1,
+ float_round_up = 2,
+ float_round_to_zero = 3
+};
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE floating-point exception flags.
+*----------------------------------------------------------------------------*/
+enum {
+ float_flag_invalid = 1,
+ float_flag_divbyzero = 4,
+ float_flag_overflow = 8,
+ float_flag_underflow = 16,
+ float_flag_inexact = 32,
+ float_flag_input_denormal = 64,
+ float_flag_output_denormal = 128
+};
+
+typedef struct float_status {
+ signed char float_detect_tininess;
+ signed char float_rounding_mode;
+ signed char float_exception_flags;
+ signed char floatx80_rounding_precision;
+ /* should denormalised results go to zero and set the inexact flag? */
+ flag flush_to_zero;
+ /* should denormalised inputs go to zero and set the input_denormal flag? */
+ flag flush_inputs_to_zero;
+ flag default_nan_mode;
+} float_status;
+
+void set_float_rounding_mode(int val STATUS_PARAM);
+void set_float_exception_flags(int val STATUS_PARAM);
+INLINE void set_float_detect_tininess(int val STATUS_PARAM)
+{
+ STATUS(float_detect_tininess) = val;
+}
+INLINE void set_flush_to_zero(flag val STATUS_PARAM)
+{
+ STATUS(flush_to_zero) = val;
+}
+INLINE void set_flush_inputs_to_zero(flag val STATUS_PARAM)
+{
+ STATUS(flush_inputs_to_zero) = val;
+}
+INLINE void set_default_nan_mode(flag val STATUS_PARAM)
+{
+ STATUS(default_nan_mode) = val;
+}
+INLINE int get_float_exception_flags(float_status *status)
+{
+ return STATUS(float_exception_flags);
+}
+void set_floatx80_rounding_precision(int val STATUS_PARAM);
+
+/*----------------------------------------------------------------------------
+| Routine to raise any or all of the software IEC/IEEE floating-point
+| exception flags.
+*----------------------------------------------------------------------------*/
+void float_raise( int8 flags STATUS_PARAM);
+
+/*----------------------------------------------------------------------------
+| Options to indicate which negations to perform in float*_muladd()
+| Using these differs from negating an input or output before calling
+| the muladd function in that this means that a NaN doesn't have its
+| sign bit inverted before it is propagated.
+*----------------------------------------------------------------------------*/
+enum {
+ float_muladd_negate_c = 1,
+ float_muladd_negate_product = 2,
+ float_muladd_negate_result = 4,
+};
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE integer-to-floating-point conversion routines.
+*----------------------------------------------------------------------------*/
+float32 int32_to_float32( int32 STATUS_PARAM );
+float64 int32_to_float64( int32 STATUS_PARAM );
+float32 uint32_to_float32( uint32 STATUS_PARAM );
+float64 uint32_to_float64( uint32 STATUS_PARAM );
+floatx80 int32_to_floatx80( int32 STATUS_PARAM );
+float128 int32_to_float128( int32 STATUS_PARAM );
+float32 int64_to_float32( int64 STATUS_PARAM );
+float32 uint64_to_float32( uint64 STATUS_PARAM );
+float64 int64_to_float64( int64 STATUS_PARAM );
+float64 uint64_to_float64( uint64 STATUS_PARAM );
+floatx80 int64_to_floatx80( int64 STATUS_PARAM );
+float128 int64_to_float128( int64 STATUS_PARAM );
+float128 uint64_to_float128( uint64 STATUS_PARAM );
+
+/*----------------------------------------------------------------------------
+| Software half-precision conversion routines.
+*----------------------------------------------------------------------------*/
+float16 float32_to_float16( float32, flag STATUS_PARAM );
+float32 float16_to_float32( float16, flag STATUS_PARAM );
+
+/*----------------------------------------------------------------------------
+| Software half-precision operations.
+*----------------------------------------------------------------------------*/
+int float16_is_quiet_nan( float16 );
+int float16_is_signaling_nan( float16 );
+float16 float16_maybe_silence_nan( float16 );
+
+INLINE int float16_is_any_nan(float16 a)
+{
+ return ((float16_val(a) & ~0x8000) > 0x7c00);
+}
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated half-precision NaN.
+*----------------------------------------------------------------------------*/
+extern const float16 float16_default_nan;
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE single-precision conversion routines.
+*----------------------------------------------------------------------------*/
+int_fast16_t float32_to_int16_round_to_zero(float32 STATUS_PARAM);
+uint_fast16_t float32_to_uint16_round_to_zero(float32 STATUS_PARAM);
+int32 float32_to_int32( float32 STATUS_PARAM );
+int32 float32_to_int32_round_to_zero( float32 STATUS_PARAM );
+uint32 float32_to_uint32( float32 STATUS_PARAM );
+uint32 float32_to_uint32_round_to_zero( float32 STATUS_PARAM );
+int64 float32_to_int64( float32 STATUS_PARAM );
+int64 float32_to_int64_round_to_zero( float32 STATUS_PARAM );
+float64 float32_to_float64( float32 STATUS_PARAM );
+floatx80 float32_to_floatx80( float32 STATUS_PARAM );
+float128 float32_to_float128( float32 STATUS_PARAM );
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE single-precision operations.
+*----------------------------------------------------------------------------*/
+float32 float32_round_to_int( float32 STATUS_PARAM );
+float32 float32_add( float32, float32 STATUS_PARAM );
+float32 float32_sub( float32, float32 STATUS_PARAM );
+float32 float32_mul( float32, float32 STATUS_PARAM );
+float32 float32_div( float32, float32 STATUS_PARAM );
+float32 float32_rem( float32, float32 STATUS_PARAM );
+float32 float32_muladd(float32, float32, float32, int STATUS_PARAM);
+float32 float32_sqrt( float32 STATUS_PARAM );
+float32 float32_exp2( float32 STATUS_PARAM );
+float32 float32_log2( float32 STATUS_PARAM );
+int float32_eq( float32, float32 STATUS_PARAM );
+int float32_le( float32, float32 STATUS_PARAM );
+int float32_lt( float32, float32 STATUS_PARAM );
+int float32_unordered( float32, float32 STATUS_PARAM );
+int float32_eq_quiet( float32, float32 STATUS_PARAM );
+int float32_le_quiet( float32, float32 STATUS_PARAM );
+int float32_lt_quiet( float32, float32 STATUS_PARAM );
+int float32_unordered_quiet( float32, float32 STATUS_PARAM );
+int float32_compare( float32, float32 STATUS_PARAM );
+int float32_compare_quiet( float32, float32 STATUS_PARAM );
+float32 float32_min(float32, float32 STATUS_PARAM);
+float32 float32_max(float32, float32 STATUS_PARAM);
+int float32_is_quiet_nan( float32 );
+int float32_is_signaling_nan( float32 );
+float32 float32_maybe_silence_nan( float32 );
+float32 float32_scalbn( float32, int STATUS_PARAM );
+
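+/*
+ * Usage sketch (illustrative only): computing with an explicit float_status
+ * so that rounding mode and exception flags are tracked per call site.
+ *
+ *     float_status st = { 0 };
+ *
+ *     set_float_rounding_mode(float_round_nearest_even, &st);
+ *     float32 a = int32_to_float32(3, &st);
+ *     float32 b = int32_to_float32(4, &st);
+ *     float32 sum = float32_add(a, b, &st);
+ *     int flags = get_float_exception_flags(&st);
+ */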
+INLINE float32 float32_abs(float32 a)
+{
+ /* Note that abs does *not* handle NaN specially, nor does
+ * it flush denormal inputs to zero.
+ */
+ return make_float32(float32_val(a) & 0x7fffffff);
+}
+
+INLINE float32 float32_chs(float32 a)
+{
+ /* Note that chs does *not* handle NaN specially, nor does
+ * it flush denormal inputs to zero.
+ */
+ return make_float32(float32_val(a) ^ 0x80000000);
+}
+
+INLINE int float32_is_infinity(float32 a)
+{
+ return (float32_val(a) & 0x7fffffff) == 0x7f800000;
+}
+
+INLINE int float32_is_neg(float32 a)
+{
+ return float32_val(a) >> 31;
+}
+
+INLINE int float32_is_zero(float32 a)
+{
+ return (float32_val(a) & 0x7fffffff) == 0;
+}
+
+INLINE int float32_is_any_nan(float32 a)
+{
+ return ((float32_val(a) & ~(1 << 31)) > 0x7f800000UL);
+}
+
+INLINE int float32_is_zero_or_denormal(float32 a)
+{
+ return (float32_val(a) & 0x7f800000) == 0;
+}
+
+INLINE float32 float32_set_sign(float32 a, int sign)
+{
+ return make_float32((float32_val(a) & 0x7fffffff) | (sign << 31));
+}
+
+#define float32_zero make_float32(0)
+#define float32_one make_float32(0x3f800000)
+#define float32_ln2 make_float32(0x3f317218)
+#define float32_pi make_float32(0x40490fdb)
+#define float32_half make_float32(0x3f000000)
+#define float32_infinity make_float32(0x7f800000)
+
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated single-precision NaN.
+*----------------------------------------------------------------------------*/
+extern const float32 float32_default_nan;
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE double-precision conversion routines.
+*----------------------------------------------------------------------------*/
+int_fast16_t float64_to_int16_round_to_zero(float64 STATUS_PARAM);
+uint_fast16_t float64_to_uint16_round_to_zero(float64 STATUS_PARAM);
+int32 float64_to_int32( float64 STATUS_PARAM );
+int32 float64_to_int32_round_to_zero( float64 STATUS_PARAM );
+uint32 float64_to_uint32( float64 STATUS_PARAM );
+uint32 float64_to_uint32_round_to_zero( float64 STATUS_PARAM );
+int64 float64_to_int64( float64 STATUS_PARAM );
+int64 float64_to_int64_round_to_zero( float64 STATUS_PARAM );
+uint64 float64_to_uint64 (float64 a STATUS_PARAM);
+uint64 float64_to_uint64_round_to_zero (float64 a STATUS_PARAM);
+float32 float64_to_float32( float64 STATUS_PARAM );
+floatx80 float64_to_floatx80( float64 STATUS_PARAM );
+float128 float64_to_float128( float64 STATUS_PARAM );
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE double-precision operations.
+*----------------------------------------------------------------------------*/
+float64 float64_round_to_int( float64 STATUS_PARAM );
+float64 float64_trunc_to_int( float64 STATUS_PARAM );
+float64 float64_add( float64, float64 STATUS_PARAM );
+float64 float64_sub( float64, float64 STATUS_PARAM );
+float64 float64_mul( float64, float64 STATUS_PARAM );
+float64 float64_div( float64, float64 STATUS_PARAM );
+float64 float64_rem( float64, float64 STATUS_PARAM );
+float64 float64_muladd(float64, float64, float64, int STATUS_PARAM);
+float64 float64_sqrt( float64 STATUS_PARAM );
+float64 float64_log2( float64 STATUS_PARAM );
+int float64_eq( float64, float64 STATUS_PARAM );
+int float64_le( float64, float64 STATUS_PARAM );
+int float64_lt( float64, float64 STATUS_PARAM );
+int float64_unordered( float64, float64 STATUS_PARAM );
+int float64_eq_quiet( float64, float64 STATUS_PARAM );
+int float64_le_quiet( float64, float64 STATUS_PARAM );
+int float64_lt_quiet( float64, float64 STATUS_PARAM );
+int float64_unordered_quiet( float64, float64 STATUS_PARAM );
+int float64_compare( float64, float64 STATUS_PARAM );
+int float64_compare_quiet( float64, float64 STATUS_PARAM );
+float64 float64_min(float64, float64 STATUS_PARAM);
+float64 float64_max(float64, float64 STATUS_PARAM);
+int float64_is_quiet_nan( float64 a );
+int float64_is_signaling_nan( float64 );
+float64 float64_maybe_silence_nan( float64 );
+float64 float64_scalbn( float64, int STATUS_PARAM );
+
+INLINE float64 float64_abs(float64 a)
+{
+ /* Note that abs does *not* handle NaN specially, nor does
+ * it flush denormal inputs to zero.
+ */
+ return make_float64(float64_val(a) & 0x7fffffffffffffffLL);
+}
+
+INLINE float64 float64_chs(float64 a)
+{
+ /* Note that chs does *not* handle NaN specially, nor does
+ * it flush denormal inputs to zero.
+ */
+ return make_float64(float64_val(a) ^ 0x8000000000000000LL);
+}
+
+INLINE int float64_is_infinity(float64 a)
+{
+ return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL;
+}
+
+INLINE int float64_is_neg(float64 a)
+{
+ return float64_val(a) >> 63;
+}
+
+INLINE int float64_is_zero(float64 a)
+{
+ return (float64_val(a) & 0x7fffffffffffffffLL) == 0;
+}
+
+INLINE int float64_is_any_nan(float64 a)
+{
+ return ((float64_val(a) & ~(1ULL << 63)) > 0x7ff0000000000000ULL);
+}
+
+INLINE int float64_is_zero_or_denormal(float64 a)
+{
+ return (float64_val(a) & 0x7ff0000000000000LL) == 0;
+}
+
+INLINE float64 float64_set_sign(float64 a, int sign)
+{
+ return make_float64((float64_val(a) & 0x7fffffffffffffffULL)
+ | ((int64_t)sign << 63));
+}
+
+#define float64_zero make_float64(0)
+#define float64_one make_float64(0x3ff0000000000000LL)
+#define float64_ln2 make_float64(0x3fe62e42fefa39efLL)
+#define float64_pi make_float64(0x400921fb54442d18LL)
+#define float64_half make_float64(0x3fe0000000000000LL)
+#define float64_infinity make_float64(0x7ff0000000000000LL)
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated double-precision NaN.
+*----------------------------------------------------------------------------*/
+extern const float64 float64_default_nan;
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE extended double-precision conversion routines.
+*----------------------------------------------------------------------------*/
+int32 floatx80_to_int32( floatx80 STATUS_PARAM );
+int32 floatx80_to_int32_round_to_zero( floatx80 STATUS_PARAM );
+int64 floatx80_to_int64( floatx80 STATUS_PARAM );
+int64 floatx80_to_int64_round_to_zero( floatx80 STATUS_PARAM );
+float32 floatx80_to_float32( floatx80 STATUS_PARAM );
+float64 floatx80_to_float64( floatx80 STATUS_PARAM );
+float128 floatx80_to_float128( floatx80 STATUS_PARAM );
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE extended double-precision operations.
+*----------------------------------------------------------------------------*/
+floatx80 floatx80_round_to_int( floatx80 STATUS_PARAM );
+floatx80 floatx80_add( floatx80, floatx80 STATUS_PARAM );
+floatx80 floatx80_sub( floatx80, floatx80 STATUS_PARAM );
+floatx80 floatx80_mul( floatx80, floatx80 STATUS_PARAM );
+floatx80 floatx80_div( floatx80, floatx80 STATUS_PARAM );
+floatx80 floatx80_rem( floatx80, floatx80 STATUS_PARAM );
+floatx80 floatx80_sqrt( floatx80 STATUS_PARAM );
+int floatx80_eq( floatx80, floatx80 STATUS_PARAM );
+int floatx80_le( floatx80, floatx80 STATUS_PARAM );
+int floatx80_lt( floatx80, floatx80 STATUS_PARAM );
+int floatx80_unordered( floatx80, floatx80 STATUS_PARAM );
+int floatx80_eq_quiet( floatx80, floatx80 STATUS_PARAM );
+int floatx80_le_quiet( floatx80, floatx80 STATUS_PARAM );
+int floatx80_lt_quiet( floatx80, floatx80 STATUS_PARAM );
+int floatx80_unordered_quiet( floatx80, floatx80 STATUS_PARAM );
+int floatx80_compare( floatx80, floatx80 STATUS_PARAM );
+int floatx80_compare_quiet( floatx80, floatx80 STATUS_PARAM );
+int floatx80_is_quiet_nan( floatx80 );
+int floatx80_is_signaling_nan( floatx80 );
+floatx80 floatx80_maybe_silence_nan( floatx80 );
+floatx80 floatx80_scalbn( floatx80, int STATUS_PARAM );
+
+INLINE floatx80 floatx80_abs(floatx80 a)
+{
+ a.high &= 0x7fff;
+ return a;
+}
+
+INLINE floatx80 floatx80_chs(floatx80 a)
+{
+ a.high ^= 0x8000;
+ return a;
+}
+
+INLINE int floatx80_is_infinity(floatx80 a)
+{
+ return (a.high & 0x7fff) == 0x7fff && a.low == 0x8000000000000000LL;
+}
+
+INLINE int floatx80_is_neg(floatx80 a)
+{
+ return a.high >> 15;
+}
+
+INLINE int floatx80_is_zero(floatx80 a)
+{
+ return (a.high & 0x7fff) == 0 && a.low == 0;
+}
+
+INLINE int floatx80_is_zero_or_denormal(floatx80 a)
+{
+ return (a.high & 0x7fff) == 0;
+}
+
+INLINE int floatx80_is_any_nan(floatx80 a)
+{
+ return ((a.high & 0x7fff) == 0x7fff) && (a.low<<1);
+}
+
+#define floatx80_zero make_floatx80(0x0000, 0x0000000000000000LL)
+#define floatx80_one make_floatx80(0x3fff, 0x8000000000000000LL)
+#define floatx80_ln2 make_floatx80(0x3ffe, 0xb17217f7d1cf79acLL)
+#define floatx80_pi make_floatx80(0x4000, 0xc90fdaa22168c235LL)
+#define floatx80_half make_floatx80(0x3ffe, 0x8000000000000000LL)
+#define floatx80_infinity make_floatx80(0x7fff, 0x8000000000000000LL)
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated extended double-precision NaN.
+*----------------------------------------------------------------------------*/
+extern const floatx80 floatx80_default_nan;
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE quadruple-precision conversion routines.
+*----------------------------------------------------------------------------*/
+int32 float128_to_int32( float128 STATUS_PARAM );
+int32 float128_to_int32_round_to_zero( float128 STATUS_PARAM );
+int64 float128_to_int64( float128 STATUS_PARAM );
+int64 float128_to_int64_round_to_zero( float128 STATUS_PARAM );
+float32 float128_to_float32( float128 STATUS_PARAM );
+float64 float128_to_float64( float128 STATUS_PARAM );
+floatx80 float128_to_floatx80( float128 STATUS_PARAM );
+
+/*----------------------------------------------------------------------------
+| Software IEC/IEEE quadruple-precision operations.
+*----------------------------------------------------------------------------*/
+float128 float128_round_to_int( float128 STATUS_PARAM );
+float128 float128_add( float128, float128 STATUS_PARAM );
+float128 float128_sub( float128, float128 STATUS_PARAM );
+float128 float128_mul( float128, float128 STATUS_PARAM );
+float128 float128_div( float128, float128 STATUS_PARAM );
+float128 float128_rem( float128, float128 STATUS_PARAM );
+float128 float128_sqrt( float128 STATUS_PARAM );
+int float128_eq( float128, float128 STATUS_PARAM );
+int float128_le( float128, float128 STATUS_PARAM );
+int float128_lt( float128, float128 STATUS_PARAM );
+int float128_unordered( float128, float128 STATUS_PARAM );
+int float128_eq_quiet( float128, float128 STATUS_PARAM );
+int float128_le_quiet( float128, float128 STATUS_PARAM );
+int float128_lt_quiet( float128, float128 STATUS_PARAM );
+int float128_unordered_quiet( float128, float128 STATUS_PARAM );
+int float128_compare( float128, float128 STATUS_PARAM );
+int float128_compare_quiet( float128, float128 STATUS_PARAM );
+int float128_is_quiet_nan( float128 );
+int float128_is_signaling_nan( float128 );
+float128 float128_maybe_silence_nan( float128 );
+float128 float128_scalbn( float128, int STATUS_PARAM );
+
+INLINE float128 float128_abs(float128 a)
+{
+ a.high &= 0x7fffffffffffffffLL;
+ return a;
+}
+
+INLINE float128 float128_chs(float128 a)
+{
+ a.high ^= 0x8000000000000000LL;
+ return a;
+}
+
+INLINE int float128_is_infinity(float128 a)
+{
+ return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0;
+}
+
+INLINE int float128_is_neg(float128 a)
+{
+ return a.high >> 63;
+}
+
+INLINE int float128_is_zero(float128 a)
+{
+ return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0;
+}
+
+INLINE int float128_is_zero_or_denormal(float128 a)
+{
+ return (a.high & 0x7fff000000000000LL) == 0;
+}
+
+INLINE int float128_is_any_nan(float128 a)
+{
+ return ((a.high >> 48) & 0x7fff) == 0x7fff &&
+ ((a.low != 0) || ((a.high & 0xffffffffffffLL) != 0));
+}
+
+#define float128_zero make_float128(0, 0)
+
+/*----------------------------------------------------------------------------
+| The pattern for a default generated quadruple-precision NaN.
+*----------------------------------------------------------------------------*/
+extern const float128 float128_default_nan;
+
+#endif /* !SOFTFLOAT_H */
diff --git a/include/libfdt_env.h b/include/libfdt_env.h
new file mode 100644
index 0000000000..7938d73fae
--- /dev/null
+++ b/include/libfdt_env.h
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright IBM Corp. 2008
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ */
+
+#ifndef _LIBFDT_ENV_H
+#define _LIBFDT_ENV_H
+
+#include "qemu/bswap.h"
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define fdt32_to_cpu(x) (x)
+#define cpu_to_fdt32(x) (x)
+#define fdt64_to_cpu(x) (x)
+#define cpu_to_fdt64(x) (x)
+#else
+#define fdt32_to_cpu(x) (bswap_32((x)))
+#define cpu_to_fdt32(x) (bswap_32((x)))
+#define fdt64_to_cpu(x) (bswap_64((x)))
+#define cpu_to_fdt64(x) (bswap_64((x)))
+#endif
+
+#endif /* _LIBFDT_ENV_H */
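Since flattened device trees store every cell big-endian, any 32- or 64-bit value read out of a blob must pass through the helpers above before the host uses it. A small illustrative sketch, assuming this header is included along with <stdint.h> and <string.h>; the function name is invented:

    #include <stdint.h>
    #include <string.h>

    /* Read one 32-bit cell from a property blob and convert to host order. */
    static uint32_t read_fdt_cell(const void *prop_data)
    {
        uint32_t be_cell;

        memcpy(&be_cell, prop_data, sizeof(be_cell));   /* raw big-endian cell */
        return fdt32_to_cpu(be_cell);                   /* host byte order */
    }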
diff --git a/include/migration/block.h b/include/migration/block.h
new file mode 100644
index 0000000000..ffa8ac0bdd
--- /dev/null
+++ b/include/migration/block.h
@@ -0,0 +1,23 @@
+/*
+ * QEMU live block migration
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Liran Schour <lirans@il.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef BLOCK_MIGRATION_H
+#define BLOCK_MIGRATION_H
+
+void blk_mig_init(void);
+int blk_mig_active(void);
+uint64_t blk_mig_bytes_transferred(void);
+uint64_t blk_mig_bytes_remaining(void);
+uint64_t blk_mig_bytes_total(void);
+
+#endif /* BLOCK_MIGRATION_H */
diff --git a/include/migration/migration.h b/include/migration/migration.h
new file mode 100644
index 0000000000..2d5b630cce
--- /dev/null
+++ b/include/migration/migration.h
@@ -0,0 +1,138 @@
+/*
+ * QEMU live migration
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_MIGRATION_H
+#define QEMU_MIGRATION_H
+
+#include "qapi/qmp/qdict.h"
+#include "qemu-common.h"
+#include "qemu/thread.h"
+#include "qemu/notify.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+#include "qapi-types.h"
+
+struct MigrationParams {
+ bool blk;
+ bool shared;
+};
+
+typedef struct MigrationState MigrationState;
+
+struct MigrationState
+{
+ int64_t bandwidth_limit;
+ size_t bytes_xfer;
+ size_t xfer_limit;
+ uint8_t *buffer;
+ size_t buffer_size;
+ size_t buffer_capacity;
+ QemuThread thread;
+
+ QEMUFile *file;
+ int fd;
+ int state;
+ int (*get_error)(MigrationState *s);
+ int (*close)(MigrationState *s);
+ int (*write)(MigrationState *s, const void *buff, size_t size);
+ void *opaque;
+ MigrationParams params;
+ int64_t total_time;
+ int64_t downtime;
+ int64_t expected_downtime;
+ int64_t dirty_pages_rate;
+ bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
+ int64_t xbzrle_cache_size;
+ bool complete;
+ bool first_time;
+};
+
+void process_incoming_migration(QEMUFile *f);
+
+void qemu_start_incoming_migration(const char *uri, Error **errp);
+
+uint64_t migrate_max_downtime(void);
+
+void do_info_migrate_print(Monitor *mon, const QObject *data);
+
+void do_info_migrate(Monitor *mon, QObject **ret_data);
+
+void exec_start_incoming_migration(const char *host_port, Error **errp);
+
+void exec_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp);
+
+void tcp_start_incoming_migration(const char *host_port, Error **errp);
+
+void tcp_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp);
+
+void unix_start_incoming_migration(const char *path, Error **errp);
+
+void unix_start_outgoing_migration(MigrationState *s, const char *path, Error **errp);
+
+void fd_start_incoming_migration(const char *path, Error **errp);
+
+void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **errp);
+
+void migrate_fd_error(MigrationState *s);
+
+void migrate_fd_connect(MigrationState *s);
+
+ssize_t migrate_fd_put_buffer(MigrationState *s, const void *data,
+ size_t size);
+int migrate_fd_close(MigrationState *s);
+
+void add_migration_state_change_notifier(Notifier *notify);
+void remove_migration_state_change_notifier(Notifier *notify);
+bool migration_is_active(MigrationState *);
+bool migration_has_finished(MigrationState *);
+bool migration_has_failed(MigrationState *);
+MigrationState *migrate_get_current(void);
+
+uint64_t ram_bytes_remaining(void);
+uint64_t ram_bytes_transferred(void);
+uint64_t ram_bytes_total(void);
+
+extern SaveVMHandlers savevm_ram_handlers;
+
+uint64_t dup_mig_bytes_transferred(void);
+uint64_t dup_mig_pages_transferred(void);
+uint64_t norm_mig_bytes_transferred(void);
+uint64_t norm_mig_pages_transferred(void);
+uint64_t xbzrle_mig_bytes_transferred(void);
+uint64_t xbzrle_mig_pages_transferred(void);
+uint64_t xbzrle_mig_pages_overflow(void);
+uint64_t xbzrle_mig_pages_cache_miss(void);
+
+/**
+ * @migrate_add_blocker - prevent migration from proceeding
+ *
+ * @reason - an error to be returned whenever migration is attempted
+ */
+void migrate_add_blocker(Error *reason);
+
+/**
+ * @migrate_del_blocker - remove a blocking error from migration
+ *
+ * @reason - the error blocking migration
+ */
+void migrate_del_blocker(Error *reason);
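An illustrative sketch of the blocker API documented above; the device names are invented, and error_setg()/error_free() are assumed to be available from qapi/error.h. The same Error pointer passed to migrate_add_blocker() must later be handed to migrate_del_blocker().

    static Error *demo_dev_mig_blocker;    /* hypothetical device-level blocker */

    static void demo_dev_block_migration(void)
    {
        error_setg(&demo_dev_mig_blocker,
                   "demo-dev: device is in a non-migratable state");
        migrate_add_blocker(demo_dev_mig_blocker);
    }

    static void demo_dev_unblock_migration(void)
    {
        migrate_del_blocker(demo_dev_mig_blocker);
        error_free(demo_dev_mig_blocker);
        demo_dev_mig_blocker = NULL;
    }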
+
+int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
+ uint8_t *dst, int dlen);
+int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen);
+
+int migrate_use_xbzrle(void);
+int64_t migrate_xbzrle_cache_size(void);
+
+int64_t xbzrle_cache_resize(int64_t new_size);
+#endif
diff --git a/include/qemu/page_cache.h b/include/migration/page_cache.h
index 3839ac7726..3839ac7726 100644
--- a/include/qemu/page_cache.h
+++ b/include/migration/page_cache.h
diff --git a/include/migration/qemu-file.h b/include/migration/qemu-file.h
new file mode 100644
index 0000000000..68deefbcfb
--- /dev/null
+++ b/include/migration/qemu-file.h
@@ -0,0 +1,236 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef QEMU_FILE_H
+#define QEMU_FILE_H 1
+
+/* This function writes a chunk of data to a file at the given position.
+ * The pos argument can be ignored if the file is only being used for
+ * streaming. The handler should try to write all of the data it can.
+ */
+typedef int (QEMUFilePutBufferFunc)(void *opaque, const uint8_t *buf,
+ int64_t pos, int size);
+
+/* Read a chunk of data from a file at the given position. The pos argument
+ * can be ignored if the file is only being used for streaming. The number of
+ * bytes actually read should be returned.
+ */
+typedef int (QEMUFileGetBufferFunc)(void *opaque, uint8_t *buf,
+ int64_t pos, int size);
+
+/* Close a file
+ *
+ * Return negative error number on error, 0 or positive value on success.
+ *
+ * The meaning of the return value on success depends on the specific back end
+ * in use.
+ */
+typedef int (QEMUFileCloseFunc)(void *opaque);
+
+/* Called to return the OS file descriptor associated with the QEMUFile.
+ */
+typedef int (QEMUFileGetFD)(void *opaque);
+
+/* Called to determine if the file has exceeded its bandwidth allocation. The
+ * bandwidth capping is a soft limit, not a hard limit.
+ */
+typedef int (QEMUFileRateLimit)(void *opaque);
+
+/* Called to change the current bandwidth allocation. This function must return
+ * the new actual bandwidth. It should be new_rate if everything goes OK, and
+ * the old rate otherwise.
+ */
+typedef int64_t (QEMUFileSetRateLimit)(void *opaque, int64_t new_rate);
+typedef int64_t (QEMUFileGetRateLimit)(void *opaque);
+
+typedef struct QEMUFileOps {
+ QEMUFilePutBufferFunc *put_buffer;
+ QEMUFileGetBufferFunc *get_buffer;
+ QEMUFileCloseFunc *close;
+ QEMUFileGetFD *get_fd;
+ QEMUFileRateLimit *rate_limit;
+ QEMUFileSetRateLimit *set_rate_limit;
+ QEMUFileGetRateLimit *get_rate_limit;
+} QEMUFileOps;
+
+QEMUFile *qemu_fopen_ops(void *opaque, const QEMUFileOps *ops);
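A minimal sketch of a custom back end wired up through qemu_fopen_ops(), following the callback contracts documented above. The ByteCounter type and all function names are invented for illustration; a write-only sink only needs put_buffer and close.

    typedef struct ByteCounter {
        uint64_t written;
    } ByteCounter;

    static int counter_put_buffer(void *opaque, const uint8_t *buf,
                                  int64_t pos, int size)
    {
        ByteCounter *c = opaque;

        c->written += size;     /* pretend the whole chunk was written */
        return size;            /* bytes accepted */
    }

    static int counter_close(void *opaque)
    {
        return 0;               /* nothing to release */
    }

    static const QEMUFileOps counter_ops = {
        .put_buffer = counter_put_buffer,
        .close      = counter_close,
    };

    /* Usage: QEMUFile *f = qemu_fopen_ops(&counter, &counter_ops); */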
+QEMUFile *qemu_fopen(const char *filename, const char *mode);
+QEMUFile *qemu_fdopen(int fd, const char *mode);
+QEMUFile *qemu_fopen_socket(int fd);
+QEMUFile *qemu_popen(FILE *popen_file, const char *mode);
+QEMUFile *qemu_popen_cmd(const char *command, const char *mode);
+int qemu_get_fd(QEMUFile *f);
+int qemu_fclose(QEMUFile *f);
+void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size);
+void qemu_put_byte(QEMUFile *f, int v);
+
+static inline void qemu_put_ubyte(QEMUFile *f, unsigned int v)
+{
+ qemu_put_byte(f, (int)v);
+}
+
+#define qemu_put_sbyte qemu_put_byte
+
+void qemu_put_be16(QEMUFile *f, unsigned int v);
+void qemu_put_be32(QEMUFile *f, unsigned int v);
+void qemu_put_be64(QEMUFile *f, uint64_t v);
+int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size);
+int qemu_get_byte(QEMUFile *f);
+
+static inline unsigned int qemu_get_ubyte(QEMUFile *f)
+{
+ return (unsigned int)qemu_get_byte(f);
+}
+
+#define qemu_get_sbyte qemu_get_byte
+
+unsigned int qemu_get_be16(QEMUFile *f);
+unsigned int qemu_get_be32(QEMUFile *f);
+uint64_t qemu_get_be64(QEMUFile *f);
+
+int qemu_file_rate_limit(QEMUFile *f);
+int64_t qemu_file_set_rate_limit(QEMUFile *f, int64_t new_rate);
+int64_t qemu_file_get_rate_limit(QEMUFile *f);
+int qemu_file_get_error(QEMUFile *f);
+
+static inline void qemu_put_be64s(QEMUFile *f, const uint64_t *pv)
+{
+ qemu_put_be64(f, *pv);
+}
+
+static inline void qemu_put_be32s(QEMUFile *f, const uint32_t *pv)
+{
+ qemu_put_be32(f, *pv);
+}
+
+static inline void qemu_put_be16s(QEMUFile *f, const uint16_t *pv)
+{
+ qemu_put_be16(f, *pv);
+}
+
+static inline void qemu_put_8s(QEMUFile *f, const uint8_t *pv)
+{
+ qemu_put_byte(f, *pv);
+}
+
+static inline void qemu_get_be64s(QEMUFile *f, uint64_t *pv)
+{
+ *pv = qemu_get_be64(f);
+}
+
+static inline void qemu_get_be32s(QEMUFile *f, uint32_t *pv)
+{
+ *pv = qemu_get_be32(f);
+}
+
+static inline void qemu_get_be16s(QEMUFile *f, uint16_t *pv)
+{
+ *pv = qemu_get_be16(f);
+}
+
+static inline void qemu_get_8s(QEMUFile *f, uint8_t *pv)
+{
+ *pv = qemu_get_byte(f);
+}
+
+/* Signed versions for type safety */
+static inline void qemu_put_sbuffer(QEMUFile *f, const int8_t *buf, int size)
+{
+ qemu_put_buffer(f, (const uint8_t *)buf, size);
+}
+
+static inline void qemu_put_sbe16(QEMUFile *f, int v)
+{
+ qemu_put_be16(f, (unsigned int)v);
+}
+
+static inline void qemu_put_sbe32(QEMUFile *f, int v)
+{
+ qemu_put_be32(f, (unsigned int)v);
+}
+
+static inline void qemu_put_sbe64(QEMUFile *f, int64_t v)
+{
+ qemu_put_be64(f, (uint64_t)v);
+}
+
+static inline size_t qemu_get_sbuffer(QEMUFile *f, int8_t *buf, int size)
+{
+ return qemu_get_buffer(f, (uint8_t *)buf, size);
+}
+
+static inline int qemu_get_sbe16(QEMUFile *f)
+{
+ return (int)qemu_get_be16(f);
+}
+
+static inline int qemu_get_sbe32(QEMUFile *f)
+{
+ return (int)qemu_get_be32(f);
+}
+
+static inline int64_t qemu_get_sbe64(QEMUFile *f)
+{
+ return (int64_t)qemu_get_be64(f);
+}
+
+static inline void qemu_put_s8s(QEMUFile *f, const int8_t *pv)
+{
+ qemu_put_8s(f, (const uint8_t *)pv);
+}
+
+static inline void qemu_put_sbe16s(QEMUFile *f, const int16_t *pv)
+{
+ qemu_put_be16s(f, (const uint16_t *)pv);
+}
+
+static inline void qemu_put_sbe32s(QEMUFile *f, const int32_t *pv)
+{
+ qemu_put_be32s(f, (const uint32_t *)pv);
+}
+
+static inline void qemu_put_sbe64s(QEMUFile *f, const int64_t *pv)
+{
+ qemu_put_be64s(f, (const uint64_t *)pv);
+}
+
+static inline void qemu_get_s8s(QEMUFile *f, int8_t *pv)
+{
+ qemu_get_8s(f, (uint8_t *)pv);
+}
+
+static inline void qemu_get_sbe16s(QEMUFile *f, int16_t *pv)
+{
+ qemu_get_be16s(f, (uint16_t *)pv);
+}
+
+static inline void qemu_get_sbe32s(QEMUFile *f, int32_t *pv)
+{
+ qemu_get_be32s(f, (uint32_t *)pv);
+}
+
+static inline void qemu_get_sbe64s(QEMUFile *f, int64_t *pv)
+{
+ qemu_get_be64s(f, (uint64_t *)pv);
+}
+#endif
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
new file mode 100644
index 0000000000..f27276c2d8
--- /dev/null
+++ b/include/migration/vmstate.h
@@ -0,0 +1,640 @@
+/*
+ * QEMU migration/snapshot declarations
+ *
+ * Copyright (c) 2009-2011 Red Hat, Inc.
+ *
+ * Original author: Juan Quintela <quintela@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef QEMU_VMSTATE_H
+#define QEMU_VMSTATE_H 1
+
+typedef void SaveStateHandler(QEMUFile *f, void *opaque);
+typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
+
+typedef struct SaveVMHandlers {
+ void (*set_params)(const MigrationParams *params, void *opaque);
+ SaveStateHandler *save_state;
+ int (*save_live_setup)(QEMUFile *f, void *opaque);
+ int (*save_live_iterate)(QEMUFile *f, void *opaque);
+ int (*save_live_complete)(QEMUFile *f, void *opaque);
+ uint64_t (*save_live_pending)(QEMUFile *f, void *opaque, uint64_t max_size);
+ void (*cancel)(void *opaque);
+ LoadStateHandler *load_state;
+ bool (*is_active)(void *opaque);
+} SaveVMHandlers;
+
+int register_savevm(DeviceState *dev,
+ const char *idstr,
+ int instance_id,
+ int version_id,
+ SaveStateHandler *save_state,
+ LoadStateHandler *load_state,
+ void *opaque);
+
+int register_savevm_live(DeviceState *dev,
+ const char *idstr,
+ int instance_id,
+ int version_id,
+ SaveVMHandlers *ops,
+ void *opaque);
+
+void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
+void register_device_unmigratable(DeviceState *dev, const char *idstr,
+ void *opaque);
+
+
+typedef struct VMStateInfo VMStateInfo;
+typedef struct VMStateDescription VMStateDescription;
+
+struct VMStateInfo {
+ const char *name;
+ int (*get)(QEMUFile *f, void *pv, size_t size);
+ void (*put)(QEMUFile *f, void *pv, size_t size);
+};
+
+enum VMStateFlags {
+ VMS_SINGLE = 0x001,
+ VMS_POINTER = 0x002,
+ VMS_ARRAY = 0x004,
+ VMS_STRUCT = 0x008,
+ VMS_VARRAY_INT32 = 0x010, /* Array with size in int32_t field*/
+ VMS_BUFFER = 0x020, /* static sized buffer */
+ VMS_ARRAY_OF_POINTER = 0x040,
+ VMS_VARRAY_UINT16 = 0x080, /* Array with size in uint16_t field */
+ VMS_VBUFFER = 0x100, /* Buffer with size in int32_t field */
+ VMS_MULTIPLY = 0x200, /* multiply "size" field by field_size */
+ VMS_VARRAY_UINT8 = 0x400, /* Array with size in uint8_t field*/
+ VMS_VARRAY_UINT32 = 0x800, /* Array with size in uint32_t field*/
+};
+
+typedef struct {
+ const char *name;
+ size_t offset;
+ size_t size;
+ size_t start;
+ int num;
+ size_t num_offset;
+ size_t size_offset;
+ const VMStateInfo *info;
+ enum VMStateFlags flags;
+ const VMStateDescription *vmsd;
+ int version_id;
+ bool (*field_exists)(void *opaque, int version_id);
+} VMStateField;
+
+typedef struct VMStateSubsection {
+ const VMStateDescription *vmsd;
+ bool (*needed)(void *opaque);
+} VMStateSubsection;
+
+struct VMStateDescription {
+ const char *name;
+ int unmigratable;
+ int version_id;
+ int minimum_version_id;
+ int minimum_version_id_old;
+ LoadStateHandler *load_state_old;
+ int (*pre_load)(void *opaque);
+ int (*post_load)(void *opaque, int version_id);
+ void (*pre_save)(void *opaque);
+ VMStateField *fields;
+ const VMStateSubsection *subsections;
+};
+
+extern const VMStateInfo vmstate_info_bool;
+
+extern const VMStateInfo vmstate_info_int8;
+extern const VMStateInfo vmstate_info_int16;
+extern const VMStateInfo vmstate_info_int32;
+extern const VMStateInfo vmstate_info_int64;
+
+extern const VMStateInfo vmstate_info_uint8_equal;
+extern const VMStateInfo vmstate_info_uint16_equal;
+extern const VMStateInfo vmstate_info_int32_equal;
+extern const VMStateInfo vmstate_info_uint32_equal;
+extern const VMStateInfo vmstate_info_int32_le;
+
+extern const VMStateInfo vmstate_info_uint8;
+extern const VMStateInfo vmstate_info_uint16;
+extern const VMStateInfo vmstate_info_uint32;
+extern const VMStateInfo vmstate_info_uint64;
+
+extern const VMStateInfo vmstate_info_timer;
+extern const VMStateInfo vmstate_info_buffer;
+extern const VMStateInfo vmstate_info_unused_buffer;
+extern const VMStateInfo vmstate_info_bitmap;
+
+#define type_check_array(t1,t2,n) ((t1(*)[n])0 - (t2*)0)
+#define type_check_pointer(t1,t2) ((t1**)0 - (t2*)0)
+
+#define vmstate_offset_value(_state, _field, _type) \
+ (offsetof(_state, _field) + \
+ type_check(_type, typeof_field(_state, _field)))
+
+#define vmstate_offset_pointer(_state, _field, _type) \
+ (offsetof(_state, _field) + \
+ type_check_pointer(_type, typeof_field(_state, _field)))
+
+#define vmstate_offset_array(_state, _field, _type, _num) \
+ (offsetof(_state, _field) + \
+ type_check_array(_type, typeof_field(_state, _field), _num))
+
+#define vmstate_offset_sub_array(_state, _field, _type, _start) \
+ (offsetof(_state, _field[_start]))
+
+#define vmstate_offset_buffer(_state, _field) \
+ vmstate_offset_array(_state, _field, uint8_t, \
+ sizeof(typeof_field(_state, _field)))
+
+#define VMSTATE_SINGLE_TEST(_field, _state, _test, _version, _info, _type) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .field_exists = (_test), \
+ .size = sizeof(_type), \
+ .info = &(_info), \
+ .flags = VMS_SINGLE, \
+ .offset = vmstate_offset_value(_state, _field, _type), \
+}
+
+#define VMSTATE_POINTER(_field, _state, _version, _info, _type) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_SINGLE|VMS_POINTER, \
+ .offset = vmstate_offset_value(_state, _field, _type), \
+}
+
+#define VMSTATE_POINTER_TEST(_field, _state, _test, _info, _type) { \
+ .name = (stringify(_field)), \
+ .info = &(_info), \
+ .field_exists = (_test), \
+ .size = sizeof(_type), \
+ .flags = VMS_SINGLE|VMS_POINTER, \
+ .offset = vmstate_offset_value(_state, _field, _type), \
+}
+
+#define VMSTATE_ARRAY(_field, _state, _num, _version, _info, _type) {\
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .num = (_num), \
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_ARRAY, \
+ .offset = vmstate_offset_array(_state, _field, _type, _num), \
+}
+
+#define VMSTATE_ARRAY_TEST(_field, _state, _num, _test, _info, _type) {\
+ .name = (stringify(_field)), \
+ .field_exists = (_test), \
+ .num = (_num), \
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_ARRAY, \
+ .offset = vmstate_offset_array(_state, _field, _type, _num),\
+}
+
+#define VMSTATE_SUB_ARRAY(_field, _state, _start, _num, _version, _info, _type) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .num = (_num), \
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_ARRAY, \
+ .offset = vmstate_offset_sub_array(_state, _field, _type, _start), \
+}
+
+#define VMSTATE_ARRAY_INT32_UNSAFE(_field, _state, _field_num, _info, _type) {\
+ .name = (stringify(_field)), \
+ .num_offset = vmstate_offset_value(_state, _field_num, int32_t), \
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_VARRAY_INT32, \
+ .offset = offsetof(_state, _field), \
+}
+
+#define VMSTATE_VARRAY_INT32(_field, _state, _field_num, _version, _info, _type) {\
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .num_offset = vmstate_offset_value(_state, _field_num, int32_t), \
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_VARRAY_INT32|VMS_POINTER, \
+ .offset = vmstate_offset_pointer(_state, _field, _type), \
+}
+
+#define VMSTATE_VARRAY_UINT32(_field, _state, _field_num, _version, _info, _type) {\
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .num_offset = vmstate_offset_value(_state, _field_num, uint32_t),\
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_VARRAY_UINT32|VMS_POINTER, \
+ .offset = vmstate_offset_pointer(_state, _field, _type), \
+}
+
+#define VMSTATE_VARRAY_UINT16_UNSAFE(_field, _state, _field_num, _version, _info, _type) {\
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),\
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_VARRAY_UINT16, \
+ .offset = offsetof(_state, _field), \
+}
+
+#define VMSTATE_STRUCT_TEST(_field, _state, _test, _version, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .field_exists = (_test), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, _type), \
+}
+
+#define VMSTATE_STRUCT_POINTER_TEST(_field, _state, _test, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .field_exists = (_test), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .flags = VMS_STRUCT|VMS_POINTER, \
+ .offset = vmstate_offset_value(_state, _field, _type), \
+}
+
+#define VMSTATE_ARRAY_OF_POINTER(_field, _state, _num, _version, _info, _type) {\
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .num = (_num), \
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_ARRAY|VMS_ARRAY_OF_POINTER, \
+ .offset = vmstate_offset_array(_state, _field, _type, _num), \
+}
+
+#define VMSTATE_STRUCT_ARRAY_TEST(_field, _state, _num, _test, _version, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .num = (_num), \
+ .field_exists = (_test), \
+ .version_id = (_version), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .flags = VMS_STRUCT|VMS_ARRAY, \
+ .offset = vmstate_offset_array(_state, _field, _type, _num),\
+}
+
+#define VMSTATE_STRUCT_VARRAY_UINT8(_field, _state, _field_num, _version, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .num_offset = vmstate_offset_value(_state, _field_num, uint8_t), \
+ .version_id = (_version), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .flags = VMS_STRUCT|VMS_VARRAY_UINT8, \
+ .offset = offsetof(_state, _field), \
+}
+
+#define VMSTATE_STRUCT_VARRAY_POINTER_INT32(_field, _state, _field_num, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .version_id = 0, \
+ .num_offset = vmstate_offset_value(_state, _field_num, int32_t), \
+ .size = sizeof(_type), \
+ .vmsd = &(_vmsd), \
+ .flags = VMS_POINTER | VMS_VARRAY_INT32 | VMS_STRUCT, \
+ .offset = vmstate_offset_pointer(_state, _field, _type), \
+}
+
+#define VMSTATE_STRUCT_VARRAY_POINTER_UINT16(_field, _state, _field_num, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .version_id = 0, \
+ .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),\
+ .size = sizeof(_type), \
+ .vmsd = &(_vmsd), \
+ .flags = VMS_POINTER | VMS_VARRAY_UINT16 | VMS_STRUCT, \
+ .offset = vmstate_offset_pointer(_state, _field, _type), \
+}
+
+#define VMSTATE_STRUCT_VARRAY_INT32(_field, _state, _field_num, _version, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .num_offset = vmstate_offset_value(_state, _field_num, int32_t), \
+ .version_id = (_version), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .flags = VMS_STRUCT|VMS_VARRAY_INT32, \
+ .offset = offsetof(_state, _field), \
+}
+
+#define VMSTATE_STRUCT_VARRAY_UINT32(_field, _state, _field_num, _version, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .num_offset = vmstate_offset_value(_state, _field_num, uint32_t), \
+ .version_id = (_version), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .flags = VMS_STRUCT|VMS_VARRAY_UINT32, \
+ .offset = offsetof(_state, _field), \
+}
+
+#define VMSTATE_STATIC_BUFFER(_field, _state, _version, _test, _start, _size) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .field_exists = (_test), \
+ .size = (_size - _start), \
+ .info = &vmstate_info_buffer, \
+ .flags = VMS_BUFFER, \
+ .offset = vmstate_offset_buffer(_state, _field) + _start, \
+}
+
+#define VMSTATE_BUFFER_MULTIPLY(_field, _state, _version, _test, _start, _field_size, _multiply) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .field_exists = (_test), \
+ .size_offset = vmstate_offset_value(_state, _field_size, uint32_t),\
+ .size = (_multiply), \
+ .info = &vmstate_info_buffer, \
+ .flags = VMS_VBUFFER|VMS_MULTIPLY, \
+ .offset = offsetof(_state, _field), \
+ .start = (_start), \
+}
+
+#define VMSTATE_VBUFFER(_field, _state, _version, _test, _start, _field_size) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .field_exists = (_test), \
+ .size_offset = vmstate_offset_value(_state, _field_size, int32_t),\
+ .info = &vmstate_info_buffer, \
+ .flags = VMS_VBUFFER|VMS_POINTER, \
+ .offset = offsetof(_state, _field), \
+ .start = (_start), \
+}
+
+#define VMSTATE_VBUFFER_UINT32(_field, _state, _version, _test, _start, _field_size) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .field_exists = (_test), \
+ .size_offset = vmstate_offset_value(_state, _field_size, uint32_t),\
+ .info = &vmstate_info_buffer, \
+ .flags = VMS_VBUFFER|VMS_POINTER, \
+ .offset = offsetof(_state, _field), \
+ .start = (_start), \
+}
+
+#define VMSTATE_BUFFER_UNSAFE_INFO(_field, _state, _version, _info, _size) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .size = (_size), \
+ .info = &(_info), \
+ .flags = VMS_BUFFER, \
+ .offset = offsetof(_state, _field), \
+}
+
+#define VMSTATE_UNUSED_BUFFER(_test, _version, _size) { \
+ .name = "unused", \
+ .field_exists = (_test), \
+ .version_id = (_version), \
+ .size = (_size), \
+ .info = &vmstate_info_unused_buffer, \
+ .flags = VMS_BUFFER, \
+}
+
+/* _field_size should be an int32_t field in the _state struct giving the
+ * size of the bitmap _field in bits.
+ */
+#define VMSTATE_BITMAP(_field, _state, _version, _field_size) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .size_offset = vmstate_offset_value(_state, _field_size, int32_t),\
+ .info = &vmstate_info_bitmap, \
+ .flags = VMS_VBUFFER|VMS_POINTER, \
+ .offset = offsetof(_state, _field), \
+}
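A short sketch of the pairing this macro expects (the device struct is invented): the bit count lives in an int32_t member of the same state structure and is normally migrated alongside the bitmap.

    typedef struct DemoBitmapState {
        int32_t nr_bits;        /* bitmap length in bits */
        unsigned long *in_use;  /* e.g. allocated with bitmap_new(nr_bits) */
    } DemoBitmapState;

    /* In the field list of the corresponding VMStateDescription:
     *     VMSTATE_INT32(nr_bits, DemoBitmapState),
     *     VMSTATE_BITMAP(in_use, DemoBitmapState, 1, nr_bits),
     */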
+
+/* Abbreviations used in the convenience macros below:
+   _f   : field name
+   _f_n : name of the field holding the number of elements
+   _n   : number of elements
+   _s   : state struct name
+   _v   : version
+*/
+
+#define VMSTATE_SINGLE(_field, _state, _version, _info, _type) \
+ VMSTATE_SINGLE_TEST(_field, _state, NULL, _version, _info, _type)
+
+#define VMSTATE_STRUCT(_field, _state, _version, _vmsd, _type) \
+ VMSTATE_STRUCT_TEST(_field, _state, NULL, _version, _vmsd, _type)
+
+#define VMSTATE_STRUCT_POINTER(_field, _state, _vmsd, _type) \
+ VMSTATE_STRUCT_POINTER_TEST(_field, _state, NULL, _vmsd, _type)
+
+#define VMSTATE_STRUCT_ARRAY(_field, _state, _num, _version, _vmsd, _type) \
+ VMSTATE_STRUCT_ARRAY_TEST(_field, _state, _num, NULL, _version, \
+ _vmsd, _type)
+
+#define VMSTATE_BOOL_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_bool, bool)
+
+#define VMSTATE_INT8_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_int8, int8_t)
+#define VMSTATE_INT16_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_int16, int16_t)
+#define VMSTATE_INT32_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_int32, int32_t)
+#define VMSTATE_INT64_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_int64, int64_t)
+
+#define VMSTATE_UINT8_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint8, uint8_t)
+#define VMSTATE_UINT16_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint16, uint16_t)
+#define VMSTATE_UINT32_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint32, uint32_t)
+#define VMSTATE_UINT64_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, uint64_t)
+
+#define VMSTATE_BOOL(_f, _s) \
+ VMSTATE_BOOL_V(_f, _s, 0)
+
+#define VMSTATE_INT8(_f, _s) \
+ VMSTATE_INT8_V(_f, _s, 0)
+#define VMSTATE_INT16(_f, _s) \
+ VMSTATE_INT16_V(_f, _s, 0)
+#define VMSTATE_INT32(_f, _s) \
+ VMSTATE_INT32_V(_f, _s, 0)
+#define VMSTATE_INT64(_f, _s) \
+ VMSTATE_INT64_V(_f, _s, 0)
+
+#define VMSTATE_UINT8(_f, _s) \
+ VMSTATE_UINT8_V(_f, _s, 0)
+#define VMSTATE_UINT16(_f, _s) \
+ VMSTATE_UINT16_V(_f, _s, 0)
+#define VMSTATE_UINT32(_f, _s) \
+ VMSTATE_UINT32_V(_f, _s, 0)
+#define VMSTATE_UINT64(_f, _s) \
+ VMSTATE_UINT64_V(_f, _s, 0)
+
+#define VMSTATE_UINT8_EQUAL(_f, _s) \
+ VMSTATE_SINGLE(_f, _s, 0, vmstate_info_uint8_equal, uint8_t)
+
+#define VMSTATE_UINT16_EQUAL(_f, _s) \
+ VMSTATE_SINGLE(_f, _s, 0, vmstate_info_uint16_equal, uint16_t)
+
+#define VMSTATE_UINT16_EQUAL_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint16_equal, uint16_t)
+
+#define VMSTATE_INT32_EQUAL(_f, _s) \
+ VMSTATE_SINGLE(_f, _s, 0, vmstate_info_int32_equal, int32_t)
+
+#define VMSTATE_UINT32_EQUAL(_f, _s) \
+ VMSTATE_SINGLE(_f, _s, 0, vmstate_info_uint32_equal, uint32_t)
+
+#define VMSTATE_INT32_LE(_f, _s) \
+ VMSTATE_SINGLE(_f, _s, 0, vmstate_info_int32_le, int32_t)
+
+#define VMSTATE_UINT8_TEST(_f, _s, _t) \
+ VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint8, uint8_t)
+
+#define VMSTATE_UINT16_TEST(_f, _s, _t) \
+ VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint16, uint16_t)
+
+#define VMSTATE_UINT32_TEST(_f, _s, _t) \
+ VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint32, uint32_t)
+
+#define VMSTATE_TIMER_TEST(_f, _s, _test) \
+ VMSTATE_POINTER_TEST(_f, _s, _test, vmstate_info_timer, QEMUTimer *)
+
+#define VMSTATE_TIMER_V(_f, _s, _v) \
+ VMSTATE_POINTER(_f, _s, _v, vmstate_info_timer, QEMUTimer *)
+
+#define VMSTATE_TIMER(_f, _s) \
+ VMSTATE_TIMER_V(_f, _s, 0)
+
+#define VMSTATE_TIMER_ARRAY(_f, _s, _n) \
+ VMSTATE_ARRAY_OF_POINTER(_f, _s, _n, 0, vmstate_info_timer, QEMUTimer *)
+
+#define VMSTATE_BOOL_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_bool, bool)
+
+#define VMSTATE_BOOL_ARRAY(_f, _s, _n) \
+ VMSTATE_BOOL_ARRAY_V(_f, _s, _n, 0)
+
+#define VMSTATE_UINT16_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint16, uint16_t)
+
+#define VMSTATE_UINT16_ARRAY(_f, _s, _n) \
+ VMSTATE_UINT16_ARRAY_V(_f, _s, _n, 0)
+
+#define VMSTATE_UINT8_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint8, uint8_t)
+
+#define VMSTATE_UINT8_ARRAY(_f, _s, _n) \
+ VMSTATE_UINT8_ARRAY_V(_f, _s, _n, 0)
+
+#define VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint32, uint32_t)
+
+#define VMSTATE_UINT32_ARRAY(_f, _s, _n) \
+ VMSTATE_UINT32_ARRAY_V(_f, _s, _n, 0)
+
+#define VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_uint64, uint64_t)
+
+#define VMSTATE_UINT64_ARRAY(_f, _s, _n) \
+ VMSTATE_UINT64_ARRAY_V(_f, _s, _n, 0)
+
+#define VMSTATE_INT16_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int16, int16_t)
+
+#define VMSTATE_INT16_ARRAY(_f, _s, _n) \
+ VMSTATE_INT16_ARRAY_V(_f, _s, _n, 0)
+
+#define VMSTATE_INT32_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int32, int32_t)
+
+#define VMSTATE_INT32_ARRAY(_f, _s, _n) \
+ VMSTATE_INT32_ARRAY_V(_f, _s, _n, 0)
+
+#define VMSTATE_UINT32_SUB_ARRAY(_f, _s, _start, _num) \
+ VMSTATE_SUB_ARRAY(_f, _s, _start, _num, 0, vmstate_info_uint32, uint32_t)
+
+#define VMSTATE_INT64_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int64, int64_t)
+
+#define VMSTATE_INT64_ARRAY(_f, _s, _n) \
+ VMSTATE_INT64_ARRAY_V(_f, _s, _n, 0)
+
+#define VMSTATE_BUFFER_V(_f, _s, _v) \
+ VMSTATE_STATIC_BUFFER(_f, _s, _v, NULL, 0, sizeof(typeof_field(_s, _f)))
+
+#define VMSTATE_BUFFER(_f, _s) \
+ VMSTATE_BUFFER_V(_f, _s, 0)
+
+#define VMSTATE_PARTIAL_BUFFER(_f, _s, _size) \
+ VMSTATE_STATIC_BUFFER(_f, _s, 0, NULL, 0, _size)
+
+#define VMSTATE_BUFFER_START_MIDDLE(_f, _s, _start) \
+ VMSTATE_STATIC_BUFFER(_f, _s, 0, NULL, _start, sizeof(typeof_field(_s, _f)))
+
+#define VMSTATE_PARTIAL_VBUFFER(_f, _s, _size) \
+ VMSTATE_VBUFFER(_f, _s, 0, NULL, 0, _size)
+
+#define VMSTATE_PARTIAL_VBUFFER_UINT32(_f, _s, _size) \
+ VMSTATE_VBUFFER_UINT32(_f, _s, 0, NULL, 0, _size)
+
+#define VMSTATE_SUB_VBUFFER(_f, _s, _start, _size) \
+ VMSTATE_VBUFFER(_f, _s, 0, NULL, _start, _size)
+
+#define VMSTATE_BUFFER_TEST(_f, _s, _test) \
+ VMSTATE_STATIC_BUFFER(_f, _s, 0, _test, 0, sizeof(typeof_field(_s, _f)))
+
+#define VMSTATE_BUFFER_UNSAFE(_field, _state, _version, _size) \
+ VMSTATE_BUFFER_UNSAFE_INFO(_field, _state, _version, vmstate_info_buffer, _size)
+
+#define VMSTATE_UNUSED_V(_v, _size) \
+ VMSTATE_UNUSED_BUFFER(NULL, _v, _size)
+
+#define VMSTATE_UNUSED(_size) \
+ VMSTATE_UNUSED_V(0, _size)
+
+#define VMSTATE_UNUSED_TEST(_test, _size) \
+ VMSTATE_UNUSED_BUFFER(_test, 0, _size)
+
+#define VMSTATE_END_OF_LIST() \
+ {}
+
+int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
+ void *opaque, int version_id);
+void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
+ void *opaque);
+int vmstate_register(DeviceState *dev, int instance_id,
+ const VMStateDescription *vmsd, void *base);
+int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
+ const VMStateDescription *vmsd,
+ void *base, int alias_id,
+ int required_for_version);
+void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd,
+ void *opaque);
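Putting the macros above together, a minimal illustrative VMStateDescription for an invented device, registered with vmstate_register(); none of these names come from the patch.

    typedef struct DemoTimerState {
        uint32_t period;
        int64_t expires;
        uint8_t regs[16];
    } DemoTimerState;

    static const VMStateDescription vmstate_demo_timer = {
        .name = "demo-timer",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(period, DemoTimerState),
            VMSTATE_INT64(expires, DemoTimerState),
            VMSTATE_UINT8_ARRAY(regs, DemoTimerState, 16),
            VMSTATE_END_OF_LIST()
        },
    };

    /* Usage: vmstate_register(NULL, -1, &vmstate_demo_timer, &demo_timer); */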
+
+struct MemoryRegion;
+void vmstate_register_ram(struct MemoryRegion *memory, DeviceState *dev);
+void vmstate_unregister_ram(struct MemoryRegion *memory, DeviceState *dev);
+void vmstate_register_ram_global(struct MemoryRegion *memory);
+
+#endif
diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h
new file mode 100644
index 0000000000..87fb49c092
--- /dev/null
+++ b/include/monitor/monitor.h
@@ -0,0 +1,101 @@
+#ifndef MONITOR_H
+#define MONITOR_H
+
+#include "qemu-common.h"
+#include "qapi/qmp/qerror.h"
+#include "qapi/qmp/qdict.h"
+#include "block/block.h"
+#include "monitor/readline.h"
+
+extern Monitor *cur_mon;
+extern Monitor *default_mon;
+
+/* flags for monitor_init */
+#define MONITOR_IS_DEFAULT 0x01
+#define MONITOR_USE_READLINE 0x02
+#define MONITOR_USE_CONTROL 0x04
+#define MONITOR_USE_PRETTY 0x08
+
+/* flags for monitor commands */
+#define MONITOR_CMD_ASYNC 0x0001
+
+/* QMP events */
+typedef enum MonitorEvent {
+ QEVENT_SHUTDOWN,
+ QEVENT_RESET,
+ QEVENT_POWERDOWN,
+ QEVENT_STOP,
+ QEVENT_RESUME,
+ QEVENT_VNC_CONNECTED,
+ QEVENT_VNC_INITIALIZED,
+ QEVENT_VNC_DISCONNECTED,
+ QEVENT_BLOCK_IO_ERROR,
+ QEVENT_RTC_CHANGE,
+ QEVENT_WATCHDOG,
+ QEVENT_SPICE_CONNECTED,
+ QEVENT_SPICE_INITIALIZED,
+ QEVENT_SPICE_DISCONNECTED,
+ QEVENT_BLOCK_JOB_COMPLETED,
+ QEVENT_BLOCK_JOB_CANCELLED,
+ QEVENT_BLOCK_JOB_ERROR,
+ QEVENT_BLOCK_JOB_READY,
+ QEVENT_DEVICE_TRAY_MOVED,
+ QEVENT_SUSPEND,
+ QEVENT_SUSPEND_DISK,
+ QEVENT_WAKEUP,
+ QEVENT_BALLOON_CHANGE,
+ QEVENT_SPICE_MIGRATE_COMPLETED,
+
+ /* Add to 'monitor_event_names' array in monitor.c when
+ * defining new events here */
+
+ QEVENT_MAX,
+} MonitorEvent;
+
+int monitor_cur_is_qmp(void);
+
+void monitor_protocol_event(MonitorEvent event, QObject *data);
+void monitor_init(CharDriverState *chr, int flags);
+
+int monitor_suspend(Monitor *mon);
+void monitor_resume(Monitor *mon);
+
+int monitor_read_bdrv_key_start(Monitor *mon, BlockDriverState *bs,
+ BlockDriverCompletionFunc *completion_cb,
+ void *opaque);
+int monitor_read_block_device_key(Monitor *mon, const char *device,
+ BlockDriverCompletionFunc *completion_cb,
+ void *opaque);
+
+int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp);
+int monitor_handle_fd_param(Monitor *mon, const char *fdname);
+
+void monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
+ GCC_FMT_ATTR(2, 0);
+void monitor_printf(Monitor *mon, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
+void monitor_print_filename(Monitor *mon, const char *filename);
+void monitor_flush(Monitor *mon);
+int monitor_set_cpu(int cpu_index);
+int monitor_get_cpu_index(void);
+
+typedef void (MonitorCompletion)(void *opaque, QObject *ret_data);
+
+void monitor_set_error(Monitor *mon, QError *qerror);
+void monitor_read_command(Monitor *mon, int show_prompt);
+ReadLineState *monitor_get_rs(Monitor *mon);
+int monitor_read_password(Monitor *mon, ReadLineFunc *readline_func,
+ void *opaque);
+
+int qmp_qom_set(Monitor *mon, const QDict *qdict, QObject **ret);
+
+int qmp_qom_get(Monitor *mon, const QDict *qdict, QObject **ret);
+
+AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
+ bool has_opaque, const char *opaque,
+ Error **errp);
+int monitor_fdset_get_fd(int64_t fdset_id, int flags);
+int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd);
+int monitor_fdset_dup_fd_remove(int dup_fd);
+int monitor_fdset_dup_fd_find(int dup_fd);
+
+#endif /* !MONITOR_H */
diff --git a/include/monitor/readline.h b/include/monitor/readline.h
new file mode 100644
index 0000000000..fc9806ecf1
--- /dev/null
+++ b/include/monitor/readline.h
@@ -0,0 +1,55 @@
+#ifndef READLINE_H
+#define READLINE_H
+
+#include "qemu-common.h"
+
+#define READLINE_CMD_BUF_SIZE 4095
+#define READLINE_MAX_CMDS 64
+#define READLINE_MAX_COMPLETIONS 256
+
+typedef void ReadLineFunc(Monitor *mon, const char *str, void *opaque);
+typedef void ReadLineCompletionFunc(const char *cmdline);
+
+typedef struct ReadLineState {
+ char cmd_buf[READLINE_CMD_BUF_SIZE + 1];
+ int cmd_buf_index;
+ int cmd_buf_size;
+
+ char last_cmd_buf[READLINE_CMD_BUF_SIZE + 1];
+ int last_cmd_buf_index;
+ int last_cmd_buf_size;
+
+ int esc_state;
+ int esc_param;
+
+ char *history[READLINE_MAX_CMDS];
+ int hist_entry;
+
+ ReadLineCompletionFunc *completion_finder;
+ char *completions[READLINE_MAX_COMPLETIONS];
+ int nb_completions;
+ int completion_index;
+
+ ReadLineFunc *readline_func;
+ void *readline_opaque;
+ int read_password;
+ char prompt[256];
+ Monitor *mon;
+} ReadLineState;
+
+void readline_add_completion(ReadLineState *rs, const char *str);
+void readline_set_completion_index(ReadLineState *rs, int completion_index);
+
+const char *readline_get_history(ReadLineState *rs, unsigned int index);
+
+void readline_handle_byte(ReadLineState *rs, int ch);
+
+void readline_start(ReadLineState *rs, const char *prompt, int read_password,
+ ReadLineFunc *readline_func, void *opaque);
+void readline_restart(ReadLineState *rs);
+void readline_show_prompt(ReadLineState *rs);
+
+ReadLineState *readline_init(Monitor *mon,
+ ReadLineCompletionFunc *completion_finder);
+
+#endif /* !READLINE_H */
diff --git a/include/net/checksum.h b/include/net/checksum.h
new file mode 100644
index 0000000000..1f052986e6
--- /dev/null
+++ b/include/net/checksum.h
@@ -0,0 +1,29 @@
+/*
+ * IP checksumming functions.
+ * (c) 2008 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_NET_CHECKSUM_H
+#define QEMU_NET_CHECKSUM_H
+
+#include <stdint.h>
+
+uint32_t net_checksum_add(int len, uint8_t *buf);
+uint16_t net_checksum_finish(uint32_t sum);
+uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto,
+ uint8_t *addrs, uint8_t *buf);
+void net_checksum_calculate(uint8_t *data, int length);
+
+#endif /* QEMU_NET_CHECKSUM_H */
diff --git a/include/net/net.h b/include/net/net.h
new file mode 100644
index 0000000000..de42dd76da
--- /dev/null
+++ b/include/net/net.h
@@ -0,0 +1,175 @@
+#ifndef QEMU_NET_H
+#define QEMU_NET_H
+
+#include "qemu/queue.h"
+#include "qemu-common.h"
+#include "qapi/qmp/qdict.h"
+#include "qemu/option.h"
+#include "net/queue.h"
+#include "migration/vmstate.h"
+#include "qapi-types.h"
+
+struct MACAddr {
+ uint8_t a[6];
+};
+
+/* qdev nic properties */
+
+typedef struct NICConf {
+ MACAddr macaddr;
+ NetClientState *peer;
+ int32_t bootindex;
+} NICConf;
+
+#define DEFINE_NIC_PROPERTIES(_state, _conf) \
+ DEFINE_PROP_MACADDR("mac", _state, _conf.macaddr), \
+ DEFINE_PROP_VLAN("vlan", _state, _conf.peer), \
+ DEFINE_PROP_NETDEV("netdev", _state, _conf.peer), \
+ DEFINE_PROP_INT32("bootindex", _state, _conf.bootindex, -1)
+
+/* Net clients */
+
+typedef void (NetPoll)(NetClientState *, bool enable);
+typedef int (NetCanReceive)(NetClientState *);
+typedef ssize_t (NetReceive)(NetClientState *, const uint8_t *, size_t);
+typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
+typedef void (NetCleanup) (NetClientState *);
+typedef void (LinkStatusChanged)(NetClientState *);
+
+typedef struct NetClientInfo {
+ NetClientOptionsKind type;
+ size_t size;
+ NetReceive *receive;
+ NetReceive *receive_raw;
+ NetReceiveIOV *receive_iov;
+ NetCanReceive *can_receive;
+ NetCleanup *cleanup;
+ LinkStatusChanged *link_status_changed;
+ NetPoll *poll;
+} NetClientInfo;
+
+struct NetClientState {
+ NetClientInfo *info;
+ int link_down;
+ QTAILQ_ENTRY(NetClientState) next;
+ NetClientState *peer;
+ NetQueue *send_queue;
+ char *model;
+ char *name;
+ char info_str[256];
+ unsigned receive_disabled : 1;
+};
+
+typedef struct NICState {
+ NetClientState nc;
+ NICConf *conf;
+ void *opaque;
+ bool peer_deleted;
+} NICState;
+
+NetClientState *qemu_find_netdev(const char *id);
+NetClientState *qemu_new_net_client(NetClientInfo *info,
+ NetClientState *peer,
+ const char *model,
+ const char *name);
+NICState *qemu_new_nic(NetClientInfo *info,
+ NICConf *conf,
+ const char *model,
+ const char *name,
+ void *opaque);
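A rough sketch of how a NIC model registers itself through qemu_new_nic(); the callback and device names are invented, and NET_CLIENT_OPTIONS_KIND_NIC is assumed to be the QAPI-generated client kind for NICs.

    static ssize_t demo_nic_receive(NetClientState *nc, const uint8_t *buf,
                                    size_t size)
    {
        /* A real model would copy the frame into its RX buffers. */
        return size;            /* frame consumed */
    }

    static NetClientInfo net_demo_nic_info = {
        .type = NET_CLIENT_OPTIONS_KIND_NIC,
        .size = sizeof(NICState),
        .receive = demo_nic_receive,
    };

    /* Usage (names hypothetical):
     *     NICState *nic = qemu_new_nic(&net_demo_nic_info, &s->conf,
     *                                  "demo-nic", dev->id, s);
     */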
+void qemu_del_net_client(NetClientState *nc);
+NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
+ const char *client_str);
+typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
+void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
+int qemu_can_send_packet(NetClientState *nc);
+ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
+ int iovcnt);
+ssize_t qemu_sendv_packet_async(NetClientState *nc, const struct iovec *iov,
+ int iovcnt, NetPacketSent *sent_cb);
+void qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
+ int size, NetPacketSent *sent_cb);
+void qemu_purge_queued_packets(NetClientState *nc);
+void qemu_flush_queued_packets(NetClientState *nc);
+void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]);
+void qemu_macaddr_default_if_unset(MACAddr *macaddr);
+int qemu_show_nic_models(const char *arg, const char *const *models);
+void qemu_check_nic_model(NICInfo *nd, const char *model);
+int qemu_find_nic_model(NICInfo *nd, const char * const *models,
+ const char *default_model);
+
+ssize_t qemu_deliver_packet(NetClientState *sender,
+ unsigned flags,
+ const uint8_t *data,
+ size_t size,
+ void *opaque);
+ssize_t qemu_deliver_packet_iov(NetClientState *sender,
+ unsigned flags,
+ const struct iovec *iov,
+ int iovcnt,
+ void *opaque);
+
+void print_net_client(Monitor *mon, NetClientState *nc);
+void do_info_network(Monitor *mon);
+
+/* NIC info */
+
+#define MAX_NICS 8
+
+struct NICInfo {
+ MACAddr macaddr;
+ char *model;
+ char *name;
+ char *devaddr;
+ NetClientState *netdev;
+ int used; /* is this slot in nd_table[] being used? */
+ int instantiated; /* does this NICInfo correspond to an instantiated NIC? */
+ int nvectors;
+};
+
+extern int nb_nics;
+extern NICInfo nd_table[MAX_NICS];
+extern int default_net;
+
+/* from net.c */
+extern const char *legacy_tftp_prefix;
+extern const char *legacy_bootp_filename;
+
+int net_client_init(QemuOpts *opts, int is_netdev, Error **errp);
+int net_client_parse(QemuOptsList *opts_list, const char *str);
+int net_init_clients(void);
+void net_check_clients(void);
+void net_cleanup(void);
+void net_host_device_add(Monitor *mon, const QDict *qdict);
+void net_host_device_remove(Monitor *mon, const QDict *qdict);
+void netdev_add(QemuOpts *opts, Error **errp);
+int qmp_netdev_add(Monitor *mon, const QDict *qdict, QObject **ret);
+
+int net_hub_id_for_client(NetClientState *nc, int *id);
+NetClientState *net_hub_port_find(int hub_id);
+
+#define DEFAULT_NETWORK_SCRIPT "/etc/qemu-ifup"
+#define DEFAULT_NETWORK_DOWN_SCRIPT "/etc/qemu-ifdown"
+#define DEFAULT_BRIDGE_HELPER CONFIG_QEMU_HELPERDIR "/qemu-bridge-helper"
+#define DEFAULT_BRIDGE_INTERFACE "br0"
+
+void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd);
+
+#define POLYNOMIAL 0x04c11db6
+unsigned compute_mcast_idx(const uint8_t *ep);
+
+#define vmstate_offset_macaddr(_state, _field) \
+ vmstate_offset_array(_state, _field.a, uint8_t, \
+ sizeof(typeof_field(_state, _field)))
+
+#define VMSTATE_MACADDR(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(MACAddr), \
+ .info = &vmstate_info_buffer, \
+ .flags = VMS_BUFFER, \
+ .offset = vmstate_offset_macaddr(_state, _field), \
+}
+
+#endif
diff --git a/include/net/queue.h b/include/net/queue.h
new file mode 100644
index 0000000000..fc02b33915
--- /dev/null
+++ b/include/net/queue.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2009 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_NET_QUEUE_H
+#define QEMU_NET_QUEUE_H
+
+#include "qemu-common.h"
+
+typedef struct NetPacket NetPacket;
+typedef struct NetQueue NetQueue;
+
+typedef void (NetPacketSent) (NetClientState *sender, ssize_t ret);
+
+#define QEMU_NET_PACKET_FLAG_NONE 0
+#define QEMU_NET_PACKET_FLAG_RAW (1<<0)
+
+NetQueue *qemu_new_net_queue(void *opaque);
+
+void qemu_del_net_queue(NetQueue *queue);
+
+ssize_t qemu_net_queue_send(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+ const uint8_t *data,
+ size_t size,
+ NetPacketSent *sent_cb);
+
+ssize_t qemu_net_queue_send_iov(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+ const struct iovec *iov,
+ int iovcnt,
+ NetPacketSent *sent_cb);
+
+void qemu_net_queue_purge(NetQueue *queue, NetClientState *from);
+bool qemu_net_queue_flush(NetQueue *queue);
+
+#endif /* QEMU_NET_QUEUE_H */
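
Editor's note: a usage sketch based only on the declarations above; the return-value convention (0 meaning "queued, the callback fires once the packet is flushed") mirrors qemu_send_packet_async() and is an assumption here:

    static void packet_sent(NetClientState *sender, ssize_t ret)
    {
        /* runs once a previously queued packet has been delivered (or purged) */
    }

    static void send_example(NetQueue *queue, NetClientState *sender,
                             const uint8_t *buf, size_t len)
    {
        ssize_t ret = qemu_net_queue_send(queue, sender,
                                          QEMU_NET_PACKET_FLAG_NONE,
                                          buf, len, packet_sent);
        if (ret == 0) {
            /* receiver was busy: the packet waits in the queue until it is flushed */
        }
    }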
diff --git a/include/net/slirp.h b/include/net/slirp.h
new file mode 100644
index 0000000000..54b655c272
--- /dev/null
+++ b/include/net/slirp.h
@@ -0,0 +1,47 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef QEMU_NET_SLIRP_H
+#define QEMU_NET_SLIRP_H
+
+#include "qemu-common.h"
+#include "qapi/qmp/qdict.h"
+#include "qemu/option.h"
+#include "qapi-types.h"
+
+#ifdef CONFIG_SLIRP
+
+void net_slirp_hostfwd_add(Monitor *mon, const QDict *qdict);
+void net_slirp_hostfwd_remove(Monitor *mon, const QDict *qdict);
+
+int net_slirp_redir(const char *redir_str);
+
+int net_slirp_parse_legacy(QemuOptsList *opts_list, const char *optarg, int *ret);
+
+int net_slirp_smb(const char *exported_dir);
+
+void do_info_usernet(Monitor *mon);
+
+#endif
+
+#endif /* QEMU_NET_SLIRP_H */
diff --git a/include/net/tap.h b/include/net/tap.h
new file mode 100644
index 0000000000..bb7efb5439
--- /dev/null
+++ b/include/net/tap.h
@@ -0,0 +1,67 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2009 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_NET_TAP_H
+#define QEMU_NET_TAP_H
+
+#include "qemu-common.h"
+#include "qapi-types.h"
+
+int tap_has_ufo(NetClientState *nc);
+int tap_has_vnet_hdr(NetClientState *nc);
+int tap_has_vnet_hdr_len(NetClientState *nc, int len);
+void tap_using_vnet_hdr(NetClientState *nc, int using_vnet_hdr);
+void tap_set_offload(NetClientState *nc, int csum, int tso4, int tso6, int ecn, int ufo);
+void tap_set_vnet_hdr_len(NetClientState *nc, int len);
+
+int tap_get_fd(NetClientState *nc);
+
+struct vhost_net;
+struct vhost_net *tap_get_vhost_net(NetClientState *nc);
+
+struct virtio_net_hdr
+{
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
+#define VIRTIO_NET_HDR_F_DATA_VALID 2 // Csum is valid
+ uint8_t flags;
+#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame
+#define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO)
+#define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO)
+#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set
+ uint8_t gso_type;
+ uint16_t hdr_len;
+ uint16_t gso_size;
+ uint16_t csum_start;
+ uint16_t csum_offset;
+};
+
+struct virtio_net_hdr_mrg_rxbuf
+{
+ struct virtio_net_hdr hdr;
+ uint16_t num_buffers; /* Number of merged rx buffers */
+};
+
+#endif /* QEMU_NET_TAP_H */
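
Editor's note: for illustration only, a header describing a plain frame whose TCP checksum should be completed by the receiver; the offsets are illustrative values for an Ethernet/IPv4/TCP frame, not anything mandated by this header:

    static void fill_hdr_example(struct virtio_net_hdr *hdr)
    {
        *hdr = (struct virtio_net_hdr){ 0 };
        hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;  /* csum_start/csum_offset are meaningful */
        hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;   /* no segmentation offload for this frame */
        hdr->csum_start = 34;                      /* Ethernet (14) + IPv4 (20) header bytes */
        hdr->csum_offset = 16;                     /* TCP checksum field within the TCP header */
    }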
diff --git a/include/qapi/dealloc-visitor.h b/include/qapi/dealloc-visitor.h
new file mode 100644
index 0000000000..cf4c36d2d3
--- /dev/null
+++ b/include/qapi/dealloc-visitor.h
@@ -0,0 +1,26 @@
+/*
+ * Dealloc Visitor
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Michael Roth <mdroth@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QAPI_DEALLOC_VISITOR_H
+#define QAPI_DEALLOC_VISITOR_H
+
+#include "qapi/visitor.h"
+
+typedef struct QapiDeallocVisitor QapiDeallocVisitor;
+
+QapiDeallocVisitor *qapi_dealloc_visitor_new(void);
+void qapi_dealloc_visitor_cleanup(QapiDeallocVisitor *d);
+
+Visitor *qapi_dealloc_get_visitor(QapiDeallocVisitor *v);
+
+#endif
diff --git a/include/qapi/error.h b/include/qapi/error.h
new file mode 100644
index 0000000000..5cd2f0c302
--- /dev/null
+++ b/include/qapi/error.h
@@ -0,0 +1,80 @@
+/*
+ * QEMU Error Objects
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2. See
+ * the COPYING.LIB file in the top-level directory.
+ */
+#ifndef ERROR_H
+#define ERROR_H
+
+#include "qemu/compiler.h"
+#include "qapi-types.h"
+#include <stdbool.h>
+
+/**
+ * A class representing internal errors within QEMU. An error has an ErrorClass
+ * code and a human-readable message.
+ */
+typedef struct Error Error;
+
+/**
+ * Set an indirect pointer to an error given an ErrorClass value and a
+ * printf-style human message. This function is not meant to be used outside
+ * of QEMU.
+ */
+void error_set(Error **err, ErrorClass err_class, const char *fmt, ...) GCC_FMT_ATTR(3, 4);
+
+/**
+ * Set an indirect pointer to an error given an ErrorClass value and a
+ * printf-style human message, followed by a strerror() string if
+ * @os_error is not zero.
+ */
+void error_set_errno(Error **err, int os_error, ErrorClass err_class, const char *fmt, ...) GCC_FMT_ATTR(4, 5);
+
+/**
+ * Same as error_set(), but sets a generic error
+ */
+#define error_setg(err, fmt, ...) \
+ error_set(err, ERROR_CLASS_GENERIC_ERROR, fmt, ## __VA_ARGS__)
+#define error_setg_errno(err, os_error, fmt, ...) \
+ error_set_errno(err, os_error, ERROR_CLASS_GENERIC_ERROR, fmt, ## __VA_ARGS__)
+
+/**
+ * Returns true if an indirect pointer to an error is pointing to a valid
+ * error object.
+ */
+bool error_is_set(Error **err);
+
+/*
+ * Get the error class of an error object.
+ */
+ErrorClass error_get_class(const Error *err);
+
+/**
+ * Returns an exact copy of the error passed as an argument.
+ */
+Error *error_copy(const Error *err);
+
+/**
+ * Get a human readable representation of an error object.
+ */
+const char *error_get_pretty(Error *err);
+
+/**
+ * Propagate an error to an indirect pointer to an error. This function will
+ * always transfer ownership of the error reference and handles the case where
+ * dst_err is NULL correctly. Errors after the first are discarded.
+ */
+void error_propagate(Error **dst_err, Error *local_err);
+
+/**
+ * Free an error object.
+ */
+void error_free(Error *err);
+
+#endif
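
Editor's note: a minimal sketch of the calling convention implied by the declarations above; open_image() and start_device() are hypothetical helpers used only for illustration:

    static int open_image(const char *path, Error **errp)
    {
        if (path == NULL) {
            error_setg(errp, "no image path given");   /* ERROR_CLASS_GENERIC_ERROR */
            return -1;
        }
        return 0;
    }

    static void start_device(const char *path, Error **errp)
    {
        Error *local_err = NULL;

        open_image(path, &local_err);
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);   /* ownership moves to the caller's errp */
            return;
        }
        /* ... continue with a valid image ... */
    }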
diff --git a/include/qapi/opts-visitor.h b/include/qapi/opts-visitor.h
new file mode 100644
index 0000000000..5939eeebc7
--- /dev/null
+++ b/include/qapi/opts-visitor.h
@@ -0,0 +1,31 @@
+/*
+ * Options Visitor
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Author: Laszlo Ersek <lersek@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef OPTS_VISITOR_H
+#define OPTS_VISITOR_H
+
+#include "qapi/visitor.h"
+#include "qemu/option.h"
+
+typedef struct OptsVisitor OptsVisitor;
+
+/* Unlike qemu-option.c::parse_option_number(), OptsVisitor's "int"
+ * parser relies on strtoll() instead of strtoull(). Consequences:
+ * - string representations of negative numbers yield negative values,
+ * - values below INT64_MIN or LLONG_MIN are rejected,
+ * - values above INT64_MAX or LLONG_MAX are rejected.
+ */
+OptsVisitor *opts_visitor_new(const QemuOpts *opts);
+void opts_visitor_cleanup(OptsVisitor *nv);
+Visitor *opts_get_visitor(OptsVisitor *nv);
+
+#endif
diff --git a/include/qapi/qmp-input-visitor.h b/include/qapi/qmp-input-visitor.h
new file mode 100644
index 0000000000..3ed499cc42
--- /dev/null
+++ b/include/qapi/qmp-input-visitor.h
@@ -0,0 +1,29 @@
+/*
+ * Input Visitor
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QMP_INPUT_VISITOR_H
+#define QMP_INPUT_VISITOR_H
+
+#include "qapi/visitor.h"
+#include "qapi/qmp/qobject.h"
+
+typedef struct QmpInputVisitor QmpInputVisitor;
+
+QmpInputVisitor *qmp_input_visitor_new(QObject *obj);
+QmpInputVisitor *qmp_input_visitor_new_strict(QObject *obj);
+
+void qmp_input_visitor_cleanup(QmpInputVisitor *v);
+
+Visitor *qmp_input_get_visitor(QmpInputVisitor *v);
+
+#endif
diff --git a/include/qapi/qmp-output-visitor.h b/include/qapi/qmp-output-visitor.h
new file mode 100644
index 0000000000..22667706ab
--- /dev/null
+++ b/include/qapi/qmp-output-visitor.h
@@ -0,0 +1,28 @@
+/*
+ * Output Visitor
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QMP_OUTPUT_VISITOR_H
+#define QMP_OUTPUT_VISITOR_H
+
+#include "qapi/visitor.h"
+#include "qapi/qmp/qobject.h"
+
+typedef struct QmpOutputVisitor QmpOutputVisitor;
+
+QmpOutputVisitor *qmp_output_visitor_new(void);
+void qmp_output_visitor_cleanup(QmpOutputVisitor *v);
+
+QObject *qmp_output_get_qobject(QmpOutputVisitor *v);
+Visitor *qmp_output_get_visitor(QmpOutputVisitor *v);
+
+#endif
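
Editor's note: a sketch of turning a plain C value into a QObject with this visitor. visit_type_int() is declared in qapi/visitor.h below; passing NULL for the name and errp of a top-level value, and the caller owning the returned reference, are assumptions here:

    static QObject *int_to_qobject(int64_t val)
    {
        QmpOutputVisitor *qov = qmp_output_visitor_new();
        QObject *obj;

        visit_type_int(qmp_output_get_visitor(qov), &val, NULL, NULL);
        obj = qmp_output_get_qobject(qov);      /* caller owns the returned reference */
        qmp_output_visitor_cleanup(qov);
        return obj;
    }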
diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h
new file mode 100644
index 0000000000..1ce11f5df0
--- /dev/null
+++ b/include/qapi/qmp/dispatch.h
@@ -0,0 +1,55 @@
+/*
+ * Core Definitions for QAPI/QMP Dispatch
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QMP_CORE_H
+#define QMP_CORE_H
+
+#include "qapi/qmp/qobject.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/error.h"
+
+typedef void (QmpCommandFunc)(QDict *, QObject **, Error **);
+
+typedef enum QmpCommandType
+{
+ QCT_NORMAL,
+} QmpCommandType;
+
+typedef enum QmpCommandOptions
+{
+ QCO_NO_OPTIONS = 0x0,
+ QCO_NO_SUCCESS_RESP = 0x1,
+} QmpCommandOptions;
+
+typedef struct QmpCommand
+{
+ const char *name;
+ QmpCommandType type;
+ QmpCommandFunc *fn;
+ QmpCommandOptions options;
+ QTAILQ_ENTRY(QmpCommand) node;
+ bool enabled;
+} QmpCommand;
+
+void qmp_register_command(const char *name, QmpCommandFunc *fn,
+ QmpCommandOptions options);
+QmpCommand *qmp_find_command(const char *name);
+QObject *qmp_dispatch(QObject *request);
+void qmp_disable_command(const char *name);
+void qmp_enable_command(const char *name);
+bool qmp_command_is_enabled(const char *name);
+char **qmp_get_command_list(void);
+QObject *qmp_build_error_object(Error *errp);
+
+#endif
+
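Editor's note: a sketch of registering a handler with the dispatch table above; the command name and the empty-dictionary success response are purely illustrative:

    static void qmp_do_ping(QDict *args, QObject **ret, Error **errp)
    {
        *ret = QOBJECT(qdict_new());            /* empty object as the success response */
    }

    static void register_example_command(void)
    {
        qmp_register_command("do-ping", qmp_do_ping, QCO_NO_OPTIONS);
    }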
diff --git a/include/qapi/qmp/json-lexer.h b/include/qapi/qmp/json-lexer.h
new file mode 100644
index 0000000000..cdff0460a8
--- /dev/null
+++ b/include/qapi/qmp/json-lexer.h
@@ -0,0 +1,51 @@
+/*
+ * JSON lexer
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_JSON_LEXER_H
+#define QEMU_JSON_LEXER_H
+
+#include "qapi/qmp/qstring.h"
+#include "qapi/qmp/qlist.h"
+
+typedef enum json_token_type {
+ JSON_OPERATOR = 100,
+ JSON_INTEGER,
+ JSON_FLOAT,
+ JSON_KEYWORD,
+ JSON_STRING,
+ JSON_ESCAPE,
+ JSON_SKIP,
+ JSON_ERROR,
+} JSONTokenType;
+
+typedef struct JSONLexer JSONLexer;
+
+typedef void (JSONLexerEmitter)(JSONLexer *, QString *, JSONTokenType, int x, int y);
+
+struct JSONLexer
+{
+ JSONLexerEmitter *emit;
+ int state;
+ QString *token;
+ int x, y;
+};
+
+void json_lexer_init(JSONLexer *lexer, JSONLexerEmitter func);
+
+int json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size);
+
+int json_lexer_flush(JSONLexer *lexer);
+
+void json_lexer_destroy(JSONLexer *lexer);
+
+#endif
diff --git a/include/qapi/qmp/json-parser.h b/include/qapi/qmp/json-parser.h
new file mode 100644
index 0000000000..44d88f3468
--- /dev/null
+++ b/include/qapi/qmp/json-parser.h
@@ -0,0 +1,24 @@
+/*
+ * JSON Parser
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_JSON_PARSER_H
+#define QEMU_JSON_PARSER_H
+
+#include "qemu-common.h"
+#include "qapi/qmp/qlist.h"
+#include "qapi/error.h"
+
+QObject *json_parser_parse(QList *tokens, va_list *ap);
+QObject *json_parser_parse_err(QList *tokens, va_list *ap, Error **errp);
+
+#endif
diff --git a/include/qapi/qmp/json-streamer.h b/include/qapi/qmp/json-streamer.h
new file mode 100644
index 0000000000..823f7d7fa4
--- /dev/null
+++ b/include/qapi/qmp/json-streamer.h
@@ -0,0 +1,40 @@
+/*
+ * JSON streaming support
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_JSON_STREAMER_H
+#define QEMU_JSON_STREAMER_H
+
+#include "qapi/qmp/qlist.h"
+#include "qapi/qmp/json-lexer.h"
+
+typedef struct JSONMessageParser
+{
+ void (*emit)(struct JSONMessageParser *parser, QList *tokens);
+ JSONLexer lexer;
+ int brace_count;
+ int bracket_count;
+ QList *tokens;
+ uint64_t token_size;
+} JSONMessageParser;
+
+void json_message_parser_init(JSONMessageParser *parser,
+ void (*func)(JSONMessageParser *, QList *));
+
+int json_message_parser_feed(JSONMessageParser *parser,
+ const char *buffer, size_t size);
+
+int json_message_parser_flush(JSONMessageParser *parser);
+
+void json_message_parser_destroy(JSONMessageParser *parser);
+
+#endif
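
Editor's note: a sketch of wiring the streamer to the parser declared in json-parser.h above; error handling is omitted and handle_json()/feed_example() are illustrative names:

    static void handle_json(JSONMessageParser *parser, QList *tokens)
    {
        QObject *obj = json_parser_parse(tokens, NULL);  /* NULL: no interpolated arguments */

        if (obj) {
            /* ... act on the complete JSON value ... */
            qobject_decref(obj);
        }
    }

    static void feed_example(const char *buf, size_t len)
    {
        JSONMessageParser parser;

        json_message_parser_init(&parser, handle_json);
        json_message_parser_feed(&parser, buf, len);
        json_message_parser_flush(&parser);
        json_message_parser_destroy(&parser);
    }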
diff --git a/include/qapi/qmp/qbool.h b/include/qapi/qmp/qbool.h
new file mode 100644
index 0000000000..c4eaab9bb9
--- /dev/null
+++ b/include/qapi/qmp/qbool.h
@@ -0,0 +1,29 @@
+/*
+ * QBool Module
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QBOOL_H
+#define QBOOL_H
+
+#include <stdint.h>
+#include "qapi/qmp/qobject.h"
+
+typedef struct QBool {
+ QObject_HEAD;
+ int value;
+} QBool;
+
+QBool *qbool_from_int(int value);
+int qbool_get_int(const QBool *qb);
+QBool *qobject_to_qbool(const QObject *obj);
+
+#endif /* QBOOL_H */
diff --git a/include/qapi/qmp/qdict.h b/include/qapi/qmp/qdict.h
new file mode 100644
index 0000000000..6d9a4be3a5
--- /dev/null
+++ b/include/qapi/qmp/qdict.h
@@ -0,0 +1,67 @@
+/*
+ * QDict Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QDICT_H
+#define QDICT_H
+
+#include "qapi/qmp/qobject.h"
+#include "qapi/qmp/qlist.h"
+#include "qemu/queue.h"
+#include <stdint.h>
+
+#define QDICT_BUCKET_MAX 512
+
+typedef struct QDictEntry {
+ char *key;
+ QObject *value;
+ QLIST_ENTRY(QDictEntry) next;
+} QDictEntry;
+
+typedef struct QDict {
+ QObject_HEAD;
+ size_t size;
+ QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX];
+} QDict;
+
+/* Object API */
+QDict *qdict_new(void);
+const char *qdict_entry_key(const QDictEntry *entry);
+QObject *qdict_entry_value(const QDictEntry *entry);
+size_t qdict_size(const QDict *qdict);
+void qdict_put_obj(QDict *qdict, const char *key, QObject *value);
+void qdict_del(QDict *qdict, const char *key);
+int qdict_haskey(const QDict *qdict, const char *key);
+QObject *qdict_get(const QDict *qdict, const char *key);
+QDict *qobject_to_qdict(const QObject *obj);
+void qdict_iter(const QDict *qdict,
+ void (*iter)(const char *key, QObject *obj, void *opaque),
+ void *opaque);
+const QDictEntry *qdict_first(const QDict *qdict);
+const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry);
+
+/* Helper to qdict_put_obj(), accepts any object */
+#define qdict_put(qdict, key, obj) \
+ qdict_put_obj(qdict, key, QOBJECT(obj))
+
+/* High level helpers */
+double qdict_get_double(const QDict *qdict, const char *key);
+int64_t qdict_get_int(const QDict *qdict, const char *key);
+int qdict_get_bool(const QDict *qdict, const char *key);
+QList *qdict_get_qlist(const QDict *qdict, const char *key);
+QDict *qdict_get_qdict(const QDict *qdict, const char *key);
+const char *qdict_get_str(const QDict *qdict, const char *key);
+int64_t qdict_get_try_int(const QDict *qdict, const char *key,
+ int64_t def_value);
+int qdict_get_try_bool(const QDict *qdict, const char *key, int def_value);
+const char *qdict_get_try_str(const QDict *qdict, const char *key);
+
+#endif /* QDICT_H */
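
Editor's note: a usage sketch combining this header with qint.h and qstring.h from the same series; it assumes, as the qdict_put() helper suggests, that the dictionary takes over the reference to the stored value:

    static void qdict_example(void)
    {
        QDict *dict = qdict_new();

        qdict_put(dict, "driver", qstring_from_str("qcow2"));
        qdict_put(dict, "size", qint_from_int(1024));

        if (qdict_haskey(dict, "size")) {
            int64_t size = qdict_get_int(dict, "size");
            (void)size;
        }

        QDECREF(dict);                  /* also releases the stored values */
    }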
diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h
new file mode 100644
index 0000000000..6c0a18dfc4
--- /dev/null
+++ b/include/qapi/qmp/qerror.h
@@ -0,0 +1,252 @@
+/*
+ * QError Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+#ifndef QERROR_H
+#define QERROR_H
+
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qstring.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qapi-types.h"
+#include <stdarg.h>
+
+typedef struct QError {
+ QObject_HEAD;
+ Location loc;
+ char *err_msg;
+ ErrorClass err_class;
+} QError;
+
+QString *qerror_human(const QError *qerror);
+void qerror_report(ErrorClass err_class, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
+void qerror_report_err(Error *err);
+void assert_no_error(Error *err);
+
+/*
+ * QError class list
+ * Please keep the definitions in alphabetical order.
+ * Use scripts/check-qerror.sh to check.
+ */
+#define QERR_ADD_CLIENT_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Could not add client"
+
+#define QERR_AMBIGUOUS_PATH \
+ ERROR_CLASS_GENERIC_ERROR, "Path '%s' does not uniquely identify an object"
+
+#define QERR_BAD_BUS_FOR_DEVICE \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' can't go on a %s bus"
+
+#define QERR_BASE_NOT_FOUND \
+ ERROR_CLASS_GENERIC_ERROR, "Base '%s' not found"
+
+#define QERR_BLOCK_JOB_NOT_ACTIVE \
+ ERROR_CLASS_DEVICE_NOT_ACTIVE, "No active block job on device '%s'"
+
+#define QERR_BLOCK_JOB_PAUSED \
+ ERROR_CLASS_GENERIC_ERROR, "The block job for device '%s' is currently paused"
+
+#define QERR_BLOCK_JOB_NOT_READY \
+ ERROR_CLASS_GENERIC_ERROR, "The active block job for device '%s' cannot be completed"
+
+#define QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED \
+ ERROR_CLASS_GENERIC_ERROR, "Block format '%s' used by device '%s' does not support feature '%s'"
+
+#define QERR_BUFFER_OVERRUN \
+ ERROR_CLASS_GENERIC_ERROR, "An internal buffer overran"
+
+#define QERR_BUS_NO_HOTPLUG \
+ ERROR_CLASS_GENERIC_ERROR, "Bus '%s' does not support hotplugging"
+
+#define QERR_BUS_NOT_FOUND \
+ ERROR_CLASS_GENERIC_ERROR, "Bus '%s' not found"
+
+#define QERR_COMMAND_DISABLED \
+ ERROR_CLASS_GENERIC_ERROR, "The command %s has been disabled for this instance"
+
+#define QERR_COMMAND_NOT_FOUND \
+ ERROR_CLASS_COMMAND_NOT_FOUND, "The command %s has not been found"
+
+#define QERR_DEVICE_ENCRYPTED \
+ ERROR_CLASS_DEVICE_ENCRYPTED, "'%s' (%s) is encrypted"
+
+#define QERR_DEVICE_FEATURE_BLOCKS_MIGRATION \
+ ERROR_CLASS_GENERIC_ERROR, "Migration is disabled when using feature '%s' in device '%s'"
+
+#define QERR_DEVICE_HAS_NO_MEDIUM \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no medium"
+
+#define QERR_DEVICE_INIT_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' could not be initialized"
+
+#define QERR_DEVICE_IN_USE \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' is in use"
+
+#define QERR_DEVICE_IS_READ_ONLY \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' is read only"
+
+#define QERR_DEVICE_LOCKED \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' is locked"
+
+#define QERR_DEVICE_MULTIPLE_BUSSES \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' has multiple child busses"
+
+#define QERR_DEVICE_NO_BUS \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no child bus"
+
+#define QERR_DEVICE_NO_HOTPLUG \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' does not support hotplugging"
+
+#define QERR_DEVICE_NOT_ACTIVE \
+ ERROR_CLASS_DEVICE_NOT_ACTIVE, "Device '%s' has not been activated"
+
+#define QERR_DEVICE_NOT_ENCRYPTED \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' is not encrypted"
+
+#define QERR_DEVICE_NOT_FOUND \
+ ERROR_CLASS_DEVICE_NOT_FOUND, "Device '%s' not found"
+
+#define QERR_DEVICE_NOT_REMOVABLE \
+ ERROR_CLASS_GENERIC_ERROR, "Device '%s' is not removable"
+
+#define QERR_DUPLICATE_ID \
+ ERROR_CLASS_GENERIC_ERROR, "Duplicate ID '%s' for %s"
+
+#define QERR_FD_NOT_FOUND \
+ ERROR_CLASS_GENERIC_ERROR, "File descriptor named '%s' not found"
+
+#define QERR_FD_NOT_SUPPLIED \
+ ERROR_CLASS_GENERIC_ERROR, "No file descriptor supplied via SCM_RIGHTS"
+
+#define QERR_FEATURE_DISABLED \
+ ERROR_CLASS_GENERIC_ERROR, "The feature '%s' is not enabled"
+
+#define QERR_INVALID_BLOCK_FORMAT \
+ ERROR_CLASS_GENERIC_ERROR, "Invalid block format '%s'"
+
+#define QERR_INVALID_OPTION_GROUP \
+ ERROR_CLASS_GENERIC_ERROR, "There is no option group '%s'"
+
+#define QERR_INVALID_PARAMETER \
+ ERROR_CLASS_GENERIC_ERROR, "Invalid parameter '%s'"
+
+#define QERR_INVALID_PARAMETER_COMBINATION \
+ ERROR_CLASS_GENERIC_ERROR, "Invalid parameter combination"
+
+#define QERR_INVALID_PARAMETER_TYPE \
+ ERROR_CLASS_GENERIC_ERROR, "Invalid parameter type for '%s', expected: %s"
+
+#define QERR_INVALID_PARAMETER_VALUE \
+ ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' expects %s"
+
+#define QERR_INVALID_PASSWORD \
+ ERROR_CLASS_GENERIC_ERROR, "Password incorrect"
+
+#define QERR_IO_ERROR \
+ ERROR_CLASS_GENERIC_ERROR, "An IO error has occurred"
+
+#define QERR_JSON_PARSE_ERROR \
+ ERROR_CLASS_GENERIC_ERROR, "JSON parse error, %s"
+
+#define QERR_JSON_PARSING \
+ ERROR_CLASS_GENERIC_ERROR, "Invalid JSON syntax"
+
+#define QERR_KVM_MISSING_CAP \
+ ERROR_CLASS_K_V_M_MISSING_CAP, "Using KVM without %s, %s unavailable"
+
+#define QERR_MIGRATION_ACTIVE \
+ ERROR_CLASS_GENERIC_ERROR, "There's a migration process in progress"
+
+#define QERR_MIGRATION_NOT_SUPPORTED \
+ ERROR_CLASS_GENERIC_ERROR, "State blocked by non-migratable device '%s'"
+
+#define QERR_MISSING_PARAMETER \
+ ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' is missing"
+
+#define QERR_NO_BUS_FOR_DEVICE \
+ ERROR_CLASS_GENERIC_ERROR, "No '%s' bus found for device '%s'"
+
+#define QERR_NOT_SUPPORTED \
+ ERROR_CLASS_GENERIC_ERROR, "Not supported"
+
+#define QERR_OPEN_FILE_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Could not open '%s'"
+
+#define QERR_PERMISSION_DENIED \
+ ERROR_CLASS_GENERIC_ERROR, "Insufficient permission to perform this operation"
+
+#define QERR_PROPERTY_NOT_FOUND \
+ ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' not found"
+
+#define QERR_PROPERTY_VALUE_BAD \
+ ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' doesn't take value '%s'"
+
+#define QERR_PROPERTY_VALUE_IN_USE \
+ ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' can't take value '%s', it's in use"
+
+#define QERR_PROPERTY_VALUE_NOT_FOUND \
+ ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' can't find value '%s'"
+
+#define QERR_PROPERTY_VALUE_NOT_POWER_OF_2 \
+ ERROR_CLASS_GENERIC_ERROR, "Property %s.%s doesn't take value '%" PRId64 "', it's not a power of 2"
+
+#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \
+ ERROR_CLASS_GENERIC_ERROR, "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")"
+
+#define QERR_QGA_COMMAND_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Guest agent command failed, error was '%s'"
+
+#define QERR_QGA_LOGGING_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Guest agent failed to log non-optional log statement"
+
+#define QERR_QMP_BAD_INPUT_OBJECT \
+ ERROR_CLASS_GENERIC_ERROR, "Expected '%s' in QMP input"
+
+#define QERR_QMP_BAD_INPUT_OBJECT_MEMBER \
+ ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' expects '%s'"
+
+#define QERR_QMP_EXTRA_MEMBER \
+ ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' is unexpected"
+
+#define QERR_RESET_REQUIRED \
+ ERROR_CLASS_GENERIC_ERROR, "Resetting the Virtual Machine is required"
+
+#define QERR_SET_PASSWD_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Could not set password"
+
+#define QERR_TOO_MANY_FILES \
+ ERROR_CLASS_GENERIC_ERROR, "Too many open files"
+
+#define QERR_UNDEFINED_ERROR \
+ ERROR_CLASS_GENERIC_ERROR, "An undefined error has occurred"
+
+#define QERR_UNKNOWN_BLOCK_FORMAT_FEATURE \
+ ERROR_CLASS_GENERIC_ERROR, "'%s' uses a %s feature which is not supported by this qemu version: %s"
+
+#define QERR_UNSUPPORTED \
+ ERROR_CLASS_GENERIC_ERROR, "this feature or command is not currently supported"
+
+#define QERR_VIRTFS_FEATURE_BLOCKS_MIGRATION \
+ ERROR_CLASS_GENERIC_ERROR, "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'"
+
+#define QERR_SOCKET_CONNECT_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Failed to connect to socket"
+
+#define QERR_SOCKET_LISTEN_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Failed to set socket to listening mode"
+
+#define QERR_SOCKET_BIND_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Failed to bind socket"
+
+#define QERR_SOCKET_CREATE_FAILED \
+ ERROR_CLASS_GENERIC_ERROR, "Failed to create socket"
+
+#endif /* QERROR_H */
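
Editor's note: each QERR_ macro above expands to an ErrorClass plus a format string, so it slots directly into error_set(); example_lookup() and device_exists() are hypothetical:

    extern bool device_exists(const char *id);   /* hypothetical lookup helper */

    static void example_lookup(const char *id, Error **errp)
    {
        if (!device_exists(id)) {
            /* expands to error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
             *                      "Device '%s' not found", id); */
            error_set(errp, QERR_DEVICE_NOT_FOUND, id);
            return;
        }
        /* ... */
    }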
diff --git a/include/qapi/qmp/qfloat.h b/include/qapi/qmp/qfloat.h
new file mode 100644
index 0000000000..a8658443dc
--- /dev/null
+++ b/include/qapi/qmp/qfloat.h
@@ -0,0 +1,29 @@
+/*
+ * QFloat Module
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QFLOAT_H
+#define QFLOAT_H
+
+#include <stdint.h>
+#include "qapi/qmp/qobject.h"
+
+typedef struct QFloat {
+ QObject_HEAD;
+ double value;
+} QFloat;
+
+QFloat *qfloat_from_double(double value);
+double qfloat_get_double(const QFloat *qi);
+QFloat *qobject_to_qfloat(const QObject *obj);
+
+#endif /* QFLOAT_H */
diff --git a/include/qapi/qmp/qint.h b/include/qapi/qmp/qint.h
new file mode 100644
index 0000000000..48a41b0f2a
--- /dev/null
+++ b/include/qapi/qmp/qint.h
@@ -0,0 +1,28 @@
+/*
+ * QInt Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QINT_H
+#define QINT_H
+
+#include <stdint.h>
+#include "qapi/qmp/qobject.h"
+
+typedef struct QInt {
+ QObject_HEAD;
+ int64_t value;
+} QInt;
+
+QInt *qint_from_int(int64_t value);
+int64_t qint_get_int(const QInt *qi);
+QInt *qobject_to_qint(const QObject *obj);
+
+#endif /* QINT_H */
diff --git a/include/qapi/qmp/qjson.h b/include/qapi/qmp/qjson.h
new file mode 100644
index 0000000000..73351ed6d6
--- /dev/null
+++ b/include/qapi/qmp/qjson.h
@@ -0,0 +1,29 @@
+/*
+ * QObject JSON integration
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef QJSON_H
+#define QJSON_H
+
+#include <stdarg.h>
+#include "qemu/compiler.h"
+#include "qapi/qmp/qobject.h"
+#include "qapi/qmp/qstring.h"
+
+QObject *qobject_from_json(const char *string) GCC_FMT_ATTR(1, 0);
+QObject *qobject_from_jsonf(const char *string, ...) GCC_FMT_ATTR(1, 2);
+QObject *qobject_from_jsonv(const char *string, va_list *ap) GCC_FMT_ATTR(1, 0);
+
+QString *qobject_to_json(const QObject *obj);
+QString *qobject_to_json_pretty(const QObject *obj);
+
+#endif /* QJSON_H */
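
Editor's note: a round-trip sketch, assuming <stdio.h> is available and that %s/%d are among the interpolations the parser understands; build a QObject from a format string, then serialize it back to JSON (qstring_get_str() is declared in qstring.h below):

    static void json_example(void)
    {
        QObject *obj = qobject_from_jsonf("{ 'execute': %s, 'id': %d }",
                                          "query-status", 1);
        QString *str = qobject_to_json(obj);

        printf("%s\n", qstring_get_str(str));

        QDECREF(str);
        qobject_decref(obj);
    }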
diff --git a/include/qapi/qmp/qlist.h b/include/qapi/qmp/qlist.h
new file mode 100644
index 0000000000..382f04c3c4
--- /dev/null
+++ b/include/qapi/qmp/qlist.h
@@ -0,0 +1,64 @@
+/*
+ * QList Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QLIST_H
+#define QLIST_H
+
+#include "qapi/qmp/qobject.h"
+#include "qemu/queue.h"
+#include "qemu/queue.h"
+
+typedef struct QListEntry {
+ QObject *value;
+ QTAILQ_ENTRY(QListEntry) next;
+} QListEntry;
+
+typedef struct QList {
+ QObject_HEAD;
+ QTAILQ_HEAD(,QListEntry) head;
+} QList;
+
+#define qlist_append(qlist, obj) \
+ qlist_append_obj(qlist, QOBJECT(obj))
+
+#define QLIST_FOREACH_ENTRY(qlist, var) \
+ for ((var) = ((qlist)->head.tqh_first); \
+ (var); \
+ (var) = ((var)->next.tqe_next))
+
+static inline QObject *qlist_entry_obj(const QListEntry *entry)
+{
+ return entry->value;
+}
+
+QList *qlist_new(void);
+QList *qlist_copy(QList *src);
+void qlist_append_obj(QList *qlist, QObject *obj);
+void qlist_iter(const QList *qlist,
+ void (*iter)(QObject *obj, void *opaque), void *opaque);
+QObject *qlist_pop(QList *qlist);
+QObject *qlist_peek(QList *qlist);
+int qlist_empty(const QList *qlist);
+size_t qlist_size(const QList *qlist);
+QList *qobject_to_qlist(const QObject *obj);
+
+static inline const QListEntry *qlist_first(const QList *qlist)
+{
+ return QTAILQ_FIRST(&qlist->head);
+}
+
+static inline const QListEntry *qlist_next(const QListEntry *entry)
+{
+ return QTAILQ_NEXT(entry, next);
+}
+
+#endif /* QLIST_H */
diff --git a/include/qapi/qmp/qobject.h b/include/qapi/qmp/qobject.h
new file mode 100644
index 0000000000..9124649ed2
--- /dev/null
+++ b/include/qapi/qmp/qobject.h
@@ -0,0 +1,112 @@
+/*
+ * QEMU Object Model.
+ *
+ * Based on ideas by Avi Kivity <avi@redhat.com>
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ * QObject Reference Counts Terminology
+ * ------------------------------------
+ *
+ * - Returning references: A function that returns an object may
+ * return it as either a weak or a strong reference. If the reference
+ * is strong, you are responsible for calling QDECREF() on the reference
+ * when you are done.
+ *
+ * If the reference is weak, the owner of the reference may free it at
+ * any time in the future. Before storing the reference anywhere, you
+ * should call QINCREF() to make the reference strong.
+ *
+ * - Transferring ownership: when you transfer ownership of a reference
+ * by calling a function, you are no longer responsible for calling
+ * QDECREF() when the reference is no longer needed. In other words,
+ * when the function returns you must behave as if the reference to the
+ * passed object was weak.
+ */
+#ifndef QOBJECT_H
+#define QOBJECT_H
+
+#include <stddef.h>
+#include <assert.h>
+
+typedef enum {
+ QTYPE_NONE,
+ QTYPE_QINT,
+ QTYPE_QSTRING,
+ QTYPE_QDICT,
+ QTYPE_QLIST,
+ QTYPE_QFLOAT,
+ QTYPE_QBOOL,
+ QTYPE_QERROR,
+} qtype_code;
+
+struct QObject;
+
+typedef struct QType {
+ qtype_code code;
+ void (*destroy)(struct QObject *);
+} QType;
+
+typedef struct QObject {
+ const QType *type;
+ size_t refcnt;
+} QObject;
+
+/* Objects definitions must include this */
+#define QObject_HEAD \
+ QObject base
+
+/* Get the 'base' part of an object */
+#define QOBJECT(obj) (&(obj)->base)
+
+/* High-level interface for qobject_incref() */
+#define QINCREF(obj) \
+ qobject_incref(QOBJECT(obj))
+
+/* High-level interface for qobject_decref() */
+#define QDECREF(obj) \
+ qobject_decref(obj ? QOBJECT(obj) : NULL)
+
+/* Initialize an object to default values */
+#define QOBJECT_INIT(obj, qtype_type) \
+ obj->base.refcnt = 1; \
+ obj->base.type = qtype_type
+
+/**
+ * qobject_incref(): Increment QObject's reference count
+ */
+static inline void qobject_incref(QObject *obj)
+{
+ if (obj)
+ obj->refcnt++;
+}
+
+/**
+ * qobject_decref(): Decrement QObject's reference count, deallocate
+ * when it reaches zero
+ */
+static inline void qobject_decref(QObject *obj)
+{
+ if (obj && --obj->refcnt == 0) {
+ assert(obj->type != NULL);
+ assert(obj->type->destroy != NULL);
+ obj->type->destroy(obj);
+ }
+}
+
+/**
+ * qobject_type(): Return the QObject's type
+ */
+static inline qtype_code qobject_type(const QObject *obj)
+{
+ assert(obj->type != NULL);
+ return obj->type->code;
+}
+
+#endif /* QOBJECT_H */
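
Editor's note: the ownership rules above, shown with the QInt type from qint.h earlier in this series:

    static void refcount_example(void)
    {
        QInt *qi = qint_from_int(42);   /* strong reference, refcount == 1 */

        QINCREF(qi);                    /* take an extra reference before storing qi somewhere */
        /* ... the stored copy is used elsewhere ... */
        QDECREF(qi);                    /* drop the stored reference */
        QDECREF(qi);                    /* drop the original reference: refcount hits 0, qi is freed */
    }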
diff --git a/include/qapi/qmp/qstring.h b/include/qapi/qmp/qstring.h
new file mode 100644
index 0000000000..0e690f4849
--- /dev/null
+++ b/include/qapi/qmp/qstring.h
@@ -0,0 +1,35 @@
+/*
+ * QString Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QSTRING_H
+#define QSTRING_H
+
+#include <stdint.h>
+#include "qapi/qmp/qobject.h"
+
+typedef struct QString {
+ QObject_HEAD;
+ char *string;
+ size_t length;
+ size_t capacity;
+} QString;
+
+QString *qstring_new(void);
+QString *qstring_from_str(const char *str);
+QString *qstring_from_substr(const char *str, int start, int end);
+const char *qstring_get_str(const QString *qstring);
+void qstring_append_int(QString *qstring, int64_t value);
+void qstring_append(QString *qstring, const char *str);
+void qstring_append_chr(QString *qstring, int c);
+QString *qobject_to_qstring(const QObject *obj);
+
+#endif /* QSTRING_H */
diff --git a/include/qapi/qmp/types.h b/include/qapi/qmp/types.h
new file mode 100644
index 0000000000..7782ec5a60
--- /dev/null
+++ b/include/qapi/qmp/types.h
@@ -0,0 +1,25 @@
+/*
+ * Include all QEMU objects.
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef QEMU_OBJECTS_H
+#define QEMU_OBJECTS_H
+
+#include "qapi/qmp/qobject.h"
+#include "qapi/qmp/qint.h"
+#include "qapi/qmp/qfloat.h"
+#include "qapi/qmp/qbool.h"
+#include "qapi/qmp/qstring.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qlist.h"
+#include "qapi/qmp/qjson.h"
+
+#endif /* QEMU_OBJECTS_H */
diff --git a/include/qapi/string-input-visitor.h b/include/qapi/string-input-visitor.h
new file mode 100644
index 0000000000..089243c09e
--- /dev/null
+++ b/include/qapi/string-input-visitor.h
@@ -0,0 +1,25 @@
+/*
+ * String parsing Visitor
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef STRING_INPUT_VISITOR_H
+#define STRING_INPUT_VISITOR_H
+
+#include "qapi/visitor.h"
+
+typedef struct StringInputVisitor StringInputVisitor;
+
+StringInputVisitor *string_input_visitor_new(const char *str);
+void string_input_visitor_cleanup(StringInputVisitor *v);
+
+Visitor *string_input_get_visitor(StringInputVisitor *v);
+
+#endif
diff --git a/include/qapi/string-output-visitor.h b/include/qapi/string-output-visitor.h
new file mode 100644
index 0000000000..ec81e42b60
--- /dev/null
+++ b/include/qapi/string-output-visitor.h
@@ -0,0 +1,26 @@
+/*
+ * String printing Visitor
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef STRING_OUTPUT_VISITOR_H
+#define STRING_OUTPUT_VISITOR_H
+
+#include "qapi/visitor.h"
+
+typedef struct StringOutputVisitor StringOutputVisitor;
+
+StringOutputVisitor *string_output_visitor_new(void);
+void string_output_visitor_cleanup(StringOutputVisitor *v);
+
+char *string_output_get_string(StringOutputVisitor *v);
+Visitor *string_output_get_visitor(StringOutputVisitor *v);
+
+#endif
diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h
new file mode 100644
index 0000000000..5159964863
--- /dev/null
+++ b/include/qapi/visitor-impl.h
@@ -0,0 +1,63 @@
+/*
+ * Core Definitions for QAPI Visitor implementations
+ *
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+#ifndef QAPI_VISITOR_IMPL_H
+#define QAPI_VISITOR_IMPL_H
+
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+
+struct Visitor
+{
+ /* Must be set */
+ void (*start_struct)(Visitor *v, void **obj, const char *kind,
+ const char *name, size_t size, Error **errp);
+ void (*end_struct)(Visitor *v, Error **errp);
+
+ void (*start_list)(Visitor *v, const char *name, Error **errp);
+ GenericList *(*next_list)(Visitor *v, GenericList **list, Error **errp);
+ void (*end_list)(Visitor *v, Error **errp);
+
+ void (*type_enum)(Visitor *v, int *obj, const char *strings[],
+ const char *kind, const char *name, Error **errp);
+
+ void (*type_int)(Visitor *v, int64_t *obj, const char *name, Error **errp);
+ void (*type_bool)(Visitor *v, bool *obj, const char *name, Error **errp);
+ void (*type_str)(Visitor *v, char **obj, const char *name, Error **errp);
+ void (*type_number)(Visitor *v, double *obj, const char *name,
+ Error **errp);
+
+ /* May be NULL */
+ void (*start_optional)(Visitor *v, bool *present, const char *name,
+ Error **errp);
+ void (*end_optional)(Visitor *v, Error **errp);
+
+ void (*start_handle)(Visitor *v, void **obj, const char *kind,
+ const char *name, Error **errp);
+ void (*end_handle)(Visitor *v, Error **errp);
+ void (*type_uint8)(Visitor *v, uint8_t *obj, const char *name, Error **errp);
+ void (*type_uint16)(Visitor *v, uint16_t *obj, const char *name, Error **errp);
+ void (*type_uint32)(Visitor *v, uint32_t *obj, const char *name, Error **errp);
+ void (*type_uint64)(Visitor *v, uint64_t *obj, const char *name, Error **errp);
+ void (*type_int8)(Visitor *v, int8_t *obj, const char *name, Error **errp);
+ void (*type_int16)(Visitor *v, int16_t *obj, const char *name, Error **errp);
+ void (*type_int32)(Visitor *v, int32_t *obj, const char *name, Error **errp);
+ void (*type_int64)(Visitor *v, int64_t *obj, const char *name, Error **errp);
+ /* visit_type_size() falls back to (*type_uint64)() if type_size is unset */
+ void (*type_size)(Visitor *v, uint64_t *obj, const char *name, Error **errp);
+};
+
+void input_type_enum(Visitor *v, int *obj, const char *strings[],
+ const char *kind, const char *name, Error **errp);
+void output_type_enum(Visitor *v, int *obj, const char *strings[],
+ const char *kind, const char *name, Error **errp);
+
+#endif
diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h
new file mode 100644
index 0000000000..1fef18c08f
--- /dev/null
+++ b/include/qapi/visitor.h
@@ -0,0 +1,55 @@
+/*
+ * Core Definitions for QAPI Visitor Classes
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+#ifndef QAPI_VISITOR_CORE_H
+#define QAPI_VISITOR_CORE_H
+
+#include "qapi/error.h"
+#include <stdlib.h>
+
+typedef struct GenericList
+{
+ void *value;
+ struct GenericList *next;
+} GenericList;
+
+typedef struct Visitor Visitor;
+
+void visit_start_handle(Visitor *v, void **obj, const char *kind,
+ const char *name, Error **errp);
+void visit_end_handle(Visitor *v, Error **errp);
+void visit_start_struct(Visitor *v, void **obj, const char *kind,
+ const char *name, size_t size, Error **errp);
+void visit_end_struct(Visitor *v, Error **errp);
+void visit_start_list(Visitor *v, const char *name, Error **errp);
+GenericList *visit_next_list(Visitor *v, GenericList **list, Error **errp);
+void visit_end_list(Visitor *v, Error **errp);
+void visit_start_optional(Visitor *v, bool *present, const char *name,
+ Error **errp);
+void visit_end_optional(Visitor *v, Error **errp);
+void visit_type_enum(Visitor *v, int *obj, const char *strings[],
+ const char *kind, const char *name, Error **errp);
+void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp);
+void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp);
+void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp);
+void visit_type_uint32(Visitor *v, uint32_t *obj, const char *name, Error **errp);
+void visit_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp);
+void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp);
+void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp);
+void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp);
+void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp);
+void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp);
+void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp);
+void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp);
+void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp);
+
+#endif
diff --git a/include/qemu-common.h b/include/qemu-common.h
new file mode 100644
index 0000000000..ca464bb367
--- /dev/null
+++ b/include/qemu-common.h
@@ -0,0 +1,426 @@
+
+/* Common header file that is included by all of QEMU.
+ *
+ * This file is supposed to be included only by .c files. No header file should
+ * depend on qemu-common.h, as this would easily lead to circular header
+ * dependencies.
+ *
+ * If a header file uses a definition from qemu-common.h, that definition
+ * must be moved to a separate header file, and the header that uses it
+ * must include that header.
+ */
+#ifndef QEMU_COMMON_H
+#define QEMU_COMMON_H
+
+#include "qemu/compiler.h"
+#include "config-host.h"
+#include "qemu/typedefs.h"
+
+#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
+#define WORDS_ALIGNED
+#endif
+
+#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR)
+
+/* we put basic includes here to avoid repeating them in device drivers */
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <string.h>
+#include <strings.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <time.h>
+#include <ctype.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <assert.h>
+#include <signal.h>
+#include <glib.h>
+
+#ifdef _WIN32
+#include "sysemu/os-win32.h"
+#endif
+
+#ifdef CONFIG_POSIX
+#include "sysemu/os-posix.h"
+#endif
+
+#ifndef O_LARGEFILE
+#define O_LARGEFILE 0
+#endif
+#ifndef O_BINARY
+#define O_BINARY 0
+#endif
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#ifndef ENOMEDIUM
+#define ENOMEDIUM ENODEV
+#endif
+#if !defined(ENOTSUP)
+#define ENOTSUP 4096
+#endif
+#if !defined(ECANCELED)
+#define ECANCELED 4097
+#endif
+#ifndef TIME_MAX
+#define TIME_MAX LONG_MAX
+#endif
+
+/* HOST_LONG_BITS is the size of a native pointer in bits. */
+#if UINTPTR_MAX == UINT32_MAX
+# define HOST_LONG_BITS 32
+#elif UINTPTR_MAX == UINT64_MAX
+# define HOST_LONG_BITS 64
+#else
+# error Unknown pointer size
+#endif
+
+#ifndef CONFIG_IOVEC
+#define CONFIG_IOVEC
+struct iovec {
+ void *iov_base;
+ size_t iov_len;
+};
+/*
+ * Use the same value as Linux for now.
+ */
+#define IOV_MAX 1024
+#else
+#include <sys/uio.h>
+#endif
+
+typedef int (*fprintf_function)(FILE *f, const char *fmt, ...)
+ GCC_FMT_ATTR(2, 3);
+
+#ifdef _WIN32
+#define fsync _commit
+#if !defined(lseek)
+# define lseek _lseeki64
+#endif
+int qemu_ftruncate64(int, int64_t);
+#if !defined(ftruncate)
+# define ftruncate qemu_ftruncate64
+#endif
+
+static inline char *realpath(const char *path, char *resolved_path)
+{
+ _fullpath(resolved_path, path, _MAX_PATH);
+ return resolved_path;
+}
+#endif
+
+/* icount */
+void configure_icount(const char *option);
+extern int use_icount;
+
+/* FIXME: Remove NEED_CPU_H. */
+#ifndef NEED_CPU_H
+
+#include "qemu/osdep.h"
+#include "qemu/bswap.h"
+
+#else
+
+#include "cpu.h"
+
+#endif /* !defined(NEED_CPU_H) */
+
+/* main function, renamed */
+#if defined(CONFIG_COCOA)
+int qemu_main(int argc, char **argv, char **envp);
+#endif
+
+void qemu_get_timedate(struct tm *tm, int offset);
+int qemu_timedate_diff(struct tm *tm);
+
+/**
+ * is_help_option:
+ * @s: string to test
+ *
+ * Check whether @s is one of the standard strings which indicate
+ * that the user is asking for a list of the valid values for a
+ * command option like -cpu or -M. The current accepted strings
+ * are 'help' and '?'. '?' is deprecated (it is a shell wildcard
+ * which makes it annoying to use in a reliable way) but provided
+ * for backwards compatibility.
+ *
+ * Returns: true if @s is a request for a list.
+ */
+static inline bool is_help_option(const char *s)
+{
+ return !strcmp(s, "?") || !strcmp(s, "help");
+}
+
+/* cutils.c */
+void pstrcpy(char *buf, int buf_size, const char *str);
+void strpadcpy(char *buf, int buf_size, const char *str, char pad);
+char *pstrcat(char *buf, int buf_size, const char *s);
+int strstart(const char *str, const char *val, const char **ptr);
+int stristart(const char *str, const char *val, const char **ptr);
+int qemu_strnlen(const char *s, int max_len);
+time_t mktimegm(struct tm *tm);
+int qemu_fls(int i);
+int qemu_fdatasync(int fd);
+int fcntl_setfl(int fd, int flag);
+int qemu_parse_fd(const char *param);
+
+/*
+ * strtosz() suffixes used to specify the default treatment of an
+ * argument passed to strtosz() without an explicit suffix.
+ * These should be defined using upper case characters in the range
+ * A-Z, as strtosz() will use qemu_toupper() on the given argument
+ * prior to comparison.
+ */
+#define STRTOSZ_DEFSUFFIX_TB 'T'
+#define STRTOSZ_DEFSUFFIX_GB 'G'
+#define STRTOSZ_DEFSUFFIX_MB 'M'
+#define STRTOSZ_DEFSUFFIX_KB 'K'
+#define STRTOSZ_DEFSUFFIX_B 'B'
+int64_t strtosz(const char *nptr, char **end);
+int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix);
+int64_t strtosz_suffix_unit(const char *nptr, char **end,
+ const char default_suffix, int64_t unit);
+
+/* path.c */
+void init_paths(const char *prefix);
+const char *path(const char *pathname);
+
+#define qemu_isalnum(c) isalnum((unsigned char)(c))
+#define qemu_isalpha(c) isalpha((unsigned char)(c))
+#define qemu_iscntrl(c) iscntrl((unsigned char)(c))
+#define qemu_isdigit(c) isdigit((unsigned char)(c))
+#define qemu_isgraph(c) isgraph((unsigned char)(c))
+#define qemu_islower(c) islower((unsigned char)(c))
+#define qemu_isprint(c) isprint((unsigned char)(c))
+#define qemu_ispunct(c) ispunct((unsigned char)(c))
+#define qemu_isspace(c) isspace((unsigned char)(c))
+#define qemu_isupper(c) isupper((unsigned char)(c))
+#define qemu_isxdigit(c) isxdigit((unsigned char)(c))
+#define qemu_tolower(c) tolower((unsigned char)(c))
+#define qemu_toupper(c) toupper((unsigned char)(c))
+#define qemu_isascii(c) isascii((unsigned char)(c))
+#define qemu_toascii(c) toascii((unsigned char)(c))
+
+void *qemu_oom_check(void *ptr);
+
+ssize_t qemu_write_full(int fd, const void *buf, size_t count)
+ QEMU_WARN_UNUSED_RESULT;
+ssize_t qemu_send_full(int fd, const void *buf, size_t count, int flags)
+ QEMU_WARN_UNUSED_RESULT;
+ssize_t qemu_recv_full(int fd, void *buf, size_t count, int flags)
+ QEMU_WARN_UNUSED_RESULT;
+
+#ifndef _WIN32
+int qemu_pipe(int pipefd[2]);
+#endif
+
+#ifdef _WIN32
+/* MinGW needs type casts for the 'buf' and 'optval' arguments. */
+#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \
+ getsockopt(sockfd, level, optname, (void *)optval, optlen)
+#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \
+ setsockopt(sockfd, level, optname, (const void *)optval, optlen)
+#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, (void *)buf, len, flags)
+#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \
+ sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen)
+#else
+#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \
+ getsockopt(sockfd, level, optname, optval, optlen)
+#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \
+ setsockopt(sockfd, level, optname, optval, optlen)
+#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags)
+#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \
+ sendto(sockfd, buf, len, flags, destaddr, addrlen)
+#endif
+
+/* Error handling. */
+
+void QEMU_NORETURN hw_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+
+struct ParallelIOArg {
+ void *buffer;
+ int count;
+};
+
+typedef int (*DMA_transfer_handler) (void *opaque, int nchan, int pos, int size);
+
+typedef uint64_t pcibus_t;
+
+typedef enum LostTickPolicy {
+ LOST_TICK_DISCARD,
+ LOST_TICK_DELAY,
+ LOST_TICK_MERGE,
+ LOST_TICK_SLEW,
+ LOST_TICK_MAX
+} LostTickPolicy;
+
+typedef struct PCIHostDeviceAddress {
+ unsigned int domain;
+ unsigned int bus;
+ unsigned int slot;
+ unsigned int function;
+} PCIHostDeviceAddress;
+
+void tcg_exec_init(unsigned long tb_size);
+bool tcg_enabled(void);
+
+void cpu_exec_init_all(void);
+
+/* CPU save/load. */
+void cpu_save(QEMUFile *f, void *opaque);
+int cpu_load(QEMUFile *f, void *opaque, int version_id);
+
+/* Unblock cpu */
+void qemu_cpu_kick_self(void);
+
+/* work queue */
+struct qemu_work_item {
+ struct qemu_work_item *next;
+ void (*func)(void *data);
+ void *data;
+ int done;
+};
+
+#ifdef CONFIG_USER_ONLY
+static inline void qemu_init_vcpu(void *env)
+{
+}
+#else
+void qemu_init_vcpu(void *env);
+#endif
+
+
+/**
+ * Sends (part of) an iovec down a socket, yielding when the socket is full, or
+ * receives data into (part of) an iovec from a socket, yielding when there is
+ * no data in the socket.
+ * The same interface as qemu_sendv_recvv(), with added yielding.
+ * XXX should mark these as coroutine_fn
+ */
+ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
+ size_t offset, size_t bytes, bool do_send);
+#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
+ qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
+#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
+ qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true)
+
+/**
+ * The same as above, but with just a single buffer
+ */
+ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send);
+#define qemu_co_recv(sockfd, buf, bytes) \
+ qemu_co_send_recv(sockfd, buf, bytes, false)
+#define qemu_co_send(sockfd, buf, bytes) \
+ qemu_co_send_recv(sockfd, buf, bytes, true)
+
+typedef struct QEMUIOVector {
+ struct iovec *iov;
+ int niov;
+ int nalloc;
+ size_t size;
+} QEMUIOVector;
+
+void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
+void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
+void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
+void qemu_iovec_concat(QEMUIOVector *dst,
+ QEMUIOVector *src, size_t soffset, size_t sbytes);
+void qemu_iovec_concat_iov(QEMUIOVector *dst,
+ struct iovec *src_iov, unsigned int src_cnt,
+ size_t soffset, size_t sbytes);
+void qemu_iovec_destroy(QEMUIOVector *qiov);
+void qemu_iovec_reset(QEMUIOVector *qiov);
+size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset,
+ void *buf, size_t bytes);
+size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
+ const void *buf, size_t bytes);
+size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
+ int fillc, size_t bytes);
+
+bool buffer_is_zero(const void *buf, size_t len);
+
+void qemu_progress_init(int enabled, float min_skip);
+void qemu_progress_end(void);
+void qemu_progress_print(float delta, int max);
+const char *qemu_get_vm_name(void);
+
+#define QEMU_FILE_TYPE_BIOS 0
+#define QEMU_FILE_TYPE_KEYMAP 1
+char *qemu_find_file(int type, const char *name);
+
+/* OS specific functions */
+void os_setup_early_signal_handling(void);
+char *os_find_datadir(const char *argv0);
+void os_parse_cmd_args(int index, const char *optarg);
+void os_pidfile_error(void);
+
+/* Convert a byte between binary and BCD. */
+static inline uint8_t to_bcd(uint8_t val)
+{
+ return ((val / 10) << 4) | (val % 10);
+}
+
+static inline uint8_t from_bcd(uint8_t val)
+{
+ return ((val >> 4) * 10) + (val & 0x0f);
+}
+
+/* compute with 96 bit intermediate result: (a*b)/c */
+static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
+{
+ union {
+ uint64_t ll;
+ struct {
+#ifdef HOST_WORDS_BIGENDIAN
+ uint32_t high, low;
+#else
+ uint32_t low, high;
+#endif
+ } l;
+ } u, res;
+ uint64_t rl, rh;
+
+ u.ll = a;
+ rl = (uint64_t)u.l.low * (uint64_t)b;
+ rh = (uint64_t)u.l.high * (uint64_t)b;
+ rh += (rl >> 32);
+ res.l.high = rh / c;
+ res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
+ return res.ll;
+}
+
+/* Round number down to multiple */
+#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
+
+/* Round number up to multiple */
+#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))
+
+static inline bool is_power_of_2(uint64_t value)
+{
+    if (!value) {
+        return false;
+    }
+
+ return !(value & (value - 1));
+}
+
+/* round down to the nearest power of 2 */
+int64_t pow2floor(int64_t value);
+
+#include "qemu/module.h"
+
+/*
+ * Implementation of ULEB128 (http://en.wikipedia.org/wiki/LEB128)
+ * Input is limited to 14-bit numbers
+ */
+
+int uleb128_encode_small(uint8_t *out, uint32_t n);
+int uleb128_decode_small(const uint8_t *in, uint32_t *n);
+
+#endif
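
A minimal usage sketch of the arithmetic helpers declared above (illustration only, not part of the patch; the 25 MHz clock rate is made up for the example, and qemu-common.h is assumed to be included):

    #include <assert.h>
    #include <stdint.h>

    /* Scale a 25 MHz tick count to nanoseconds without 64-bit overflow:
     * muldiv64() keeps a 96-bit intermediate for (a * b) / c. */
    static uint64_t ticks_to_ns(uint64_t ticks)
    {
        return muldiv64(ticks, 1000000000, 25000000);
    }

    static void helpers_demo(void)
    {
        assert(QEMU_ALIGN_UP(4097, 4096) == 8192);   /* round up to page size */
        assert(to_bcd(59) == 0x59);                  /* 59 -> BCD 0x59 */
        assert(from_bcd(0x59) == 59);                /* and back */
    }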
diff --git a/include/qemu/acl.h b/include/qemu/acl.h
new file mode 100644
index 0000000000..116487e282
--- /dev/null
+++ b/include/qemu/acl.h
@@ -0,0 +1,74 @@
+/*
+ * QEMU access control list management
+ *
+ * Copyright (C) 2009 Red Hat, Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef __QEMU_ACL_H__
+#define __QEMU_ACL_H__
+
+#include "qemu/queue.h"
+
+typedef struct qemu_acl_entry qemu_acl_entry;
+typedef struct qemu_acl qemu_acl;
+
+struct qemu_acl_entry {
+ char *match;
+ int deny;
+
+ QTAILQ_ENTRY(qemu_acl_entry) next;
+};
+
+struct qemu_acl {
+ char *aclname;
+ unsigned int nentries;
+ QTAILQ_HEAD(,qemu_acl_entry) entries;
+ int defaultDeny;
+};
+
+qemu_acl *qemu_acl_init(const char *aclname);
+
+qemu_acl *qemu_acl_find(const char *aclname);
+
+int qemu_acl_party_is_allowed(qemu_acl *acl,
+ const char *party);
+
+void qemu_acl_reset(qemu_acl *acl);
+
+int qemu_acl_append(qemu_acl *acl,
+ int deny,
+ const char *match);
+int qemu_acl_insert(qemu_acl *acl,
+ int deny,
+ const char *match,
+ int index);
+int qemu_acl_remove(qemu_acl *acl,
+ const char *match);
+
+#endif /* __QEMU_ACL_H__ */
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 8
+ * End:
+ */
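
A sketch of how the ACL API above is typically driven (illustration only, not part of the patch; the list name, match strings, and the use of "*" as a catch-all are assumptions for the example):

    #include "qemu/acl.h"

    static int user_may_connect(const char *username)
    {
        qemu_acl *acl = qemu_acl_find("vnc.username");

        if (!acl) {
            acl = qemu_acl_init("vnc.username");
            qemu_acl_append(acl, 0, "alice");   /* deny == 0: allow alice */
            qemu_acl_append(acl, 1, "*");       /* deny == 1: deny everyone else */
        }
        return qemu_acl_party_is_allowed(acl, username);
    }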
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
new file mode 100644
index 0000000000..96a194bbee
--- /dev/null
+++ b/include/qemu/atomic.h
@@ -0,0 +1,67 @@
+#ifndef __QEMU_BARRIER_H
+#define __QEMU_BARRIER_H 1
+
+/* Compiler barrier */
+#define barrier() asm volatile("" ::: "memory")
+
+#if defined(__i386__)
+
+#include "qemu/compiler.h" /* QEMU_GNUC_PREREQ */
+
+/*
+ * Because of the strongly ordered x86 storage model, wmb() and rmb() are
+ * compiler barriers only on x86, at least as long as qemu doesn't do
+ * accesses to write-combining memory or non-temporal load/stores from
+ * C code.
+ */
+#define smp_wmb() barrier()
+#define smp_rmb() barrier()
+/*
+ * We use the GCC builtin if it's available, as that can emit mfence on
+ * 32-bit hosts as well, e.g. if built with -march=pentium-m. However, on
+ * i386, there seem to be known bugs in it as recently as GCC 4.3.
+ */
+#if QEMU_GNUC_PREREQ(4, 4)
+#define smp_mb() __sync_synchronize()
+#else
+#define smp_mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
+#endif
+
+#elif defined(__x86_64__)
+
+#define smp_wmb() barrier()
+#define smp_rmb() barrier()
+#define smp_mb() asm volatile("mfence" ::: "memory")
+
+#elif defined(_ARCH_PPC)
+
+/*
+ * We use an eieio() for wmb() on powerpc. This assumes we don't
+ * need to order cacheable and non-cacheable stores with respect to
+ * each other
+ */
+#define smp_wmb() asm volatile("eieio" ::: "memory")
+
+#if defined(__powerpc64__)
+#define smp_rmb() asm volatile("lwsync" ::: "memory")
+#else
+#define smp_rmb() asm volatile("sync" ::: "memory")
+#endif
+
+#define smp_mb() asm volatile("sync" ::: "memory")
+
+#else
+
+/*
+ * For (host) platforms we don't have explicit barrier definitions
+ * for, we use the gcc __sync_synchronize() primitive to generate a
+ * full barrier. This should be safe on all platforms, though it may
+ * be overkill for wmb() and rmb().
+ */
+#define smp_wmb() __sync_synchronize()
+#define smp_mb() __sync_synchronize()
+#define smp_rmb() __sync_synchronize()
+
+#endif
+
+#endif
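
A minimal sketch of how these barriers are meant to pair up (illustration only, not part of the patch; the shared variables are hypothetical):

    /* The producer publishes data, then a flag; the consumer pairs the flag
     * check with smp_rmb() so it never sees the flag without the data. */
    extern int shared_data;
    extern int shared_flag;

    static void producer(void)
    {
        shared_data = 42;
        smp_wmb();            /* order the data store before the flag store */
        shared_flag = 1;
    }

    static int consumer(void)
    {
        if (shared_flag) {
            smp_rmb();        /* order the flag load before the data load */
            return shared_data;
        }
        return -1;
    }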
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
new file mode 100644
index 0000000000..308bbb71e9
--- /dev/null
+++ b/include/qemu/bitmap.h
@@ -0,0 +1,222 @@
+/*
+ * Bitmap Module
+ *
+ * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com>
+ *
+ * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BITMAP_H
+#define BITMAP_H
+
+#include "qemu-common.h"
+#include "qemu/bitops.h"
+
+/*
+ * The available bitmap operations and their rough meaning in the
+ * case that the bitmap is a single unsigned long are thus:
+ *
+ * Note that nbits should always be a compile-time evaluable constant.
+ * Otherwise many of the inlines will generate horrible code.
+ *
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ */
+
+/*
+ * Also the following operations apply to bitmaps.
+ *
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ */
+
+#define BITMAP_LAST_WORD_MASK(nbits) \
+ ( \
+ ((nbits) % BITS_PER_LONG) ? \
+ (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
+ )
+
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+
+#define small_nbits(nbits) \
+ ((nbits) <= BITS_PER_LONG)
+
+int slow_bitmap_empty(const unsigned long *bitmap, int bits);
+int slow_bitmap_full(const unsigned long *bitmap, int bits);
+int slow_bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+void slow_bitmap_complement(unsigned long *dst, const unsigned long *src,
+ int bits);
+void slow_bitmap_shift_right(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+void slow_bitmap_shift_left(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+int slow_bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+
+static inline unsigned long *bitmap_new(int nbits)
+{
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ return g_malloc0(len);
+}
+
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (small_nbits(nbits)) {
+ *dst = 0UL;
+ } else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+
+static inline void bitmap_fill(unsigned long *dst, int nbits)
+{
+ size_t nlongs = BITS_TO_LONGS(nbits);
+ if (!small_nbits(nbits)) {
+ int len = (nlongs - 1) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
+ }
+ dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ int nbits)
+{
+ if (small_nbits(nbits)) {
+ *dst = *src;
+ } else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memcpy(dst, src, len);
+ }
+}
+
+static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_nbits(nbits)) {
+ return (*dst = *src1 & *src2) != 0;
+ }
+ return slow_bitmap_and(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_nbits(nbits)) {
+ *dst = *src1 | *src2;
+ } else {
+ slow_bitmap_or(dst, src1, src2, nbits);
+ }
+}
+
+static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_nbits(nbits)) {
+ *dst = *src1 ^ *src2;
+ } else {
+ slow_bitmap_xor(dst, src1, src2, nbits);
+ }
+}
+
+static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_nbits(nbits)) {
+ return (*dst = *src1 & ~(*src2)) != 0;
+ }
+ return slow_bitmap_andnot(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
+ int nbits)
+{
+ if (small_nbits(nbits)) {
+ *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
+ } else {
+ slow_bitmap_complement(dst, src, nbits);
+ }
+}
+
+static inline int bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_nbits(nbits)) {
+ return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return slow_bitmap_equal(src1, src2, nbits);
+ }
+}
+
+static inline int bitmap_empty(const unsigned long *src, int nbits)
+{
+ if (small_nbits(nbits)) {
+ return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return slow_bitmap_empty(src, nbits);
+ }
+}
+
+static inline int bitmap_full(const unsigned long *src, int nbits)
+{
+ if (small_nbits(nbits)) {
+ return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return slow_bitmap_full(src, nbits);
+ }
+}
+
+static inline int bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_nbits(nbits)) {
+ return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ } else {
+ return slow_bitmap_intersects(src1, src2, nbits);
+ }
+}
+
+void bitmap_set(unsigned long *map, int i, int len);
+void bitmap_clear(unsigned long *map, int start, int nr);
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask);
+
+#endif /* BITMAP_H */
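
A minimal usage sketch of the bitmap API above (illustration only, not part of the patch; bitmap.h already pulls in qemu-common.h, and g_free() comes from glib):

    #include <assert.h>
    #include "qemu/bitmap.h"

    static void bitmap_demo(void)
    {
        DECLARE_BITMAP(dirty, 1024);        /* 1024 bits on the stack */

        bitmap_zero(dirty, 1024);
        bitmap_set(dirty, 16, 4);           /* mark bits 16..19 */
        assert(!bitmap_empty(dirty, 1024));
        bitmap_clear(dirty, 16, 4);
        assert(bitmap_empty(dirty, 1024));

        unsigned long *big = bitmap_new(1 << 20);   /* heap, zero-initialized */
        bitmap_fill(big, 1 << 20);
        g_free(big);
    }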
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
new file mode 100644
index 0000000000..74e14e5724
--- /dev/null
+++ b/include/qemu/bitops.h
@@ -0,0 +1,362 @@
+/*
+ * Bitops Module
+ *
+ * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com>
+ *
+ * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#ifndef BITOPS_H
+#define BITOPS_H
+
+#include "qemu-common.h"
+
+#define BITS_PER_BYTE CHAR_BIT
+#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE)
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+
+/**
+ * bitops_ffsl - find first set bit in a long word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long bitops_ffsl(unsigned long word)
+{
+ int num = 0;
+
+#if LONG_MAX > 0x7FFFFFFF
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0) {
+ num += 1;
+ }
+ return num;
+}
+
+/**
+ * bitops_flsl - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long bitops_flsl(unsigned long word)
+{
+ int num = BITS_PER_LONG - 1;
+
+#if LONG_MAX > 0x7FFFFFFF
+ if (!(word & (~0ul << 32))) {
+ num -= 32;
+ word <<= 32;
+ }
+#endif
+ if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+    if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+        num -= 2;
+        word <<= 2;
+    }
+    if (!(word & (~0ul << (BITS_PER_LONG-1)))) {
+        num -= 1;
+    }
+ return num;
+}
+
+/**
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+ return bitops_ffsl(~word);
+}
+
+/**
+ * set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+static inline void set_bit(int nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+static inline void clear_bit(int nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+static inline void change_bit(int nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+static inline int test_and_set_bit(int nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+static inline int test_and_clear_bit(int nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+static inline int test_and_change_bit(int nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const unsigned long *addr)
+{
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the last set bit, or size if no bits are set.
+ */
+unsigned long find_last_bit(const unsigned long *addr,
+ unsigned long size);
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bit number to start searching at
+ */
+unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bit number to start searching at
+ */
+
+unsigned long find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+static inline unsigned long find_first_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ return find_next_bit(addr, size, 0);
+}
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+static inline unsigned long find_first_zero_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ return find_next_zero_bit(addr, size, 0);
+}
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+ unsigned long count;
+
+ for (count = 0; w; w >>= 1) {
+ count += w & 1;
+ }
+ return count;
+}
+
+/**
+ * extract32:
+ * @value: the value to extract the bit field from
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ *
+ * Extract from the 32 bit input @value the bit field specified by the
+ * @start and @length parameters, and return it. The bit field must
+ * lie entirely within the 32 bit word. It is valid to request that
+ * all 32 bits are returned (ie @length 32 and @start 0).
+ *
+ * Returns: the value of the bit field extracted from the input value.
+ */
+static inline uint32_t extract32(uint32_t value, int start, int length)
+{
+ assert(start >= 0 && length > 0 && length <= 32 - start);
+ return (value >> start) & (~0U >> (32 - length));
+}
+
+/**
+ * extract64:
+ * @value: the value to extract the bit field from
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ *
+ * Extract from the 64 bit input @value the bit field specified by the
+ * @start and @length parameters, and return it. The bit field must
+ * lie entirely within the 64 bit word. It is valid to request that
+ * all 64 bits are returned (ie @length 64 and @start 0).
+ *
+ * Returns: the value of the bit field extracted from the input value.
+ */
+static inline uint64_t extract64(uint64_t value, int start, int length)
+{
+ assert(start >= 0 && length > 0 && length <= 64 - start);
+ return (value >> start) & (~0ULL >> (64 - length));
+}
+
+/**
+ * deposit32:
+ * @value: initial value to insert bit field into
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ * @fieldval: the value to insert into the bit field
+ *
+ * Deposit @fieldval into the 32 bit @value at the bit field specified
+ * by the @start and @length parameters, and return the modified
+ * @value. Bits of @value outside the bit field are not modified.
+ * Bits of @fieldval above the least significant @length bits are
+ * ignored. The bit field must lie entirely within the 32 bit word.
+ * It is valid to request that all 32 bits are modified (ie @length
+ * 32 and @start 0).
+ *
+ * Returns: the modified @value.
+ */
+static inline uint32_t deposit32(uint32_t value, int start, int length,
+ uint32_t fieldval)
+{
+ uint32_t mask;
+ assert(start >= 0 && length > 0 && length <= 32 - start);
+ mask = (~0U >> (32 - length)) << start;
+ return (value & ~mask) | ((fieldval << start) & mask);
+}
+
+/**
+ * deposit64:
+ * @value: initial value to insert bit field into
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ * @fieldval: the value to insert into the bit field
+ *
+ * Deposit @fieldval into the 64 bit @value at the bit field specified
+ * by the @start and @length parameters, and return the modified
+ * @value. Bits of @value outside the bit field are not modified.
+ * Bits of @fieldval above the least significant @length bits are
+ * ignored. The bit field must lie entirely within the 64 bit word.
+ * It is valid to request that all 64 bits are modified (ie @length
+ * 64 and @start 0).
+ *
+ * Returns: the modified @value.
+ */
+static inline uint64_t deposit64(uint64_t value, int start, int length,
+ uint64_t fieldval)
+{
+ uint64_t mask;
+ assert(start >= 0 && length > 0 && length <= 64 - start);
+ mask = (~0ULL >> (64 - length)) << start;
+ return (value & ~mask) | ((fieldval << start) & mask);
+}
+
+#endif
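
A short sketch of the bit and bit-field helpers above (illustration only, not part of the patch; bitops.h already includes qemu-common.h):

    #include <assert.h>
    #include "qemu/bitops.h"

    static void bitops_demo(void)
    {
        unsigned long map[BITS_TO_LONGS(64)] = { 0 };

        set_bit(5, map);
        assert(test_bit(5, map));
        assert(find_first_bit(map, 64) == 5);
        assert(find_next_zero_bit(map, 64, 5) == 6);

        /* Pack a 4-bit field at bit 8 of a 32-bit register value. */
        uint32_t reg = deposit32(0, 8, 4, 0xA);
        assert(extract32(reg, 8, 4) == 0xA);
    }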
diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
new file mode 100644
index 0000000000..2006fcd621
--- /dev/null
+++ b/include/qemu/bswap.h
@@ -0,0 +1,713 @@
+#ifndef BSWAP_H
+#define BSWAP_H
+
+#include "config-host.h"
+
+#include <inttypes.h>
+#include "fpu/softfloat.h"
+
+#ifdef CONFIG_MACHINE_BSWAP_H
+#include <sys/endian.h>
+#include <sys/types.h>
+#include <machine/bswap.h>
+#else
+
+#ifdef CONFIG_BYTESWAP_H
+#include <byteswap.h>
+#else
+
+#define bswap_16(x) \
+({ \
+ uint16_t __x = (x); \
+ ((uint16_t)( \
+ (((uint16_t)(__x) & (uint16_t)0x00ffU) << 8) | \
+ (((uint16_t)(__x) & (uint16_t)0xff00U) >> 8) )); \
+})
+
+#define bswap_32(x) \
+({ \
+ uint32_t __x = (x); \
+ ((uint32_t)( \
+ (((uint32_t)(__x) & (uint32_t)0x000000ffUL) << 24) | \
+ (((uint32_t)(__x) & (uint32_t)0x0000ff00UL) << 8) | \
+ (((uint32_t)(__x) & (uint32_t)0x00ff0000UL) >> 8) | \
+ (((uint32_t)(__x) & (uint32_t)0xff000000UL) >> 24) )); \
+})
+
+#define bswap_64(x) \
+({ \
+ uint64_t __x = (x); \
+ ((uint64_t)( \
+ (uint64_t)(((uint64_t)(__x) & (uint64_t)0x00000000000000ffULL) << 56) | \
+ (uint64_t)(((uint64_t)(__x) & (uint64_t)0x000000000000ff00ULL) << 40) | \
+ (uint64_t)(((uint64_t)(__x) & (uint64_t)0x0000000000ff0000ULL) << 24) | \
+ (uint64_t)(((uint64_t)(__x) & (uint64_t)0x00000000ff000000ULL) << 8) | \
+ (uint64_t)(((uint64_t)(__x) & (uint64_t)0x000000ff00000000ULL) >> 8) | \
+ (uint64_t)(((uint64_t)(__x) & (uint64_t)0x0000ff0000000000ULL) >> 24) | \
+ (uint64_t)(((uint64_t)(__x) & (uint64_t)0x00ff000000000000ULL) >> 40) | \
+ (uint64_t)(((uint64_t)(__x) & (uint64_t)0xff00000000000000ULL) >> 56) )); \
+})
+
+#endif /* !CONFIG_BYTESWAP_H */
+
+static inline uint16_t bswap16(uint16_t x)
+{
+ return bswap_16(x);
+}
+
+static inline uint32_t bswap32(uint32_t x)
+{
+ return bswap_32(x);
+}
+
+static inline uint64_t bswap64(uint64_t x)
+{
+ return bswap_64(x);
+}
+
+#endif /* ! CONFIG_MACHINE_BSWAP_H */
+
+static inline void bswap16s(uint16_t *s)
+{
+ *s = bswap16(*s);
+}
+
+static inline void bswap32s(uint32_t *s)
+{
+ *s = bswap32(*s);
+}
+
+static inline void bswap64s(uint64_t *s)
+{
+ *s = bswap64(*s);
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define be_bswap(v, size) (v)
+#define le_bswap(v, size) bswap ## size(v)
+#define be_bswaps(v, size)
+#define le_bswaps(p, size) *p = bswap ## size(*p);
+#else
+#define le_bswap(v, size) (v)
+#define be_bswap(v, size) bswap ## size(v)
+#define le_bswaps(v, size)
+#define be_bswaps(p, size) *p = bswap ## size(*p);
+#endif
+
+#define CPU_CONVERT(endian, size, type)\
+static inline type endian ## size ## _to_cpu(type v)\
+{\
+ return endian ## _bswap(v, size);\
+}\
+\
+static inline type cpu_to_ ## endian ## size(type v)\
+{\
+ return endian ## _bswap(v, size);\
+}\
+\
+static inline void endian ## size ## _to_cpus(type *p)\
+{\
+ endian ## _bswaps(p, size)\
+}\
+\
+static inline void cpu_to_ ## endian ## size ## s(type *p)\
+{\
+ endian ## _bswaps(p, size)\
+}\
+\
+static inline type endian ## size ## _to_cpup(const type *p)\
+{\
+ return endian ## size ## _to_cpu(*p);\
+}\
+\
+static inline void cpu_to_ ## endian ## size ## w(type *p, type v)\
+{\
+ *p = cpu_to_ ## endian ## size(v);\
+}
+
+CPU_CONVERT(be, 16, uint16_t)
+CPU_CONVERT(be, 32, uint32_t)
+CPU_CONVERT(be, 64, uint64_t)
+
+CPU_CONVERT(le, 16, uint16_t)
+CPU_CONVERT(le, 32, uint32_t)
+CPU_CONVERT(le, 64, uint64_t)
+
+/* unaligned versions (optimized for frequent unaligned accesses) */
+
+#if defined(__i386__) || defined(_ARCH_PPC)
+
+#define cpu_to_le16wu(p, v) cpu_to_le16w(p, v)
+#define cpu_to_le32wu(p, v) cpu_to_le32w(p, v)
+#define le16_to_cpupu(p) le16_to_cpup(p)
+#define le32_to_cpupu(p) le32_to_cpup(p)
+#define be32_to_cpupu(p) be32_to_cpup(p)
+
+#define cpu_to_be16wu(p, v) cpu_to_be16w(p, v)
+#define cpu_to_be32wu(p, v) cpu_to_be32w(p, v)
+#define cpu_to_be64wu(p, v) cpu_to_be64w(p, v)
+
+#else
+
+static inline void cpu_to_le16wu(uint16_t *p, uint16_t v)
+{
+ uint8_t *p1 = (uint8_t *)p;
+
+ p1[0] = v & 0xff;
+ p1[1] = v >> 8;
+}
+
+static inline void cpu_to_le32wu(uint32_t *p, uint32_t v)
+{
+ uint8_t *p1 = (uint8_t *)p;
+
+ p1[0] = v & 0xff;
+ p1[1] = v >> 8;
+ p1[2] = v >> 16;
+ p1[3] = v >> 24;
+}
+
+static inline uint16_t le16_to_cpupu(const uint16_t *p)
+{
+ const uint8_t *p1 = (const uint8_t *)p;
+ return p1[0] | (p1[1] << 8);
+}
+
+static inline uint32_t le32_to_cpupu(const uint32_t *p)
+{
+ const uint8_t *p1 = (const uint8_t *)p;
+ return p1[0] | (p1[1] << 8) | (p1[2] << 16) | (p1[3] << 24);
+}
+
+static inline uint32_t be32_to_cpupu(const uint32_t *p)
+{
+ const uint8_t *p1 = (const uint8_t *)p;
+ return p1[3] | (p1[2] << 8) | (p1[1] << 16) | (p1[0] << 24);
+}
+
+static inline void cpu_to_be16wu(uint16_t *p, uint16_t v)
+{
+ uint8_t *p1 = (uint8_t *)p;
+
+ p1[0] = v >> 8;
+ p1[1] = v & 0xff;
+}
+
+static inline void cpu_to_be32wu(uint32_t *p, uint32_t v)
+{
+ uint8_t *p1 = (uint8_t *)p;
+
+ p1[0] = v >> 24;
+ p1[1] = v >> 16;
+ p1[2] = v >> 8;
+ p1[3] = v & 0xff;
+}
+
+static inline void cpu_to_be64wu(uint64_t *p, uint64_t v)
+{
+ uint8_t *p1 = (uint8_t *)p;
+
+ p1[0] = v >> 56;
+ p1[1] = v >> 48;
+ p1[2] = v >> 40;
+ p1[3] = v >> 32;
+ p1[4] = v >> 24;
+ p1[5] = v >> 16;
+ p1[6] = v >> 8;
+ p1[7] = v & 0xff;
+}
+
+#endif
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define cpu_to_32wu cpu_to_be32wu
+#define leul_to_cpu(v) glue(glue(le,HOST_LONG_BITS),_to_cpu)(v)
+#else
+#define cpu_to_32wu cpu_to_le32wu
+#define leul_to_cpu(v) (v)
+#endif
+
+#undef le_bswap
+#undef be_bswap
+#undef le_bswaps
+#undef be_bswaps
+
+/* len must be one of 1, 2, 4 */
+static inline uint32_t qemu_bswap_len(uint32_t value, int len)
+{
+ return bswap32(value) >> (32 - 8 * len);
+}
+
+typedef union {
+ float32 f;
+ uint32_t l;
+} CPU_FloatU;
+
+typedef union {
+ float64 d;
+#if defined(HOST_WORDS_BIGENDIAN)
+ struct {
+ uint32_t upper;
+ uint32_t lower;
+ } l;
+#else
+ struct {
+ uint32_t lower;
+ uint32_t upper;
+ } l;
+#endif
+ uint64_t ll;
+} CPU_DoubleU;
+
+typedef union {
+ floatx80 d;
+ struct {
+ uint64_t lower;
+ uint16_t upper;
+ } l;
+} CPU_LDoubleU;
+
+typedef union {
+ float128 q;
+#if defined(HOST_WORDS_BIGENDIAN)
+ struct {
+ uint32_t upmost;
+ uint32_t upper;
+ uint32_t lower;
+ uint32_t lowest;
+ } l;
+ struct {
+ uint64_t upper;
+ uint64_t lower;
+ } ll;
+#else
+ struct {
+ uint32_t lowest;
+ uint32_t lower;
+ uint32_t upper;
+ uint32_t upmost;
+ } l;
+ struct {
+ uint64_t lower;
+ uint64_t upper;
+ } ll;
+#endif
+} CPU_QuadU;
+
+/* unaligned/endian-independent pointer access */
+
+/*
+ * the generic syntax is:
+ *
+ * load: ld{type}{sign}{size}{endian}_p(ptr)
+ *
+ * store: st{type}{size}{endian}_p(ptr, val)
+ *
+ * Note there are small differences with the softmmu access API!
+ *
+ * type is:
+ * (empty): integer access
+ * f : float access
+ *
+ * sign is:
+ * (empty): for floats or 32 bit size
+ * u : unsigned
+ * s : signed
+ *
+ * size is:
+ * b: 8 bits
+ * w: 16 bits
+ * l: 32 bits
+ * q: 64 bits
+ *
+ * endian is:
+ * (empty): 8 bit access
+ * be : big endian
+ * le : little endian
+ */
+static inline int ldub_p(const void *ptr)
+{
+ return *(uint8_t *)ptr;
+}
+
+static inline int ldsb_p(const void *ptr)
+{
+ return *(int8_t *)ptr;
+}
+
+static inline void stb_p(void *ptr, int v)
+{
+ *(uint8_t *)ptr = v;
+}
+
+/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
+   kernel handles unaligned load/stores may give better results, but
+   it is a system-wide setting, so we do not rely on it here. */
+#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
+
+/* conservative code for little endian unaligned accesses */
+static inline int lduw_le_p(const void *ptr)
+{
+#ifdef _ARCH_PPC
+ int val;
+ __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
+ return val;
+#else
+ const uint8_t *p = ptr;
+ return p[0] | (p[1] << 8);
+#endif
+}
+
+static inline int ldsw_le_p(const void *ptr)
+{
+#ifdef _ARCH_PPC
+ int val;
+ __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
+ return (int16_t)val;
+#else
+ const uint8_t *p = ptr;
+ return (int16_t)(p[0] | (p[1] << 8));
+#endif
+}
+
+static inline int ldl_le_p(const void *ptr)
+{
+#ifdef _ARCH_PPC
+ int val;
+ __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
+ return val;
+#else
+ const uint8_t *p = ptr;
+ return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
+#endif
+}
+
+static inline uint64_t ldq_le_p(const void *ptr)
+{
+ const uint8_t *p = ptr;
+ uint32_t v1, v2;
+ v1 = ldl_le_p(p);
+ v2 = ldl_le_p(p + 4);
+ return v1 | ((uint64_t)v2 << 32);
+}
+
+static inline void stw_le_p(void *ptr, int v)
+{
+#ifdef _ARCH_PPC
+ __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
+#else
+ uint8_t *p = ptr;
+ p[0] = v;
+ p[1] = v >> 8;
+#endif
+}
+
+static inline void stl_le_p(void *ptr, int v)
+{
+#ifdef _ARCH_PPC
+ __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
+#else
+ uint8_t *p = ptr;
+ p[0] = v;
+ p[1] = v >> 8;
+ p[2] = v >> 16;
+ p[3] = v >> 24;
+#endif
+}
+
+static inline void stq_le_p(void *ptr, uint64_t v)
+{
+ uint8_t *p = ptr;
+ stl_le_p(p, (uint32_t)v);
+ stl_le_p(p + 4, v >> 32);
+}
+
+/* float access */
+
+static inline float32 ldfl_le_p(const void *ptr)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.i = ldl_le_p(ptr);
+ return u.f;
+}
+
+static inline void stfl_le_p(void *ptr, float32 v)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.f = v;
+ stl_le_p(ptr, u.i);
+}
+
+static inline float64 ldfq_le_p(const void *ptr)
+{
+ CPU_DoubleU u;
+ u.l.lower = ldl_le_p(ptr);
+ u.l.upper = ldl_le_p(ptr + 4);
+ return u.d;
+}
+
+static inline void stfq_le_p(void *ptr, float64 v)
+{
+ CPU_DoubleU u;
+ u.d = v;
+ stl_le_p(ptr, u.l.lower);
+ stl_le_p(ptr + 4, u.l.upper);
+}
+
+#else
+
+static inline int lduw_le_p(const void *ptr)
+{
+ return *(uint16_t *)ptr;
+}
+
+static inline int ldsw_le_p(const void *ptr)
+{
+ return *(int16_t *)ptr;
+}
+
+static inline int ldl_le_p(const void *ptr)
+{
+ return *(uint32_t *)ptr;
+}
+
+static inline uint64_t ldq_le_p(const void *ptr)
+{
+ return *(uint64_t *)ptr;
+}
+
+static inline void stw_le_p(void *ptr, int v)
+{
+ *(uint16_t *)ptr = v;
+}
+
+static inline void stl_le_p(void *ptr, int v)
+{
+ *(uint32_t *)ptr = v;
+}
+
+static inline void stq_le_p(void *ptr, uint64_t v)
+{
+ *(uint64_t *)ptr = v;
+}
+
+/* float access */
+
+static inline float32 ldfl_le_p(const void *ptr)
+{
+ return *(float32 *)ptr;
+}
+
+static inline float64 ldfq_le_p(const void *ptr)
+{
+ return *(float64 *)ptr;
+}
+
+static inline void stfl_le_p(void *ptr, float32 v)
+{
+ *(float32 *)ptr = v;
+}
+
+static inline void stfq_le_p(void *ptr, float64 v)
+{
+ *(float64 *)ptr = v;
+}
+#endif
+
+#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
+
+static inline int lduw_be_p(const void *ptr)
+{
+#if defined(__i386__)
+ int val;
+ asm volatile ("movzwl %1, %0\n"
+ "xchgb %b0, %h0\n"
+ : "=q" (val)
+ : "m" (*(uint16_t *)ptr));
+ return val;
+#else
+ const uint8_t *b = ptr;
+ return ((b[0] << 8) | b[1]);
+#endif
+}
+
+static inline int ldsw_be_p(const void *ptr)
+{
+#if defined(__i386__)
+ int val;
+ asm volatile ("movzwl %1, %0\n"
+ "xchgb %b0, %h0\n"
+ : "=q" (val)
+ : "m" (*(uint16_t *)ptr));
+ return (int16_t)val;
+#else
+ const uint8_t *b = ptr;
+ return (int16_t)((b[0] << 8) | b[1]);
+#endif
+}
+
+static inline int ldl_be_p(const void *ptr)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ int val;
+ asm volatile ("movl %1, %0\n"
+ "bswap %0\n"
+ : "=r" (val)
+ : "m" (*(uint32_t *)ptr));
+ return val;
+#else
+ const uint8_t *b = ptr;
+ return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
+#endif
+}
+
+static inline uint64_t ldq_be_p(const void *ptr)
+{
+ uint32_t a,b;
+ a = ldl_be_p(ptr);
+ b = ldl_be_p((uint8_t *)ptr + 4);
+ return (((uint64_t)a<<32)|b);
+}
+
+static inline void stw_be_p(void *ptr, int v)
+{
+#if defined(__i386__)
+ asm volatile ("xchgb %b0, %h0\n"
+ "movw %w0, %1\n"
+ : "=q" (v)
+ : "m" (*(uint16_t *)ptr), "0" (v));
+#else
+ uint8_t *d = (uint8_t *) ptr;
+ d[0] = v >> 8;
+ d[1] = v;
+#endif
+}
+
+static inline void stl_be_p(void *ptr, int v)
+{
+#if defined(__i386__) || defined(__x86_64__)
+ asm volatile ("bswap %0\n"
+ "movl %0, %1\n"
+ : "=r" (v)
+ : "m" (*(uint32_t *)ptr), "0" (v));
+#else
+ uint8_t *d = (uint8_t *) ptr;
+ d[0] = v >> 24;
+ d[1] = v >> 16;
+ d[2] = v >> 8;
+ d[3] = v;
+#endif
+}
+
+static inline void stq_be_p(void *ptr, uint64_t v)
+{
+ stl_be_p(ptr, v >> 32);
+ stl_be_p((uint8_t *)ptr + 4, v);
+}
+
+/* float access */
+
+static inline float32 ldfl_be_p(const void *ptr)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.i = ldl_be_p(ptr);
+ return u.f;
+}
+
+static inline void stfl_be_p(void *ptr, float32 v)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.f = v;
+ stl_be_p(ptr, u.i);
+}
+
+static inline float64 ldfq_be_p(const void *ptr)
+{
+ CPU_DoubleU u;
+ u.l.upper = ldl_be_p(ptr);
+ u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
+ return u.d;
+}
+
+static inline void stfq_be_p(void *ptr, float64 v)
+{
+ CPU_DoubleU u;
+ u.d = v;
+ stl_be_p(ptr, u.l.upper);
+ stl_be_p((uint8_t *)ptr + 4, u.l.lower);
+}
+
+#else
+
+static inline int lduw_be_p(const void *ptr)
+{
+ return *(uint16_t *)ptr;
+}
+
+static inline int ldsw_be_p(const void *ptr)
+{
+ return *(int16_t *)ptr;
+}
+
+static inline int ldl_be_p(const void *ptr)
+{
+ return *(uint32_t *)ptr;
+}
+
+static inline uint64_t ldq_be_p(const void *ptr)
+{
+ return *(uint64_t *)ptr;
+}
+
+static inline void stw_be_p(void *ptr, int v)
+{
+ *(uint16_t *)ptr = v;
+}
+
+static inline void stl_be_p(void *ptr, int v)
+{
+ *(uint32_t *)ptr = v;
+}
+
+static inline void stq_be_p(void *ptr, uint64_t v)
+{
+ *(uint64_t *)ptr = v;
+}
+
+/* float access */
+
+static inline float32 ldfl_be_p(const void *ptr)
+{
+ return *(float32 *)ptr;
+}
+
+static inline float64 ldfq_be_p(const void *ptr)
+{
+ return *(float64 *)ptr;
+}
+
+static inline void stfl_be_p(void *ptr, float32 v)
+{
+ *(float32 *)ptr = v;
+}
+
+static inline void stfq_be_p(void *ptr, float64 v)
+{
+ *(float64 *)ptr = v;
+}
+
+#endif
+
+#endif /* BSWAP_H */
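
A minimal sketch of the conversion and unaligned-access helpers above, which behaves the same on either host endianness (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>
    #include "qemu/bswap.h"

    static void bswap_demo(void)
    {
        uint8_t wire[4];

        stl_be_p(wire, 0x12345678);            /* store 32 bits big-endian */
        assert(wire[0] == 0x12 && wire[3] == 0x78);
        assert(ldl_be_p(wire) == 0x12345678);  /* and load them back */

        uint32_t le = cpu_to_le32(0xcafe);     /* host order -> little-endian */
        assert(le32_to_cpu(le) == 0xcafe);     /* round-trips on any host */
    }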
diff --git a/include/qemu/cache-utils.h b/include/qemu/cache-utils.h
new file mode 100644
index 0000000000..2c57f78fc1
--- /dev/null
+++ b/include/qemu/cache-utils.h
@@ -0,0 +1,44 @@
+#ifndef QEMU_CACHE_UTILS_H
+#define QEMU_CACHE_UTILS_H
+
+#if defined(_ARCH_PPC)
+
+#include <stdint.h> /* uintptr_t */
+
+struct qemu_cache_conf {
+ unsigned long dcache_bsize;
+ unsigned long icache_bsize;
+};
+
+extern struct qemu_cache_conf qemu_cache_conf;
+
+void qemu_cache_utils_init(char **envp);
+
+/* mildly adjusted code from tcg-dyngen.c */
+static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
+{
+ unsigned long p, start1, stop1;
+ unsigned long dsize = qemu_cache_conf.dcache_bsize;
+ unsigned long isize = qemu_cache_conf.icache_bsize;
+
+ start1 = start & ~(dsize - 1);
+ stop1 = (stop + dsize - 1) & ~(dsize - 1);
+ for (p = start1; p < stop1; p += dsize) {
+ asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
+ }
+ asm volatile ("sync" : : : "memory");
+
+    start1 = start & ~(isize - 1);
+ stop1 = (stop + isize - 1) & ~(isize - 1);
+ for (p = start1; p < stop1; p += isize) {
+ asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
+ }
+ asm volatile ("sync" : : : "memory");
+ asm volatile ("isync" : : : "memory");
+}
+
+#else
+#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
+#endif
+
+#endif /* QEMU_CACHE_UTILS_H */
diff --git a/include/qemu/compatfd.h b/include/qemu/compatfd.h
new file mode 100644
index 0000000000..6b04877b97
--- /dev/null
+++ b/include/qemu/compatfd.h
@@ -0,0 +1,44 @@
+/*
+ * signalfd/eventfd compatibility
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_COMPATFD_H
+#define QEMU_COMPATFD_H
+
+#include <signal.h>
+
+struct qemu_signalfd_siginfo {
+ uint32_t ssi_signo; /* Signal number */
+ int32_t ssi_errno; /* Error number (unused) */
+ int32_t ssi_code; /* Signal code */
+ uint32_t ssi_pid; /* PID of sender */
+ uint32_t ssi_uid; /* Real UID of sender */
+ int32_t ssi_fd; /* File descriptor (SIGIO) */
+ uint32_t ssi_tid; /* Kernel timer ID (POSIX timers) */
+ uint32_t ssi_band; /* Band event (SIGIO) */
+ uint32_t ssi_overrun; /* POSIX timer overrun count */
+ uint32_t ssi_trapno; /* Trap number that caused signal */
+ int32_t ssi_status; /* Exit status or signal (SIGCHLD) */
+ int32_t ssi_int; /* Integer sent by sigqueue(2) */
+ uint64_t ssi_ptr; /* Pointer sent by sigqueue(2) */
+ uint64_t ssi_utime; /* User CPU time consumed (SIGCHLD) */
+ uint64_t ssi_stime; /* System CPU time consumed (SIGCHLD) */
+ uint64_t ssi_addr; /* Address that generated signal
+ (for hardware-generated signals) */
+ uint8_t pad[48]; /* Pad size to 128 bytes (allow for
+ additional fields in the future) */
+};
+
+int qemu_signalfd(const sigset_t *mask);
+bool qemu_signalfd_available(void);
+
+#endif
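
A usage sketch for the signalfd compatibility wrapper above (illustration only, not part of the patch; the chosen signal is arbitrary, and the fixed-width types are assumed to be available via the usual QEMU includes). The signal must be blocked before the descriptor is created, and the descriptor then yields struct qemu_signalfd_siginfo records:

    #include <signal.h>
    #include <stdint.h>
    #include <unistd.h>
    #include "qemu/compatfd.h"

    static int open_sigusr1_fd(void)
    {
        sigset_t mask;

        sigemptyset(&mask);
        sigaddset(&mask, SIGUSR1);
        sigprocmask(SIG_BLOCK, &mask, NULL);   /* deliver via the fd, not a handler */

        return qemu_signalfd(&mask);           /* falls back when signalfd(2) is absent */
    }

    static void drain(int fd)
    {
        struct qemu_signalfd_siginfo info;

        while (read(fd, &info, sizeof(info)) == sizeof(info)) {
            /* info.ssi_signo identifies the delivered signal */
        }
    }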
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
new file mode 100644
index 0000000000..2f7998b6c1
--- /dev/null
+++ b/include/qemu/compiler.h
@@ -0,0 +1,58 @@
+/* public domain */
+
+#ifndef COMPILER_H
+#define COMPILER_H
+
+#include "config-host.h"
+
+/*----------------------------------------------------------------------------
+| The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler.
+| The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h.
+*----------------------------------------------------------------------------*/
+#if defined(__GNUC__) && defined(__GNUC_MINOR__)
+# define QEMU_GNUC_PREREQ(maj, min) \
+ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#else
+# define QEMU_GNUC_PREREQ(maj, min) 0
+#endif
+
+#define QEMU_NORETURN __attribute__ ((__noreturn__))
+
+#if QEMU_GNUC_PREREQ(3, 4)
+#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define QEMU_WARN_UNUSED_RESULT
+#endif
+
+#if defined(_WIN32)
+# define QEMU_PACKED __attribute__((gcc_struct, packed))
+#else
+# define QEMU_PACKED __attribute__((packed))
+#endif
+
+#define cat(x,y) x ## y
+#define cat2(x,y) cat(x,y)
+#define QEMU_BUILD_BUG_ON(x) \
+ typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1];
+
+#if defined __GNUC__
+# if !QEMU_GNUC_PREREQ(4, 4)
+ /* gcc versions before 4.4.x don't support gnu_printf, so use printf. */
+# define GCC_ATTR __attribute__((__unused__, format(printf, 1, 2)))
+# define GCC_FMT_ATTR(n, m) __attribute__((format(printf, n, m)))
+# else
+ /* Use gnu_printf when supported (qemu uses standard format strings). */
+# define GCC_ATTR __attribute__((__unused__, format(gnu_printf, 1, 2)))
+# define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m)))
+# if defined(_WIN32)
+ /* Map __printf__ to __gnu_printf__ because we want standard format strings
+ * even when MinGW or GLib include files use __printf__. */
+# define __printf__ __gnu_printf__
+# endif
+# endif
+#else
+#define GCC_ATTR /**/
+#define GCC_FMT_ATTR(n, m)
+#endif
+
+#endif /* COMPILER_H */
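
Two common uses of the macros above, sketched for illustration (not part of the patch; the struct and function are hypothetical):

    #include <stdint.h>
    #include "qemu/compiler.h"

    struct reply {
        char payload[48];
        uint64_t seq;
    };

    /* Compile-time assertion: the macro expands to a typedef whose array
     * size is negative, and thus an error, if the condition is true. */
    QEMU_BUILD_BUG_ON(sizeof(struct reply) > 64)

    /* Let the compiler type-check the format string against the arguments. */
    void trace_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2);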
diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h
new file mode 100644
index 0000000000..486c77cad4
--- /dev/null
+++ b/include/qemu/config-file.h
@@ -0,0 +1,30 @@
+#ifndef QEMU_CONFIG_H
+#define QEMU_CONFIG_H
+
+#include <stdio.h>
+#include "qemu/option.h"
+#include "qapi/error.h"
+
+extern QemuOptsList qemu_fsdev_opts;
+extern QemuOptsList qemu_virtfs_opts;
+extern QemuOptsList qemu_spice_opts;
+extern QemuOptsList qemu_sandbox_opts;
+
+QemuOptsList *qemu_find_opts(const char *group);
+QemuOptsList *qemu_find_opts_err(const char *group, Error **errp);
+void qemu_add_opts(QemuOptsList *list);
+int qemu_set_option(const char *str);
+int qemu_global_option(const char *str);
+void qemu_add_globals(void);
+
+void qemu_config_write(FILE *fp);
+int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname);
+
+int qemu_read_config_file(const char *filename);
+
+/* Read default QEMU config files
+ */
+int qemu_read_default_config_files(bool userconfig);
+
+#endif /* QEMU_CONFIG_H */
diff --git a/include/qemu/envlist.h b/include/qemu/envlist.h
new file mode 100644
index 0000000000..b9addcc11f
--- /dev/null
+++ b/include/qemu/envlist.h
@@ -0,0 +1,22 @@
+#ifndef ENVLIST_H
+#define ENVLIST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct envlist envlist_t;
+
+envlist_t *envlist_create(void);
+void envlist_free(envlist_t *);
+int envlist_setenv(envlist_t *, const char *);
+int envlist_unsetenv(envlist_t *, const char *);
+int envlist_parse_set(envlist_t *, const char *);
+int envlist_parse_unset(envlist_t *, const char *);
+char **envlist_to_environ(const envlist_t *, size_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ENVLIST_H */
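
A sketch of how the envlist helpers above are typically used to build an environ-style array (illustration only, not part of the patch; entries are assumed to take the NAME=VALUE form, and the values here are arbitrary):

    #include "qemu/envlist.h"

    static char **build_child_environ(void)
    {
        envlist_t *envlist = envlist_create();
        size_t count;
        char **envp;

        envlist_setenv(envlist, "LANG=C");       /* add or replace LANG */
        envlist_unsetenv(envlist, "DISPLAY");    /* drop DISPLAY if present */

        envp = envlist_to_environ(envlist, &count);  /* NULL-terminated copy */
        envlist_free(envlist);
        return envp;
    }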
diff --git a/include/qemu/error-report.h b/include/qemu/error-report.h
new file mode 100644
index 0000000000..c902cc10de
--- /dev/null
+++ b/include/qemu/error-report.h
@@ -0,0 +1,43 @@
+/*
+ * Error reporting
+ *
+ * Copyright (C) 2010 Red Hat Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_ERROR_H
+#define QEMU_ERROR_H
+
+#include <stdarg.h>
+
+typedef struct Location {
+ /* all members are private to qemu-error.c */
+ enum { LOC_NONE, LOC_CMDLINE, LOC_FILE } kind;
+ int num;
+ const void *ptr;
+ struct Location *prev;
+} Location;
+
+Location *loc_push_restore(Location *loc);
+Location *loc_push_none(Location *loc);
+Location *loc_pop(Location *loc);
+Location *loc_save(Location *loc);
+void loc_restore(Location *loc);
+void loc_set_none(void);
+void loc_set_cmdline(char **argv, int idx, int cnt);
+void loc_set_file(const char *fname, int lno);
+
+void error_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
+void error_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+void error_printf_unless_qmp(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+void error_print_loc(void);
+void error_set_progname(const char *argv0);
+void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+const char *error_get_progname(void);
+
+#endif
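
A short sketch of how the location stack and reporting functions above fit together when parsing a file (illustration only, not part of the patch; qemu/compiler.h is included for GCC_FMT_ATTR, and the file name and message are made up):

    #include "qemu/compiler.h"
    #include "qemu/error-report.h"

    static void parse_line(const char *fname, int lineno, const char *line)
    {
        Location loc;

        loc_push_none(&loc);               /* save the current location */
        loc_set_file(fname, lineno);       /* reports now carry file:line context */

        if (line == NULL || line[0] == '\0') {
            error_report("empty directive");   /* printf-style message */
        }

        loc_pop(&loc);                     /* restore the previous location */
    }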
diff --git a/include/qemu/event_notifier.h b/include/qemu/event_notifier.h
new file mode 100644
index 0000000000..88b57af7ce
--- /dev/null
+++ b/include/qemu/event_notifier.h
@@ -0,0 +1,46 @@
+/*
+ * event notifier support
+ *
+ * Copyright Red Hat, Inc. 2010
+ *
+ * Authors:
+ * Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_EVENT_NOTIFIER_H
+#define QEMU_EVENT_NOTIFIER_H
+
+#include "qemu-common.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+struct EventNotifier {
+#ifdef _WIN32
+ HANDLE event;
+#else
+ int rfd;
+ int wfd;
+#endif
+};
+
+typedef void EventNotifierHandler(EventNotifier *);
+
+int event_notifier_init(EventNotifier *, int active);
+void event_notifier_cleanup(EventNotifier *);
+int event_notifier_set(EventNotifier *);
+int event_notifier_test_and_clear(EventNotifier *);
+int event_notifier_set_handler(EventNotifier *, EventNotifierHandler *);
+
+#ifdef CONFIG_POSIX
+void event_notifier_init_fd(EventNotifier *, int fd);
+int event_notifier_get_fd(EventNotifier *);
+#else
+HANDLE event_notifier_get_handle(EventNotifier *);
+#endif
+
+#endif
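
A minimal sketch wiring an EventNotifier to a handler (illustration only, not part of the patch; the handler body is hypothetical):

    #include "qemu/event_notifier.h"

    static EventNotifier notifier;

    static void notifier_ready(EventNotifier *e)
    {
        if (event_notifier_test_and_clear(e)) {
            /* consume the wakeup here */
        }
    }

    static void notifier_setup(void)
    {
        event_notifier_init(&notifier, 0);               /* start unsignalled */
        event_notifier_set_handler(&notifier, notifier_ready);
    }

    /* Any thread can then wake the handler: */
    static void kick(void)
    {
        event_notifier_set(&notifier);
    }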
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
new file mode 100644
index 0000000000..81c9a754ae
--- /dev/null
+++ b/include/qemu/host-utils.h
@@ -0,0 +1,240 @@
+/*
+ * Utility compute operations used by translated code.
+ *
+ * Copyright (c) 2007 Thiemo Seufer
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef HOST_UTILS_H
+#define HOST_UTILS_H 1
+
+#include "qemu/compiler.h" /* QEMU_GNUC_PREREQ */
+
+#if defined(__x86_64__)
+#define __HAVE_FAST_MULU64__
+static inline void mulu64(uint64_t *plow, uint64_t *phigh,
+ uint64_t a, uint64_t b)
+{
+ __asm__ ("mul %0\n\t"
+ : "=d" (*phigh), "=a" (*plow)
+ : "a" (a), "0" (b));
+}
+#define __HAVE_FAST_MULS64__
+static inline void muls64(uint64_t *plow, uint64_t *phigh,
+ int64_t a, int64_t b)
+{
+ __asm__ ("imul %0\n\t"
+ : "=d" (*phigh), "=a" (*plow)
+ : "a" (a), "0" (b));
+}
+#else
+void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
+void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
+#endif
+
+/* Binary search for leading zeros. */
+
+static inline int clz32(uint32_t val)
+{
+#if QEMU_GNUC_PREREQ(3, 4)
+ if (val)
+ return __builtin_clz(val);
+ else
+ return 32;
+#else
+ int cnt = 0;
+
+ if (!(val & 0xFFFF0000U)) {
+ cnt += 16;
+ val <<= 16;
+ }
+ if (!(val & 0xFF000000U)) {
+ cnt += 8;
+ val <<= 8;
+ }
+ if (!(val & 0xF0000000U)) {
+ cnt += 4;
+ val <<= 4;
+ }
+ if (!(val & 0xC0000000U)) {
+ cnt += 2;
+ val <<= 2;
+ }
+ if (!(val & 0x80000000U)) {
+ cnt++;
+ val <<= 1;
+ }
+ if (!(val & 0x80000000U)) {
+ cnt++;
+ }
+ return cnt;
+#endif
+}
+
+static inline int clo32(uint32_t val)
+{
+ return clz32(~val);
+}
+
+static inline int clz64(uint64_t val)
+{
+#if QEMU_GNUC_PREREQ(3, 4)
+ if (val)
+ return __builtin_clzll(val);
+ else
+ return 64;
+#else
+ int cnt = 0;
+
+ if (!(val >> 32)) {
+ cnt += 32;
+ } else {
+ val >>= 32;
+ }
+
+ return cnt + clz32(val);
+#endif
+}
+
+static inline int clo64(uint64_t val)
+{
+ return clz64(~val);
+}
+
+static inline int ctz32(uint32_t val)
+{
+#if QEMU_GNUC_PREREQ(3, 4)
+ if (val)
+ return __builtin_ctz(val);
+ else
+ return 32;
+#else
+ int cnt;
+
+ cnt = 0;
+ if (!(val & 0x0000FFFFUL)) {
+ cnt += 16;
+ val >>= 16;
+ }
+ if (!(val & 0x000000FFUL)) {
+ cnt += 8;
+ val >>= 8;
+ }
+ if (!(val & 0x0000000FUL)) {
+ cnt += 4;
+ val >>= 4;
+ }
+ if (!(val & 0x00000003UL)) {
+ cnt += 2;
+ val >>= 2;
+ }
+ if (!(val & 0x00000001UL)) {
+ cnt++;
+ val >>= 1;
+ }
+ if (!(val & 0x00000001UL)) {
+ cnt++;
+ }
+
+ return cnt;
+#endif
+}
+
+static inline int cto32(uint32_t val)
+{
+ return ctz32(~val);
+}
+
+static inline int ctz64(uint64_t val)
+{
+#if QEMU_GNUC_PREREQ(3, 4)
+ if (val)
+ return __builtin_ctzll(val);
+ else
+ return 64;
+#else
+ int cnt;
+
+ cnt = 0;
+ if (!((uint32_t)val)) {
+ cnt += 32;
+ val >>= 32;
+ }
+
+ return cnt + ctz32(val);
+#endif
+}
+
+static inline int cto64(uint64_t val)
+{
+ return ctz64(~val);
+}
+
+static inline int ctpop8(uint8_t val)
+{
+ val = (val & 0x55) + ((val >> 1) & 0x55);
+ val = (val & 0x33) + ((val >> 2) & 0x33);
+ val = (val & 0x0f) + ((val >> 4) & 0x0f);
+
+ return val;
+}
+
+static inline int ctpop16(uint16_t val)
+{
+ val = (val & 0x5555) + ((val >> 1) & 0x5555);
+ val = (val & 0x3333) + ((val >> 2) & 0x3333);
+ val = (val & 0x0f0f) + ((val >> 4) & 0x0f0f);
+ val = (val & 0x00ff) + ((val >> 8) & 0x00ff);
+
+ return val;
+}
+
+static inline int ctpop32(uint32_t val)
+{
+#if QEMU_GNUC_PREREQ(3, 4)
+ return __builtin_popcount(val);
+#else
+ val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
+ val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
+ val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
+ val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
+ val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
+
+ return val;
+#endif
+}
+
+static inline int ctpop64(uint64_t val)
+{
+#if QEMU_GNUC_PREREQ(3, 4)
+ return __builtin_popcountll(val);
+#else
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
+ val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
+ val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & 0x00ff00ff00ff00ffULL);
+ val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & 0x0000ffff0000ffffULL);
+ val = (val & 0x00000000ffffffffULL) + ((val >> 32) & 0x00000000ffffffffULL);
+
+ return val;
+#endif
+}
+
+#endif
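
A few sanity-check examples for the count/population helpers and the 128-bit multiply above (illustration only, not part of the patch; the mulu64() argument order follows the inline x86-64 definitions, low word first):

    #include <assert.h>
    #include <stdint.h>
    #include "qemu/host-utils.h"

    static void host_utils_demo(void)
    {
        assert(clz32(0x00010000) == 15);    /* 15 leading zeros before bit 16 */
        assert(ctz32(0x00010000) == 16);    /* 16 trailing zeros */
        assert(ctpop32(0xf0f0) == 8);       /* 8 bits set */

        uint64_t lo, hi;
        mulu64(&lo, &hi, UINT64_MAX, 2);    /* full 128-bit product */
        assert(hi == 1 && lo == UINT64_MAX - 1);
    }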
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
new file mode 100644
index 0000000000..b3864b6cd4
--- /dev/null
+++ b/include/qemu/int128.h
@@ -0,0 +1,116 @@
+#ifndef INT128_H
+#define INT128_H
+
+typedef struct Int128 Int128;
+
+struct Int128 {
+ uint64_t lo;
+ int64_t hi;
+};
+
+static inline Int128 int128_make64(uint64_t a)
+{
+ return (Int128) { a, 0 };
+}
+
+static inline uint64_t int128_get64(Int128 a)
+{
+ assert(!a.hi);
+ return a.lo;
+}
+
+static inline Int128 int128_zero(void)
+{
+ return int128_make64(0);
+}
+
+static inline Int128 int128_one(void)
+{
+ return int128_make64(1);
+}
+
+static inline Int128 int128_2_64(void)
+{
+ return (Int128) { 0, 1 };
+}
+
+static inline Int128 int128_add(Int128 a, Int128 b)
+{
+ Int128 r = { a.lo + b.lo, a.hi + b.hi };
+ r.hi += (r.lo < a.lo) || (r.lo < b.lo);
+ return r;
+}
+
+static inline Int128 int128_neg(Int128 a)
+{
+ a.lo = ~a.lo;
+ a.hi = ~a.hi;
+ return int128_add(a, int128_one());
+}
+
+static inline Int128 int128_sub(Int128 a, Int128 b)
+{
+ return int128_add(a, int128_neg(b));
+}
+
+static inline bool int128_nonneg(Int128 a)
+{
+ return a.hi >= 0;
+}
+
+static inline bool int128_eq(Int128 a, Int128 b)
+{
+ return a.lo == b.lo && a.hi == b.hi;
+}
+
+static inline bool int128_ne(Int128 a, Int128 b)
+{
+ return !int128_eq(a, b);
+}
+
+static inline bool int128_ge(Int128 a, Int128 b)
+{
+ return int128_nonneg(int128_sub(a, b));
+}
+
+static inline bool int128_lt(Int128 a, Int128 b)
+{
+ return !int128_ge(a, b);
+}
+
+static inline bool int128_le(Int128 a, Int128 b)
+{
+ return int128_ge(b, a);
+}
+
+static inline bool int128_gt(Int128 a, Int128 b)
+{
+ return !int128_le(a, b);
+}
+
+static inline bool int128_nz(Int128 a)
+{
+ return a.lo || a.hi;
+}
+
+static inline Int128 int128_min(Int128 a, Int128 b)
+{
+ return int128_le(a, b) ? a : b;
+}
+
+static inline Int128 int128_max(Int128 a, Int128 b)
+{
+ return int128_ge(a, b) ? a : b;
+}
+
+static inline void int128_addto(Int128 *a, Int128 b)
+{
+ *a = int128_add(*a, b);
+}
+
+static inline void int128_subfrom(Int128 *a, Int128 b)
+{
+ *a = int128_sub(*a, b);
+}
+
+#endif
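
A small sketch of Int128 arithmetic (illustration only, not part of the patch; the header relies on assert() and the fixed-width types already being available, so they are included here explicitly):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include "qemu/int128.h"

    static void int128_demo(void)
    {
        Int128 a = int128_make64(UINT64_MAX);
        Int128 b = int128_add(a, int128_one());    /* carries into the high word */

        assert(int128_eq(b, int128_2_64()));       /* b == 2^64 */
        assert(int128_lt(a, b));

        int128_subfrom(&b, int128_one());          /* b -= 1 */
        assert(int128_eq(a, b));
    }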
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
new file mode 100644
index 0000000000..68d25f29b7
--- /dev/null
+++ b/include/qemu/iov.h
@@ -0,0 +1,115 @@
+/*
+ * Helpers for using (partial) iovecs.
+ *
+ * Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Author(s):
+ * Amit Shah <amit.shah@redhat.com>
+ * Michael Tokarev <mjt@tls.msk.ru>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef IOV_H
+#define IOV_H
+
+#include "qemu-common.h"
+
+/**
+ * Count and return the total data size, in bytes, of the iovec
+ * `iov' with `iov_cnt' elements.
+ */
+size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt);
+
+/**
+ * Copy between a single contiguous buffer and a scatter-gather vector of
+ * buffers (iovec), like memcpy() between two contiguous memory regions.
+ * Data in the contiguous buffer starting at address `buf', `bytes' bytes
+ * long, is copied to/from the iovec `iov' of `iov_cnt' elements, starting
+ * at byte position `offset' within the iovec. If the iovec does not
+ * contain enough space, only part of the data is copied, up to the end of
+ * the iovec. The number of bytes actually copied is returned, which is
+ * min(bytes, iov_size(iov) - offset).
+ * `offset' must point inside the iovec.
+ * It is okay to use a very large value for `bytes' since we are limited
+ * by the size of the iovec anyway, provided the buffer pointed to by
+ * `buf' has enough space. One such "large" value is -1 (since size_t is
+ * unsigned), so specifying -1 as `bytes' means "up to the end of the iovec".
+ */
+size_t iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, const void *buf, size_t bytes);
+size_t iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
+ size_t offset, void *buf, size_t bytes);
+
+/**
+ * Set the data bytes pointed to by iovec `iov' of `iov_cnt' elements,
+ * starting at byte offset `offset', to the value `fillc', repeating it
+ * `bytes' times. `offset' must point inside the iovec.
+ * If `bytes' is large enough, only the portion of the iovec from `offset'
+ * up to its end is filled with the specified value.
+ * The function returns the actual number of bytes processed, which is
+ * min(bytes, iov_size(iov) - offset).
+ * Again, it is okay to use a large value for `bytes' to mean "up to the end".
+ */
+size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
+ size_t offset, int fillc, size_t bytes);
+
+/*
+ * Send/recv data from/to iovec buffers directly
+ *
+ * `offset' bytes in the beginning of iovec buffer are skipped and
+ * next `bytes' bytes are used, which must be within data of iovec.
+ *
+ * r = iov_send_recv(sockfd, iov, iovcnt, offset, bytes, true);
+ *
+ * is logically equivalent to
+ *
+ * char *buf = malloc(bytes);
+ * iov_to_buf(iov, iovcnt, offset, buf, bytes);
+ * r = send(sockfd, buf, bytes, 0);
+ * free(buf);
+ *
+ * For iov_send_recv() _whole_ area being sent or received
+ * should be within the iovec, not only beginning of it.
+ */
+ssize_t iov_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt,
+ size_t offset, size_t bytes, bool do_send);
+#define iov_recv(sockfd, iov, iov_cnt, offset, bytes) \
+ iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, false)
+#define iov_send(sockfd, iov, iov_cnt, offset, bytes) \
+ iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, true)
+
+/**
+ * Produce a text hexdump of iovec `iov' with `iov_cnt' number of elements
+ * in file `fp', prefixing each line with `prefix' and processing not more
+ * than `limit' data bytes.
+ */
+void iov_hexdump(const struct iovec *iov, const unsigned int iov_cnt,
+ FILE *fp, const char *prefix, size_t limit);
+
+/*
+ * Partial copy of a vector from iov to dst_iov (the data itself is not
+ * copied). dst_iov overlaps iov at the given byte offset and covers at
+ * most `bytes' bytes. The number of elements filled in dst_iov is returned.
+ */
+unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt,
+ const struct iovec *iov, unsigned int iov_cnt,
+ size_t offset, size_t bytes);
+
+/*
+ * Remove a given number of bytes from the front or back of a vector.
+ * This may update iov and/or iov_cnt to exclude iovec elements that are
+ * no longer required.
+ *
+ * The number of bytes actually discarded is returned. This number may be
+ * smaller than requested if the vector is too small.
+ */
+size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
+ size_t bytes);
+size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
+ size_t bytes);
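+
+/*
+ * Illustrative sketch (hypothetical `hdr_len' known to the caller):
+ * stripping an already-consumed header from the front of a vector before
+ * handing the remainder to another consumer.
+ *
+ *   struct iovec *cur_iov = iov;
+ *   unsigned int cur_cnt = iov_cnt;
+ *   size_t stripped = iov_discard_front(&cur_iov, &cur_cnt, hdr_len);
+ *   if (stripped < hdr_len) {
+ *       ... the vector was shorter than the header ...
+ *   }
+ */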
+
+#endif
diff --git a/include/qemu/log.h b/include/qemu/log.h
new file mode 100644
index 0000000000..58f69cb494
--- /dev/null
+++ b/include/qemu/log.h
@@ -0,0 +1,160 @@
+#ifndef QEMU_LOG_H
+#define QEMU_LOG_H
+
+#include <stdarg.h>
+#ifdef NEED_CPU_H
+#include "disas/disas.h"
+#endif
+
+/* Private global variables, don't use */
+extern FILE *qemu_logfile;
+extern int qemu_loglevel;
+
+/*
+ * The new API:
+ *
+ */
+
+/* Log settings checking macros: */
+
+/* Returns true if qemu_log() will really write somewhere
+ */
+static inline bool qemu_log_enabled(void)
+{
+ return qemu_logfile != NULL;
+}
+
+#define CPU_LOG_TB_OUT_ASM (1 << 0)
+#define CPU_LOG_TB_IN_ASM (1 << 1)
+#define CPU_LOG_TB_OP (1 << 2)
+#define CPU_LOG_TB_OP_OPT (1 << 3)
+#define CPU_LOG_INT (1 << 4)
+#define CPU_LOG_EXEC (1 << 5)
+#define CPU_LOG_PCALL (1 << 6)
+#define CPU_LOG_IOPORT (1 << 7)
+#define CPU_LOG_TB_CPU (1 << 8)
+#define CPU_LOG_RESET (1 << 9)
+#define LOG_UNIMP (1 << 10)
+#define LOG_GUEST_ERROR (1 << 11)
+
+/* Returns true if a bit is set in the current loglevel mask
+ */
+static inline bool qemu_loglevel_mask(int mask)
+{
+ return (qemu_loglevel & mask) != 0;
+}
+
+/* Logging functions: */
+
+/* main logging function
+ */
+void GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...);
+
+/* vfprintf-like logging function
+ */
+static inline void GCC_FMT_ATTR(1, 0)
+qemu_log_vprintf(const char *fmt, va_list va)
+{
+ if (qemu_logfile) {
+ vfprintf(qemu_logfile, fmt, va);
+ }
+}
+
+/* log only if a bit is set on the current loglevel mask
+ */
+void GCC_FMT_ATTR(2, 3) qemu_log_mask(int mask, const char *fmt, ...);
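+
+/*
+ * Typical usage sketch: report a guest programming error only when the
+ * corresponding log category has been enabled (register name and offset
+ * are illustrative).
+ *
+ *   qemu_log_mask(LOG_GUEST_ERROR,
+ *                 "%s: bad register offset 0x%x\n", __func__, offset);
+ */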
+
+
+/* Special cases: */
+
+#ifdef NEED_CPU_H
+/* cpu_dump_state() logging functions: */
+static inline void log_cpu_state(CPUArchState *env1, int flags)
+{
+ if (qemu_log_enabled()) {
+ cpu_dump_state(env1, qemu_logfile, fprintf, flags);
+ }
+}
+
+static inline void log_cpu_state_mask(int mask, CPUArchState *env1, int flags)
+{
+ if (qemu_loglevel & mask) {
+ log_cpu_state(env1, flags);
+ }
+}
+
+/* disas() and target_disas() to qemu_logfile: */
+static inline void log_target_disas(CPUArchState *env, target_ulong start,
+ target_ulong len, int flags)
+{
+ target_disas(qemu_logfile, env, start, len, flags);
+}
+
+static inline void log_disas(void *code, unsigned long size)
+{
+ disas(qemu_logfile, code, size);
+}
+
+#if defined(CONFIG_USER_ONLY)
+/* page_dump() output to the log file: */
+static inline void log_page_dump(void)
+{
+ page_dump(qemu_logfile);
+}
+#endif
+#endif
+
+
+/* Maintenance: */
+
+/* fflush() the log file */
+static inline void qemu_log_flush(void)
+{
+ fflush(qemu_logfile);
+}
+
+/* Close the log file */
+static inline void qemu_log_close(void)
+{
+ fclose(qemu_logfile);
+ qemu_logfile = NULL;
+}
+
+/* Set up a new log file */
+static inline void qemu_log_set_file(FILE *f)
+{
+ qemu_logfile = f;
+}
+
+/* Set up a new log file, only if none is set */
+static inline void qemu_log_try_set_file(FILE *f)
+{
+ if (!qemu_logfile) {
+ qemu_logfile = f;
+ }
+}
+
+/* define log items */
+typedef struct CPULogItem {
+ int mask;
+ const char *name;
+ const char *help;
+} CPULogItem;
+
+extern const CPULogItem cpu_log_items[];
+
+void qemu_set_log(int log_flags, bool use_own_buffers);
+
+static inline void cpu_set_log(int log_flags)
+{
+#ifdef CONFIG_USER_ONLY
+ qemu_set_log(log_flags, true);
+#else
+ qemu_set_log(log_flags, false);
+#endif
+}
+
+void cpu_set_log_filename(const char *filename);
+int cpu_str_to_log_mask(const char *str);
+
+#endif
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
new file mode 100644
index 0000000000..e8059c3d0a
--- /dev/null
+++ b/include/qemu/main-loop.h
@@ -0,0 +1,306 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_MAIN_LOOP_H
+#define QEMU_MAIN_LOOP_H 1
+
+#include "block/aio.h"
+
+#define SIG_IPI SIGUSR1
+
+/**
+ * qemu_init_main_loop: Set up the process so that it can run the main loop.
+ *
+ * This includes setting up signal handlers. It should be called before
+ * any other threads are created. In addition, threads other than the
+ * main one should block signals that are trapped by the main loop.
+ * For simplicity, you can consider these signals to be safe: SIGUSR1,
+ * SIGUSR2, thread signals (SIGFPE, SIGILL, SIGSEGV, SIGBUS) and real-time
+ * signals if available. Remember that Windows in practice does not have
+ * signals, though.
+ *
+ * In the case of QEMU tools, this will also start/initialize timers.
+ */
+int qemu_init_main_loop(void);
+
+/**
+ * main_loop_wait: Run one iteration of the main loop.
+ *
+ * If @nonblocking is true, poll for events, otherwise suspend until
+ * one actually occurs. The main loop usually consists of a loop that
+ * repeatedly calls main_loop_wait(false).
+ *
+ * Main loop services include file descriptor callbacks, bottom halves
+ * and timers (defined in qemu-timer.h). Bottom halves are similar to timers
+ * that execute immediately, but have a lower overhead and scheduling them
+ * is wait-free, thread-safe and signal-safe.
+ *
+ * It is sometimes useful to put a whole program in a coroutine. In this
+ * case, the coroutine actually should be started from within the main loop,
+ * so that the main loop can run whenever the coroutine yields. To do this,
+ * you can use a bottom half to enter the coroutine as soon as the main loop
+ * starts:
+ *
+ * void enter_co_bh(void *opaque) {
+ * QEMUCoroutine *co = opaque;
+ * qemu_coroutine_enter(co, NULL);
+ * }
+ *
+ * ...
+ * QEMUCoroutine *co = qemu_coroutine_create(coroutine_entry);
+ * QEMUBH *start_bh = qemu_bh_new(enter_co_bh, co);
+ * qemu_bh_schedule(start_bh);
+ * while (...) {
+ * main_loop_wait(false);
+ * }
+ *
+ * (In the future we may provide a wrapper for this).
+ *
+ * @nonblocking: Whether the call should poll for pending events instead
+ * of blocking until one occurs.
+ */
+int main_loop_wait(int nonblocking);
+
+/**
+ * qemu_notify_event: Force processing of pending events.
+ *
+ * Similar to signaling a condition variable, qemu_notify_event forces
+ * main_loop_wait to look at pending events and exit. The caller of
+ * main_loop_wait will usually call it again very soon, so qemu_notify_event
+ * also has the side effect of recalculating the sets of file descriptors
+ * that the main loop waits for.
+ *
+ * Calling qemu_notify_event is rarely necessary, because main loop
+ * services (bottom halves and timers) call it themselves. One notable
+ * exception occurs when using qemu_set_fd_handler2 (see below).
+ */
+void qemu_notify_event(void);
+
+#ifdef _WIN32
+/* return TRUE if no sleep should be done afterwards */
+typedef int PollingFunc(void *opaque);
+
+/**
+ * qemu_add_polling_cb: Register a Windows-specific polling callback
+ *
+ * Currently, under Windows some events are polled rather than waited for.
+ * Polling callbacks do not ensure that @func is called in a timely
+ * fashion, because the main loop might wait for an arbitrarily long
+ * time. If possible,
+ * you should instead create a separate thread that does a blocking poll
+ * and set a Win32 event object. The event can then be passed to
+ * qemu_add_wait_object.
+ *
+ * Polling callbacks have nothing Windows-specific about them, but
+ * as they are a hack and are currently not necessary on POSIX systems,
+ * they are only available when QEMU is running under Windows.
+ *
+ * @func: The function that does the polling, and returns 1 to force
+ * immediate completion of main_loop_wait.
+ * @opaque: A pointer-size value that is passed to @func.
+ */
+int qemu_add_polling_cb(PollingFunc *func, void *opaque);
+
+/**
+ * qemu_del_polling_cb: Unregister a Windows-specific polling callback
+ *
+ * This function removes a callback that was registered with
+ * qemu_add_polling_cb.
+ *
+ * @func: The function that was passed to qemu_add_polling_cb.
+ * @opaque: A pointer-size value that was passed to qemu_add_polling_cb.
+ */
+void qemu_del_polling_cb(PollingFunc *func, void *opaque);
+
+/* Wait objects handling */
+typedef void WaitObjectFunc(void *opaque);
+
+/**
+ * qemu_add_wait_object: Register a callback for a Windows handle
+ *
+ * Under Windows, the iohandler mechanism can only be used with sockets.
+ * QEMU must use the WaitForMultipleObjects API to wait on other handles.
+ * This function registers a #HANDLE with QEMU, so that it will be included
+ * in the main loop's calls to WaitForMultipleObjects. When the handle
+ * is in a signaled state, QEMU will call @func.
+ *
+ * @handle: The Windows handle to be observed.
+ * @func: A function to be called when @handle is in a signaled state.
+ * @opaque: A pointer-size value that is passed to @func.
+ */
+int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque);
+
+/**
+ * qemu_del_wait_object: Unregister a callback for a Windows handle
+ *
+ * This function removes a callback that was registered with
+ * qemu_add_wait_object.
+ *
+ * @func: The function that was passed to qemu_add_wait_object.
+ * @opaque: A pointer-size value that was passed to qemu_add_wait_object.
+ */
+void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque);
+#endif
+
+/* async I/O support */
+
+typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size);
+typedef int IOCanReadHandler(void *opaque);
+
+/**
+ * qemu_set_fd_handler2: Register a file descriptor with the main loop
+ *
+ * This function tells the main loop to wake up whenever one of the
+ * following conditions is true:
+ *
+ * 1) if @fd_write is not %NULL, when the file descriptor is writable;
+ *
+ * 2) if @fd_read is not %NULL, when the file descriptor is readable.
+ *
+ * @fd_read_poll can be used to disable the @fd_read callback temporarily.
+ * This is useful to avoid calling qemu_set_fd_handler2 every time the
+ * client becomes interested in reading (or dually, stops being interested).
+ * A typical example is when @fd is a listening socket and you want to limit
+ * the number of active clients. Remember to call qemu_notify_event whenever
+ * the condition may change from %false to %true.
+ *
+ * The callbacks that are set up by qemu_set_fd_handler2 are level-triggered.
+ * If @fd_read does not read from @fd, or @fd_write does not write to @fd
+ * until its buffers are full, they will be called again on the next
+ * iteration.
+ *
+ * @fd: The file descriptor to be observed. Under Windows it must be
+ * a #SOCKET.
+ *
+ * @fd_read_poll: A function that returns 1 if the @fd_read callback
+ * should be fired. If the function returns 0, the main loop will not
+ * end its iteration even if @fd becomes readable.
+ *
+ * @fd_read: A level-triggered callback that is fired if @fd is readable
+ * at the beginning of a main loop iteration, or if it becomes readable
+ * during one.
+ *
+ * @fd_write: A level-triggered callback that is fired when @fd is writable
+ * at the beginning of a main loop iteration, or if it becomes writable
+ * during one.
+ *
+ * @opaque: A pointer-sized value that is passed to @fd_read_poll,
+ * @fd_read and @fd_write.
+ */
+int qemu_set_fd_handler2(int fd,
+ IOCanReadHandler *fd_read_poll,
+ IOHandler *fd_read,
+ IOHandler *fd_write,
+ void *opaque);
+
+/**
+ * qemu_set_fd_handler: Register a file descriptor with the main loop
+ *
+ * This function tells the main loop to wake up whenever one of the
+ * following conditions is true:
+ *
+ * 1) if @fd_write is not %NULL, when the file descriptor is writable;
+ *
+ * 2) if @fd_read is not %NULL, when the file descriptor is readable.
+ *
+ * The callbacks that are set up by qemu_set_fd_handler are level-triggered.
+ * If @fd_read does not read from @fd, or @fd_write does not write to @fd
+ * until its buffers are full, they will be called again on the next
+ * iteration.
+ *
+ * @fd: The file descriptor to be observed. Under Windows it must be
+ * a #SOCKET.
+ *
+ * @fd_read: A level-triggered callback that is fired if @fd is readable
+ * at the beginning of a main loop iteration, or if it becomes readable
+ * during one.
+ *
+ * @fd_write: A level-triggered callback that is fired when @fd is writable
+ * at the beginning of a main loop iteration, or if it becomes writable
+ * during one.
+ *
+ * @opaque: A pointer-sized value that is passed to @fd_read and @fd_write.
+ */
+int qemu_set_fd_handler(int fd,
+ IOHandler *fd_read,
+ IOHandler *fd_write,
+ void *opaque);
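+
+/*
+ * Registration sketch (MyState, my_fd_read and s are illustrative names):
+ *
+ *   static void my_fd_read(void *opaque)
+ *   {
+ *       MyState *s = opaque;
+ *       ... read from s->fd; the callback is level-triggered, so keep
+ *           reading until the data is drained or EAGAIN is returned ...
+ *   }
+ *
+ *   qemu_set_fd_handler(s->fd, my_fd_read, NULL, s);
+ *
+ * Passing NULL for both callbacks removes the handlers again:
+ *
+ *   qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
+ */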
+
+#ifdef CONFIG_POSIX
+/**
+ * qemu_add_child_watch: Register a child process for reaping.
+ *
+ * Under POSIX systems, a parent process must read the exit status of
+ * its child processes using waitpid, or the operating system will not
+ * free some of the resources attached to that process.
+ *
+ * This function directs the QEMU main loop to observe a child process
+ * and call waitpid as soon as it exits; the watch is then removed
+ * automatically. It is useful whenever QEMU forks a child process
+ * but would otherwise only learn of its termination by indirect means
+ * such as a "broken pipe".
+ *
+ * @pid: The pid that QEMU should observe.
+ */
+int qemu_add_child_watch(pid_t pid);
+#endif
+
+/**
+ * qemu_mutex_lock_iothread: Lock the main loop mutex.
+ *
+ * This function locks the main loop mutex. The mutex is taken by
+ * qemu_init_main_loop and always taken except while waiting on
+ * external events (such as with select). The mutex should be taken
+ * by threads other than the main loop thread when calling
+ * qemu_bh_new(), qemu_set_fd_handler() and basically all other
+ * functions documented in this file.
+ *
+ * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread
+ * is a no-op there.
+ */
+void qemu_mutex_lock_iothread(void);
+
+/**
+ * qemu_mutex_unlock_iothread: Unlock the main loop mutex.
+ *
+ * This function unlocks the main loop mutex. The mutex is taken by
+ * qemu_init_main_loop and always taken except while waiting on
+ * external events (such as with select). The mutex should be unlocked
+ * as soon as possible by threads other than the main loop thread,
+ * because it prevents the main loop from processing callbacks,
+ * including timers and bottom halves.
+ *
+ * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread
+ * is a no-op there.
+ */
+void qemu_mutex_unlock_iothread(void);
+
+/* internal interfaces */
+
+void qemu_fd_register(int fd);
+void qemu_iohandler_fill(int *pnfds, fd_set *readfds, fd_set *writefds, fd_set *xfds);
+void qemu_iohandler_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds, int rc);
+
+QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
+void qemu_bh_schedule_idle(QEMUBH *bh);
+
+#endif
diff --git a/include/qemu/module.h b/include/qemu/module.h
new file mode 100644
index 0000000000..c4ccd57166
--- /dev/null
+++ b/include/qemu/module.h
@@ -0,0 +1,40 @@
+/*
+ * QEMU Module Infrastructure
+ *
+ * Copyright IBM, Corp. 2009
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_MODULE_H
+#define QEMU_MODULE_H
+
+/* This should not be used directly. Use block_init etc. instead. */
+#define module_init(function, type) \
+static void __attribute__((constructor)) do_qemu_init_ ## function(void) { \
+ register_module_init(function, type); \
+}
+
+typedef enum {
+ MODULE_INIT_BLOCK,
+ MODULE_INIT_MACHINE,
+ MODULE_INIT_QAPI,
+ MODULE_INIT_QOM,
+ MODULE_INIT_MAX
+} module_init_type;
+
+#define block_init(function) module_init(function, MODULE_INIT_BLOCK)
+#define machine_init(function) module_init(function, MODULE_INIT_MACHINE)
+#define qapi_init(function) module_init(function, MODULE_INIT_QAPI)
+#define type_init(function) module_init(function, MODULE_INIT_QOM)
+
+void register_module_init(void (*fn)(void), module_init_type type);
+
+void module_call_init(module_init_type type);
+
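+/*
+ * Typical usage sketch (bdrv_mydrv is an illustrative driver): a block
+ * driver registers itself from a constructor-run init function, and the
+ * registered functions are executed when the program calls
+ * module_call_init(MODULE_INIT_BLOCK).
+ *
+ *   static void bdrv_mydrv_init(void)
+ *   {
+ *       bdrv_register(&bdrv_mydrv);
+ *   }
+ *
+ *   block_init(bdrv_mydrv_init);
+ */
+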
+#endif
diff --git a/include/qemu/notify.h b/include/qemu/notify.h
new file mode 100644
index 0000000000..4e2e7f0ec4
--- /dev/null
+++ b/include/qemu/notify.h
@@ -0,0 +1,43 @@
+/*
+ * Notifier lists
+ *
+ * Copyright IBM, Corp. 2010
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_NOTIFY_H
+#define QEMU_NOTIFY_H
+
+#include "qemu/queue.h"
+
+typedef struct Notifier Notifier;
+
+struct Notifier
+{
+ void (*notify)(Notifier *notifier, void *data);
+ QLIST_ENTRY(Notifier) node;
+};
+
+typedef struct NotifierList
+{
+ QLIST_HEAD(, Notifier) notifiers;
+} NotifierList;
+
+#define NOTIFIER_LIST_INITIALIZER(head) \
+ { QLIST_HEAD_INITIALIZER((head).notifiers) }
+
+void notifier_list_init(NotifierList *list);
+
+void notifier_list_add(NotifierList *list, Notifier *notifier);
+
+void notifier_remove(Notifier *notifier);
+
+void notifier_list_notify(NotifierList *list, void *data);
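+
+/*
+ * Usage sketch (MyState and the list name are illustrative): a client
+ * embeds a Notifier in its own state and subscribes to a NotifierList
+ * owned by another subsystem.
+ *
+ *   static void my_handler(Notifier *notifier, void *data)
+ *   {
+ *       MyState *s = container_of(notifier, MyState, my_notifier);
+ *       ... react to the event described by data ...
+ *   }
+ *
+ *   s->my_notifier.notify = my_handler;
+ *   notifier_list_add(&some_notifier_list, &s->my_notifier);
+ */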
+
+#endif
diff --git a/include/qemu/option.h b/include/qemu/option.h
new file mode 100644
index 0000000000..ba197cddcf
--- /dev/null
+++ b/include/qemu/option.h
@@ -0,0 +1,158 @@
+/*
+ * Commandline option parsing functions
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2009 Kevin Wolf <kwolf@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_OPTIONS_H
+#define QEMU_OPTIONS_H
+
+#include <stdint.h>
+#include "qemu/queue.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qdict.h"
+
+enum QEMUOptionParType {
+ OPT_FLAG,
+ OPT_NUMBER,
+ OPT_SIZE,
+ OPT_STRING,
+};
+
+typedef struct QEMUOptionParameter {
+ const char *name;
+ enum QEMUOptionParType type;
+ union {
+ uint64_t n;
+ char* s;
+ } value;
+ const char *help;
+} QEMUOptionParameter;
+
+
+const char *get_opt_name(char *buf, int buf_size, const char *p, char delim);
+const char *get_opt_value(char *buf, int buf_size, const char *p);
+int get_next_param_value(char *buf, int buf_size,
+ const char *tag, const char **pstr);
+int get_param_value(char *buf, int buf_size,
+ const char *tag, const char *str);
+int check_params(char *buf, int buf_size,
+ const char * const *params, const char *str);
+
+
+/*
+ * The following functions take a parameter list as input. This is a pointer to
+ * the first element of a QEMUOptionParameter array which is terminated by an
+ * entry with entry->name == NULL.
+ */
+
+QEMUOptionParameter *get_option_parameter(QEMUOptionParameter *list,
+ const char *name);
+int set_option_parameter(QEMUOptionParameter *list, const char *name,
+ const char *value);
+int set_option_parameter_int(QEMUOptionParameter *list, const char *name,
+ uint64_t value);
+QEMUOptionParameter *append_option_parameters(QEMUOptionParameter *dest,
+ QEMUOptionParameter *list);
+QEMUOptionParameter *parse_option_parameters(const char *param,
+ QEMUOptionParameter *list, QEMUOptionParameter *dest);
+void free_option_parameters(QEMUOptionParameter *list);
+void print_option_parameters(QEMUOptionParameter *list);
+void print_option_help(QEMUOptionParameter *list);
+
+/* ------------------------------------------------------------------ */
+
+typedef struct QemuOpt QemuOpt;
+typedef struct QemuOpts QemuOpts;
+typedef struct QemuOptsList QemuOptsList;
+
+enum QemuOptType {
+ QEMU_OPT_STRING = 0, /* no parsing (use string as-is) */
+ QEMU_OPT_BOOL, /* on/off */
+ QEMU_OPT_NUMBER, /* simple number */
+ QEMU_OPT_SIZE, /* size, accepts (K)ilo, (M)ega, (G)iga, (T)era postfix */
+};
+
+typedef struct QemuOptDesc {
+ const char *name;
+ enum QemuOptType type;
+ const char *help;
+} QemuOptDesc;
+
+struct QemuOptsList {
+ const char *name;
+ const char *implied_opt_name;
+ bool merge_lists; /* Merge multiple uses of option into a single list? */
+ QTAILQ_HEAD(, QemuOpts) head;
+ QemuOptDesc desc[];
+};
+
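+/*
+ * Definition sketch (option names are illustrative): describing the
+ * options a subsystem accepts so that a string such as
+ * "size=1G,readonly=on" can be parsed into a QemuOpts instance.
+ *
+ *   static QemuOptsList foo_opts = {
+ *       .name = "foo",
+ *       .head = QTAILQ_HEAD_INITIALIZER(foo_opts.head),
+ *       .desc = {
+ *           { .name = "size",     .type = QEMU_OPT_SIZE },
+ *           { .name = "readonly", .type = QEMU_OPT_BOOL },
+ *           { .name = NULL }    <-- end-of-list terminator
+ *       },
+ *   };
+ *
+ *   QemuOpts *opts = qemu_opts_parse(&foo_opts, "size=1G,readonly=on", 0);
+ *   uint64_t size = qemu_opt_get_size(opts, "size", 0);
+ */
+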
+const char *qemu_opt_get(QemuOpts *opts, const char *name);
+/**
+ * qemu_opt_has_help_opt:
+ * @opts: options to search for a help request
+ *
+ * Check whether the options specified by @opts include one of the
+ * standard strings which indicate that the user is asking for a
+ * list of the valid values for a command line option (as defined
+ * by is_help_option()).
+ *
+ * Returns: true if @opts includes 'help' or equivalent.
+ */
+bool qemu_opt_has_help_opt(QemuOpts *opts);
+bool qemu_opt_get_bool(QemuOpts *opts, const char *name, bool defval);
+uint64_t qemu_opt_get_number(QemuOpts *opts, const char *name, uint64_t defval);
+uint64_t qemu_opt_get_size(QemuOpts *opts, const char *name, uint64_t defval);
+int qemu_opt_set(QemuOpts *opts, const char *name, const char *value);
+void qemu_opt_set_err(QemuOpts *opts, const char *name, const char *value,
+ Error **errp);
+int qemu_opt_set_bool(QemuOpts *opts, const char *name, bool val);
+int qemu_opt_set_number(QemuOpts *opts, const char *name, int64_t val);
+typedef int (*qemu_opt_loopfunc)(const char *name, const char *value, void *opaque);
+int qemu_opt_foreach(QemuOpts *opts, qemu_opt_loopfunc func, void *opaque,
+ int abort_on_failure);
+
+QemuOpts *qemu_opts_find(QemuOptsList *list, const char *id);
+QemuOpts *qemu_opts_create(QemuOptsList *list, const char *id,
+ int fail_if_exists, Error **errp);
+QemuOpts *qemu_opts_create_nofail(QemuOptsList *list);
+void qemu_opts_reset(QemuOptsList *list);
+void qemu_opts_loc_restore(QemuOpts *opts);
+int qemu_opts_set(QemuOptsList *list, const char *id,
+ const char *name, const char *value);
+const char *qemu_opts_id(QemuOpts *opts);
+void qemu_opts_del(QemuOpts *opts);
+void qemu_opts_validate(QemuOpts *opts, const QemuOptDesc *desc, Error **errp);
+int qemu_opts_do_parse(QemuOpts *opts, const char *params, const char *firstname);
+QemuOpts *qemu_opts_parse(QemuOptsList *list, const char *params, int permit_abbrev);
+void qemu_opts_set_defaults(QemuOptsList *list, const char *params,
+ int permit_abbrev);
+QemuOpts *qemu_opts_from_qdict(QemuOptsList *list, const QDict *qdict,
+ Error **errp);
+QDict *qemu_opts_to_qdict(QemuOpts *opts, QDict *qdict);
+
+typedef int (*qemu_opts_loopfunc)(QemuOpts *opts, void *opaque);
+int qemu_opts_print(QemuOpts *opts, void *dummy);
+int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, void *opaque,
+ int abort_on_failure);
+
+#endif
diff --git a/include/qemu/option_int.h b/include/qemu/option_int.h
new file mode 100644
index 0000000000..8212fa4a48
--- /dev/null
+++ b/include/qemu/option_int.h
@@ -0,0 +1,54 @@
+/*
+ * Commandline option parsing functions
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2009 Kevin Wolf <kwolf@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_OPTIONS_INTERNAL_H
+#define QEMU_OPTIONS_INTERNAL_H
+
+#include "qemu/option.h"
+#include "qemu/error-report.h"
+
+struct QemuOpt {
+ const char *name;
+ const char *str;
+
+ const QemuOptDesc *desc;
+ union {
+ bool boolean;
+ uint64_t uint;
+ } value;
+
+ QemuOpts *opts;
+ QTAILQ_ENTRY(QemuOpt) next;
+};
+
+struct QemuOpts {
+ char *id;
+ QemuOptsList *list;
+ Location loc;
+ QTAILQ_HEAD(QemuOptHead, QemuOpt) head;
+ QTAILQ_ENTRY(QemuOpts) next;
+};
+
+#endif
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
new file mode 100644
index 0000000000..87d3b9cfa8
--- /dev/null
+++ b/include/qemu/osdep.h
@@ -0,0 +1,178 @@
+#ifndef QEMU_OSDEP_H
+#define QEMU_OSDEP_H
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdbool.h>
+#ifdef __OpenBSD__
+#include <sys/types.h>
+#include <sys/signal.h>
+#endif
+
+#include <sys/time.h>
+
+#if defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10
+/* [u]int_fast*_t not in <sys/int_types.h> */
+typedef unsigned char uint_fast8_t;
+typedef unsigned int uint_fast16_t;
+typedef signed int int_fast16_t;
+#endif
+
+#ifndef glue
+#define xglue(x, y) x ## y
+#define glue(x, y) xglue(x, y)
+#define stringify(s) tostring(s)
+#define tostring(s) #s
+#endif
+
+#ifndef likely
+#if __GNUC__ < 3
+#define __builtin_expect(x, n) (x)
+#endif
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *) 0)->member) *__mptr = (ptr); \
+ (type *) ((char *) __mptr - offsetof(type, member));})
+#endif
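+
+/*
+ * Example (MyDevice is an illustrative type): recovering the enclosing
+ * structure from a pointer to one of its embedded members, e.g. inside a
+ * callback that is only handed the member.
+ *
+ *   typedef struct MyDevice {
+ *       int id;
+ *       Notifier exit_notifier;
+ *   } MyDevice;
+ *
+ *   MyDevice *d = container_of(notifier, MyDevice, exit_notifier);
+ */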
+
+/* Convert from a base type to a parent type, with compile time checking. */
+#ifdef __GNUC__
+#define DO_UPCAST(type, field, dev) ( __extension__ ( { \
+ char __attribute__((unused)) offset_must_be_zero[ \
+ -offsetof(type, field)]; \
+ container_of(dev, type, field);}))
+#else
+#define DO_UPCAST(type, field, dev) container_of(dev, type, field)
+#endif
+
+#define typeof_field(type, field) typeof(((type *)0)->field)
+#define type_check(t1,t2) ((t1*)0 - (t2*)0)
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef always_inline
+#if !((__GNUC__ < 3) || defined(__APPLE__))
+#ifdef __OPTIMIZE__
+#undef inline
+#define inline __attribute__ (( always_inline )) __inline__
+#endif
+#endif
+#else
+#undef inline
+#define inline always_inline
+#endif
+
+#define qemu_printf printf
+
+int qemu_daemon(int nochdir, int noclose);
+void *qemu_memalign(size_t alignment, size_t size);
+void *qemu_vmalloc(size_t size);
+void qemu_vfree(void *ptr);
+
+#define QEMU_MADV_INVALID -1
+
+#if defined(CONFIG_MADVISE)
+
+#define QEMU_MADV_WILLNEED MADV_WILLNEED
+#define QEMU_MADV_DONTNEED MADV_DONTNEED
+#ifdef MADV_DONTFORK
+#define QEMU_MADV_DONTFORK MADV_DONTFORK
+#else
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#endif
+#ifdef MADV_MERGEABLE
+#define QEMU_MADV_MERGEABLE MADV_MERGEABLE
+#else
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#endif
+#ifdef MADV_DONTDUMP
+#define QEMU_MADV_DONTDUMP MADV_DONTDUMP
+#else
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#endif
+#ifdef MADV_HUGEPAGE
+#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE
+#else
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+#endif
+
+#elif defined(CONFIG_POSIX_MADVISE)
+
+#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED
+#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+
+#else /* no-op */
+
+#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID
+#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID
+#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
+#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
+#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+
+#endif
+
+int qemu_madvise(void *addr, size_t len, int advice);
+
+int qemu_open(const char *name, int flags, ...);
+int qemu_close(int fd);
+
+#if defined(__HAIKU__) && defined(__i386__)
+#define FMT_pid "%ld"
+#elif defined(WIN64)
+#define FMT_pid "%" PRId64
+#else
+#define FMT_pid "%d"
+#endif
+
+int qemu_create_pidfile(const char *filename);
+int qemu_get_thread_id(void);
+
+#ifdef _WIN32
+static inline void qemu_timersub(const struct timeval *val1,
+ const struct timeval *val2,
+ struct timeval *res)
+{
+ res->tv_sec = val1->tv_sec - val2->tv_sec;
+ if (val1->tv_usec < val2->tv_usec) {
+ res->tv_sec--;
+ res->tv_usec = val1->tv_usec - val2->tv_usec + 1000 * 1000;
+ } else {
+ res->tv_usec = val1->tv_usec - val2->tv_usec;
+ }
+}
+#else
+#define qemu_timersub timersub
+#endif
+
+void qemu_set_cloexec(int fd);
+
+void qemu_set_version(const char *);
+const char *qemu_get_version(void);
+
+void fips_set_state(bool requested);
+bool fips_get_state(void);
+
+#endif
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
new file mode 100644
index 0000000000..d433b9017c
--- /dev/null
+++ b/include/qemu/queue.h
@@ -0,0 +1,414 @@
+/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */
+
+/*
+ * QEMU version: Copy from netbsd, removed debug code, removed some of
+ * the implementations. Left in singly-linked lists, lists, simple
+ * queues, and tail queues.
+ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef QEMU_SYS_QUEUE_H_
+#define QEMU_SYS_QUEUE_H_
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * lists, simple queues, and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The
+ * elements are singly linked for minimum space and pointer manipulation
+ * overhead at the expense of O(n) removal for arbitrary elements. New
+ * elements can be added to the list after an existing element or at the
+ * head of the list. Elements being removed from the head of the list
+ * should use the explicit macro for this purpose for optimum
+ * efficiency. A singly-linked list may only be traversed in the forward
+ * direction. Singly-linked lists are ideal for applications with large
+ * datasets and few or no removals or for implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#include "qemu/atomic.h" /* for smp_wmb() */
+
+/*
+ * List definitions.
+ */
+#define QLIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define QLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define QLIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+#define QLIST_INIT(head) do { \
+ (head)->lh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
+ (elm)->field.le_prev = &(head)->lh_first; \
+ (elm)->field.le_next = (head)->lh_first; \
+ smp_wmb(); /* fill elm before linking it */ \
+ if ((head)->lh_first != NULL) { \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next; \
+ } \
+ (head)->lh_first = (elm); \
+ smp_wmb(); \
+} while (/* CONSTCOND*/0)
+
+#define QLIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define QLIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); \
+ (var); \
+ (var) = ((var)->field.le_next))
+
+#define QLIST_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->lh_first); \
+ (var) && ((next_var) = ((var)->field.le_next), 1); \
+ (var) = (next_var))
+
+/*
+ * List access methods.
+ */
+#define QLIST_EMPTY(head) ((head)->lh_first == NULL)
+#define QLIST_FIRST(head) ((head)->lh_first)
+#define QLIST_NEXT(elm, field) ((elm)->field.le_next)
+
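+/*
+ * Usage sketch (Foo and foo_list are illustrative): a doubly-linked list
+ * whose elements can be removed in O(1) without walking the list.
+ *
+ *   typedef struct Foo {
+ *       int value;
+ *       QLIST_ENTRY(Foo) link;
+ *   } Foo;
+ *
+ *   static QLIST_HEAD(, Foo) foo_list = QLIST_HEAD_INITIALIZER(foo_list);
+ *
+ *   QLIST_INSERT_HEAD(&foo_list, new_foo, link);
+ *
+ *   Foo *f, *next;
+ *   QLIST_FOREACH_SAFE(f, &foo_list, link, next) {
+ *       QLIST_REMOVE(f, link);
+ *       g_free(f);
+ *   }
+ */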
+
+/*
+ * Singly-linked List definitions.
+ */
+#define QSLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define QSLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define QSLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define QSLIST_INIT(head) do { \
+ (head)->slh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
+ (slistelm)->field.sle_next = \
+ QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \
+} while (/*CONSTCOND*/0)
+
+#define QSLIST_FOREACH(var, head, field) \
+ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
+
+#define QSLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = QSLIST_FIRST((head)); \
+ (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+/*
+ * Singly-linked List access methods.
+ */
+#define QSLIST_EMPTY(head) ((head)->slh_first == NULL)
+#define QSLIST_FIRST(head) ((head)->slh_first)
+#define QSLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+
+/*
+ * Simple queue definitions.
+ */
+#define QSIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define QSIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define QSIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue functions.
+ */
+#define QSIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->sqh_first == (elm)) { \
+ QSIMPLEQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->sqh_first; \
+ while (curelm->field.sqe_next != (elm)) \
+ curelm = curelm->field.sqe_next; \
+ if ((curelm->field.sqe_next = \
+ curelm->field.sqe_next->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(curelm)->field.sqe_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->sqh_first); \
+ (var); \
+ (var) = ((var)->field.sqe_next))
+
+#define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \
+ for ((var) = ((head)->sqh_first); \
+ (var) && ((next = ((var)->field.sqe_next)), 1); \
+ (var) = (next))
+
+#define QSIMPLEQ_CONCAT(head1, head2) do { \
+ if (!QSIMPLEQ_EMPTY((head2))) { \
+ *(head1)->sqh_last = (head2)->sqh_first; \
+ (head1)->sqh_last = (head2)->sqh_last; \
+ QSIMPLEQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_LAST(head, type, field) \
+ (QSIMPLEQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->sqh_last) - offsetof(struct type, field))))
+
+/*
+ * Simple queue access methods.
+ */
+#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
+#define QSIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+
+/*
+ * Tail queue definitions.
+ */
+#define Q_TAILQ_HEAD(name, type, qual) \
+struct name { \
+ qual type *tqh_first; /* first element */ \
+ qual type *qual *tqh_last; /* addr of last next element */ \
+}
+#define QTAILQ_HEAD(name, type) Q_TAILQ_HEAD(name, struct type,)
+
+#define QTAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define Q_TAILQ_ENTRY(type, qual) \
+struct { \
+ qual type *tqe_next; /* next element */ \
+ qual type *qual *tqe_prev; /* address of previous next element */\
+}
+#define QTAILQ_ENTRY(type) Q_TAILQ_ENTRY(struct type,)
+
+/*
+ * Tail queue functions.
+ */
+#define QTAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->tqh_first); \
+ (var); \
+ (var) = ((var)->field.tqe_next))
+
+#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \
+ for ((var) = ((head)->tqh_first); \
+ (var) && ((next_var) = ((var)->field.tqe_next), 1); \
+ (var) = (next_var))
+
+#define QTAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
+ (var); \
+ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
+
+/*
+ * Tail queue access methods.
+ */
+#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+#define QTAILQ_FIRST(head) ((head)->tqh_first)
+#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define QTAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+#define QTAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
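+
+/*
+ * Usage sketch (Req and req_queue are illustrative): a tail queue is the
+ * right choice when elements are appended at the tail in O(1) and the
+ * queue may be walked in either direction.
+ *
+ *   typedef struct Req {
+ *       QTAILQ_ENTRY(Req) entry;
+ *   } Req;
+ *
+ *   static QTAILQ_HEAD(, Req) req_queue = QTAILQ_HEAD_INITIALIZER(req_queue);
+ *
+ *   QTAILQ_INSERT_TAIL(&req_queue, req, entry);
+ *   Req *r = QTAILQ_FIRST(&req_queue);
+ *   if (r) {
+ *       QTAILQ_REMOVE(&req_queue, r, entry);
+ *   }
+ */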
+
+#endif /* !QEMU_SYS_QUEUE_H_ */
diff --git a/include/qemu/range.h b/include/qemu/range.h
new file mode 100644
index 0000000000..350237212b
--- /dev/null
+++ b/include/qemu/range.h
@@ -0,0 +1,29 @@
+#ifndef QEMU_RANGE_H
+#define QEMU_RANGE_H
+
+/* Get last byte of a range from offset + length.
+ * Undefined for ranges that wrap around 0. */
+static inline uint64_t range_get_last(uint64_t offset, uint64_t len)
+{
+ return offset + len - 1;
+}
+
+/* Check whether a given range covers a given byte. */
+static inline int range_covers_byte(uint64_t offset, uint64_t len,
+ uint64_t byte)
+{
+ return offset <= byte && byte <= range_get_last(offset, len);
+}
+
+/* Check whether 2 given ranges overlap.
+ * Undefined for ranges that wrap around 0. */
+static inline int ranges_overlap(uint64_t first1, uint64_t len1,
+ uint64_t first2, uint64_t len2)
+{
+ uint64_t last1 = range_get_last(first1, len1);
+ uint64_t last2 = range_get_last(first2, len2);
+
+ return !(last2 < first1 || last1 < first2);
+}
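+
+/*
+ * Example: a 4-byte access starting at offset 0xfe overlaps a region
+ * covering [0x100, 0x1ff]:
+ *
+ *   ranges_overlap(0x100, 0x100, 0xfe, 4)
+ *
+ * evaluates to true, because the second range is [0xfe, 0x101].
+ */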
+
+#endif
diff --git a/include/qemu/ratelimit.h b/include/qemu/ratelimit.h
index c6ac281141..d1610f135b 100644
--- a/include/qemu/ratelimit.h
+++ b/include/qemu/ratelimit.h
@@ -42,7 +42,7 @@ static inline void ratelimit_set_speed(RateLimit *limit, uint64_t speed,
uint64_t slice_ns)
{
limit->slice_ns = slice_ns;
- limit->slice_quota = ((double)speed * 1000000000ULL) / slice_ns;
+ limit->slice_quota = ((double)speed * slice_ns)/1000000000ULL;
}
#endif
diff --git a/include/qemu/rng-random.h b/include/qemu/rng-random.h
new file mode 100644
index 0000000000..4332772a24
--- /dev/null
+++ b/include/qemu/rng-random.h
@@ -0,0 +1,22 @@
+/*
+ * QEMU Random Number Generator Backend
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_RNG_RANDOM_H
+#define QEMU_RNG_RANDOM_H
+
+#include "qom/object.h"
+
+#define TYPE_RNG_RANDOM "rng-random"
+#define RNG_RANDOM(obj) OBJECT_CHECK(RndRandom, (obj), TYPE_RNG_RANDOM)
+
+typedef struct RndRandom RndRandom;
+
+#endif
diff --git a/include/qemu/rng.h b/include/qemu/rng.h
new file mode 100644
index 0000000000..509abd023d
--- /dev/null
+++ b/include/qemu/rng.h
@@ -0,0 +1,93 @@
+/*
+ * QEMU Random Number Generator Backend
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_RNG_H
+#define QEMU_RNG_H
+
+#include "qom/object.h"
+#include "qemu-common.h"
+#include "qapi/error.h"
+
+#define TYPE_RNG_BACKEND "rng-backend"
+#define RNG_BACKEND(obj) \
+ OBJECT_CHECK(RngBackend, (obj), TYPE_RNG_BACKEND)
+#define RNG_BACKEND_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(RngBackendClass, (obj), TYPE_RNG_BACKEND)
+#define RNG_BACKEND_CLASS(klass) \
+ OBJECT_CLASS_CHECK(RngBackendClass, (klass), TYPE_RNG_BACKEND)
+
+typedef struct RngBackendClass RngBackendClass;
+typedef struct RngBackend RngBackend;
+
+typedef void (EntropyReceiveFunc)(void *opaque,
+ const void *data,
+ size_t size);
+
+struct RngBackendClass
+{
+ ObjectClass parent_class;
+
+ void (*request_entropy)(RngBackend *s, size_t size,
+                            EntropyReceiveFunc *receive_entropy, void *opaque);
+ void (*cancel_requests)(RngBackend *s);
+
+ void (*opened)(RngBackend *s, Error **errp);
+};
+
+struct RngBackend
+{
+ Object parent;
+
+ /*< protected >*/
+ bool opened;
+};
+
+/**
+ * rng_backend_request_entropy:
+ * @s: the backend to request entropy from
+ * @size: the number of bytes of data to request
+ * @receive_entropy: a function to be invoked when entropy is available
+ * @opaque: data that should be passed to @receive_entropy
+ *
+ * This function is used by the front-end to request entropy from an entropy
+ * source. This function can be called multiple times, with different
+ * values of @receive_entropy and @opaque, before any callback is invoked.
+ * The backend will queue each request and handle them appropriately.
+ *
+ * The backend does not need to pass the full amount of data to
+ * @receive_entropy, but it will always pass more than 0 bytes.
+ */
+void rng_backend_request_entropy(RngBackend *s, size_t size,
+ EntropyReceiveFunc *receive_entropy,
+ void *opaque);
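+
+/*
+ * Usage sketch (MyDevice, my_entropy_cb and dev are illustrative): a
+ * front end asks its backend for entropy and consumes it asynchronously.
+ *
+ *   static void my_entropy_cb(void *opaque, const void *data, size_t size)
+ *   {
+ *       MyDevice *dev = opaque;
+ *       ... push up to `size' bytes from `data' to the guest ...
+ *   }
+ *
+ *   rng_backend_request_entropy(dev->rng, 64, my_entropy_cb, dev);
+ */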
+
+/**
+ * rng_backend_cancel_requests:
+ * @s: the backend to cancel all pending requests in
+ *
+ * Cancels all pending requests submitted by @rng_backend_request_entropy. This
+ * should be used by a device during reset or in preparation for live migration
+ * to stop tracking any request.
+ */
+void rng_backend_cancel_requests(RngBackend *s);
+
+/**
+ * rng_backend_open:
+ * @s: the backend to open
+ * @errp: a pointer to return the #Error object if an error occurs.
+ *
+ * This function will open the backend if it is not already open. Calling this
+ * function on an already opened backend will not result in an error.
+ */
+void rng_backend_open(RngBackend *s, Error **errp);
+
+#endif
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
new file mode 100644
index 0000000000..803ae1798c
--- /dev/null
+++ b/include/qemu/sockets.h
@@ -0,0 +1,77 @@
+/* headers to use the BSD sockets */
+#ifndef QEMU_SOCKET_H
+#define QEMU_SOCKET_H
+
+#ifdef _WIN32
+#include <windows.h>
+#include <winsock2.h>
+#include <ws2tcpip.h>
+
+#define socket_error() WSAGetLastError()
+
+int inet_aton(const char *cp, struct in_addr *ia);
+
+#else
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <sys/un.h>
+
+#define socket_error() errno
+#define closesocket(s) close(s)
+
+#endif /* !_WIN32 */
+
+#include "qemu/option.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qerror.h"
+
+/* misc helpers */
+int qemu_socket(int domain, int type, int protocol);
+int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen);
+int socket_set_cork(int fd, int v);
+void socket_set_block(int fd);
+void socket_set_nonblock(int fd);
+int send_all(int fd, const void *buf, int len1);
+
+/* Callback function for a nonblocking connect.
+ * It receives a valid fd on success, or a negative error code on failure.
+ */
+typedef void NonBlockingConnectHandler(int fd, void *opaque);
+
+int inet_listen_opts(QemuOpts *opts, int port_offset, Error **errp);
+int inet_listen(const char *str, char *ostr, int olen,
+ int socktype, int port_offset, Error **errp);
+int inet_connect_opts(QemuOpts *opts, Error **errp,
+ NonBlockingConnectHandler *callback, void *opaque);
+int inet_connect(const char *str, Error **errp);
+int inet_nonblocking_connect(const char *str,
+ NonBlockingConnectHandler *callback,
+ void *opaque, Error **errp);
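+
+/*
+ * Usage sketch: a blocking client connection with QAPI-style error
+ * reporting (the address string is an example value).
+ *
+ *   Error *err = NULL;
+ *   int fd = inet_connect("localhost:4444", &err);
+ *   if (fd < 0) {
+ *       error_report("%s", error_get_pretty(err));
+ *       error_free(err);
+ *   }
+ */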
+
+int inet_dgram_opts(QemuOpts *opts, Error **errp);
+const char *inet_strfamily(int family);
+
+int unix_listen_opts(QemuOpts *opts, Error **errp);
+int unix_listen(const char *path, char *ostr, int olen, Error **errp);
+int unix_connect_opts(QemuOpts *opts, Error **errp,
+ NonBlockingConnectHandler *callback, void *opaque);
+int unix_connect(const char *path, Error **errp);
+int unix_nonblocking_connect(const char *str,
+ NonBlockingConnectHandler *callback,
+ void *opaque, Error **errp);
+
+SocketAddress *socket_parse(const char *str, Error **errp);
+int socket_connect(SocketAddress *addr, Error **errp,
+ NonBlockingConnectHandler *callback, void *opaque);
+int socket_listen(SocketAddress *addr, Error **errp);
+
+/* Old, ipv4 only bits. Don't use for new code. */
+int parse_host_port(struct sockaddr_in *saddr, const char *str);
+int socket_init(void);
+
+#endif /* QEMU_SOCKET_H */
diff --git a/include/qemu/thread-posix.h b/include/qemu/thread-posix.h
new file mode 100644
index 0000000000..0f30dccb53
--- /dev/null
+++ b/include/qemu/thread-posix.h
@@ -0,0 +1,28 @@
+#ifndef __QEMU_THREAD_POSIX_H
+#define __QEMU_THREAD_POSIX_H 1
+#include "pthread.h"
+#include <semaphore.h>
+
+struct QemuMutex {
+ pthread_mutex_t lock;
+};
+
+struct QemuCond {
+ pthread_cond_t cond;
+};
+
+struct QemuSemaphore {
+#if defined(__APPLE__) || defined(__NetBSD__)
+ pthread_mutex_t lock;
+ pthread_cond_t cond;
+ int count;
+#else
+ sem_t sem;
+#endif
+};
+
+struct QemuThread {
+ pthread_t thread;
+};
+
+#endif
diff --git a/include/qemu/thread-win32.h b/include/qemu/thread-win32.h
new file mode 100644
index 0000000000..13adb958f0
--- /dev/null
+++ b/include/qemu/thread-win32.h
@@ -0,0 +1,29 @@
+#ifndef __QEMU_THREAD_WIN32_H
+#define __QEMU_THREAD_WIN32_H 1
+#include "windows.h"
+
+struct QemuMutex {
+ CRITICAL_SECTION lock;
+ LONG owner;
+};
+
+struct QemuCond {
+ LONG waiters, target;
+ HANDLE sema;
+ HANDLE continue_event;
+};
+
+struct QemuSemaphore {
+ HANDLE sema;
+};
+
+typedef struct QemuThreadData QemuThreadData;
+struct QemuThread {
+ QemuThreadData *data;
+ unsigned tid;
+};
+
+/* Only valid for joinable threads. */
+HANDLE qemu_thread_get_handle(QemuThread *thread);
+
+#endif
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
new file mode 100644
index 0000000000..c02404b9fb
--- /dev/null
+++ b/include/qemu/thread.h
@@ -0,0 +1,56 @@
+#ifndef __QEMU_THREAD_H
+#define __QEMU_THREAD_H 1
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+typedef struct QemuMutex QemuMutex;
+typedef struct QemuCond QemuCond;
+typedef struct QemuSemaphore QemuSemaphore;
+typedef struct QemuThread QemuThread;
+
+#ifdef _WIN32
+#include "qemu/thread-win32.h"
+#else
+#include "qemu/thread-posix.h"
+#endif
+
+#define QEMU_THREAD_JOINABLE 0
+#define QEMU_THREAD_DETACHED 1
+
+void qemu_mutex_init(QemuMutex *mutex);
+void qemu_mutex_destroy(QemuMutex *mutex);
+void qemu_mutex_lock(QemuMutex *mutex);
+int qemu_mutex_trylock(QemuMutex *mutex);
+void qemu_mutex_unlock(QemuMutex *mutex);
+
+#define rcu_read_lock() do { } while (0)
+#define rcu_read_unlock() do { } while (0)
+
+void qemu_cond_init(QemuCond *cond);
+void qemu_cond_destroy(QemuCond *cond);
+
+/*
+ * IMPORTANT: The implementation only supports calling pthread_cond_signal
+ * and pthread_cond_broadcast while the same mutex that is used in the
+ * corresponding pthread_cond_wait calls is held!
+ */
+void qemu_cond_signal(QemuCond *cond);
+void qemu_cond_broadcast(QemuCond *cond);
+void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex);
+
+void qemu_sem_init(QemuSemaphore *sem, int init);
+void qemu_sem_post(QemuSemaphore *sem);
+void qemu_sem_wait(QemuSemaphore *sem);
+int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
+void qemu_sem_destroy(QemuSemaphore *sem);
+
+void qemu_thread_create(QemuThread *thread,
+ void *(*start_routine)(void *),
+ void *arg, int mode);
+void *qemu_thread_join(QemuThread *thread);
+void qemu_thread_get_self(QemuThread *thread);
+bool qemu_thread_is_self(QemuThread *thread);
+void qemu_thread_exit(void *retval);
+
+#endif
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
new file mode 100644
index 0000000000..1766b2d6c7
--- /dev/null
+++ b/include/qemu/timer.h
@@ -0,0 +1,310 @@
+#ifndef QEMU_TIMER_H
+#define QEMU_TIMER_H
+
+#include "qemu-common.h"
+#include "qemu/main-loop.h"
+#include "qemu/notify.h"
+
+#ifdef __FreeBSD__
+#include <sys/param.h>
+#endif
+
+/* timers */
+
+#define SCALE_MS 1000000
+#define SCALE_US 1000
+#define SCALE_NS 1
+
+typedef struct QEMUClock QEMUClock;
+typedef void QEMUTimerCB(void *opaque);
+
+/* The real time clock should be used only for things that do not
+ change the virtual machine state, as it is run even if the virtual
+ machine is stopped. The real time clock has a frequency of 1000
+ Hz. */
+extern QEMUClock *rt_clock;
+
+/* The virtual clock is only run during the emulation. It is stopped
+ when the virtual machine is stopped. Virtual timers use a high
+ precision clock, usually cpu cycles (use ticks_per_sec). */
+extern QEMUClock *vm_clock;
+
+/* The host clock should be used for device models that emulate accurate
+ real time sources. It will continue to run when the virtual machine
+ is suspended, and it will reflect system time changes the host may
+ undergo (e.g. due to NTP). The host clock has the same precision as
+ the virtual clock. */
+extern QEMUClock *host_clock;
+
+int64_t qemu_get_clock_ns(QEMUClock *clock);
+int64_t qemu_clock_has_timers(QEMUClock *clock);
+int64_t qemu_clock_expired(QEMUClock *clock);
+int64_t qemu_clock_deadline(QEMUClock *clock);
+void qemu_clock_enable(QEMUClock *clock, bool enabled);
+void qemu_clock_warp(QEMUClock *clock);
+
+void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
+void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
+ Notifier *notifier);
+
+QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
+ QEMUTimerCB *cb, void *opaque);
+void qemu_free_timer(QEMUTimer *ts);
+void qemu_del_timer(QEMUTimer *ts);
+void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
+void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
+bool qemu_timer_pending(QEMUTimer *ts);
+bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
+uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
+
+void qemu_run_timers(QEMUClock *clock);
+void qemu_run_all_timers(void);
+void configure_alarms(char const *opt);
+void init_clocks(void);
+int init_timer_alarm(void);
+
+int64_t cpu_get_ticks(void);
+void cpu_enable_ticks(void);
+void cpu_disable_ticks(void);
+
+static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
+ void *opaque)
+{
+ return qemu_new_timer(clock, SCALE_NS, cb, opaque);
+}
+
+static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
+ void *opaque)
+{
+ return qemu_new_timer(clock, SCALE_MS, cb, opaque);
+}
+
+static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
+{
+ return qemu_get_clock_ns(clock) / SCALE_MS;
+}
+
+static inline int64_t get_ticks_per_sec(void)
+{
+ return 1000000000LL;
+}
+
+/* real time host monotonic timer */
+static inline int64_t get_clock_realtime(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
+}
+
+/* Warning: don't insert tracepoints into these functions; they are
+ also used by the simpletrace backend, and tracepoints would cause
+ an infinite recursion! */
+#ifdef _WIN32
+extern int64_t clock_freq;
+
+static inline int64_t get_clock(void)
+{
+ LARGE_INTEGER ti;
+ QueryPerformanceCounter(&ti);
+ return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
+}
+
+#else
+
+extern int use_rt_clock;
+
+static inline int64_t get_clock(void)
+{
+#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
+ || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
+ if (use_rt_clock) {
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000LL + ts.tv_nsec;
+ } else
+#endif
+ {
+ /* XXX: using gettimeofday leads to problems if the date
+ changes, so it should be avoided. */
+ return get_clock_realtime();
+ }
+}
+#endif
+
+void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
+void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
+
+/* icount */
+int64_t cpu_get_icount(void);
+int64_t cpu_get_clock(void);
+
+/*******************************************/
+/* host CPU ticks (if available) */
+
+#if defined(_ARCH_PPC)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int64_t retval;
+#ifdef _ARCH_PPC64
+ /* This reads the timebase in one 64-bit go and includes the Cell workaround from:
+ http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
+ */
+ __asm__ __volatile__ ("mftb %0\n\t"
+ "cmpwi %0,0\n\t"
+ "beq- $-8"
+ : "=r" (retval));
+#else
+ /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
+ unsigned long junk;
+ __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */
+ "mfspr %L0,268\n\t" /* mftb */
+ "mfspr %0,269\n\t" /* mftbu */
+ "cmpw %0,%1\n\t"
+ "bne $-16"
+ : "=r" (retval), "=r" (junk));
+#endif
+ return retval;
+}
+
+#elif defined(__i386__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int64_t val;
+ asm volatile ("rdtsc" : "=A" (val));
+ return val;
+}
+
+#elif defined(__x86_64__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ uint32_t low,high;
+ int64_t val;
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+ val = high;
+ val <<= 32;
+ val |= low;
+ return val;
+}
+
+#elif defined(__hppa__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int val;
+ asm volatile ("mfctl %%cr16, %0" : "=r"(val));
+ return val;
+}
+
+#elif defined(__ia64)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int64_t val;
+ asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
+ return val;
+}
+
+#elif defined(__s390__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ int64_t val;
+ asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
+ return val;
+}
+
+#elif defined(__sparc__)
+
+static inline int64_t cpu_get_real_ticks (void)
+{
+#if defined(_LP64)
+ uint64_t rval;
+ asm volatile("rd %%tick,%0" : "=r"(rval));
+ return rval;
+#else
+ /* We need an %o or %g register for this. For recent enough gcc
+ there is an "h" constraint for that. Don't bother with that. */
+ union {
+ uint64_t i64;
+ struct {
+ uint32_t high;
+ uint32_t low;
+ } i32;
+ } rval;
+ asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
+ : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
+ return rval.i64;
+#endif
+}
+
+#elif defined(__mips__) && \
+ ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
+/*
+ * binutils only accepts rdhwr when assembling for mips32r2,
+ * but since the Linux kernel emulates the instruction, it is
+ * fine to use it here.
+ */
+#define MIPS_RDHWR(rd, value) { \
+ __asm__ __volatile__ (".set push\n\t" \
+ ".set mips32r2\n\t" \
+ "rdhwr %0, "rd"\n\t" \
+ ".set pop" \
+ : "=r" (value)); \
+ }
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
+ uint32_t count;
+ static uint32_t cyc_per_count = 0;
+
+ if (!cyc_per_count) {
+ MIPS_RDHWR("$3", cyc_per_count);
+ }
+
+ MIPS_RDHWR("$2", count);
+ return (int64_t)(count * cyc_per_count);
+}
+
+#elif defined(__alpha__)
+
+static inline int64_t cpu_get_real_ticks(void)
+{
+ uint64_t cc;
+ uint32_t cur, ofs;
+
+ asm volatile("rpcc %0" : "=r"(cc));
+ cur = cc;
+ ofs = cc >> 32;
+ return cur - ofs;
+}
+
+#else
+/* The host CPU doesn't have an easily accessible cycle counter.
+ Just return a monotonically increasing value. This will be
+ totally wrong, but hopefully better than nothing. */
+static inline int64_t cpu_get_real_ticks (void)
+{
+ static int64_t ticks = 0;
+ return ticks++;
+}
+#endif
+
+#ifdef CONFIG_PROFILER
+static inline int64_t profile_getclock(void)
+{
+ return cpu_get_real_ticks();
+}
+
+extern int64_t qemu_time, qemu_time_start;
+extern int64_t tlb_flush_time;
+extern int64_t dev_time;
+#endif
+
+#endif
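As a usage note for the timer API above: a minimal sketch of a self-rearming 100 ms timer on vm_clock, assuming a QEMU build tree. The callback and the start_tick() helper are illustrative, not part of the patch.

#include "qemu/timer.h"

static QEMUTimer *tick_timer;

/* Illustrative callback: re-arm 100 ms (in vm_clock nanoseconds) ahead. */
static void tick(void *opaque)
{
    qemu_mod_timer(tick_timer,
                   qemu_get_clock_ns(vm_clock) + 100 * SCALE_MS);
}

static void start_tick(void)
{
    tick_timer = qemu_new_timer_ns(vm_clock, tick, NULL);
    qemu_mod_timer(tick_timer,
                   qemu_get_clock_ns(vm_clock) + 100 * SCALE_MS);
}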
diff --git a/include/qemu/tls.h b/include/qemu/tls.h
new file mode 100644
index 0000000000..b92ea9d7da
--- /dev/null
+++ b/include/qemu/tls.h
@@ -0,0 +1,52 @@
+/*
+ * Abstraction layer for defining and using TLS variables
+ *
+ * Copyright (c) 2011 Red Hat, Inc
+ * Copyright (c) 2011 Linaro Limited
+ *
+ * Authors:
+ * Paolo Bonzini <pbonzini@redhat.com>
+ * Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_TLS_H
+#define QEMU_TLS_H
+
+/* Per-thread variables. Note that we only have implementations
+ * which are really thread-local on Linux; the dummy implementations
+ * define plain global variables.
+ *
+ * This means that for the moment use should be restricted to
+ * per-VCPU variables, which are OK because:
+ * - the only -user mode supporting multiple VCPU threads is linux-user
+ * - TCG system mode is single-threaded regarding VCPUs
+ * - KVM system mode is multi-threaded but limited to Linux
+ *
+ * TODO: proper implementations via Win32 .tls sections and
+ * POSIX pthread_getspecific.
+ */
+#ifdef __linux__
+#define DECLARE_TLS(type, x) extern DEFINE_TLS(type, x)
+#define DEFINE_TLS(type, x) __thread __typeof__(type) tls__##x
+#define tls_var(x) tls__##x
+#else
+/* Dummy implementations which define plain global variables */
+#define DECLARE_TLS(type, x) extern DEFINE_TLS(type, x)
+#define DEFINE_TLS(type, x) __typeof__(type) tls__##x
+#define tls_var(x) tls__##x
+#endif
+
+#endif
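As a usage note for the TLS macros above: a minimal sketch, assuming a QEMU build tree. The exec_depth variable is illustrative, not part of the patch; the DECLARE_TLS line would normally live in a header and the DEFINE_TLS line in exactly one .c file.

#include "qemu/tls.h"

/* Illustrative per-thread counter. */
DECLARE_TLS(int, exec_depth);
DEFINE_TLS(int, exec_depth);

static void enter_exec(void)
{
    tls_var(exec_depth)++;
}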
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
new file mode 100644
index 0000000000..fd532a268d
--- /dev/null
+++ b/include/qemu/typedefs.h
@@ -0,0 +1,61 @@
+#ifndef QEMU_TYPEDEFS_H
+#define QEMU_TYPEDEFS_H
+
+/* A load of opaque types so that device init declarations don't have to
+ pull in all the real definitions. */
+typedef struct QEMUTimer QEMUTimer;
+typedef struct QEMUFile QEMUFile;
+typedef struct QEMUBH QEMUBH;
+
+struct Monitor;
+typedef struct Monitor Monitor;
+typedef struct MigrationParams MigrationParams;
+
+typedef struct Property Property;
+typedef struct PropertyInfo PropertyInfo;
+typedef struct CompatProperty CompatProperty;
+typedef struct DeviceState DeviceState;
+typedef struct BusState BusState;
+typedef struct BusClass BusClass;
+
+typedef struct NICInfo NICInfo;
+typedef struct HCIInfo HCIInfo;
+typedef struct AudioState AudioState;
+typedef struct BlockDriverState BlockDriverState;
+typedef struct DriveInfo DriveInfo;
+typedef struct DisplayState DisplayState;
+typedef struct DisplayChangeListener DisplayChangeListener;
+typedef struct DisplaySurface DisplaySurface;
+typedef struct PixelFormat PixelFormat;
+typedef struct QemuConsole QemuConsole;
+typedef struct CharDriverState CharDriverState;
+typedef struct MACAddr MACAddr;
+typedef struct NetClientState NetClientState;
+typedef struct i2c_bus i2c_bus;
+typedef struct ISABus ISABus;
+typedef struct ISADevice ISADevice;
+typedef struct SMBusDevice SMBusDevice;
+typedef struct PCIHostState PCIHostState;
+typedef struct PCIExpressHost PCIExpressHost;
+typedef struct PCIBus PCIBus;
+typedef struct PCIDevice PCIDevice;
+typedef struct PCIExpressDevice PCIExpressDevice;
+typedef struct PCIBridge PCIBridge;
+typedef struct PCIEAERMsg PCIEAERMsg;
+typedef struct PCIEAERLog PCIEAERLog;
+typedef struct PCIEAERErr PCIEAERErr;
+typedef struct PCIEPort PCIEPort;
+typedef struct PCIESlot PCIESlot;
+typedef struct MSIMessage MSIMessage;
+typedef struct SerialState SerialState;
+typedef struct PCMCIACardState PCMCIACardState;
+typedef struct MouseTransformInfo MouseTransformInfo;
+typedef struct uWireSlave uWireSlave;
+typedef struct I2SCodec I2SCodec;
+typedef struct SSIBus SSIBus;
+typedef struct EventNotifier EventNotifier;
+typedef struct VirtIODevice VirtIODevice;
+typedef struct QEMUSGList QEMUSGList;
+typedef struct SHPCDevice SHPCDevice;
+
+#endif /* QEMU_TYPEDEFS_H */
diff --git a/include/qemu/uri.h b/include/qemu/uri.h
new file mode 100644
index 0000000000..de99b3bd4b
--- /dev/null
+++ b/include/qemu/uri.h
@@ -0,0 +1,113 @@
+/**
+ * Summary: library of generic URI related routines
+ * Description: library of generic URI related routines
+ * Implements RFC 2396
+ *
+ * Copyright (C) 1998-2003 Daniel Veillard. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * DANIEL VEILLARD BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of Daniel Veillard shall not
+ * be used in advertising or otherwise to promote the sale, use or other
+ * dealings in this Software without prior written authorization from him.
+ *
+ * Author: Daniel Veillard
+ **
+ * Copyright (C) 2007 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Authors:
+ * Richard W.M. Jones <rjones@redhat.com>
+ *
+ * Utility functions to help parse and assemble query strings.
+ */
+
+#ifndef QEMU_URI_H
+#define QEMU_URI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * URI:
+ *
+ * A parsed URI reference. This is a struct containing the various fields
+ * as described in RFC 2396 but separated for further processing.
+ */
+typedef struct URI {
+ char *scheme; /* the URI scheme */
+ char *opaque; /* opaque part */
+ char *authority; /* the authority part */
+ char *server; /* the server part */
+ char *user; /* the user part */
+ int port; /* the port number */
+ char *path; /* the path string */
+ char *fragment; /* the fragment identifier */
+ int cleanup; /* parsing potentially unclean URI */
+ char *query; /* the query string (as it appears in the URI) */
+} URI;
+
+URI *uri_new(void);
+char *uri_resolve(const char *URI, const char *base);
+char *uri_resolve_relative(const char *URI, const char *base);
+URI *uri_parse(const char *str);
+URI *uri_parse_raw(const char *str, int raw);
+int uri_parse_into(URI *uri, const char *str);
+char *uri_to_string(URI *uri);
+char *uri_string_escape(const char *str, const char *list);
+char *uri_string_unescape(const char *str, int len, char *target);
+void uri_free(URI *uri);
+
+/* Single web service query parameter 'name=value'. */
+typedef struct QueryParam {
+ char *name; /* Name (unescaped). */
+ char *value; /* Value (unescaped). */
+ int ignore; /* Ignore this field in qparam_get_query */
+} QueryParam;
+
+/* Set of parameters. */
+typedef struct QueryParams {
+ int n; /* number of parameters used */
+ int alloc; /* allocated space */
+ QueryParam *p; /* array of parameters */
+} QueryParams;
+
+struct QueryParams *query_params_new (int init_alloc);
+int query_param_append (QueryParams *ps, const char *name, const char *value);
+extern char *query_param_to_string (const QueryParams *ps);
+extern QueryParams *query_params_parse (const char *query);
+extern void query_params_free (QueryParams *ps);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* QEMU_URI_H */
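As a usage note for the URI helpers above: a minimal sketch that extracts the port and looks at the first query parameter of a URI string, assuming a QEMU build tree. The function name and the example URI are illustrative.

#include "qemu/uri.h"

static int example_port(const char *s)   /* e.g. "scheme://host:1234/p?x=y" */
{
    URI *uri = uri_parse(s);
    int port;

    if (!uri) {
        return -1;
    }
    port = uri->port;
    if (uri->query) {
        QueryParams *qp = query_params_parse(uri->query);
        if (qp->n > 0) {
            /* qp->p[0].name / qp->p[0].value hold the first pair, unescaped */
        }
        query_params_free(qp);
    }
    uri_free(uri);
    return port;
}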
diff --git a/include/qemu/xattr.h b/include/qemu/xattr.h
new file mode 100644
index 0000000000..f910d96eaf
--- /dev/null
+++ b/include/qemu/xattr.h
@@ -0,0 +1,30 @@
+/*
+ * Host xattr.h abstraction
+ *
+ * Copyright 2011 Red Hat Inc. and/or its affiliates
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2, or any
+ * later version. See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef QEMU_XATTR_H
+#define QEMU_XATTR_H
+
+/*
+ * Modern distributions (e.g. Fedora 15) have no libattr.so, place attr.h
+ * in /usr/include/sys, and don't have ENOATTR.
+ */
+
+#include "config-host.h"
+
+#ifdef CONFIG_LIBATTR
+# include <attr/xattr.h>
+#else
+# define ENOATTR ENODATA
+# include <sys/xattr.h>
+#endif
+
+#endif
diff --git a/include/qemu/cpu.h b/include/qom/cpu.h
index ad706a6dbd..fbacb2756b 100644
--- a/include/qemu/cpu.h
+++ b/include/qom/cpu.h
@@ -20,8 +20,8 @@
#ifndef QEMU_CPU_H
#define QEMU_CPU_H
-#include "qemu/object.h"
-#include "qemu-thread.h"
+#include "hw/qdev-core.h"
+#include "qemu/thread.h"
/**
* SECTION:cpu
@@ -46,27 +46,47 @@ typedef struct CPUState CPUState;
*/
typedef struct CPUClass {
/*< private >*/
- ObjectClass parent_class;
+ DeviceClass parent_class;
/*< public >*/
void (*reset)(CPUState *cpu);
} CPUClass;
+struct KVMState;
+struct kvm_run;
+
/**
* CPUState:
+ * @created: Indicates whether the CPU thread has been successfully created.
+ * @stop: Indicates a pending stop request.
+ * @stopped: Indicates the CPU has been artificially stopped.
+ * @kvm_fd: vCPU file descriptor for KVM.
*
* State of one CPU core or thread.
*/
struct CPUState {
/*< private >*/
- Object parent_obj;
+ DeviceState parent_obj;
/*< public >*/
struct QemuThread *thread;
#ifdef _WIN32
HANDLE hThread;
#endif
+ int thread_id;
+ struct QemuCond *halt_cond;
+ struct qemu_work_item *queued_work_first, *queued_work_last;
bool thread_kicked;
+ bool created;
+ bool stop;
+ bool stopped;
+
+#if !defined(CONFIG_USER_ONLY)
+ int kvm_fd;
+ bool kvm_vcpu_dirty;
+#endif
+ struct KVMState *kvm_state;
+ struct kvm_run *kvm_run;
/* TODO Move common fields from CPUArchState here. */
};
@@ -78,5 +98,54 @@ struct CPUState {
*/
void cpu_reset(CPUState *cpu);
+/**
+ * qemu_cpu_has_work:
+ * @cpu: The vCPU to check.
+ *
+ * Checks whether the CPU has work to do.
+ *
+ * Returns: %true if the CPU has work, %false otherwise.
+ */
+bool qemu_cpu_has_work(CPUState *cpu);
+
+/**
+ * qemu_cpu_is_self:
+ * @cpu: The vCPU to check against.
+ *
+ * Checks whether the caller is executing on the vCPU thread.
+ *
+ * Returns: %true if called from @cpu's thread, %false otherwise.
+ */
+bool qemu_cpu_is_self(CPUState *cpu);
+
+/**
+ * qemu_cpu_kick:
+ * @cpu: The vCPU to kick.
+ *
+ * Kicks @cpu's thread.
+ */
+void qemu_cpu_kick(CPUState *cpu);
+
+/**
+ * cpu_is_stopped:
+ * @cpu: The CPU to check.
+ *
+ * Checks whether the CPU is stopped.
+ *
+ * Returns: %true if run state is not running or if artificially stopped;
+ * %false otherwise.
+ */
+bool cpu_is_stopped(CPUState *cpu);
+
+/**
+ * run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu.
+ */
+void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+
#endif
diff --git a/include/qemu/object.h b/include/qom/object.h
index cc75feed66..abe9691cb7 100644
--- a/include/qemu/object.h
+++ b/include/qom/object.h
@@ -17,7 +17,7 @@
#include <glib.h>
#include <stdint.h>
#include <stdbool.h>
-#include "qemu-queue.h"
+#include "qemu/queue.h"
struct Visitor;
struct Error;
@@ -230,6 +230,23 @@ typedef struct ObjectProperty
} ObjectProperty;
/**
+ * ObjectUnparent:
+ * @obj: the object that is being removed from the composition tree
+ *
+ * Called when an object is being removed from the QOM composition tree.
+ * The function should remove any backlinks from children objects to @obj.
+ */
+typedef void (ObjectUnparent)(Object *obj);
+
+/**
+ * ObjectFree:
+ * @obj: the object being freed
+ *
+ * Called when an object's last reference is removed.
+ */
+typedef void (ObjectFree)(void *obj);
+
+/**
* ObjectClass:
*
* The base for all classes. The only thing that #ObjectClass contains is an
@@ -240,6 +257,8 @@ struct ObjectClass
/*< private >*/
Type type;
GSList *interfaces;
+
+ ObjectUnparent *unparent;
};
/**
@@ -261,6 +280,7 @@ struct Object
{
/*< private >*/
ObjectClass *class;
+ ObjectFree *free;
QTAILQ_HEAD(, ObjectProperty) properties;
uint32_t ref;
Object *parent;
@@ -485,15 +505,6 @@ void object_initialize_with_type(void *data, Type type);
void object_initialize(void *obj, const char *typename);
/**
- * object_finalize:
- * @obj: The object to finalize.
- *
- * This function destroys and object without freeing the memory associated with
- * it.
- */
-void object_finalize(void *obj);
-
-/**
* object_dynamic_cast:
* @obj: The object to cast.
* @typename: The @typename to cast to.
@@ -947,6 +958,22 @@ void object_property_add_str(Object *obj, const char *name,
struct Error **errp);
/**
+ * object_property_add_bool:
+ * @obj: the object to add a property to
+ * @name: the name of the property
+ * @get: the getter or NULL if the property is write-only.
+ * @set: the setter or NULL if the property is read-only
+ * @errp: if an error occurs, a pointer to an area to store the error
+ *
+ * Add a bool property using getters/setters. This function will add a
+ * property of type 'bool'.
+ */
+void object_property_add_bool(Object *obj, const char *name,
+ bool (*get)(Object *, struct Error **),
+ void (*set)(Object *, bool, struct Error **),
+ struct Error **errp);
+
+/**
* object_child_foreach:
* @obj: the object whose children will be navigated
* @fn: the iterator function to be called
diff --git a/include/qemu/qom-qobject.h b/include/qom/qom-qobject.h
index f9dff12f11..77cd717e3f 100644
--- a/include/qemu/qom-qobject.h
+++ b/include/qom/qom-qobject.h
@@ -13,7 +13,7 @@
#ifndef QEMU_QOM_QOBJECT_H
#define QEMU_QOM_QOBJECT_H
-#include "qemu/object.h"
+#include "qom/object.h"
/*
* object_property_get_qobject:
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
new file mode 100644
index 0000000000..5fc780c63d
--- /dev/null
+++ b/include/sysemu/arch_init.h
@@ -0,0 +1,39 @@
+#ifndef QEMU_ARCH_INIT_H
+#define QEMU_ARCH_INIT_H
+
+#include "qmp-commands.h"
+
+enum {
+ QEMU_ARCH_ALL = -1,
+ QEMU_ARCH_ALPHA = 1,
+ QEMU_ARCH_ARM = 2,
+ QEMU_ARCH_CRIS = 4,
+ QEMU_ARCH_I386 = 8,
+ QEMU_ARCH_M68K = 16,
+ QEMU_ARCH_LM32 = 32,
+ QEMU_ARCH_MICROBLAZE = 64,
+ QEMU_ARCH_MIPS = 128,
+ QEMU_ARCH_PPC = 256,
+ QEMU_ARCH_S390X = 512,
+ QEMU_ARCH_SH4 = 1024,
+ QEMU_ARCH_SPARC = 2048,
+ QEMU_ARCH_XTENSA = 4096,
+ QEMU_ARCH_OPENRISC = 8192,
+ QEMU_ARCH_UNICORE32 = 0x4000,
+};
+
+extern const uint32_t arch_type;
+
+void select_soundhw(const char *optarg);
+void do_acpitable_option(const char *optarg);
+void do_smbios_option(const char *optarg);
+void cpudef_init(void);
+int audio_available(void);
+void audio_init(ISABus *isa_bus, PCIBus *pci_bus);
+int tcg_available(void);
+int kvm_available(void);
+int xen_available(void);
+
+CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp);
+
+#endif
diff --git a/include/sysemu/balloon.h b/include/sysemu/balloon.h
new file mode 100644
index 0000000000..bd9d39505d
--- /dev/null
+++ b/include/sysemu/balloon.h
@@ -0,0 +1,29 @@
+/*
+ * Balloon
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef _QEMU_BALLOON_H
+#define _QEMU_BALLOON_H
+
+#include "monitor/monitor.h"
+#include "qapi-types.h"
+
+typedef void (QEMUBalloonEvent)(void *opaque, ram_addr_t target);
+typedef void (QEMUBalloonStatus)(void *opaque, BalloonInfo *info);
+
+int qemu_add_balloon_handler(QEMUBalloonEvent *event_func,
+ QEMUBalloonStatus *stat_func, void *opaque);
+void qemu_remove_balloon_handler(void *opaque);
+
+void qemu_balloon_changed(int64_t actual);
+
+#endif
diff --git a/include/sysemu/blockdev.h b/include/sysemu/blockdev.h
new file mode 100644
index 0000000000..1fe533299e
--- /dev/null
+++ b/include/sysemu/blockdev.h
@@ -0,0 +1,69 @@
+/*
+ * QEMU host block devices
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef BLOCKDEV_H
+#define BLOCKDEV_H
+
+#include "block/block.h"
+#include "qapi/error.h"
+#include "qemu/queue.h"
+
+void blockdev_mark_auto_del(BlockDriverState *bs);
+void blockdev_auto_del(BlockDriverState *bs);
+
+typedef enum {
+ IF_DEFAULT = -1, /* for use with drive_add() only */
+ /*
+ * IF_IDE must be zero, because we want QEMUMachine member
+ * block_default_type to default-initialize to IF_IDE
+ */
+ IF_IDE = 0,
+ IF_NONE,
+ IF_SCSI, IF_FLOPPY, IF_PFLASH, IF_MTD, IF_SD, IF_VIRTIO, IF_XEN,
+ IF_COUNT
+} BlockInterfaceType;
+
+struct DriveInfo {
+ BlockDriverState *bdrv;
+ char *id;
+ const char *devaddr;
+ BlockInterfaceType type;
+ int bus;
+ int unit;
+ int auto_del; /* see blockdev_mark_auto_del() */
+ int media_cd;
+ int cyls, heads, secs, trans;
+ QemuOpts *opts;
+ const char *serial;
+ QTAILQ_ENTRY(DriveInfo) next;
+ int refcount;
+};
+
+DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit);
+DriveInfo *drive_get_by_index(BlockInterfaceType type, int index);
+int drive_get_max_bus(BlockInterfaceType type);
+DriveInfo *drive_get_next(BlockInterfaceType type);
+void drive_get_ref(DriveInfo *dinfo);
+void drive_put_ref(DriveInfo *dinfo);
+DriveInfo *drive_get_by_blockdev(BlockDriverState *bs);
+
+QemuOpts *drive_def(const char *optstr);
+QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
+ const char *optstr);
+DriveInfo *drive_init(QemuOpts *arg, BlockInterfaceType block_default_type);
+
+/* device-hotplug */
+
+DriveInfo *add_init_drive(const char *opts);
+
+void qmp_change_blockdev(const char *device, const char *filename,
+ bool has_format, const char *format, Error **errp);
+void do_commit(Monitor *mon, const QDict *qdict);
+int do_drive_del(Monitor *mon, const QDict *qdict, QObject **ret_data);
+#endif
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
new file mode 100644
index 0000000000..81bd81773f
--- /dev/null
+++ b/include/sysemu/cpus.h
@@ -0,0 +1,24 @@
+#ifndef QEMU_CPUS_H
+#define QEMU_CPUS_H
+
+/* cpus.c */
+void qemu_init_cpu_loop(void);
+void resume_all_vcpus(void);
+void pause_all_vcpus(void);
+void cpu_stop_current(void);
+
+void cpu_synchronize_all_states(void);
+void cpu_synchronize_all_post_reset(void);
+void cpu_synchronize_all_post_init(void);
+
+void qtest_clock_warp(int64_t dest);
+
+/* vl.c */
+extern int smp_cores;
+extern int smp_threads;
+void set_numa_modes(void);
+void set_cpu_log(const char *optarg);
+void set_cpu_log_filename(const char *optarg);
+void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg);
+
+#endif
diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h
new file mode 100644
index 0000000000..f0b3f35e03
--- /dev/null
+++ b/include/sysemu/device_tree.h
@@ -0,0 +1,54 @@
+/*
+ * Header with function prototypes to help device tree manipulation using
+ * libfdt. It also provides functions to read entries from the device tree
+ * proc interface.
+ *
+ * Copyright 2008 IBM Corporation.
+ * Authors: Jerone Young <jyoung5@us.ibm.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#ifndef __DEVICE_TREE_H__
+#define __DEVICE_TREE_H__
+
+void *create_device_tree(int *sizep);
+void *load_device_tree(const char *filename_path, int *sizep);
+
+int qemu_devtree_setprop(void *fdt, const char *node_path,
+ const char *property, const void *val_array, int size);
+int qemu_devtree_setprop_cell(void *fdt, const char *node_path,
+ const char *property, uint32_t val);
+int qemu_devtree_setprop_u64(void *fdt, const char *node_path,
+ const char *property, uint64_t val);
+int qemu_devtree_setprop_string(void *fdt, const char *node_path,
+ const char *property, const char *string);
+int qemu_devtree_setprop_phandle(void *fdt, const char *node_path,
+ const char *property,
+ const char *target_node_path);
+const void *qemu_devtree_getprop(void *fdt, const char *node_path,
+ const char *property, int *lenp);
+uint32_t qemu_devtree_getprop_cell(void *fdt, const char *node_path,
+ const char *property);
+uint32_t qemu_devtree_get_phandle(void *fdt, const char *path);
+uint32_t qemu_devtree_alloc_phandle(void *fdt);
+int qemu_devtree_nop_node(void *fdt, const char *node_path);
+int qemu_devtree_add_subnode(void *fdt, const char *name);
+
+#define qemu_devtree_setprop_cells(fdt, node_path, property, ...) \
+ do { \
+ uint32_t qdt_tmp[] = { __VA_ARGS__ }; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(qdt_tmp); i++) { \
+ qdt_tmp[i] = cpu_to_be32(qdt_tmp[i]); \
+ } \
+ qemu_devtree_setprop(fdt, node_path, property, qdt_tmp, \
+ sizeof(qdt_tmp)); \
+ } while (0)
+
+void qemu_devtree_dumpdtb(void *fdt, int size);
+
+#endif /* __DEVICE_TREE_H__ */
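As a usage note for the libfdt wrappers above: a minimal sketch that adds a node and sets its properties, assuming a QEMU build tree. The node path, compatible string and reg values are illustrative; qemu_devtree_setprop_cells converts each cell to big-endian exactly as the macro above shows.

#include "sysemu/device_tree.h"

static void fdt_add_example_uart(void *fdt)
{
    qemu_devtree_add_subnode(fdt, "/uart@10000000");
    qemu_devtree_setprop_string(fdt, "/uart@10000000",
                                "compatible", "ns16550");
    /* "reg" as two address cells and two size cells */
    qemu_devtree_setprop_cells(fdt, "/uart@10000000", "reg",
                               0x0, 0x10000000, 0x0, 0x100);
}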
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
new file mode 100644
index 0000000000..a52c93a553
--- /dev/null
+++ b/include/sysemu/dma.h
@@ -0,0 +1,282 @@
+/*
+ * DMA helper functions
+ *
+ * Copyright (c) 2009 Red Hat
+ *
+ * This work is licensed under the terms of the GNU General Public License
+ * (GNU GPL), version 2 or later.
+ */
+
+#ifndef DMA_H
+#define DMA_H
+
+#include <stdio.h>
+#include "exec/memory.h"
+#include "hw/hw.h"
+#include "block/block.h"
+#include "sysemu/kvm.h"
+
+typedef struct DMAContext DMAContext;
+typedef struct ScatterGatherEntry ScatterGatherEntry;
+
+typedef enum {
+ DMA_DIRECTION_TO_DEVICE = 0,
+ DMA_DIRECTION_FROM_DEVICE = 1,
+} DMADirection;
+
+struct QEMUSGList {
+ ScatterGatherEntry *sg;
+ int nsg;
+ int nalloc;
+ size_t size;
+ DMAContext *dma;
+};
+
+#ifndef CONFIG_USER_ONLY
+
+/*
+ * When an IOMMU is present, bus addresses become distinct from
+ * CPU/memory physical addresses and may be a different size. Because
+ * the IOVA size depends more on the bus than on the platform, we more
+ * or less have to treat these as 64-bit always to cover all (or at
+ * least most) cases.
+ */
+typedef uint64_t dma_addr_t;
+
+#define DMA_ADDR_BITS 64
+#define DMA_ADDR_FMT "%" PRIx64
+
+typedef int DMATranslateFunc(DMAContext *dma,
+ dma_addr_t addr,
+ hwaddr *paddr,
+ hwaddr *len,
+ DMADirection dir);
+typedef void* DMAMapFunc(DMAContext *dma,
+ dma_addr_t addr,
+ dma_addr_t *len,
+ DMADirection dir);
+typedef void DMAUnmapFunc(DMAContext *dma,
+ void *buffer,
+ dma_addr_t len,
+ DMADirection dir,
+ dma_addr_t access_len);
+
+struct DMAContext {
+ AddressSpace *as;
+ DMATranslateFunc *translate;
+ DMAMapFunc *map;
+ DMAUnmapFunc *unmap;
+};
+
+/* A global DMA context corresponding to the address_space_memory
+ * AddressSpace, for sysbus devices which do DMA.
+ */
+extern DMAContext dma_context_memory;
+
+static inline void dma_barrier(DMAContext *dma, DMADirection dir)
+{
+ /*
+ * This is called before DMA read and write operations
+ * unless the _relaxed form is used and is responsible
+ * for providing some sane ordering of accesses vs
+ * concurrently running VCPUs.
+ *
+ * Users of map(), unmap() or lower level st/ld_*
+ * operations are responsible for providing their own
+ * ordering via barriers.
+ *
+ * This primitive implementation does a simple smp_mb()
+ * before each operation which provides pretty much full
+ * ordering.
+ *
+ * A smarter implementation can be devised if needed to
+ * use lighter barriers based on the direction of the
+ * transfer, the DMA context, etc...
+ */
+ if (kvm_enabled()) {
+ smp_mb();
+ }
+}
+
+static inline bool dma_has_iommu(DMAContext *dma)
+{
+ return dma && dma->translate;
+}
+
+/* Checks that the given range of addresses is valid for DMA. This is
+ * useful for certain cases, but usually you should just use
+ * dma_memory_{read,write}() and check for errors */
+bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
+ DMADirection dir);
+static inline bool dma_memory_valid(DMAContext *dma,
+ dma_addr_t addr, dma_addr_t len,
+ DMADirection dir)
+{
+ if (!dma_has_iommu(dma)) {
+ return true;
+ } else {
+ return iommu_dma_memory_valid(dma, addr, len, dir);
+ }
+}
+
+int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len, DMADirection dir);
+static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir)
+{
+ if (!dma_has_iommu(dma)) {
+ /* Fast-path for no IOMMU */
+ address_space_rw(dma->as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
+ return 0;
+ } else {
+ return iommu_dma_memory_rw(dma, addr, buf, len, dir);
+ }
+}
+
+static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+{
+ return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+{
+ return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
+ DMA_DIRECTION_FROM_DEVICE);
+}
+
+static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir)
+{
+ dma_barrier(dma, dir);
+
+ return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
+}
+
+static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+{
+ return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+{
+ return dma_memory_rw(dma, addr, (void *)buf, len,
+ DMA_DIRECTION_FROM_DEVICE);
+}
+
+int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
+ dma_addr_t len);
+
+int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);
+
+void *iommu_dma_memory_map(DMAContext *dma,
+ dma_addr_t addr, dma_addr_t *len,
+ DMADirection dir);
+static inline void *dma_memory_map(DMAContext *dma,
+ dma_addr_t addr, dma_addr_t *len,
+ DMADirection dir)
+{
+ if (!dma_has_iommu(dma)) {
+ hwaddr xlen = *len;
+ void *p;
+
+ p = address_space_map(dma->as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
+ *len = xlen;
+ return p;
+ } else {
+ return iommu_dma_memory_map(dma, addr, len, dir);
+ }
+}
+
+void iommu_dma_memory_unmap(DMAContext *dma,
+ void *buffer, dma_addr_t len,
+ DMADirection dir, dma_addr_t access_len);
+static inline void dma_memory_unmap(DMAContext *dma,
+ void *buffer, dma_addr_t len,
+ DMADirection dir, dma_addr_t access_len)
+{
+ if (!dma_has_iommu(dma)) {
+ address_space_unmap(dma->as, buffer, (hwaddr)len,
+ dir == DMA_DIRECTION_FROM_DEVICE, access_len);
+ } else {
+ iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
+ }
+}
+
+#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
+ static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
+ dma_addr_t addr) \
+ { \
+ uint##_bits##_t val; \
+ dma_memory_read(dma, addr, &val, (_bits) / 8); \
+ return _end##_bits##_to_cpu(val); \
+ } \
+ static inline void st##_sname##_##_end##_dma(DMAContext *dma, \
+ dma_addr_t addr, \
+ uint##_bits##_t val) \
+ { \
+ val = cpu_to_##_end##_bits(val); \
+ dma_memory_write(dma, addr, &val, (_bits) / 8); \
+ }
+
+static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
+{
+ uint8_t val;
+
+ dma_memory_read(dma, addr, &val, 1);
+ return val;
+}
+
+static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
+{
+ dma_memory_write(dma, addr, &val, 1);
+}
+
+DEFINE_LDST_DMA(uw, w, 16, le);
+DEFINE_LDST_DMA(l, l, 32, le);
+DEFINE_LDST_DMA(q, q, 64, le);
+DEFINE_LDST_DMA(uw, w, 16, be);
+DEFINE_LDST_DMA(l, l, 32, be);
+DEFINE_LDST_DMA(q, q, 64, be);
+
+#undef DEFINE_LDST_DMA
+
+void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
+ DMAMapFunc map, DMAUnmapFunc unmap);
+
+struct ScatterGatherEntry {
+ dma_addr_t base;
+ dma_addr_t len;
+};
+
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
+void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
+void qemu_sglist_destroy(QEMUSGList *qsg);
+#endif
+
+typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
+ QEMUIOVector *iov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque);
+
+BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
+ QEMUSGList *sg, uint64_t sector_num,
+ DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
+ void *opaque, DMADirection dir);
+BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
+ QEMUSGList *sg, uint64_t sector,
+ BlockDriverCompletionFunc *cb, void *opaque);
+BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
+ QEMUSGList *sg, uint64_t sector,
+ BlockDriverCompletionFunc *cb, void *opaque);
+uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
+uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);
+
+void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
+ QEMUSGList *sg, enum BlockAcctType type);
+
+#endif
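As a usage note for the DMA helpers above: a minimal sketch of a device pulling a descriptor out of guest memory through its DMAContext, assuming a QEMU system-emulation build (CONFIG_USER_ONLY not set); the descriptor layout is illustrative. ldl_le_dma() is one of the accessors generated by DEFINE_LDST_DMA above.

#include "sysemu/dma.h"

/* Illustrative: read a 4-byte little-endian flags word, then the raw body. */
static uint32_t read_desc_flags(DMAContext *dma, dma_addr_t desc_addr)
{
    return ldl_le_dma(dma, desc_addr);
}

static int read_desc_body(DMAContext *dma, dma_addr_t desc_addr,
                          void *buf, dma_addr_t len)
{
    return dma_memory_read(dma, desc_addr + 4, buf, len);
}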
diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h
new file mode 100644
index 0000000000..e25b7cfb73
--- /dev/null
+++ b/include/sysemu/dump.h
@@ -0,0 +1,35 @@
+/*
+ * QEMU dump
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef DUMP_H
+#define DUMP_H
+
+typedef struct ArchDumpInfo {
+ int d_machine; /* Architecture */
+ int d_endian; /* ELFDATA2LSB or ELFDATA2MSB */
+ int d_class; /* ELFCLASS32 or ELFCLASS64 */
+} ArchDumpInfo;
+
+typedef int (*write_core_dump_function)(void *buf, size_t size, void *opaque);
+int cpu_write_elf64_note(write_core_dump_function f, CPUArchState *env,
+ int cpuid, void *opaque);
+int cpu_write_elf32_note(write_core_dump_function f, CPUArchState *env,
+ int cpuid, void *opaque);
+int cpu_write_elf64_qemunote(write_core_dump_function f, CPUArchState *env,
+ void *opaque);
+int cpu_write_elf32_qemunote(write_core_dump_function f, CPUArchState *env,
+ void *opaque);
+int cpu_get_dump_info(ArchDumpInfo *info);
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus);
+
+#endif
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
new file mode 100644
index 0000000000..3db19ffdac
--- /dev/null
+++ b/include/sysemu/kvm.h
@@ -0,0 +1,280 @@
+/*
+ * QEMU KVM support
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_H
+#define QEMU_KVM_H
+
+#include <errno.h>
+#include "config-host.h"
+#include "qemu/queue.h"
+
+#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#endif
+
+extern int kvm_allowed;
+extern bool kvm_kernel_irqchip;
+extern bool kvm_async_interrupts_allowed;
+extern bool kvm_irqfds_allowed;
+extern bool kvm_msi_via_irqfd_allowed;
+extern bool kvm_gsi_routing_allowed;
+
+#if defined CONFIG_KVM || !defined NEED_CPU_H
+#define kvm_enabled() (kvm_allowed)
+/**
+ * kvm_irqchip_in_kernel:
+ *
+ * Returns: true if the user asked us to create an in-kernel
+ * irqchip via the "kernel_irqchip=on" machine option.
+ * What this actually means is architecture and machine model
+ * specific: on PC, for instance, it means that the LAPIC,
+ * IOAPIC and PIT are all in kernel. This function should never
+ * be used from generic target-independent code: use one of the
+ * following functions or some other specific check instead.
+ */
+#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
+
+/**
+ * kvm_async_interrupts_enabled:
+ *
+ * Returns: true if we can deliver interrupts to KVM
+ * asynchronously (ie by ioctl from any thread at any time)
+ * rather than having to do interrupt delivery synchronously
+ * (where the vcpu must be stopped at a suitable point first).
+ */
+#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)
+
+/**
+ * kvm_irqfds_enabled:
+ *
+ * Returns: true if we can use irqfds to inject interrupts into
+ * a KVM CPU (ie the kernel supports irqfds and we are running
+ * with a configuration where it is meaningful to use them).
+ */
+#define kvm_irqfds_enabled() (kvm_irqfds_allowed)
+
+/**
+ * kvm_msi_via_irqfd_enabled:
+ *
+ * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
+ * to a KVM CPU via an irqfd. This requires that the kernel supports
+ * this and that we're running in a configuration that permits it.
+ */
+#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)
+
+/**
+ * kvm_gsi_routing_enabled:
+ *
+ * Returns: true if GSI routing is enabled (ie the kernel supports
+ * it and we're running in a configuration that permits it).
+ */
+#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)
+
+#else
+#define kvm_enabled() (0)
+#define kvm_irqchip_in_kernel() (false)
+#define kvm_async_interrupts_enabled() (false)
+#define kvm_irqfds_enabled() (false)
+#define kvm_msi_via_irqfd_enabled() (false)
+#define kvm_gsi_routing_enabled() (false)
+#endif
+
+struct kvm_run;
+struct kvm_lapic_state;
+
+typedef struct KVMCapabilityInfo {
+ const char *name;
+ int value;
+} KVMCapabilityInfo;
+
+#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
+#define KVM_CAP_LAST_INFO { NULL, 0 }
+
+struct KVMState;
+typedef struct KVMState KVMState;
+extern KVMState *kvm_state;
+
+/* external API */
+
+int kvm_init(void);
+
+int kvm_has_sync_mmu(void);
+int kvm_has_vcpu_events(void);
+int kvm_has_robust_singlestep(void);
+int kvm_has_debugregs(void);
+int kvm_has_xsave(void);
+int kvm_has_xcrs(void);
+int kvm_has_pit_state2(void);
+int kvm_has_many_ioeventfds(void);
+int kvm_has_gsi_routing(void);
+int kvm_has_intx_set_mask(void);
+
+#ifdef NEED_CPU_H
+int kvm_init_vcpu(CPUArchState *env);
+
+int kvm_cpu_exec(CPUArchState *env);
+
+#if !defined(CONFIG_USER_ONLY)
+void *kvm_vmalloc(ram_addr_t size);
+void *kvm_arch_vmalloc(ram_addr_t size);
+void kvm_setup_guest_memory(void *start, size_t size);
+
+void kvm_flush_coalesced_mmio_buffer(void);
+#endif
+
+int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
+ target_ulong len, int type);
+int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
+ target_ulong len, int type);
+void kvm_remove_all_breakpoints(CPUArchState *current_env);
+int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap);
+#ifndef _WIN32
+int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset);
+#endif
+
+int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
+int kvm_on_sigbus(int code, void *addr);
+
+/* internal API */
+
+int kvm_ioctl(KVMState *s, int type, ...);
+
+int kvm_vm_ioctl(KVMState *s, int type, ...);
+
+int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);
+
+/* Arch specific hooks */
+
+extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
+
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
+
+int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
+
+int kvm_arch_process_async_events(CPUState *cpu);
+
+int kvm_arch_get_registers(CPUState *cpu);
+
+/* state subset only touched by the VCPU itself during runtime */
+#define KVM_PUT_RUNTIME_STATE 1
+/* state subset modified during VCPU reset */
+#define KVM_PUT_RESET_STATE 2
+/* full state set, modified during initialization or on vmload */
+#define KVM_PUT_FULL_STATE 3
+
+int kvm_arch_put_registers(CPUState *cpu, int level);
+
+int kvm_arch_init(KVMState *s);
+
+int kvm_arch_init_vcpu(CPUState *cpu);
+
+void kvm_arch_reset_vcpu(CPUState *cpu);
+
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
+int kvm_arch_on_sigbus(int code, void *addr);
+
+void kvm_arch_init_irq_routing(KVMState *s);
+
+int kvm_set_irq(KVMState *s, int irq, int level);
+int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
+
+void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
+
+void kvm_put_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
+void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
+
+struct kvm_guest_debug;
+struct kvm_debug_exit_arch;
+
+struct kvm_sw_breakpoint {
+ target_ulong pc;
+ target_ulong saved_insn;
+ int use_count;
+ QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
+};
+
+QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
+
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
+ target_ulong pc);
+
+int kvm_sw_breakpoints_active(CPUState *cpu);
+
+int kvm_arch_insert_sw_breakpoint(CPUState *current_cpu,
+ struct kvm_sw_breakpoint *bp);
+int kvm_arch_remove_sw_breakpoint(CPUState *current_cpu,
+ struct kvm_sw_breakpoint *bp);
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type);
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type);
+void kvm_arch_remove_all_hw_breakpoints(void);
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
+
+int kvm_check_extension(KVMState *s, unsigned int extension);
+
+uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
+ uint32_t index, int reg);
+void kvm_cpu_synchronize_state(CPUArchState *env);
+void kvm_cpu_synchronize_post_reset(CPUArchState *env);
+void kvm_cpu_synchronize_post_init(CPUArchState *env);
+
+/* generic hooks - to be moved/refactored once there are more users */
+
+static inline void cpu_synchronize_state(CPUArchState *env)
+{
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_state(env);
+ }
+}
+
+static inline void cpu_synchronize_post_reset(CPUArchState *env)
+{
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_post_reset(env);
+ }
+}
+
+static inline void cpu_synchronize_post_init(CPUArchState *env)
+{
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_post_init(env);
+ }
+}
+
+
+#if !defined(CONFIG_USER_ONLY)
+int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
+ hwaddr *phys_addr);
+#endif
+
+#endif
+int kvm_set_ioeventfd_mmio(int fd, uint32_t adr, uint32_t val, bool assign,
+ uint32_t size);
+
+int kvm_set_ioeventfd_pio_word(int fd, uint16_t adr, uint16_t val, bool assign);
+
+int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg);
+int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg);
+void kvm_irqchip_release_virq(KVMState *s, int virq);
+
+int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
+int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
+void kvm_pc_gsi_handler(void *opaque, int n, int level);
+void kvm_pc_setup_irq_routing(bool pci_enabled);
+#endif
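As a usage note for the capability macros above: a minimal sketch of an MSI injection path that only takes the in-kernel route when KVM and the in-kernel irqchip are available, assuming a QEMU build tree. The helper is illustrative; a caller would fall back to emulated delivery on -ENOSYS.

#include "sysemu/kvm.h"

static int example_inject_msi(MSIMessage msg)
{
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        return kvm_irqchip_send_msi(kvm_state, msg);
    }
    return -ENOSYS;
}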
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
new file mode 100644
index 0000000000..1256125963
--- /dev/null
+++ b/include/sysemu/memory_mapping.h
@@ -0,0 +1,64 @@
+/*
+ * QEMU memory mapping
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_MAPPING_H
+#define MEMORY_MAPPING_H
+
+#include "qemu/queue.h"
+
+/* The physical and virtual addresses in the memory mapping are contiguous. */
+typedef struct MemoryMapping {
+ hwaddr phys_addr;
+ target_ulong virt_addr;
+ ram_addr_t length;
+ QTAILQ_ENTRY(MemoryMapping) next;
+} MemoryMapping;
+
+typedef struct MemoryMappingList {
+ unsigned int num;
+ MemoryMapping *last_mapping;
+ QTAILQ_HEAD(, MemoryMapping) head;
+} MemoryMappingList;
+
+int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env);
+bool cpu_paging_enabled(CPUArchState *env);
+
+/*
+ * Add or merge the memory region [phys_addr, phys_addr + length) into the
+ * memory mapping list. The region's virtual address starts at virt_addr and
+ * is contiguous. The list is kept sorted by phys_addr.
+ */
+void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
+ hwaddr phys_addr,
+ hwaddr virt_addr,
+ ram_addr_t length);
+
+void memory_mapping_list_free(MemoryMappingList *list);
+
+void memory_mapping_list_init(MemoryMappingList *list);
+
+/*
+ * Return value:
+ * 0: success
+ * -1: failed
+ * -2: unsupported
+ */
+int qemu_get_guest_memory_mapping(MemoryMappingList *list);
+
+/* Get the guest's memory mapping without doing paging (virtual address is 0). */
+void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list);
+
+void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
+ int64_t length);
+
+#endif
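As a usage note for the mapping-list API above: a minimal sketch that builds a one-entry list and releases it, assuming a QEMU build tree. The addresses are illustrative.

#include "sysemu/memory_mapping.h"

static void example_mapping_list(void)
{
    MemoryMappingList list;

    memory_mapping_list_init(&list);
    memory_mapping_list_add_merge_sorted(&list,
                                         0x100000,    /* phys_addr */
                                         0xc0100000,  /* virt_addr */
                                         0x200000);   /* length    */
    /* ... walk list.head with QTAILQ_FOREACH ... */
    memory_mapping_list_free(&list);
}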
diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h
new file mode 100644
index 0000000000..7f198e475c
--- /dev/null
+++ b/include/sysemu/os-posix.h
@@ -0,0 +1,51 @@
+/*
+ * posix specific declarations
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_OS_POSIX_H
+#define QEMU_OS_POSIX_H
+
+void os_set_line_buffering(void);
+void os_set_proc_name(const char *s);
+void os_setup_signal_handling(void);
+void os_daemonize(void);
+void os_setup_post(void);
+
+typedef struct timeval qemu_timeval;
+#define qemu_gettimeofday(tp) gettimeofday(tp, NULL)
+
+#ifndef CONFIG_UTIMENSAT
+#ifndef UTIME_NOW
+# define UTIME_NOW ((1l << 30) - 1l)
+#endif
+#ifndef UTIME_OMIT
+# define UTIME_OMIT ((1l << 30) - 2l)
+#endif
+#endif
+typedef struct timespec qemu_timespec;
+int qemu_utimens(const char *path, const qemu_timespec *times);
+
+bool is_daemonized(void);
+
+#endif
diff --git a/include/sysemu/os-win32.h b/include/sysemu/os-win32.h
new file mode 100644
index 0000000000..d0e9234d24
--- /dev/null
+++ b/include/sysemu/os-win32.h
@@ -0,0 +1,99 @@
+/*
+ * win32 specific declarations
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_OS_WIN32_H
+#define QEMU_OS_WIN32_H
+
+#include <windows.h>
+#include <winsock2.h>
+
+/* Workaround for older versions of MinGW. */
+#ifndef ECONNREFUSED
+# define ECONNREFUSED WSAECONNREFUSED
+#endif
+#ifndef EINPROGRESS
+# define EINPROGRESS WSAEINPROGRESS
+#endif
+#ifndef EHOSTUNREACH
+# define EHOSTUNREACH WSAEHOSTUNREACH
+#endif
+#ifndef EINTR
+# define EINTR WSAEINTR
+#endif
+#ifndef EINPROGRESS
+# define EINPROGRESS WSAEINPROGRESS
+#endif
+#ifndef ENETUNREACH
+# define ENETUNREACH WSAENETUNREACH
+#endif
+#ifndef ENOTCONN
+# define ENOTCONN WSAENOTCONN
+#endif
+#ifndef EWOULDBLOCK
+# define EWOULDBLOCK WSAEWOULDBLOCK
+#endif
+
+#if defined(_WIN64)
+/* On w64, setjmp is implemented by _setjmp, which needs a second parameter.
+ * If this parameter is NULL, longjmp does no stack unwinding. That is what
+ * we need for QEMU. Passing the value of register rsp (the default) makes
+ * longjmp attempt stack unwinding, which will crash with generated code. */
+# undef setjmp
+# define setjmp(env) _setjmp(env, NULL)
+#endif
+
+/* Declaration of ffs() is missing in MinGW's strings.h. */
+int ffs(int i);
+
+/* Missing POSIX functions. Don't use MinGW-w64 macros. */
+#undef gmtime_r
+struct tm *gmtime_r(const time_t *timep, struct tm *result);
+#undef localtime_r
+struct tm *localtime_r(const time_t *timep, struct tm *result);
+
+static inline void os_setup_signal_handling(void) {}
+static inline void os_daemonize(void) {}
+static inline void os_setup_post(void) {}
+void os_set_line_buffering(void);
+static inline void os_set_proc_name(const char *dummy) {}
+
+#if !defined(EPROTONOSUPPORT)
+# define EPROTONOSUPPORT EINVAL
+#endif
+
+int setenv(const char *name, const char *value, int overwrite);
+
+typedef struct {
+ long tv_sec;
+ long tv_usec;
+} qemu_timeval;
+int qemu_gettimeofday(qemu_timeval *tp);
+
+static inline bool is_daemonized(void)
+{
+ return false;
+}
+
+#endif
diff --git a/include/sysemu/qtest.h b/include/sysemu/qtest.h
new file mode 100644
index 0000000000..723a4f9536
--- /dev/null
+++ b/include/sysemu/qtest.h
@@ -0,0 +1,53 @@
+/*
+ * Test Server
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QTEST_H
+#define QTEST_H
+
+#include "qemu-common.h"
+
+#if !defined(CONFIG_USER_ONLY)
+extern int qtest_allowed;
+extern const char *qtest_chrdev;
+extern const char *qtest_log;
+
+static inline bool qtest_enabled(void)
+{
+ return qtest_allowed;
+}
+
+static inline int qtest_available(void)
+{
+ return 1;
+}
+
+int qtest_init(void);
+#else
+static inline bool qtest_enabled(void)
+{
+ return false;
+}
+
+static inline int qtest_available(void)
+{
+ return 0;
+}
+
+static inline int qtest_init(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/include/sysemu/seccomp.h b/include/sysemu/seccomp.h
new file mode 100644
index 0000000000..1189fa241d
--- /dev/null
+++ b/include/sysemu/seccomp.h
@@ -0,0 +1,22 @@
+/*
+ * QEMU seccomp mode 2 support with libseccomp
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Eduardo Otubo <eotubo@br.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+#ifndef QEMU_SECCOMP_H
+#define QEMU_SECCOMP_H
+
+#include <seccomp.h>
+#include "qemu/osdep.h"
+
+int seccomp_start(void);
+#endif
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
new file mode 100644
index 0000000000..28a783e2be
--- /dev/null
+++ b/include/sysemu/sysemu.h
@@ -0,0 +1,186 @@
+#ifndef SYSEMU_H
+#define SYSEMU_H
+/* Misc. things related to the system emulator. */
+
+#include "qemu/typedefs.h"
+#include "qemu/option.h"
+#include "qemu/queue.h"
+#include "qemu/timer.h"
+#include "qapi-types.h"
+#include "qemu/notify.h"
+#include "qemu/main-loop.h"
+
+/* vl.c */
+
+extern const char *bios_name;
+
+extern const char *qemu_name;
+extern uint8_t qemu_uuid[];
+int qemu_uuid_parse(const char *str, uint8_t *uuid);
+#define UUID_FMT "%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"
+
+bool runstate_check(RunState state);
+void runstate_set(RunState new_state);
+int runstate_is_running(void);
+typedef struct vm_change_state_entry VMChangeStateEntry;
+typedef void VMChangeStateHandler(void *opaque, int running, RunState state);
+
+VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
+ void *opaque);
+void qemu_del_vm_change_state_handler(VMChangeStateEntry *e);
+void vm_state_notify(int running, RunState state);
+
+#define VMRESET_SILENT false
+#define VMRESET_REPORT true
+
+void vm_start(void);
+void vm_stop(RunState state);
+void vm_stop_force_state(RunState state);
+
+typedef enum WakeupReason {
+ QEMU_WAKEUP_REASON_OTHER = 0,
+ QEMU_WAKEUP_REASON_RTC,
+ QEMU_WAKEUP_REASON_PMTIMER,
+} WakeupReason;
+
+void qemu_system_reset_request(void);
+void qemu_system_suspend_request(void);
+void qemu_register_suspend_notifier(Notifier *notifier);
+void qemu_system_wakeup_request(WakeupReason reason);
+void qemu_system_wakeup_enable(WakeupReason reason, bool enabled);
+void qemu_register_wakeup_notifier(Notifier *notifier);
+void qemu_system_shutdown_request(void);
+void qemu_system_powerdown_request(void);
+void qemu_register_powerdown_notifier(Notifier *notifier);
+void qemu_system_debug_request(void);
+void qemu_system_vmstop_request(RunState reason);
+int qemu_shutdown_requested_get(void);
+int qemu_reset_requested_get(void);
+void qemu_system_killed(int signal, pid_t pid);
+void qemu_devices_reset(void);
+void qemu_system_reset(bool report);
+
+void qemu_add_exit_notifier(Notifier *notify);
+void qemu_remove_exit_notifier(Notifier *notify);
+
+void qemu_add_machine_init_done_notifier(Notifier *notify);
+
+void do_savevm(Monitor *mon, const QDict *qdict);
+int load_vmstate(const char *name);
+void do_delvm(Monitor *mon, const QDict *qdict);
+void do_info_snapshots(Monitor *mon);
+
+void qemu_announce_self(void);
+
+bool qemu_savevm_state_blocked(Error **errp);
+int qemu_savevm_state_begin(QEMUFile *f,
+ const MigrationParams *params);
+int qemu_savevm_state_iterate(QEMUFile *f);
+int qemu_savevm_state_complete(QEMUFile *f);
+void qemu_savevm_state_cancel(QEMUFile *f);
+uint64_t qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size);
+int qemu_loadvm_state(QEMUFile *f);
+
+/* SLIRP */
+void do_info_slirp(Monitor *mon);
+
+typedef enum DisplayType
+{
+ DT_DEFAULT,
+ DT_CURSES,
+ DT_SDL,
+ DT_NOGRAPHIC,
+ DT_NONE,
+} DisplayType;
+
+extern int autostart;
+extern int bios_size;
+
+typedef enum {
+ VGA_NONE, VGA_STD, VGA_CIRRUS, VGA_VMWARE, VGA_XENFB, VGA_QXL,
+} VGAInterfaceType;
+
+extern int vga_interface_type;
+#define xenfb_enabled (vga_interface_type == VGA_XENFB)
+#define qxl_enabled (vga_interface_type == VGA_QXL)
+
+extern int graphic_width;
+extern int graphic_height;
+extern int graphic_depth;
+extern DisplayType display_type;
+extern const char *keyboard_layout;
+extern int win2k_install_hack;
+extern int alt_grab;
+extern int ctrl_grab;
+extern int smp_cpus;
+extern int max_cpus;
+extern int cursor_hide;
+extern int graphic_rotate;
+extern int no_quit;
+extern int no_shutdown;
+extern int semihosting_enabled;
+extern int old_param;
+extern int boot_menu;
+extern uint8_t *boot_splash_filedata;
+extern int boot_splash_filedata_size;
+extern uint8_t qemu_extra_params_fw[2];
+extern QEMUClock *rtc_clock;
+
+#define MAX_NODES 64
+#define MAX_CPUMASK_BITS 255
+extern int nb_numa_nodes;
+extern uint64_t node_mem[MAX_NODES];
+extern unsigned long *node_cpumask[MAX_NODES];
+
+#define MAX_OPTION_ROMS 16
+typedef struct QEMUOptionRom {
+ const char *name;
+ int32_t bootindex;
+} QEMUOptionRom;
+extern QEMUOptionRom option_rom[MAX_OPTION_ROMS];
+extern int nb_option_roms;
+
+#define MAX_PROM_ENVS 128
+extern const char *prom_envs[MAX_PROM_ENVS];
+extern unsigned int nb_prom_envs;
+
+/* pci-hotplug */
+void pci_device_hot_add(Monitor *mon, const QDict *qdict);
+int pci_drive_hot_add(Monitor *mon, const QDict *qdict, DriveInfo *dinfo);
+void do_pci_device_hot_remove(Monitor *mon, const QDict *qdict);
+
+/* generic hotplug */
+void drive_hot_add(Monitor *mon, const QDict *qdict);
+
+/* pcie aer error injection */
+void pcie_aer_inject_error_print(Monitor *mon, const QObject *data);
+int do_pcie_aer_inject_error(Monitor *mon,
+ const QDict *qdict, QObject **ret_data);
+
+/* serial ports */
+
+#define MAX_SERIAL_PORTS 4
+
+extern CharDriverState *serial_hds[MAX_SERIAL_PORTS];
+
+/* parallel ports */
+
+#define MAX_PARALLEL_PORTS 3
+
+extern CharDriverState *parallel_hds[MAX_PARALLEL_PORTS];
+
+void do_usb_add(Monitor *mon, const QDict *qdict);
+void do_usb_del(Monitor *mon, const QDict *qdict);
+void usb_info(Monitor *mon);
+
+void rtc_change_mon_event(struct tm *tm);
+
+void register_devices(void);
+
+void add_boot_device_path(int32_t bootindex, DeviceState *dev,
+ const char *suffix);
+char *get_boot_devices_list(uint32_t *size);
+
+bool usb_enabled(bool default_usb);
+
+#endif
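
Among the sysemu.h declarations, the VM change-state handlers are the piece most device code interacts with: a callback registered with qemu_add_vm_change_state_handler() fires from vm_state_notify() whenever the VM starts or stops running. A sketch of that pattern, with MyDev and the mydev_* helpers as hypothetical names:

    #include <stdbool.h>
    #include "sysemu/sysemu.h"

    typedef struct MyDev {
        VMChangeStateEntry *vmstate;
        bool active;
    } MyDev;

    static void mydev_vm_state_change(void *opaque, int running, RunState state)
    {
        MyDev *d = opaque;
        d->active = running;     /* resume on vm_start(), pause on vm_stop() */
    }

    static void mydev_realize(MyDev *d)
    {
        d->vmstate = qemu_add_vm_change_state_handler(mydev_vm_state_change, d);
    }

    static void mydev_unrealize(MyDev *d)
    {
        qemu_del_vm_change_state_handler(d->vmstate);
    }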
diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
new file mode 100644
index 0000000000..c59804060b
--- /dev/null
+++ b/include/sysemu/xen-mapcache.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011 Citrix Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef XEN_MAPCACHE_H
+#define XEN_MAPCACHE_H
+
+#include <stdlib.h>
+
+typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr start_addr,
+ ram_addr_t size,
+ void *opaque);
+#ifdef CONFIG_XEN
+
+void xen_map_cache_init(phys_offset_to_gaddr_t f,
+ void *opaque);
+uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
+ uint8_t lock);
+ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
+void xen_invalidate_map_cache_entry(uint8_t *buffer);
+void xen_invalidate_map_cache(void);
+
+#else
+
+static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
+ void *opaque)
+{
+}
+
+static inline uint8_t *xen_map_cache(hwaddr phys_addr,
+ hwaddr size,
+ uint8_t lock)
+{
+ abort();
+}
+
+static inline ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
+{
+ abort();
+}
+
+static inline void xen_invalidate_map_cache_entry(uint8_t *buffer)
+{
+}
+
+static inline void xen_invalidate_map_cache(void)
+{
+}
+
+#endif
+
+#endif /* !XEN_MAPCACHE_H */
diff --git a/include/ui/console.h b/include/ui/console.h
new file mode 100644
index 0000000000..fc23baa06b
--- /dev/null
+++ b/include/ui/console.h
@@ -0,0 +1,485 @@
+#ifndef CONSOLE_H
+#define CONSOLE_H
+
+#include "ui/qemu-pixman.h"
+#include "qapi/qmp/qdict.h"
+#include "qemu/notify.h"
+#include "monitor/monitor.h"
+#include "trace.h"
+#include "qapi-types.h"
+#include "qapi/error.h"
+
+/* keyboard/mouse support */
+
+#define MOUSE_EVENT_LBUTTON 0x01
+#define MOUSE_EVENT_RBUTTON 0x02
+#define MOUSE_EVENT_MBUTTON 0x04
+
+/* identical to the ps/2 keyboard bits */
+#define QEMU_SCROLL_LOCK_LED (1 << 0)
+#define QEMU_NUM_LOCK_LED (1 << 1)
+#define QEMU_CAPS_LOCK_LED (1 << 2)
+
+/* in ms */
+#define GUI_REFRESH_INTERVAL 30
+
+typedef void QEMUPutKBDEvent(void *opaque, int keycode);
+typedef void QEMUPutLEDEvent(void *opaque, int ledstate);
+typedef void QEMUPutMouseEvent(void *opaque, int dx, int dy, int dz, int buttons_state);
+
+typedef struct QEMUPutMouseEntry {
+ QEMUPutMouseEvent *qemu_put_mouse_event;
+ void *qemu_put_mouse_event_opaque;
+ int qemu_put_mouse_event_absolute;
+ char *qemu_put_mouse_event_name;
+
+ int index;
+
+ /* used internally by qemu for handling mice */
+ QTAILQ_ENTRY(QEMUPutMouseEntry) node;
+} QEMUPutMouseEntry;
+
+typedef struct QEMUPutLEDEntry {
+ QEMUPutLEDEvent *put_led;
+ void *opaque;
+ QTAILQ_ENTRY(QEMUPutLEDEntry) next;
+} QEMUPutLEDEntry;
+
+void qemu_add_kbd_event_handler(QEMUPutKBDEvent *func, void *opaque);
+void qemu_remove_kbd_event_handler(void);
+QEMUPutMouseEntry *qemu_add_mouse_event_handler(QEMUPutMouseEvent *func,
+ void *opaque, int absolute,
+ const char *name);
+void qemu_remove_mouse_event_handler(QEMUPutMouseEntry *entry);
+void qemu_activate_mouse_event_handler(QEMUPutMouseEntry *entry);
+
+QEMUPutLEDEntry *qemu_add_led_event_handler(QEMUPutLEDEvent *func, void *opaque);
+void qemu_remove_led_event_handler(QEMUPutLEDEntry *entry);
+
+void kbd_put_keycode(int keycode);
+void kbd_put_ledstate(int ledstate);
+void kbd_mouse_event(int dx, int dy, int dz, int buttons_state);
+
+/* Does the current mouse generate absolute events */
+int kbd_mouse_is_absolute(void);
+void qemu_add_mouse_mode_change_notifier(Notifier *notify);
+void qemu_remove_mouse_mode_change_notifier(Notifier *notify);
+
+/* Of all the mice, is there one that generates absolute events */
+int kbd_mouse_has_absolute(void);
+
+struct MouseTransformInfo {
+ /* Touchscreen resolution */
+ int x;
+ int y;
+ /* Calibration values as used/generated by tslib */
+ int a[7];
+};
+
+void do_mouse_set(Monitor *mon, const QDict *qdict);
+
+/* keysym is a Unicode code point except for special keys (see the
+   QEMU_KEY_xxx constants) */
+#define QEMU_KEY_ESC1(c) ((c) | 0xe100)
+#define QEMU_KEY_BACKSPACE 0x007f
+#define QEMU_KEY_UP QEMU_KEY_ESC1('A')
+#define QEMU_KEY_DOWN QEMU_KEY_ESC1('B')
+#define QEMU_KEY_RIGHT QEMU_KEY_ESC1('C')
+#define QEMU_KEY_LEFT QEMU_KEY_ESC1('D')
+#define QEMU_KEY_HOME QEMU_KEY_ESC1(1)
+#define QEMU_KEY_END QEMU_KEY_ESC1(4)
+#define QEMU_KEY_PAGEUP QEMU_KEY_ESC1(5)
+#define QEMU_KEY_PAGEDOWN QEMU_KEY_ESC1(6)
+#define QEMU_KEY_DELETE QEMU_KEY_ESC1(3)
+
+#define QEMU_KEY_CTRL_UP 0xe400
+#define QEMU_KEY_CTRL_DOWN 0xe401
+#define QEMU_KEY_CTRL_LEFT 0xe402
+#define QEMU_KEY_CTRL_RIGHT 0xe403
+#define QEMU_KEY_CTRL_HOME 0xe404
+#define QEMU_KEY_CTRL_END 0xe405
+#define QEMU_KEY_CTRL_PAGEUP 0xe406
+#define QEMU_KEY_CTRL_PAGEDOWN 0xe407
+
+void kbd_put_keysym(int keysym);
+
+/* consoles */
+
+#define QEMU_BIG_ENDIAN_FLAG 0x01
+#define QEMU_ALLOCATED_FLAG 0x02
+
+struct PixelFormat {
+ uint8_t bits_per_pixel;
+ uint8_t bytes_per_pixel;
+ uint8_t depth; /* color depth in bits */
+ uint32_t rmask, gmask, bmask, amask;
+ uint8_t rshift, gshift, bshift, ashift;
+ uint8_t rmax, gmax, bmax, amax;
+ uint8_t rbits, gbits, bbits, abits;
+};
+
+struct DisplaySurface {
+ pixman_format_code_t format;
+ pixman_image_t *image;
+ uint8_t flags;
+
+ struct PixelFormat pf;
+};
+
+/* cursor data format is 32bit RGBA */
+typedef struct QEMUCursor {
+ int width, height;
+ int hot_x, hot_y;
+ int refcount;
+ uint32_t data[];
+} QEMUCursor;
+
+QEMUCursor *cursor_alloc(int width, int height);
+void cursor_get(QEMUCursor *c);
+void cursor_put(QEMUCursor *c);
+QEMUCursor *cursor_builtin_hidden(void);
+QEMUCursor *cursor_builtin_left_ptr(void);
+void cursor_print_ascii_art(QEMUCursor *c, const char *prefix);
+int cursor_get_mono_bpl(QEMUCursor *c);
+void cursor_set_mono(QEMUCursor *c,
+ uint32_t foreground, uint32_t background, uint8_t *image,
+ int transparent, uint8_t *mask);
+void cursor_get_mono_image(QEMUCursor *c, int foreground, uint8_t *mask);
+void cursor_get_mono_mask(QEMUCursor *c, int transparent, uint8_t *mask);
+
+struct DisplayChangeListener {
+ int idle;
+ uint64_t gui_timer_interval;
+
+ void (*dpy_refresh)(struct DisplayState *s);
+
+ void (*dpy_gfx_update)(struct DisplayState *s, int x, int y, int w, int h);
+ void (*dpy_gfx_resize)(struct DisplayState *s);
+ void (*dpy_gfx_setdata)(struct DisplayState *s);
+ void (*dpy_gfx_copy)(struct DisplayState *s, int src_x, int src_y,
+ int dst_x, int dst_y, int w, int h);
+
+ void (*dpy_text_cursor)(struct DisplayState *s, int x, int y);
+ void (*dpy_text_resize)(struct DisplayState *s, int w, int h);
+ void (*dpy_text_update)(struct DisplayState *s, int x, int y, int w, int h);
+
+ void (*dpy_mouse_set)(struct DisplayState *s, int x, int y, int on);
+ void (*dpy_cursor_define)(struct DisplayState *s, QEMUCursor *cursor);
+
+ QLIST_ENTRY(DisplayChangeListener) next;
+};
+
+struct DisplayState {
+ struct DisplaySurface *surface;
+ void *opaque;
+ struct QEMUTimer *gui_timer;
+ bool have_gfx;
+ bool have_text;
+
+ QLIST_HEAD(, DisplayChangeListener) listeners;
+
+ struct DisplayState *next;
+};
+
+void register_displaystate(DisplayState *ds);
+DisplayState *get_displaystate(void);
+DisplaySurface* qemu_create_displaysurface_from(int width, int height, int bpp,
+ int linesize, uint8_t *data);
+PixelFormat qemu_different_endianness_pixelformat(int bpp);
+PixelFormat qemu_default_pixelformat(int bpp);
+
+DisplaySurface *qemu_create_displaysurface(DisplayState *ds,
+ int width, int height);
+DisplaySurface *qemu_resize_displaysurface(DisplayState *ds,
+ int width, int height);
+void qemu_free_displaysurface(DisplayState *ds);
+
+static inline int is_surface_bgr(DisplaySurface *surface)
+{
+ if (surface->pf.bits_per_pixel == 32 && surface->pf.rshift == 0)
+ return 1;
+ else
+ return 0;
+}
+
+static inline int is_buffer_shared(DisplaySurface *surface)
+{
+ return !(surface->flags & QEMU_ALLOCATED_FLAG);
+}
+
+void gui_setup_refresh(DisplayState *ds);
+
+static inline void register_displaychangelistener(DisplayState *ds, DisplayChangeListener *dcl)
+{
+ QLIST_INSERT_HEAD(&ds->listeners, dcl, next);
+ gui_setup_refresh(ds);
+ if (dcl->dpy_gfx_resize) {
+ dcl->dpy_gfx_resize(ds);
+ }
+}
+
+static inline void unregister_displaychangelistener(DisplayState *ds,
+ DisplayChangeListener *dcl)
+{
+ QLIST_REMOVE(dcl, next);
+ gui_setup_refresh(ds);
+}
+
+static inline void dpy_gfx_update(DisplayState *s, int x, int y, int w, int h)
+{
+ struct DisplayChangeListener *dcl;
+ int width = pixman_image_get_width(s->surface->image);
+ int height = pixman_image_get_height(s->surface->image);
+
+ x = MAX(x, 0);
+ y = MAX(y, 0);
+ x = MIN(x, width);
+ y = MIN(y, height);
+ w = MIN(w, width - x);
+ h = MIN(h, height - y);
+
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_gfx_update) {
+ dcl->dpy_gfx_update(s, x, y, w, h);
+ }
+ }
+}
+
+static inline void dpy_gfx_resize(DisplayState *s)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_gfx_resize) {
+ dcl->dpy_gfx_resize(s);
+ }
+ }
+}
+
+static inline void dpy_gfx_setdata(DisplayState *s)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_gfx_setdata) {
+ dcl->dpy_gfx_setdata(s);
+ }
+ }
+}
+
+static inline void dpy_refresh(DisplayState *s)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_refresh) {
+ dcl->dpy_refresh(s);
+ }
+ }
+}
+
+static inline void dpy_gfx_copy(struct DisplayState *s, int src_x, int src_y,
+ int dst_x, int dst_y, int w, int h)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_gfx_copy) {
+ dcl->dpy_gfx_copy(s, src_x, src_y, dst_x, dst_y, w, h);
+ } else { /* TODO */
+ dcl->dpy_gfx_update(s, dst_x, dst_y, w, h);
+ }
+ }
+}
+
+static inline void dpy_text_cursor(struct DisplayState *s, int x, int y)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_text_cursor) {
+ dcl->dpy_text_cursor(s, x, y);
+ }
+ }
+}
+
+static inline void dpy_text_update(DisplayState *s, int x, int y, int w, int h)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_text_update) {
+ dcl->dpy_text_update(s, x, y, w, h);
+ }
+ }
+}
+
+static inline void dpy_text_resize(DisplayState *s, int w, int h)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_text_resize) {
+ dcl->dpy_text_resize(s, w, h);
+ }
+ }
+}
+
+static inline void dpy_mouse_set(struct DisplayState *s, int x, int y, int on)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_mouse_set) {
+ dcl->dpy_mouse_set(s, x, y, on);
+ }
+ }
+}
+
+static inline void dpy_cursor_define(struct DisplayState *s, QEMUCursor *cursor)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_cursor_define) {
+ dcl->dpy_cursor_define(s, cursor);
+ }
+ }
+}
+
+static inline bool dpy_cursor_define_supported(struct DisplayState *s)
+{
+ struct DisplayChangeListener *dcl;
+ QLIST_FOREACH(dcl, &s->listeners, next) {
+ if (dcl->dpy_cursor_define) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline int ds_get_linesize(DisplayState *ds)
+{
+ return pixman_image_get_stride(ds->surface->image);
+}
+
+static inline uint8_t* ds_get_data(DisplayState *ds)
+{
+ return (void *)pixman_image_get_data(ds->surface->image);
+}
+
+static inline int ds_get_width(DisplayState *ds)
+{
+ return pixman_image_get_width(ds->surface->image);
+}
+
+static inline int ds_get_height(DisplayState *ds)
+{
+ return pixman_image_get_height(ds->surface->image);
+}
+
+static inline int ds_get_bits_per_pixel(DisplayState *ds)
+{
+ int bits = PIXMAN_FORMAT_BPP(ds->surface->format);
+ return bits;
+}
+
+static inline int ds_get_bytes_per_pixel(DisplayState *ds)
+{
+ int bits = PIXMAN_FORMAT_BPP(ds->surface->format);
+ return (bits + 7) / 8;
+}
+
+static inline pixman_format_code_t ds_get_format(DisplayState *ds)
+{
+ return ds->surface->format;
+}
+
+static inline pixman_image_t *ds_get_image(DisplayState *ds)
+{
+ return ds->surface->image;
+}
+
+static inline int ds_get_depth(DisplayState *ds)
+{
+ return ds->surface->pf.depth;
+}
+
+static inline int ds_get_rmask(DisplayState *ds)
+{
+ return ds->surface->pf.rmask;
+}
+
+static inline int ds_get_gmask(DisplayState *ds)
+{
+ return ds->surface->pf.gmask;
+}
+
+static inline int ds_get_bmask(DisplayState *ds)
+{
+ return ds->surface->pf.bmask;
+}
+
+#ifdef CONFIG_CURSES
+#include <curses.h>
+typedef chtype console_ch_t;
+#else
+typedef unsigned long console_ch_t;
+#endif
+static inline void console_write_ch(console_ch_t *dest, uint32_t ch)
+{
+ if (!(ch & 0xff))
+ ch |= ' ';
+ *dest = ch;
+}
+
+typedef void (*vga_hw_update_ptr)(void *);
+typedef void (*vga_hw_invalidate_ptr)(void *);
+typedef void (*vga_hw_screen_dump_ptr)(void *, const char *, bool cswitch,
+ Error **errp);
+typedef void (*vga_hw_text_update_ptr)(void *, console_ch_t *);
+
+DisplayState *graphic_console_init(vga_hw_update_ptr update,
+ vga_hw_invalidate_ptr invalidate,
+ vga_hw_screen_dump_ptr screen_dump,
+ vga_hw_text_update_ptr text_update,
+ void *opaque);
+
+void vga_hw_update(void);
+void vga_hw_invalidate(void);
+void vga_hw_text_update(console_ch_t *chardata);
+
+int is_graphic_console(void);
+int is_fixedsize_console(void);
+CharDriverState *text_console_init(QemuOpts *opts);
+void text_consoles_set_display(DisplayState *ds);
+void console_select(unsigned int index);
+void console_color_init(DisplayState *ds);
+void qemu_console_resize(DisplayState *ds, int width, int height);
+void qemu_console_copy(DisplayState *ds, int src_x, int src_y,
+ int dst_x, int dst_y, int w, int h);
+
+/* sdl.c */
+void sdl_display_init(DisplayState *ds, int full_screen, int no_frame);
+
+/* cocoa.m */
+void cocoa_display_init(DisplayState *ds, int full_screen);
+
+/* vnc.c */
+void vnc_display_init(DisplayState *ds);
+void vnc_display_open(DisplayState *ds, const char *display, Error **errp);
+void vnc_display_add_client(DisplayState *ds, int csock, int skipauth);
+char *vnc_display_local_addr(DisplayState *ds);
+#ifdef CONFIG_VNC
+int vnc_display_password(DisplayState *ds, const char *password);
+int vnc_display_pw_expire(DisplayState *ds, time_t expires);
+#else
+static inline int vnc_display_password(DisplayState *ds, const char *password)
+{
+ return -ENODEV;
+}
+static inline int vnc_display_pw_expire(DisplayState *ds, time_t expires)
+{
+ return -ENODEV;
+}
+#endif
+
+/* curses.c */
+void curses_display_init(DisplayState *ds, int full_screen);
+
+/* input.c */
+int index_from_key(const char *key);
+int index_from_keycode(int code);
+
+#endif
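
console.h wires display backends to the emulated graphics hardware through DisplayChangeListener: a backend fills in only the callbacks it cares about, registers with register_displaychangelistener(), and the inline dpy_* helpers fan each event out to every listener on the DisplayState. A sketch of a minimal backend (the dummy_* names are illustrative; g_new0() is the GLib allocator used throughout QEMU):

    #include <glib.h>
    #include "ui/console.h"

    static void dummy_refresh(DisplayState *ds)
    {
        vga_hw_update();               /* ask the emulated adapter to render */
    }

    static void dummy_update(DisplayState *ds, int x, int y, int w, int h)
    {
        /* A real backend would copy the dirty rectangle out of
         * ds_get_data(ds), using ds_get_linesize(ds) as the stride. */
    }

    void dummy_display_init(DisplayState *ds)
    {
        DisplayChangeListener *dcl = g_new0(DisplayChangeListener, 1);

        dcl->dpy_refresh    = dummy_refresh;
        dcl->dpy_gfx_update = dummy_update;
        register_displaychangelistener(ds, dcl);
    }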
diff --git a/include/ui/pixel_ops.h b/include/ui/pixel_ops.h
new file mode 100644
index 0000000000..d390adfd1b
--- /dev/null
+++ b/include/ui/pixel_ops.h
@@ -0,0 +1,53 @@
+static inline unsigned int rgb_to_pixel8(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return ((r >> 5) << 5) | ((g >> 5) << 2) | (b >> 6);
+}
+
+static inline unsigned int rgb_to_pixel15(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);
+}
+
+static inline unsigned int rgb_to_pixel15bgr(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return ((b >> 3) << 10) | ((g >> 3) << 5) | (r >> 3);
+}
+
+static inline unsigned int rgb_to_pixel16(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);
+}
+
+static inline unsigned int rgb_to_pixel16bgr(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return ((b >> 3) << 11) | ((g >> 2) << 5) | (r >> 3);
+}
+
+static inline unsigned int rgb_to_pixel24(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return (r << 16) | (g << 8) | b;
+}
+
+static inline unsigned int rgb_to_pixel24bgr(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return (b << 16) | (g << 8) | r;
+}
+
+static inline unsigned int rgb_to_pixel32(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return (r << 16) | (g << 8) | b;
+}
+
+static inline unsigned int rgb_to_pixel32bgr(unsigned int r, unsigned int g,
+ unsigned int b)
+{
+ return (b << 16) | (g << 8) | r;
+}
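
The rgb_to_pixel* helpers all take full 8-bit channels and pack them by dropping low-order bits to match the target format (3-3-2 for 8 bpp, 5-5-5 for 15 bpp, 5-6-5 for 16 bpp, 8-8-8 for 24/32 bpp). A worked example with an orange pixel (r=255, g=165, b=0), written as assertions; check_packing() is just an illustrative harness:

    #include <assert.h>
    #include "ui/pixel_ops.h"

    static void check_packing(void)
    {
        /* RGB565: (255 >> 3) = 31 -> bits 15-11, (165 >> 2) = 41 -> bits 10-5,
         * (0 >> 3) = 0 -> bits 4-0, so the packed value is 0xfd20. */
        assert(rgb_to_pixel16(255, 165, 0) == 0xfd20);

        /* 32 bpp keeps all 8 bits per channel: 0x00ffa500. */
        assert(rgb_to_pixel32(255, 165, 0) == 0x00ffa500);
    }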
diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h
new file mode 100644
index 0000000000..3c05c83a7c
--- /dev/null
+++ b/include/ui/qemu-pixman.h
@@ -0,0 +1,39 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_PIXMAN_H
+#define QEMU_PIXMAN_H
+
+#include <pixman.h>
+
+#include "console.h"
+
+/*
+ * pixman image formats are defined to be native endian, which
+ * means host byte order in QEMU. So we define fixed-endian
+ * formats here for the cases that need them, such as feeding
+ * libjpeg / libpng and writing screenshots.
+ */
+
+#ifdef HOST_WORDS_BIGENDIAN
+# define PIXMAN_BE_r8g8b8 PIXMAN_r8g8b8
+#else
+# define PIXMAN_BE_r8g8b8 PIXMAN_b8g8r8
+#endif
+
+/* -------------------------------------------------------------------- */
+
+int qemu_pixman_get_type(int rshift, int gshift, int bshift);
+pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf);
+
+pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format,
+ int width);
+void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb,
+ int width, int x, int y);
+pixman_image_t *qemu_pixman_mirror_create(pixman_format_code_t format,
+ pixman_image_t *image);
+void qemu_pixman_image_unref(pixman_image_t *image);
+
+#endif /* QEMU_PIXMAN_H */
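
Because pixman formats follow host byte order, the PIXMAN_BE_r8g8b8 alias above gives callers a byte-order-stable format, and the linebuf helpers convert one framebuffer row at a time into it. A sketch of the screenshot-style loop this enables (write_row() is a hypothetical sink for the converted bytes):

    #include "ui/qemu-pixman.h"

    static void write_row(const uint8_t *rgb, int width);   /* hypothetical */

    static void dump_frame(pixman_image_t *fb)
    {
        int width  = pixman_image_get_width(fb);
        int height = pixman_image_get_height(fb);
        int y;
        /* Fixed big-endian r8g8b8, i.e. the byte order libpng/libjpeg expect. */
        pixman_image_t *linebuf = qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8,
                                                             width);

        for (y = 0; y < height; y++) {
            qemu_pixman_linebuf_fill(linebuf, fb, width, 0, y);
            write_row((const uint8_t *)pixman_image_get_data(linebuf), width);
        }
        qemu_pixman_image_unref(linebuf);
    }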
diff --git a/include/ui/qemu-spice.h b/include/ui/qemu-spice.h
new file mode 100644
index 0000000000..5a78fd764d
--- /dev/null
+++ b/include/ui/qemu-spice.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_SPICE_H
+#define QEMU_SPICE_H
+
+#ifdef CONFIG_SPICE
+
+#include <spice.h>
+
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "monitor/monitor.h"
+
+extern int using_spice;
+
+void qemu_spice_init(void);
+void qemu_spice_input_init(void);
+void qemu_spice_audio_init(void);
+void qemu_spice_display_init(DisplayState *ds);
+int qemu_spice_display_add_client(int csock, int skipauth, int tls);
+int qemu_spice_add_interface(SpiceBaseInstance *sin);
+int qemu_spice_set_passwd(const char *passwd,
+ bool fail_if_connected, bool disconnect_if_connected);
+int qemu_spice_set_pw_expire(time_t expires);
+int qemu_spice_migrate_info(const char *hostname, int port, int tls_port,
+ const char *subject,
+ MonitorCompletion cb, void *opaque);
+
+void do_info_spice_print(Monitor *mon, const QObject *data);
+void do_info_spice(Monitor *mon, QObject **ret_data);
+
+CharDriverState *qemu_chr_open_spice(QemuOpts *opts);
+#if SPICE_SERVER_VERSION >= 0x000c02
+CharDriverState *qemu_chr_open_spice_port(QemuOpts *opts);
+void qemu_spice_register_ports(void);
+#endif
+
+#else /* CONFIG_SPICE */
+#include "monitor/monitor.h"
+
+#define using_spice 0
+static inline int qemu_spice_set_passwd(const char *passwd,
+ bool fail_if_connected,
+ bool disconnect_if_connected)
+{
+ return -1;
+}
+static inline int qemu_spice_set_pw_expire(time_t expires)
+{
+ return -1;
+}
+static inline int qemu_spice_migrate_info(const char *h, int p, int t,
+ const char *s,
+ MonitorCompletion cb, void *opaque)
+{
+ cb(opaque, NULL);
+ return -1;
+}
+
+static inline int qemu_spice_display_add_client(int csock, int skipauth,
+ int tls)
+{
+ return -1;
+}
+
+#endif /* CONFIG_SPICE */
+
+#endif /* QEMU_SPICE_H */
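
qemu-spice.h follows the same compile-away pattern as the other optional subsystems: with CONFIG_SPICE unset, using_spice becomes the constant 0 and the setters turn into stubs returning -1, so call sites build unchanged either way. A hedged sketch of the guard pattern (maybe_set_spice_password() is an illustrative wrapper, not part of the header):

    #include <stdbool.h>
    #include "ui/qemu-spice.h"

    static int maybe_set_spice_password(const char *passwd)
    {
        if (!using_spice) {
            return -1;      /* SPICE not active, or not compiled in at all */
        }
        return qemu_spice_set_passwd(passwd, false, false);
    }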
diff --git a/include/ui/spice-display.h b/include/ui/spice-display.h
new file mode 100644
index 0000000000..8b192e9613
--- /dev/null
+++ b/include/ui/spice-display.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <spice/ipc_ring.h>
+#include <spice/enums.h>
+#include <spice/qxl_dev.h>
+
+#include "qemu/thread.h"
+#include "ui/qemu-pixman.h"
+#include "sysemu/sysemu.h"
+
+#define NUM_MEMSLOTS 8
+#define MEMSLOT_GENERATION_BITS 8
+#define MEMSLOT_SLOT_BITS 8
+
+#define MEMSLOT_GROUP_HOST 0
+#define MEMSLOT_GROUP_GUEST 1
+#define NUM_MEMSLOTS_GROUPS 2
+
+/*
+ * Internal enum to differentiate between options for
+ * io calls that have a sync (old) version and an _async (new)
+ * version:
+ * QXL_SYNC: use the old version
+ * QXL_ASYNC: use the new version and make sure no two calls are
+ * in flight at the same time. This is used for guest-initiated
+ * calls.
+ */
+typedef enum qxl_async_io {
+ QXL_SYNC,
+ QXL_ASYNC,
+} qxl_async_io;
+
+enum {
+ QXL_COOKIE_TYPE_IO,
+ QXL_COOKIE_TYPE_RENDER_UPDATE_AREA,
+ QXL_COOKIE_TYPE_POST_LOAD_MONITORS_CONFIG,
+};
+
+typedef struct QXLCookie {
+ int type;
+ uint64_t io;
+ union {
+ uint32_t surface_id;
+ QXLRect area;
+ struct {
+ QXLRect area;
+ int redraw;
+ } render;
+ } u;
+} QXLCookie;
+
+QXLCookie *qxl_cookie_new(int type, uint64_t io);
+
+typedef struct SimpleSpiceDisplay SimpleSpiceDisplay;
+typedef struct SimpleSpiceUpdate SimpleSpiceUpdate;
+
+struct SimpleSpiceDisplay {
+ DisplayState *ds;
+ void *buf;
+ int bufsize;
+ QXLWorker *worker;
+ QXLInstance qxl;
+ uint32_t unique;
+ pixman_image_t *surface;
+ pixman_image_t *mirror;
+ int32_t num_surfaces;
+
+ QXLRect dirty;
+ int notify;
+
+ /*
+ * All struct members below this comment can be accessed from
+ * both spice server and qemu (iothread) context and any access
+ * to them must be protected by the lock.
+ */
+ QemuMutex lock;
+ QTAILQ_HEAD(, SimpleSpiceUpdate) updates;
+ QEMUCursor *cursor;
+ int mouse_x, mouse_y;
+};
+
+struct SimpleSpiceUpdate {
+ QXLDrawable drawable;
+ QXLImage image;
+ QXLCommandExt ext;
+ uint8_t *bitmap;
+ QTAILQ_ENTRY(SimpleSpiceUpdate) next;
+};
+
+int qemu_spice_rect_is_empty(const QXLRect* r);
+void qemu_spice_rect_union(QXLRect *dest, const QXLRect *r);
+
+void qemu_spice_destroy_update(SimpleSpiceDisplay *sdpy, SimpleSpiceUpdate *update);
+void qemu_spice_create_host_memslot(SimpleSpiceDisplay *ssd);
+void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd);
+void qemu_spice_destroy_host_primary(SimpleSpiceDisplay *ssd);
+void qemu_spice_vm_change_state_handler(void *opaque, int running,
+ RunState state);
+void qemu_spice_display_init_common(SimpleSpiceDisplay *ssd, DisplayState *ds);
+
+void qemu_spice_display_update(SimpleSpiceDisplay *ssd,
+ int x, int y, int w, int h);
+void qemu_spice_display_resize(SimpleSpiceDisplay *ssd);
+void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd);
+void qemu_spice_cursor_refresh_unlocked(SimpleSpiceDisplay *ssd);
+
+void qemu_spice_add_memslot(SimpleSpiceDisplay *ssd, QXLDevMemSlot *memslot,
+ qxl_async_io async);
+void qemu_spice_del_memslot(SimpleSpiceDisplay *ssd, uint32_t gid,
+ uint32_t sid);
+void qemu_spice_create_primary_surface(SimpleSpiceDisplay *ssd, uint32_t id,
+ QXLDevSurfaceCreate *surface,
+ qxl_async_io async);
+void qemu_spice_destroy_primary_surface(SimpleSpiceDisplay *ssd,
+ uint32_t id, qxl_async_io async);
+void qemu_spice_wakeup(SimpleSpiceDisplay *ssd);
+void qemu_spice_display_start(void);
+void qemu_spice_display_stop(void);
+int qemu_spice_display_is_running(SimpleSpiceDisplay *ssd);
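
The comment inside SimpleSpiceDisplay is the key invariant of this header: everything below it (the update queue, cursor and mouse position) is touched from both the SPICE server thread and the QEMU iothread, so every access goes through ssd->lock. A sketch of that rule in practice (update_mouse_position() is a hypothetical helper, not part of the header):

    #include "ui/spice-display.h"

    static void update_mouse_position(SimpleSpiceDisplay *ssd, int x, int y)
    {
        qemu_mutex_lock(&ssd->lock);
        ssd->mouse_x = x;
        ssd->mouse_y = y;
        qemu_mutex_unlock(&ssd->lock);
    }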