author     Paolo Bonzini <pbonzini@redhat.com>      2013-11-07 17:14:36 +0100
committer  Michael S. Tsirkin <mst@redhat.com>      2013-12-10 12:29:56 +0200
commit     03f4995781a64e106e6f73864a1e9c4163dac53b (patch)
tree       003ca5ab7066202a511bd6becf71a5602184d35e /exec.c
parent     92b8e39c7f582e15f9e9423bc9fd3f186536b073 (diff)
download   qemu-03f4995781a64e106e6f73864a1e9c4163dac53b.tar.gz
split definitions for exec.c and translate-all.c radix trees
The exec.c and translate-all.c radix trees are quite different, and the exec.c
one in particular is not limited to the CPU---it can be used also by devices
that do DMA, and in that case the address space is not limited to
TARGET_PHYS_ADDR_SPACE_BITS bits.  We want to make exec.c's radix trees
64-bit wide.  As a first step, stop sharing the constants between exec.c and
translate-all.c.  exec.c gets P_L2_* constants, translate-all.c gets V_L2_*,
for consistency with the existing V_L1_* symbols.  Though actually in the
softmmu case translate-all.c is also indexed by physical addresses...

This patch has no semantic change.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
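For illustration only (not part of the patch): the new P_L2_LEVELS macro splits
the physical page-frame index into 10-bit chunks, one per radix-tree level. The
sketch below shows how it evaluates under assumed values of TARGET_PAGE_BITS = 12
and TARGET_PHYS_ADDR_SPACE_BITS = 36 (a typical PAE-style configuration); both
constants are assumptions made for the example, not values taken from this commit.

    /* Standalone sketch of the P_L2_* arithmetic introduced by this patch.
     * TARGET_PAGE_BITS and TARGET_PHYS_ADDR_SPACE_BITS are assumed here. */
    #include <stdio.h>

    #define TARGET_PAGE_BITS            12
    #define TARGET_PHYS_ADDR_SPACE_BITS 36

    #define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS

    #define P_L2_BITS   10
    #define P_L2_SIZE   (1 << P_L2_BITS)
    #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

    int main(void)
    {
        /* (36 - 12 - 1) / 10 + 1 = 3 levels, each indexing 2^10 entries,
         * which is enough to cover the 24-bit page-frame number. */
        printf("levels: %d, entries per level: %d\n", P_L2_LEVELS, P_L2_SIZE);
        return 0;
    }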
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c  29
1 file changed, 21 insertions, 8 deletions
diff --git a/exec.c b/exec.c
index f4b9ef25f5..060f3f3430 100644
--- a/exec.c
+++ b/exec.c
@@ -88,7 +88,15 @@ struct PhysPageEntry {
uint16_t ptr : 15;
};
-typedef PhysPageEntry Node[L2_SIZE];
+/* Size of the L2 (and L3, etc) page tables. */
+#define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
+
+#define P_L2_BITS 10
+#define P_L2_SIZE (1 << P_L2_BITS)
+
+#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
+
+typedef PhysPageEntry Node[P_L2_SIZE];
struct AddressSpaceDispatch {
/* This is a multi-level map on the physical address space.
@@ -155,7 +163,7 @@ static uint16_t phys_map_node_alloc(void)
ret = next_map.nodes_nb++;
assert(ret != PHYS_MAP_NODE_NIL);
assert(ret != next_map.nodes_nb_alloc);
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < P_L2_SIZE; ++i) {
next_map.nodes[ret][i].is_leaf = 0;
next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
}
@@ -168,13 +176,13 @@ static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
{
PhysPageEntry *p;
int i;
- hwaddr step = (hwaddr)1 << (level * L2_BITS);
+ hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
lp->ptr = phys_map_node_alloc();
p = next_map.nodes[lp->ptr];
if (level == 0) {
- for (i = 0; i < L2_SIZE; i++) {
+ for (i = 0; i < P_L2_SIZE; i++) {
p[i].is_leaf = 1;
p[i].ptr = PHYS_SECTION_UNASSIGNED;
}
@@ -182,9 +190,9 @@ static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
} else {
p = next_map.nodes[lp->ptr];
}
- lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
+ lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
- while (*nb && lp < &p[L2_SIZE]) {
+ while (*nb && lp < &p[P_L2_SIZE]) {
if ((*index & (step - 1)) == 0 && *nb >= step) {
lp->is_leaf = true;
lp->ptr = leaf;
@@ -218,7 +226,7 @@ static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
return &sections[PHYS_SECTION_UNASSIGNED];
}
p = nodes[lp.ptr];
- lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
+ lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
}
return &sections[lp.ptr];
}
@@ -1778,7 +1786,12 @@ void address_space_destroy_dispatch(AddressSpace *as)
static void memory_map_init(void)
{
system_memory = g_malloc(sizeof(*system_memory));
- memory_region_init(system_memory, NULL, "system", INT64_MAX);
+
+ assert(ADDR_SPACE_BITS <= 64);
+
+ memory_region_init(system_memory, NULL, "system",
+ ADDR_SPACE_BITS == 64 ?
+ UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS));
address_space_init(&address_space_memory, system_memory, "memory");
system_io = g_malloc(sizeof(*system_io));
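
A side note on the memory_map_init() hunk above: shifting a 64-bit value by 64
is undefined behaviour in C, so when ADDR_SPACE_BITS is a full 64 bits the size
passed to memory_region_init() cannot be computed as 0x1ULL << ADDR_SPACE_BITS
and UINT64_MAX is used instead. A minimal standalone sketch of that guard
follows; the helper name region_size_for_bits is made up for illustration and
does not exist in QEMU.

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the guard in memory_map_init():
     * 1ULL << 64 would be undefined behaviour, so a full 64-bit address
     * space is represented by UINT64_MAX instead. */
    static uint64_t region_size_for_bits(unsigned bits)
    {
        return bits == 64 ? UINT64_MAX : (UINT64_C(1) << bits);
    }

    int main(void)
    {
        printf("36-bit space: 0x%" PRIx64 "\n", region_size_for_bits(36));
        printf("64-bit space: 0x%" PRIx64 "\n", region_size_for_bits(64));
        return 0;
    }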