author     Alexey Kardashevskiy <aik@ozlabs.ru>     2017-09-21 18:50:56 +1000
committer  Paolo Bonzini <pbonzini@redhat.com>      2017-09-21 23:19:37 +0200
commit     66a6df1dc6d5b28cc3e65db0d71683fbdddc6b62 (patch)
tree       1cc4ab0eb28401da99ab4dc7361d98c9c135d2bd /memory.c
parent     cc94cd6d36602d976a5e7bc29134d1eaefb4102e (diff)
download   qemu-66a6df1dc6d5b28cc3e65db0d71683fbdddc6b62.tar.gz
memory: Move AddressSpaceDispatch from AddressSpace to FlatView
As we are going to share FlatViews between AddressSpaces, and AddressSpaceDispatch is the structure used for quick lookups in a FlatView, this moves the ASD into FlatView.

Since the ASD rendering was open coded previously, we can also remove as->next_dispatch: the new FlatView pointer is kept on the stack and set on the AS atomically.

flatview_destroy() is now executed under RCU instead of address_space_dispatch_free().

This makes mem_begin()/mem_commit() work with an ASD and mem_add() with a FlatView, as mem_add() will take a FlatView as an argument later anyway.

This should cause no behavioural change.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Message-Id: <20170921085110.25598-5-aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
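For orientation, here is a minimal, standalone sketch of the ownership and lookup chain this patch introduces. It is not QEMU code: the structs are trimmed to the fields relevant here, and QEMU's atomic_rcu_read()/call_rcu() are stood in for by plain C11 atomics and an immediate free, purely to illustrate that the dispatch now travels with the FlatView and is released in flatview_destroy().

/* Simplified sketch of the new ownership model; not actual QEMU code.
 * QEMU uses atomic_rcu_read()/call_rcu(); plain C11 atomics and a direct
 * free stand in for them here. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct AddressSpaceDispatch { int dummy; } AddressSpaceDispatch;

typedef struct FlatView {
    _Atomic unsigned ref;            /* refcount; destroy when it drops to 0 */
    AddressSpaceDispatch *dispatch;  /* now owned by the FlatView            */
} FlatView;

typedef struct AddressSpace {
    FlatView *_Atomic current_map;   /* read with acquire, set with release  */
} AddressSpace;

/* In QEMU this runs as an RCU callback once all readers have finished. */
static void flatview_destroy(FlatView *view)
{
    free(view->dispatch);            /* replaces address_space_dispatch_free() */
    free(view);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_sub(&view->ref, 1) == 1) {
        flatview_destroy(view);      /* QEMU: call_rcu(view, flatview_destroy, rcu) */
    }
}

/* The new lookup chain: AddressSpace -> FlatView -> AddressSpaceDispatch. */
static FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_load_explicit(&as->current_map, memory_order_acquire);
}

static AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

static AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}

int main(void)
{
    FlatView *fv = calloc(1, sizeof(*fv));
    atomic_init(&fv->ref, 1);
    fv->dispatch = calloc(1, sizeof(*fv->dispatch));

    AddressSpace as;
    atomic_init(&as.current_map, fv);

    printf("dispatch: %p\n", (void *)address_space_to_dispatch(&as));
    flatview_unref(fv);              /* drops the last ref; frees the dispatch too */
    return 0;
}

The point the sketch illustrates is that the dispatch now shares the FlatView's lifetime: once the last reference is dropped, a single (RCU-deferred, in QEMU) destructor tears down both, so per-AddressSpace bookkeeping such as as->next_dispatch is no longer needed.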
Diffstat (limited to 'memory.c')
-rw-r--r--  memory.c | 31
1 file changed, 24 insertions(+), 7 deletions(-)
diff --git a/memory.c b/memory.c
index eec668eec7..962e9b961f 100644
--- a/memory.c
+++ b/memory.c
@@ -229,6 +229,7 @@ struct FlatView {
FlatRange *ranges;
unsigned nr;
unsigned nr_allocated;
+ struct AddressSpaceDispatch *dispatch;
};
typedef struct AddressSpaceOps AddressSpaceOps;
@@ -289,6 +290,9 @@ static void flatview_destroy(FlatView *view)
{
int i;
+ if (view->dispatch) {
+ address_space_dispatch_free(view->dispatch);
+ }
for (i = 0; i < view->nr; i++) {
memory_region_unref(view->ranges[i].mr);
}
@@ -304,10 +308,25 @@ static bool flatview_ref(FlatView *view)
static void flatview_unref(FlatView *view)
{
if (atomic_fetch_dec(&view->ref) == 1) {
- flatview_destroy(view);
+ call_rcu(view, flatview_destroy, rcu);
}
}
+static FlatView *address_space_to_flatview(AddressSpace *as)
+{
+ return atomic_rcu_read(&as->current_map);
+}
+
+AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
+{
+ return fv->dispatch;
+}
+
+AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
+{
+ return flatview_to_dispatch(address_space_to_flatview(as));
+}
+
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
return int128_eq(addrrange_end(r1->addr), r2->addr.start)
@@ -891,13 +910,13 @@ static void address_space_update_topology(AddressSpace *as)
FlatView *new_view = generate_memory_topology(as->root);
int i;
- mem_begin(as);
+ new_view->dispatch = mem_begin(as);
for (i = 0; i < new_view->nr; i++) {
MemoryRegionSection mrs =
section_from_flat_range(&new_view->ranges[i], as);
- mem_add(as, &mrs);
+ mem_add(as, new_view, &mrs);
}
- mem_commit(as);
+ mem_commit(new_view->dispatch);
if (!QTAILQ_EMPTY(&as->listeners)) {
address_space_update_topology_pass(as, old_view, new_view, false);
@@ -906,7 +925,7 @@ static void address_space_update_topology(AddressSpace *as)
/* Writes are protected by the BQL. */
atomic_rcu_set(&as->current_map, new_view);
- call_rcu(old_view, flatview_unref, rcu);
+ flatview_unref(old_view);
/* Note that all the old MemoryRegions are still alive up to this
* point. This relieves most MemoryListeners from the need to
@@ -2636,7 +2655,6 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
QTAILQ_INIT(&as->listeners);
QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
as->name = g_strdup(name ? name : "anonymous");
- as->dispatch = NULL;
memory_region_update_pending |= root->enabled;
memory_region_transaction_commit();
}
@@ -2645,7 +2663,6 @@ static void do_address_space_destroy(AddressSpace *as)
{
bool do_free = as->malloced;
- address_space_destroy_dispatch(as);
assert(QTAILQ_EMPTY(&as->listeners));
flatview_unref(as->current_map);