author     Alex Bennée <alex.bennee@linaro.org>       2016-10-27 16:10:05 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>        2016-10-31 10:51:16 +0100
commit     e505a063bac780a4ca190aee29df2cc0b767c67a (patch)
tree       989681297b08b7989ec76e3b64aae7ee06df4901 /translate-all.c
parent     98c1076cc9a3ccebd1316fecf65149f6052a24fc (diff)
download   qemu-e505a063bac780a4ca190aee29df2cc0b767c67a.tar.gz
translate-all: Add assert_(memory|tb)_lock annotations
This adds calls to the assert_(memory|tb)_lock for all public APIs which
are documented as needing them held for linux-user mode. The asserts are
NOPs for system-mode although these will be converted when MTTCG is
enabled.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Message-Id: <20161027151030.20863-9-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
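The "NOP for system-mode, real check for linux-user" arrangement can be laid out roughly as below. This is only an illustrative sketch, not part of this patch: the have_mmap_lock()/have_tb_lock() predicates are assumed helpers that report whether the current thread holds the respective lock.

#ifdef CONFIG_SOFTMMU
/* System mode: no-ops until MTTCG makes these locks meaningful there. */
#define assert_memory_lock() do { } while (0)
#define assert_tb_lock()     do { } while (0)
#else
/* linux-user mode: fail loudly if a caller forgot to take the lock. */
#define assert_memory_lock() g_assert(have_mmap_lock())
#define assert_tb_lock()     g_assert(have_tb_lock())
#endif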
Diffstat (limited to 'translate-all.c')
-rw-r--r--  translate-all.c  22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/translate-all.c b/translate-all.c
index fad2646ddd..3ff43ec2e2 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -473,6 +473,10 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
void **lp;
int i;
+ if (alloc) {
+ assert_memory_lock();
+ }
+
/* Level 1. Always allocated. */
lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
@@ -839,6 +843,8 @@ static TranslationBlock *tb_alloc(target_ulong pc)
{
TranslationBlock *tb;
+ assert_tb_lock();
+
if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
return NULL;
}
@@ -852,6 +858,8 @@ static TranslationBlock *tb_alloc(target_ulong pc)
/* Called with tb_lock held. */
void tb_free(TranslationBlock *tb)
{
+ assert_tb_lock();
+
/* In practice this is mostly used for single use temporary TB
Ignore the hard cases and just back up if this TB happens to
be the last one generated. */
@@ -1093,6 +1101,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
uint32_t h;
tb_page_addr_t phys_pc;
+ assert_tb_lock();
+
atomic_set(&tb->invalid, true);
/* remove the TB from the hash list */
@@ -1150,7 +1160,7 @@ static void build_page_bitmap(PageDesc *p)
tb_end = tb_start + tb->size;
if (tb_end > TARGET_PAGE_SIZE) {
tb_end = TARGET_PAGE_SIZE;
- }
+ }
} else {
tb_start = 0;
tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
@@ -1173,6 +1183,8 @@ static inline void tb_alloc_page(TranslationBlock *tb,
bool page_already_protected;
#endif
+ assert_memory_lock();
+
tb->page_addr[n] = page_addr;
p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
tb->page_next[n] = p->first_tb;
@@ -1229,6 +1241,8 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
{
uint32_t h;
+ assert_memory_lock();
+
/* add in the page list */
tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
if (phys_page2 != -1) {
@@ -1260,6 +1274,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
+ assert_memory_lock();
phys_pc = get_page_addr_code(env, pc);
if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
@@ -1388,6 +1403,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
+ assert_memory_lock();
+
while (start < end) {
tb_invalidate_phys_page_range(start, end, 0);
start &= TARGET_PAGE_MASK;
@@ -1424,6 +1441,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
+ assert_memory_lock();
+
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
@@ -2031,6 +2050,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
assert(start < end);
+ assert_memory_lock();
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
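For linux-user mode the expectation is that callers take mmap_lock around the page and TB invalidation APIs, so the new assert_memory_lock() calls trip immediately on any path that forgot to do so. A minimal caller sketch, where invalidate_range_example is a made-up name for illustration and mmap_lock()/mmap_unlock() are the existing user-mode helpers:

static void invalidate_range_example(tb_page_addr_t start, tb_page_addr_t end)
{
    mmap_lock();                           /* satisfies assert_memory_lock() */
    tb_invalidate_phys_range(start, end);  /* asserts the lock is held */
    mmap_unlock();
}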