author     David Hildenbrand <david@redhat.com>   2017-10-16 22:23:57 +0200
committer  Cornelia Huck <cohuck@redhat.com>      2017-10-20 13:32:10 +0200
commit     f52bfb12143e29d7c8bd827bdb751aee47a9694e (patch)
tree       2176cb2e9e24ed4a44f5579664fcb84a3724c56e /accel
parent     d0a5cc5bf4cf8e4d7d14597dd57be526c3e9d76f (diff)
download   qemu-f52bfb12143e29d7c8bd827bdb751aee47a9694e.tar.gz
accel/tcg: allow to invalidate a write TLB entry immediately
Background: s390x implements Low-Address Protection (LAP). If LAP is
enabled, writing to effective addresses (before any translation) 0-511
and 4096-4607 triggers a protection exception. So we have subpage
protection on the first two pages of every address space (where the
lowcore - the CPU private data - resides).

By immediately invalidating the write entry but allowing the caller to
continue, we force every write access to these first two pages into the
slow path. We will then get a TLB fault with the specific accessed
address and can evaluate whether protection applies or not.

We have to make sure to ignore the invalid bit if tlb_fill() succeeds.

Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20171016202358.3633-2-david@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
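[Editor's note: the following is a standalone sketch of the mechanism, not
part of the patch. The constant values and the helpers lap_protected() and
prot_for_page() are made up for illustration; only PAGE_WRITE, PAGE_WRITE_INV
and TLB_INVALID_MASK correspond to real QEMU symbols, with different values.
A target passes PAGE_WRITE_INV alongside PAGE_WRITE when filling the TLB, and
the generic code in the cputlb.c hunk below then sets TLB_INVALID_MASK on
addr_write, so the very next store refaults and reaches the slow path, where
the exact address is known.]

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; QEMU defines its own bit layout. */
#define PAGE_WRITE       0x0002
#define PAGE_WRITE_INV   0x0040
#define TLB_INVALID_MASK 0x0008
#define PAGE_SIZE        4096

/* LAP protects effective addresses 0-511 and 4096-4607. */
static bool lap_protected(uint64_t addr)
{
    return addr <= 511 || (addr >= 4096 && addr <= 4607);
}

/* Hypothetical fill policy: the first two pages of every address space
 * get a write entry that is invalidated immediately. */
static int prot_for_page(uint64_t page_addr)
{
    int prot = PAGE_WRITE;
    if (page_addr < 2 * PAGE_SIZE) {
        prot |= PAGE_WRITE_INV;
    }
    return prot;
}

int main(void)
{
    uint64_t addr_write = 0x1000;            /* TLB entry for page 1 */
    int prot = prot_for_page(addr_write);

    /* Same logic as the cputlb.c hunk below: install the entry, then
     * mark it invalid so the next store takes the slow path. */
    if (prot & PAGE_WRITE_INV) {
        addr_write |= TLB_INVALID_MASK;
    }

    /* In the slow path we finally know the exact store address and can
     * decide whether LAP applies. */
    printf("entry %#" PRIx64 " invalid=%d, store to 4100 protected=%d\n",
           addr_write, !!(addr_write & TLB_INVALID_MASK),
           lap_protected(4100));
    return 0;
}

[The masking on the refill side (tlb_addr = ... & ~TLB_INVALID_MASK in the
hunks below) is what lets the access complete once tlb_fill() has succeeded,
i.e. the invalid bit is ignored after a successful fill, as the commit
message requires.]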
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c            5
-rw-r--r--  accel/tcg/softmmu_template.h  4
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 5b1ef1442c..a23919c3a8 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -694,6 +694,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         } else {
             tn.addr_write = address;
         }
+        if (prot & PAGE_WRITE_INV) {
+            tn.addr_write |= TLB_INVALID_MASK;
+        }
     }
 
     /* Pairs with flag setting in tlb_reset_dirty_range */
@@ -978,7 +981,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
-        tlb_addr = tlbe->addr_write;
+        tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
     }
 
     /* Check notdirty */
diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
index d7563292a5..3fc5144316 100644
--- a/accel/tcg/softmmu_template.h
+++ b/accel/tcg/softmmu_template.h
@@ -285,7 +285,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
-        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
     }
 
     /* Handle an IO access. */
@@ -361,7 +361,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
-        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
     }
 
     /* Handle an IO access. */