Diffstat (limited to 'softmmu_template.h')
 softmmu_template.h | 43 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 39 insertions(+), 4 deletions(-)
diff --git a/softmmu_template.h b/softmmu_template.h
index 5a07f991a1..88e33900b6 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -116,6 +116,31 @@
# define helper_te_st_name helper_le_st_name
#endif
+/* macro to check the victim tlb */
+#define VICTIM_TLB_HIT(ty) \
+({ \
+ /* we are about to do a page table walk. our last hope is the \
+ * victim tlb. try to refill from the victim tlb before walking the \
+ * page table. */ \
+ int vidx; \
+ hwaddr tmpiotlb; \
+ CPUTLBEntry tmptlb; \
+ for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
+ if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
+ /* found entry in victim tlb, swap tlb and iotlb */ \
+ tmptlb = env->tlb_table[mmu_idx][index]; \
+ env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
+ env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
+ tmpiotlb = env->iotlb[mmu_idx][index]; \
+ env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
+ env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
+ break; \
+ } \
+ } \
+ /* return true when there is a vtlb hit, i.e. vidx >=0 */ \
+ vidx >= 0; \
+})
+
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
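
The macro above refers to tlb_v_table, iotlb_v and CPU_VTLB_SIZE, none of which are declared in this file; they come from the companion CPU-state changes that accompany this patch. The following is only a sketch of what those declarations presumably look like: the field names are taken from the macro, while the size, the vtlb_index counter and the exact placement are assumptions. Entries presumably reach tlb_v_table on the eviction path in cputlb.c, when a main-TLB slot is overwritten; the standalone model after the diff walks through that interaction.

/* Sketch (not part of this diff): per-MMU-mode victim TLB state assumed to be
 * added to the CPU state, e.g. alongside tlb_table/iotlb in cpu-defs.h. */
#define CPU_VTLB_SIZE 8                                /* assumed size */

CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];  /* evicted TLB entries */
hwaddr      iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];      /* matching iotlb entries */
uint32_t    vtlb_index;                                /* assumed round-robin eviction cursor */
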
@@ -161,7 +186,10 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
mmu_idx, retaddr);
}
#endif
- tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ if (!VICTIM_TLB_HIT(ADDR_READ)) {
+ tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
@@ -246,7 +274,10 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
mmu_idx, retaddr);
}
#endif
- tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ if (!VICTIM_TLB_HIT(ADDR_READ)) {
+ tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
@@ -368,7 +399,9 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
}
#endif
- tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+ if (!VICTIM_TLB_HIT(addr_write)) {
+ tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+ }
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
@@ -444,7 +477,9 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
}
#endif
- tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+ if (!VICTIM_TLB_HIT(addr_write)) {
+ tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+ }
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
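
Taken together, the four helpers follow the same pattern: on a miss in the direct-mapped TLB, consult the small fully associative victim TLB and fall back to tlb_fill() (a full page table walk) only when that also misses. The standalone toy below models just that interaction; the names mirror the patch, but the sizes, the indexing and the single addr_read field are simplifications, not QEMU's actual layout.

/* Standalone toy model of the scheme in this patch: a direct-mapped TLB backed
 * by a small, fully associative victim TLB. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_SIZE          8                      /* direct-mapped main TLB */
#define VTLB_SIZE         4                      /* small victim TLB */
#define TARGET_PAGE_MASK  (~(uint64_t)0xfff)     /* 4 KiB pages */

typedef struct { uint64_t addr_read; } TLBEntry;

static TLBEntry tlb[TLB_SIZE];
static TLBEntry vtlb[VTLB_SIZE];
static unsigned vtlb_index;                      /* round-robin eviction cursor */

static unsigned tlb_slot(uint64_t addr)
{
    return (addr >> 12) & (TLB_SIZE - 1);        /* direct-mapped indexing */
}

/* Eviction side: push the old entry into the victim TLB before overwriting. */
static void tlb_set(uint64_t page)
{
    unsigned index = tlb_slot(page);

    vtlb[vtlb_index++ % VTLB_SIZE] = tlb[index];
    tlb[index].addr_read = page & TARGET_PAGE_MASK;
}

/* Mirror of VICTIM_TLB_HIT(): scan the victim TLB on a main-TLB miss and swap
 * the matching entry back into the direct-mapped slot. */
static bool victim_tlb_hit(uint64_t addr)
{
    unsigned index = tlb_slot(addr);

    for (int vidx = VTLB_SIZE - 1; vidx >= 0; --vidx) {
        if (vtlb[vidx].addr_read == (addr & TARGET_PAGE_MASK)) {
            TLBEntry tmp = tlb[index];
            tlb[index] = vtlb[vidx];
            vtlb[vidx] = tmp;
            return true;
        }
    }
    return false;
}

int main(void)
{
    /* Two pages that collide on the same direct-mapped slot (both map to 1). */
    uint64_t a = 0x1000, b = 0x9000;

    tlb_set(a);                                  /* fill the slot with A */
    tlb_set(b);                                  /* B evicts A into the victim TLB */

    /* A now misses the main TLB but hits the victim TLB, so the real helpers
     * would skip tlb_fill() (the full page table walk). */
    printf("victim hit for A: %d\n", victim_tlb_hit(a));
    return 0;
}

Running the toy prints "victim hit for A: 1": the access that lost its direct-mapped slot to a conflicting page is recovered from the victim TLB and swapped back, which is exactly the case the helpers above optimise before resorting to tlb_fill().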