-rw-r--r--  cpu-all.h              |   3
-rw-r--r--  exec.c                 | 105
-rw-r--r--  target-i386/helper2.c  |  87
-rw-r--r--  target-ppc/helper.c    |   6
4 files changed, 144 insertions(+), 57 deletions(-)
diff --git a/cpu-all.h b/cpu-all.h
index d395c0038b..1361cf1be0 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -725,6 +725,9 @@ static inline void cpu_physical_memory_write(target_phys_addr_t addr,
{
cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
+uint32_t ldl_phys(target_phys_addr_t addr);
+void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
+void stl_phys(target_phys_addr_t addr, uint32_t val);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
uint8_t *buf, int len, int is_write);
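The three prototypes added to cpu-all.h above are the new public interface for aligned 32-bit accesses to guest physical memory. As a hedged usage sketch (not part of the commit), the pattern the softmmu targets below follow when walking guest page tables looks like this: load the entry with ldl_phys(), write status-bit updates back with stl_phys_notdirty() so the write neither dirties the RAM page nor invalidates translated code, and keep stl_phys() for ordinary stores that must do both. PTE_ACCESSED, touch_pte() and store_guest_word() are illustrative names, not QEMU symbols; the sketch assumes the surrounding QEMU headers for target_phys_addr_t.

/* Hedged usage sketch, assuming the QEMU build environment of this tree;
   PTE_ACCESSED and both helpers are made-up names for illustration. */
#define PTE_ACCESSED 0x20   /* hypothetical "accessed" bit in a guest PTE */

static void touch_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);        /* aligned 32-bit physical load */
    if (!(pte & PTE_ACCESSED)) {
        pte |= PTE_ACCESSED;
        /* page-table maintenance must not mark the RAM page dirty or
           invalidate translated code, so the dirty bitmap can keep
           tracking guest writes to page tables */
        stl_phys_notdirty(pte_addr, pte);
    }
}

static void store_guest_word(target_phys_addr_t addr, uint32_t val)
{
    /* ordinary guest-visible store: dirties the page and invalidates
       any translation blocks that cover it */
    stl_phys(addr, val);
}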
diff --git a/exec.c b/exec.c
index 0dd14f9e24..48cbe85749 100644
--- a/exec.c
+++ b/exec.c
@@ -2032,6 +2032,21 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
addr += l;
}
}
+
+/* never used: user-only emulation does not access guest physical memory;
+   these stubs only satisfy references from common code */
+uint32_t ldl_phys(target_phys_addr_t addr)
+{
+ return 0;
+}
+
+void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
+{
+}
+
+void stl_phys(target_phys_addr_t addr, uint32_t val)
+{
+}
+
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
int len, int is_write)
@@ -2118,6 +2133,96 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
addr += l;
}
}
+
+/* warning: addr must be aligned */
+uint32_t ldl_phys(target_phys_addr_t addr)
+{
+ int io_index;
+ uint8_t *ptr;
+ uint32_t val;
+ unsigned long pd;
+ PhysPageDesc *p;
+
+ p = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
+ (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
+ /* I/O case */
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+ } else {
+ /* RAM case */
+ ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ (addr & ~TARGET_PAGE_MASK);
+ val = ldl_p(ptr);
+ }
+ return val;
+}
+
+/* warning: addr must be aligned. The ram page is not marked as dirty
+ and the code inside is not invalidated. It is useful if the dirty
+ bits are used to track modified PTEs */
+void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
+{
+ int io_index;
+ uint8_t *ptr;
+ unsigned long pd;
+ PhysPageDesc *p;
+
+ p = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) != 0) {
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ } else {
+ ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ (addr & ~TARGET_PAGE_MASK);
+ stl_p(ptr, val);
+ }
+}
+
+/* warning: addr must be aligned */
+/* XXX: optimize code invalidation test */
+void stl_phys(target_phys_addr_t addr, uint32_t val)
+{
+ int io_index;
+ uint8_t *ptr;
+ unsigned long pd;
+ PhysPageDesc *p;
+
+ p = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) != 0) {
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ } else {
+ unsigned long addr1;
+ addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ /* RAM case */
+ ptr = phys_ram_base + addr1;
+ stl_p(ptr, val);
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
+ /* set dirty bit */
+ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
+ }
+}
+
#endif
/* virtual memory access for debug */
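In the softmmu versions above, pd packs two things into one unsigned long: the page-aligned offset of the page within phys_ram_base and, in its low bits, the kind of backing (RAM, ROM, code, unassigned, or a registered I/O slot). As a hedged sketch (not part of the commit), the MMIO branch of ldl_phys amounts to the helper below, relying on exec.c's static io_mem_* tables; the second table index selects the access size (0 = 8-bit, 1 = 16-bit, 2 = 32-bit), which is why these 32-bit accessors use [2]. mmio_read_long is an illustrative name, not a QEMU function.

/* Hedged sketch of the MMIO dispatch performed by ldl_phys above;
   mmio_read_long just restates the branch for clarity. */
static uint32_t mmio_read_long(unsigned long pd, target_phys_addr_t addr)
{
    /* low bits of pd select the registered I/O memory slot ... */
    int io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    /* ... and index 2 picks that slot's 32-bit read callback
       (0 = 8-bit, 1 = 16-bit, 2 = 32-bit) */
    return io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
}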
diff --git a/target-i386/helper2.c b/target-i386/helper2.c
index f78786ee6d..9ecf05d93c 100644
--- a/target-i386/helper2.c
+++ b/target-i386/helper2.c
@@ -490,34 +490,26 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
tlb_flush_page(env, addr);
}
-static inline uint8_t *get_phys_mem_ptr(target_phys_addr_t addr)
-{
- /* XXX: incorrect */
- return phys_ram_base + addr;
-}
+#if defined(CONFIG_USER_ONLY)
-/* WARNING: addr must be aligned */
-uint32_t ldl_phys_aligned(target_phys_addr_t addr)
+int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
+ int is_write, int is_user, int is_softmmu)
{
- uint8_t *ptr;
- uint32_t val;
- ptr = get_phys_mem_ptr(addr);
- if (!ptr)
- val = 0;
- else
- val = ldl_raw(ptr);
- return val;
+ /* user mode only emulation */
+ is_write &= 1;
+ env->cr[2] = addr;
+ env->error_code = (is_write << PG_ERROR_W_BIT);
+ env->error_code |= PG_ERROR_U_MASK;
+ return 1;
}
-void stl_phys_aligned(target_phys_addr_t addr, uint32_t val)
+target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
- uint8_t *ptr;
- ptr = get_phys_mem_ptr(addr);
- if (!ptr)
- return;
- stl_raw(ptr, val);
+ return addr;
}
+#else
+
/* return value:
-1 = cannot handle fault
0 = nothing more to do
@@ -539,12 +531,6 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
#endif
is_write &= 1;
- if (env->user_mode_only) {
- /* user mode only emulation */
- error_code = 0;
- goto do_fault;
- }
-
if (!(env->cr[0] & CR0_PG_MASK)) {
pte = addr;
virt_addr = addr & TARGET_PAGE_MASK;
@@ -553,7 +539,6 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
goto do_mapping;
}
-
if (env->cr[4] & CR4_PAE_MASK) {
/* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
@@ -572,33 +557,33 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
- pml4e = ldl_phys_aligned(pml4e_addr);
+ pml4e = ldl_phys(pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
- stl_phys_aligned(pml4e_addr, pml4e);
+ stl_phys_notdirty(pml4e_addr, pml4e);
}
pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
env->a20_mask;
- pdpe = ldl_phys_aligned(pdpe_addr);
+ pdpe = ldl_phys(pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
- stl_phys_aligned(pdpe_addr, pdpe);
+ stl_phys_notdirty(pdpe_addr, pdpe);
}
} else
#endif
{
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
env->a20_mask;
- pdpe = ldl_phys_aligned(pdpe_addr);
+ pdpe = ldl_phys(pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -607,7 +592,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
env->a20_mask;
- pde = ldl_phys_aligned(pde_addr);
+ pde = ldl_phys(pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -620,7 +605,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
- stl_phys_aligned(pde_addr, pde);
+ stl_phys_notdirty(pde_addr, pde);
}
pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
env->a20_mask;
@@ -630,7 +615,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
env->a20_mask;
- pde = ldl_phys_aligned(pde_addr);
+ pde = ldl_phys(pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -654,7 +639,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
- stl_phys_aligned(pde_addr, pde);
+ stl_phys_notdirty(pde_addr, pde);
}
pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
@@ -663,14 +648,14 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
} else {
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
- stl_phys_aligned(pde_addr, pde);
+ stl_phys_notdirty(pde_addr, pde);
}
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
env->a20_mask;
handle_4k_page:
- pte = ldl_phys_aligned(pte_addr);
+ pte = ldl_phys(pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
@@ -692,7 +677,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
- stl_phys_aligned(pte_addr, pte);
+ stl_phys_notdirty(pte_addr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
@@ -734,12 +719,6 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
return 1;
}
-#if defined(CONFIG_USER_ONLY)
-target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
-{
- return addr;
-}
-#else
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
uint32_t pde_addr, pte_addr;
@@ -762,13 +741,13 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
- pml4e = ldl_phys_aligned(pml4e_addr);
+ pml4e = ldl_phys(pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK))
return -1;
pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
env->a20_mask;
- pdpe = ldl_phys_aligned(pdpe_addr);
+ pdpe = ldl_phys(pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
return -1;
} else
@@ -776,14 +755,14 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
env->a20_mask;
- pdpe = ldl_phys_aligned(pdpe_addr);
+ pdpe = ldl_phys(pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
return -1;
}
pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
env->a20_mask;
- pde = ldl_phys_aligned(pde_addr);
+ pde = ldl_phys(pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
return -1;
}
@@ -796,7 +775,7 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
env->a20_mask;
page_size = 4096;
- pte = ldl_phys_aligned(pte_addr);
+ pte = ldl_phys(pte_addr);
}
} else {
if (!(env->cr[0] & CR0_PG_MASK)) {
@@ -805,7 +784,7 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
} else {
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
- pde = ldl_phys_aligned(pde_addr);
+ pde = ldl_phys(pde_addr);
if (!(pde & PG_PRESENT_MASK))
return -1;
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
@@ -814,7 +793,7 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
} else {
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
- pte = ldl_phys_aligned(pte_addr);
+ pte = ldl_phys(pte_addr);
if (!(pte & PG_PRESENT_MASK))
return -1;
page_size = 4096;
@@ -827,7 +806,7 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
return paddr;
}
-#endif
+#endif /* !CONFIG_USER_ONLY */
#if defined(USE_CODE_COPY)
struct fpstate {
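The user-only branch added at the top of cpu_x86_handle_mmu_fault() now just records the faulting address in CR2, builds a page-fault error code and returns 1 so the caller raises the exception. As a hedged aside (not part of the commit), the PG_ERROR_* constants it uses follow the architectural x86 #PF error-code layout: bit 0 means the page was present (protection fault), bit 1 means the access was a write, bit 2 means it came from user mode. make_pf_error_code below restates that layout with an illustrative name.

/* Hedged sketch of the x86 page-fault error code layout;
   make_pf_error_code is an illustrative helper, not a QEMU function. */
static int make_pf_error_code(int present, int is_write, int is_user)
{
    int ec = 0;
    if (present)  ec |= 1 << 0;   /* fault on a present page (protection fault) */
    if (is_write) ec |= 1 << 1;   /* faulting access was a write */
    if (is_user)  ec |= 1 << 2;   /* access originated in user mode (CPL 3) */
    return ec;
}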
diff --git a/target-ppc/helper.c b/target-ppc/helper.c
index 5d41fcb058..f3db55fe4d 100644
--- a/target-ppc/helper.c
+++ b/target-ppc/helper.c
@@ -129,8 +129,8 @@ static int find_pte (uint32_t *RPN, int *prot, uint32_t base, uint32_t va,
int ret = -1; /* No entry found */
for (i = 0; i < 8; i++) {
- pte0 = ldl_raw(phys_ram_base + base + (i * 8));
- pte1 = ldl_raw(phys_ram_base + base + (i * 8) + 4);
+ pte0 = ldl_phys(base + (i * 8));
+ pte1 = ldl_phys(base + (i * 8) + 4);
#if defined (DEBUG_MMU)
if (loglevel > 0) {
fprintf(logfile, "Load pte from 0x%08x => 0x%08x 0x%08x "
@@ -220,7 +220,7 @@ static int find_pte (uint32_t *RPN, int *prot, uint32_t base, uint32_t va,
}
}
if (store) {
- stl_raw(phys_ram_base + base + (good * 8) + 4, keep);
+ stl_phys_notdirty(base + (good * 8) + 4, keep);
}
}
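The target-ppc change routes the hash-table walk through the new accessors instead of dereferencing phys_ram_base directly, so the lookup goes through the common physical-memory path, and the R/C write-back at the end uses stl_phys_notdirty() for the same reason as the x86 accessed/dirty updates: page-table maintenance must not perturb dirty tracking or invalidate translated code. As a hedged sketch (not part of the commit), each of the 8 entries in a PTE group is fetched as two aligned 32-bit words; load_pte_pair is an illustrative name, not a QEMU function.

/* Hedged sketch of the per-entry fetch the find_pte loop now performs. */
static void load_pte_pair(uint32_t base, int i, uint32_t *pte0, uint32_t *pte1)
{
    *pte0 = ldl_phys(base + i * 8);       /* word 0: valid bit, VSID, H, API */
    *pte1 = ldl_phys(base + i * 8 + 4);   /* word 1: RPN, R/C bits, WIMG, PP */
}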