author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2016-06-28 08:48:34 +0200
committer  David Gibson <david@gibson.dropbear.id.au>          2016-07-01 09:57:01 +1000
commit     4322e8ced5aaac7191958f09622d199fe61e2d87 (patch)
tree       a5391ba478bd020001332b556dc06ce06dc5e5f9 /target-ppc
parent     a36848ff7ca54d9181ec6c2202ce7563a2c5cfdc (diff)
ppc: Fix 64K pages support in full emulation
We were always advertising only 4K & 16M. Additionally the code wasn't
properly matching the page size with the PTE content, which meant we could
potentially hit an incorrect PTE if the guest used multiple sizes.

Finally, honor the CPU capabilities when decoding the size from the SLB
so we don't try to use 64K pages on 970.

This still doesn't add support for MPSS (Multiple Page Sizes per Segment)

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[clg: fixed checkpatch.pl errors
      commits 61a36c9b5a12 and 1114e712c998 reworked the hpte code doing
      insertion/removal in hw/ppc/spapr_hcall.c. The hunks modifying these
      areas were removed. ]
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
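
For readers skimming the hunks below, here is a minimal standalone sketch of the size check this patch introduces (the real function is ppc_hash64_pte_size_decode in mmu-hash64.c; decode_pshift and the sample values here are illustrative only). A PTE is accepted only when its encoded page size agrees with the base page size selected by the SLB entry; a return of 0 tells the search loop to skip that PTE:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative copy of the decode rule added below: 4K always matches,
     * a 64K segment requires the pattern 0x1000 in pte1 bits 15:12, a 16M
     * segment requires pte1 bits 19:12 to be clear.  Anything else returns
     * 0, meaning "skip this PTE". */
    static uint32_t decode_pshift(uint64_t pte1, uint32_t slb_pshift)
    {
        switch (slb_pshift) {
        case 12:
            return 12;
        case 16:
            return ((pte1 & 0xf000) == 0x1000) ? 16 : 0;
        case 24:
            return ((pte1 & 0xff000) == 0) ? 24 : 0;
        }
        return 0;
    }

    int main(void)
    {
        printf("%u\n", (unsigned)decode_pshift(0x1000, 16)); /* 16: 64K PTE in a 64K segment */
        printf("%u\n", (unsigned)decode_pshift(0x0000, 16)); /* 0: wrong size, entry is skipped */
        printf("%u\n", (unsigned)decode_pshift(0x0000, 24)); /* 24: 16M PTE in a 16M segment */
        return 0;
    }
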
Diffstat (limited to 'target-ppc')
-rw-r--r--  target-ppc/cpu-qom.h          3
-rw-r--r--  target-ppc/mmu-hash64.c      39
-rw-r--r--  target-ppc/translate_init.c  22
3 files changed, 57 insertions, 7 deletions
diff --git a/target-ppc/cpu-qom.h b/target-ppc/cpu-qom.h
index 0fad2def0a..286410502f 100644
--- a/target-ppc/cpu-qom.h
+++ b/target-ppc/cpu-qom.h
@@ -70,18 +70,21 @@ enum powerpc_mmu_t {
 #define POWERPC_MMU_64       0x00010000
 #define POWERPC_MMU_1TSEG    0x00020000
 #define POWERPC_MMU_AMR      0x00040000
+#define POWERPC_MMU_64K      0x00080000
     /* 64 bits PowerPC MMU */
     POWERPC_MMU_64B        = POWERPC_MMU_64 | 0x00000001,
     /* Architecture 2.03 and later (has LPCR) */
     POWERPC_MMU_2_03       = POWERPC_MMU_64 | 0x00000002,
     /* Architecture 2.06 variant */
     POWERPC_MMU_2_06       = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+                             | POWERPC_MMU_64K
                              | POWERPC_MMU_AMR | 0x00000003,
     /* Architecture 2.06 "degraded" (no 1T segments) */
     POWERPC_MMU_2_06a      = POWERPC_MMU_64 | POWERPC_MMU_AMR
                              | 0x00000003,
     /* Architecture 2.07 variant */
     POWERPC_MMU_2_07       = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+                             | POWERPC_MMU_64K
                              | POWERPC_MMU_AMR | 0x00000004,
     /* Architecture 2.07 "degraded" (no 1T segments) */
     POWERPC_MMU_2_07a      = POWERPC_MMU_64 | POWERPC_MMU_AMR
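
The new POWERPC_MMU_64K bit is an OR-able capability flag in the same spirit as POWERPC_MMU_1TSEG and POWERPC_MMU_AMR, so 64K support reduces to a mask test against env->mmu_model. A minimal standalone sketch (mmu_has_64k_pages is a hypothetical helper, not something the patch adds); the ternary added in translate_init.c further down performs the same test to pick the 64K-capable default page-size table:

    #include <stdbool.h>
    #include <stdint.h>

    #define POWERPC_MMU_64K 0x00080000  /* capability bit introduced above */

    /* Hypothetical helper, not part of the patch; mmu_model stands in
     * for env->mmu_model. */
    static bool mmu_has_64k_pages(uint32_t mmu_model)
    {
        return (mmu_model & POWERPC_MMU_64K) != 0;
    }

    int main(void)
    {
        /* POWERPC_MMU_2_07 carries the bit; the 970's POWERPC_MMU_64B does not. */
        return mmu_has_64k_pages(POWERPC_MMU_64K) ? 0 : 1;
    }
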
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 6d6f26c929..3b1357a648 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -450,9 +450,31 @@ void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
     }
 }
 
+/* Returns the effective page shift or 0. MPSS isn't supported yet so
+ * this will always be the slb_pshift or 0
+ */
+static uint32_t ppc_hash64_pte_size_decode(uint64_t pte1, uint32_t slb_pshift)
+{
+    switch (slb_pshift) {
+    case 12:
+        return 12;
+    case 16:
+        if ((pte1 & 0xf000) == 0x1000) {
+            return 16;
+        }
+        return 0;
+    case 24:
+        if ((pte1 & 0xff000) == 0) {
+            return 24;
+        }
+        return 0;
+    }
+    return 0;
+}
+
 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
-                                     bool secondary, target_ulong ptem,
-                                     ppc_hash_pte64_t *pte)
+                                     uint32_t slb_pshift, bool secondary,
+                                     target_ulong ptem, ppc_hash_pte64_t *pte)
 {
     CPUPPCState *env = &cpu->env;
     int i;
@@ -472,6 +494,13 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
         if ((pte0 & HPTE64_V_VALID)
             && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
             && HPTE64_V_COMPARE(pte0, ptem)) {
+            uint32_t pshift = ppc_hash64_pte_size_decode(pte1, slb_pshift);
+            if (pshift == 0) {
+                continue;
+            }
+            /* We don't do anything with pshift yet as qemu TLB only deals
+             * with 4K pages anyway
+             */
             pte->pte0 = pte0;
             pte->pte1 = pte1;
             ppc_hash64_stop_access(cpu, token);
@@ -525,7 +554,8 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
- pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(cpu, hash, slb->sps->page_shift,
+ 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
@@ -535,7 +565,8 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
- pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(cpu, ~hash, slb->sps->page_shift, 1,
+ ptem, pte);
}
return pte_offset;
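
Both lookups in ppc_hash64_htab_lookup now receive the segment's base page shift: the primary PTEG search uses the hash as-is, the secondary search uses its bitwise complement, and each can now reject PTEs of the wrong size inside the search loop. A standalone outline of that call pattern, with made-up demo_* names and a stub search that always misses:

    #include <stdint.h>
    #include <stdio.h>

    /* Stub standing in for ppc_hash64_pteg_search; the real function walks
     * one PTEG and applies the size check shown earlier. */
    static int64_t demo_pteg_search(uint64_t hash, uint32_t slb_pshift,
                                    int secondary)
    {
        (void)hash; (void)slb_pshift; (void)secondary;
        return -1;                      /* pretend nothing was found */
    }

    static int64_t demo_htab_lookup(uint64_t hash, uint32_t slb_pshift)
    {
        /* Primary PTEG first, now carrying the segment's page shift ... */
        int64_t off = demo_pteg_search(hash, slb_pshift, 0);
        if (off == -1) {
            /* ... then the secondary PTEG at the complemented hash. */
            off = demo_pteg_search(~hash, slb_pshift, 1);
        }
        return off;
    }

    int main(void)
    {
        printf("%lld\n", (long long)demo_htab_lookup(0x1234, 16)); /* prints -1 */
        return 0;
    }
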
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index 55d1bfac97..843f19b748 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -10293,8 +10293,8 @@ static void ppc_cpu_initfn(Object *obj)
     if (pcc->sps) {
         env->sps = *pcc->sps;
     } else if (env->mmu_model & POWERPC_MMU_64) {
-        /* Use default sets of page sizes */
-        static const struct ppc_segment_page_sizes defsps = {
+        /* Use default sets of page sizes. We don't support MPSS */
+        static const struct ppc_segment_page_sizes defsps_4k = {
             .sps = {
                 { .page_shift = 12, /* 4K */
                   .slb_enc = 0,
@@ -10306,7 +10306,23 @@ static void ppc_cpu_initfn(Object *obj)
                 },
             },
         };
-        env->sps = defsps;
+        static const struct ppc_segment_page_sizes defsps_64k = {
+            .sps = {
+                { .page_shift = 12, /* 4K */
+                  .slb_enc = 0,
+                  .enc = { { .page_shift = 12, .pte_enc = 0 } }
+                },
+                { .page_shift = 16, /* 64K */
+                  .slb_enc = 0x110,
+                  .enc = { { .page_shift = 16, .pte_enc = 1 } }
+                },
+                { .page_shift = 24, /* 16M */
+                  .slb_enc = 0x100,
+                  .enc = { { .page_shift = 24, .pte_enc = 0 } }
+                },
+            },
+        };
+        env->sps = (env->mmu_model & POWERPC_MMU_64K) ? defsps_64k : defsps_4k;
     }
 #endif /* defined(TARGET_PPC64) */
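
A closing note on the new defsps_64k table for anyone cross-checking it against the mmu-hash64.c hunk: pte_enc is the value the decode routine expects to find in the PTE's size bits, while slb_enc is the SLB encoding advertised for that page size. The sketch below (demo_sps is a made-up stand-in for the entries of ppc_segment_page_sizes) prints the pte1 pattern implied by each pte_enc, assuming, as ppc_hash64_pte_size_decode does, that the size encoding lives in pte1 bits 19:12; 64K yields 0x01000 and 16M yields 0x00000, which is exactly what the decode tests, while 4K entries are accepted unconditionally:

    #include <stdio.h>

    /* Hypothetical mirror of one entry from the defsps_64k table above;
     * only the fields used here are kept. */
    struct demo_sps { unsigned page_shift; unsigned slb_enc; unsigned pte_enc; };

    int main(void)
    {
        /* Values copied from defsps_64k. */
        const struct demo_sps sps[] = {
            { 12, 0x000, 0 },   /* 4K: any pte1 is accepted */
            { 16, 0x110, 1 },   /* 64K */
            { 24, 0x100, 0 },   /* 16M */
        };

        for (unsigned i = 0; i < sizeof(sps) / sizeof(sps[0]); i++) {
            /* The bit pattern the search accepts is pte_enc shifted into
             * the assumed pte1 size field. */
            unsigned long long expected = (unsigned long long)sps[i].pte_enc << 12;
            printf("page_shift %2u: slb_enc 0x%03x -> expect pte1 size bits 0x%05llx\n",
                   sps[i].page_shift, sps[i].slb_enc, expected);
        }
        return 0;
    }
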