From 8cbbe3851c0ed3dad2231245e47bad5acf9e2a9a Mon Sep 17 00:00:00 2001 From: Blue Swirl Date: Wed, 30 May 2012 04:23:33 +0000 Subject: ppc: Move MMU helpers from helper.c to mmu_helper.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move more MMU helpers from helper.c to mmu_helper.c. Signed-off-by: Blue Swirl Signed-off-by: Alexander Graf Signed-off-by: Andreas Färber [update to current helper.c state] Signed-off-by: Alexander Graf --- target-ppc/mmu_helper.c | 2471 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 2471 insertions(+) (limited to 'target-ppc/mmu_helper.c') diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c index e79b8f284f..1f6745192f 100644 --- a/target-ppc/mmu_helper.c +++ b/target-ppc/mmu_helper.c @@ -18,8 +18,24 @@ */ #include "cpu.h" #include "helper.h" +#include "kvm.h" +#include "kvm_ppc.h" +//#define DEBUG_MMU +//#define DEBUG_BATS +//#define DEBUG_SLB //#define DEBUG_SOFTWARE_TLB +//#define DUMP_PAGE_TABLES +//#define DEBUG_SOFTWARE_TLB +//#define FLUSH_ALL_TLBS + +#ifdef DEBUG_MMU +# define LOG_MMU(...) qemu_log(__VA_ARGS__) +# define LOG_MMU_STATE(env) log_cpu_state((env), 0) +#else +# define LOG_MMU(...) do { } while (0) +# define LOG_MMU_STATE(...) do { } while (0) +#endif #ifdef DEBUG_SOFTWARE_TLB # define LOG_SWTLB(...) qemu_log(__VA_ARGS__) @@ -27,6 +43,2461 @@ # define LOG_SWTLB(...) do { } while (0) #endif +#ifdef DEBUG_BATS +# define LOG_BATS(...) qemu_log(__VA_ARGS__) +#else +# define LOG_BATS(...) do { } while (0) +#endif + +#ifdef DEBUG_SLB +# define LOG_SLB(...) qemu_log(__VA_ARGS__) +#else +# define LOG_SLB(...) do { } while (0) +#endif + +/*****************************************************************************/ +/* PowerPC MMU emulation */ +#if defined(CONFIG_USER_ONLY) +int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw, + int mmu_idx) +{ + int exception, error_code; + + if (rw == 2) { + exception = POWERPC_EXCP_ISI; + error_code = 0x40000000; + } else { + exception = POWERPC_EXCP_DSI; + error_code = 0x40000000; + if (rw) { + error_code |= 0x02000000; + } + env->spr[SPR_DAR] = address; + env->spr[SPR_DSISR] = error_code; + } + env->exception_index = exception; + env->error_code = error_code; + + return 1; +} + +#else +/* Common routines used by software and hardware TLBs emulation */ +static inline int pte_is_valid(target_ulong pte0) +{ + return pte0 & 0x80000000 ? 1 : 0; +} + +static inline void pte_invalidate(target_ulong *pte0) +{ + *pte0 &= ~0x80000000; +} + +#if defined(TARGET_PPC64) +static inline int pte64_is_valid(target_ulong pte0) +{ + return pte0 & 0x0000000000000001ULL ? 1 : 0; +} + +static inline void pte64_invalidate(target_ulong *pte0) +{ + *pte0 &= ~0x0000000000000001ULL; +} +#endif + +#define PTE_PTEM_MASK 0x7FFFFFBF +#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) +#if defined(TARGET_PPC64) +#define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL +#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F) +#endif + +static inline int pp_check(int key, int pp, int nx) +{ + int access; + + /* Compute access rights */ + /* When pp is 3/7, the result is undefined. 
Set it to noaccess */ + access = 0; + if (key == 0) { + switch (pp) { + case 0x0: + case 0x1: + case 0x2: + access |= PAGE_WRITE; + /* No break here */ + case 0x3: + case 0x6: + access |= PAGE_READ; + break; + } + } else { + switch (pp) { + case 0x0: + case 0x6: + access = 0; + break; + case 0x1: + case 0x3: + access = PAGE_READ; + break; + case 0x2: + access = PAGE_READ | PAGE_WRITE; + break; + } + } + if (nx == 0) { + access |= PAGE_EXEC; + } + + return access; +} + +static inline int check_prot(int prot, int rw, int access_type) +{ + int ret; + + if (access_type == ACCESS_CODE) { + if (prot & PAGE_EXEC) { + ret = 0; + } else { + ret = -2; + } + } else if (rw) { + if (prot & PAGE_WRITE) { + ret = 0; + } else { + ret = -2; + } + } else { + if (prot & PAGE_READ) { + ret = 0; + } else { + ret = -2; + } + } + + return ret; +} + +static inline int pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0, + target_ulong pte1, int h, int rw, int type) +{ + target_ulong ptem, mmask; + int access, ret, pteh, ptev, pp; + + ret = -1; + /* Check validity and table match */ +#if defined(TARGET_PPC64) + if (is_64b) { + ptev = pte64_is_valid(pte0); + pteh = (pte0 >> 1) & 1; + } else +#endif + { + ptev = pte_is_valid(pte0); + pteh = (pte0 >> 6) & 1; + } + if (ptev && h == pteh) { + /* Check vsid & api */ +#if defined(TARGET_PPC64) + if (is_64b) { + ptem = pte0 & PTE64_PTEM_MASK; + mmask = PTE64_CHECK_MASK; + pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004); + ctx->nx = (pte1 >> 2) & 1; /* No execute bit */ + ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */ + } else +#endif + { + ptem = pte0 & PTE_PTEM_MASK; + mmask = PTE_CHECK_MASK; + pp = pte1 & 0x00000003; + } + if (ptem == ctx->ptem) { + if (ctx->raddr != (target_phys_addr_t)-1ULL) { + /* all matches should have equal RPN, WIMG & PP */ + if ((ctx->raddr & mmask) != (pte1 & mmask)) { + qemu_log("Bad RPN/WIMG/PP\n"); + return -3; + } + } + /* Compute access rights */ + access = pp_check(ctx->key, pp, ctx->nx); + /* Keep the matching PTE informations */ + ctx->raddr = pte1; + ctx->prot = access; + ret = check_prot(ctx->prot, rw, type); + if (ret == 0) { + /* Access granted */ + LOG_MMU("PTE access granted !\n"); + } else { + /* Access right violation */ + LOG_MMU("PTE access rejected\n"); + } + } + } + + return ret; +} + +static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0, + target_ulong pte1, int h, int rw, int type) +{ + return pte_check(ctx, 0, pte0, pte1, h, rw, type); +} + +#if defined(TARGET_PPC64) +static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0, + target_ulong pte1, int h, int rw, int type) +{ + return pte_check(ctx, 1, pte0, pte1, h, rw, type); +} +#endif + +static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p, + int ret, int rw) +{ + int store = 0; + + /* Update page flags */ + if (!(*pte1p & 0x00000100)) { + /* Update accessed flag */ + *pte1p |= 0x00000100; + store = 1; + } + if (!(*pte1p & 0x00000080)) { + if (rw == 1 && ret == 0) { + /* Update changed flag */ + *pte1p |= 0x00000080; + store = 1; + } else { + /* Force page fault for first write access */ + ctx->prot &= ~PAGE_WRITE; + } + } + + return store; +} + +/* Software driven TLB helpers */ +static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, + int way, int is_code) +{ + int nr; + + /* Select TLB num in a way from address */ + nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); + /* Select TLB way */ + nr += env->tlb_per_way * way; + /* 6xx have separate TLBs for instructions and data */ + if (is_code && 
env->id_tlbs == 1) { + nr += env->nb_tlb; + } + + return nr; +} + +static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) +{ + ppc6xx_tlb_t *tlb; + int nr, max; + + /* LOG_SWTLB("Invalidate all TLBs\n"); */ + /* Invalidate all defined software TLB */ + max = env->nb_tlb; + if (env->id_tlbs == 1) { + max *= 2; + } + for (nr = 0; nr < max; nr++) { + tlb = &env->tlb.tlb6[nr]; + pte_invalidate(&tlb->pte0); + } + tlb_flush(env, 1); +} + +static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, + target_ulong eaddr, + int is_code, int match_epn) +{ +#if !defined(FLUSH_ALL_TLBS) + ppc6xx_tlb_t *tlb; + int way, nr; + + /* Invalidate ITLB + DTLB, all ways */ + for (way = 0; way < env->nb_ways; way++) { + nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code); + tlb = &env->tlb.tlb6[nr]; + if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) { + LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr, + env->nb_tlb, eaddr); + pte_invalidate(&tlb->pte0); + tlb_flush_page(env, tlb->EPN); + } + } +#else + /* XXX: PowerPC specification say this is valid as well */ + ppc6xx_tlb_invalidate_all(env); +#endif +} + +static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env, + target_ulong eaddr, int is_code) +{ + ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0); +} + +void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way, int is_code, + target_ulong pte0, target_ulong pte1) +{ + ppc6xx_tlb_t *tlb; + int nr; + + nr = ppc6xx_tlb_getnum(env, EPN, way, is_code); + tlb = &env->tlb.tlb6[nr]; + LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx + " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1); + /* Invalidate any pending reference in QEMU for this virtual address */ + ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1); + tlb->pte0 = pte0; + tlb->pte1 = pte1; + tlb->EPN = EPN; + /* Store last way for LRU mechanism */ + env->last_way = way; +} + +static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw, int access_type) +{ + ppc6xx_tlb_t *tlb; + int nr, best, way; + int ret; + + best = -1; + ret = -1; /* No TLB found */ + for (way = 0; way < env->nb_ways; way++) { + nr = ppc6xx_tlb_getnum(env, eaddr, way, + access_type == ACCESS_CODE ? 1 : 0); + tlb = &env->tlb.tlb6[nr]; + /* This test "emulates" the PTE index match for hardware TLBs */ + if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) { + LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx + "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb, + pte_is_valid(tlb->pte0) ? "valid" : "inval", + tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr); + continue; + } + LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " " + TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb, + pte_is_valid(tlb->pte0) ? "valid" : "inval", + tlb->EPN, eaddr, tlb->pte1, + rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D'); + switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) { + case -3: + /* TLB inconsistency */ + return -1; + case -2: + /* Access violation */ + ret = -2; + best = nr; + break; + case -1: + default: + /* No match */ + break; + case 0: + /* access granted */ + /* XXX: we should go on looping to check all TLBs consistency + * but we can speed-up the whole thing as the + * result would be undefined if TLBs are not consistent. 
+ */ + ret = 0; + best = nr; + goto done; + } + } + if (best != -1) { + done: + LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n", + ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); + /* Update page flags */ + pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw); + } + + return ret; +} + +/* Perform BAT hit & translation */ +static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, + int *validp, int *protp, target_ulong *BATu, + target_ulong *BATl) +{ + target_ulong bl; + int pp, valid, prot; + + bl = (*BATu & 0x00001FFC) << 15; + valid = 0; + prot = 0; + if (((msr_pr == 0) && (*BATu & 0x00000002)) || + ((msr_pr != 0) && (*BATu & 0x00000001))) { + valid = 1; + pp = *BATl & 0x00000003; + if (pp != 0) { + prot = PAGE_READ | PAGE_EXEC; + if (pp == 0x2) { + prot |= PAGE_WRITE; + } + } + } + *blp = bl; + *validp = valid; + *protp = prot; +} + +static inline void bat_601_size_prot(CPUPPCState *env, target_ulong *blp, + int *validp, int *protp, + target_ulong *BATu, target_ulong *BATl) +{ + target_ulong bl; + int key, pp, valid, prot; + + bl = (*BATl & 0x0000003F) << 17; + LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n", + (uint8_t)(*BATl & 0x0000003F), bl, ~bl); + prot = 0; + valid = (*BATl >> 6) & 1; + if (valid) { + pp = *BATu & 0x00000003; + if (msr_pr == 0) { + key = (*BATu >> 3) & 1; + } else { + key = (*BATu >> 2) & 1; + } + prot = pp_check(key, pp, 0); + } + *blp = bl; + *validp = valid; + *protp = prot; +} + +static inline int get_bat(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong virtual, int rw, int type) +{ + target_ulong *BATlt, *BATut, *BATu, *BATl; + target_ulong BEPIl, BEPIu, bl; + int i, valid, prot; + int ret = -1; + + LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__, + type == ACCESS_CODE ? 'I' : 'D', virtual); + switch (type) { + case ACCESS_CODE: + BATlt = env->IBAT[1]; + BATut = env->IBAT[0]; + break; + default: + BATlt = env->DBAT[1]; + BATut = env->DBAT[0]; + break; + } + for (i = 0; i < env->nb_BATs; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + if (unlikely(env->mmu_model == POWERPC_MMU_601)) { + bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl); + } else { + bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); + } + LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n", __func__, + type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); + if ((virtual & 0xF0000000) == BEPIu && + ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { + /* BAT matches */ + if (valid != 0) { + /* Get physical address */ + ctx->raddr = (*BATl & 0xF0000000) | + ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | + (virtual & 0x0001F000); + /* Compute access rights */ + ctx->prot = prot; + ret = check_prot(ctx->prot, rw, type); + if (ret == 0) { + LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n", + i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', + ctx->prot & PAGE_WRITE ? 'W' : '-'); + } + break; + } + } + } + if (ret < 0) { +#if defined(DEBUG_BATS) + if (qemu_log_enabled()) { + LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual); + for (i = 0; i < 4; i++) { + BATu = &BATut[i]; + BATl = &BATlt[i]; + BEPIu = *BATu & 0xF0000000; + BEPIl = *BATu & 0x0FFE0000; + bl = (*BATu & 0x00001FFC) << 15; + LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx + " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " + TARGET_FMT_lx " " TARGET_FMT_lx "\n", + __func__, type == ACCESS_CODE ? 
'I' : 'D', i, virtual, + *BATu, *BATl, BEPIu, BEPIl, bl); + } + } +#endif + } + /* No hit */ + return ret; +} + +static inline target_phys_addr_t get_pteg_offset(CPUPPCState *env, + target_phys_addr_t hash, + int pte_size) +{ + return (hash * pte_size * 8) & env->htab_mask; +} + +/* PTE table lookup */ +static inline int find_pte2(CPUPPCState *env, mmu_ctx_t *ctx, int is_64b, int h, + int rw, int type, int target_page_bits) +{ + target_phys_addr_t pteg_off; + target_ulong pte0, pte1; + int i, good = -1; + int ret, r; + + ret = -1; /* No entry found */ + pteg_off = get_pteg_offset(env, ctx->hash[h], + is_64b ? HASH_PTE_SIZE_64 : HASH_PTE_SIZE_32); + for (i = 0; i < 8; i++) { +#if defined(TARGET_PPC64) + if (is_64b) { + if (env->external_htab) { + pte0 = ldq_p(env->external_htab + pteg_off + (i * 16)); + pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8); + } else { + pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16)); + pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8); + } + + r = pte64_check(ctx, pte0, pte1, h, rw, type); + LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " " + TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n", + pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h, + (int)((pte0 >> 1) & 1), ctx->ptem); + } else +#endif + { + if (env->external_htab) { + pte0 = ldl_p(env->external_htab + pteg_off + (i * 8)); + pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4); + } else { + pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8)); + pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4); + } + r = pte32_check(ctx, pte0, pte1, h, rw, type); + LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " " + TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n", + pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h, + (int)((pte0 >> 6) & 1), ctx->ptem); + } + switch (r) { + case -3: + /* PTE inconsistency */ + return -1; + case -2: + /* Access violation */ + ret = -2; + good = i; + break; + case -1: + default: + /* No PTE match */ + break; + case 0: + /* access granted */ + /* XXX: we should go on looping to check all PTEs consistency + * but if we can speed-up the whole thing as the + * result would be undefined if PTEs are not consistent. 
+ */ + ret = 0; + good = i; + goto done; + } + } + if (good != -1) { + done: + LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n", + ctx->raddr, ctx->prot, ret); + /* Update page flags */ + pte1 = ctx->raddr; + if (pte_update_flags(ctx, &pte1, ret, rw) == 1) { +#if defined(TARGET_PPC64) + if (is_64b) { + if (env->external_htab) { + stq_p(env->external_htab + pteg_off + (good * 16) + 8, + pte1); + } else { + stq_phys_notdirty(env->htab_base + pteg_off + + (good * 16) + 8, pte1); + } + } else +#endif + { + if (env->external_htab) { + stl_p(env->external_htab + pteg_off + (good * 8) + 4, + pte1); + } else { + stl_phys_notdirty(env->htab_base + pteg_off + + (good * 8) + 4, pte1); + } + } + } + } + + /* We have a TLB that saves 4K pages, so let's + * split a huge page to 4k chunks */ + if (target_page_bits != TARGET_PAGE_BITS) { + ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1)) + & TARGET_PAGE_MASK; + } + return ret; +} + +static inline int find_pte(CPUPPCState *env, mmu_ctx_t *ctx, int h, int rw, + int type, int target_page_bits) +{ +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + return find_pte2(env, ctx, 1, h, rw, type, target_page_bits); + } +#endif + + return find_pte2(env, ctx, 0, h, rw, type, target_page_bits); +} + +#if defined(TARGET_PPC64) +static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr) +{ + uint64_t esid_256M, esid_1T; + int n; + + LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr); + + esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V; + esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V; + + for (n = 0; n < env->slb_nr; n++) { + ppc_slb_t *slb = &env->slb[n]; + + LOG_SLB("%s: slot %d %016" PRIx64 " %016" + PRIx64 "\n", __func__, n, slb->esid, slb->vsid); + /* We check for 1T matches on all MMUs here - if the MMU + * doesn't have 1T segment support, we will have prevented 1T + * entries from being inserted in the slbmte code. 
*/ + if (((slb->esid == esid_256M) && + ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) + || ((slb->esid == esid_1T) && + ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) { + return slb; + } + } + + return NULL; +} + +void ppc_slb_invalidate_all(CPUPPCState *env) +{ + int n, do_invalidate; + + do_invalidate = 0; + /* XXX: Warning: slbia never invalidates the first segment */ + for (n = 1; n < env->slb_nr; n++) { + ppc_slb_t *slb = &env->slb[n]; + + if (slb->esid & SLB_ESID_V) { + slb->esid &= ~SLB_ESID_V; + /* XXX: given the fact that segment size is 256 MB or 1TB, + * and we still don't have a tlb_flush_mask(env, n, mask) + * in QEMU, we just invalidate all TLBs + */ + do_invalidate = 1; + } + } + if (do_invalidate) { + tlb_flush(env, 1); + } +} + +void ppc_slb_invalidate_one(CPUPPCState *env, uint64_t T0) +{ + ppc_slb_t *slb; + + slb = slb_lookup(env, T0); + if (!slb) { + return; + } + + if (slb->esid & SLB_ESID_V) { + slb->esid &= ~SLB_ESID_V; + + /* XXX: given the fact that segment size is 256 MB or 1TB, + * and we still don't have a tlb_flush_mask(env, n, mask) + * in QEMU, we just invalidate all TLBs + */ + tlb_flush(env, 1); + } +} + +int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) +{ + int slot = rb & 0xfff; + ppc_slb_t *slb = &env->slb[slot]; + + if (rb & (0x1000 - env->slb_nr)) { + return -1; /* Reserved bits set or slot too high */ + } + if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) { + return -1; /* Bad segment size */ + } + if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) { + return -1; /* 1T segment on MMU that doesn't support it */ + } + + /* Mask out the slot number as we store the entry */ + slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V); + slb->vsid = rs; + + LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64 + " %016" PRIx64 "\n", __func__, slot, rb, rs, + slb->esid, slb->vsid); + + return 0; +} + +int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb, target_ulong *rt) +{ + int slot = rb & 0xfff; + ppc_slb_t *slb = &env->slb[slot]; + + if (slot >= env->slb_nr) { + return -1; + } + + *rt = slb->esid; + return 0; +} + +int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb, target_ulong *rt) +{ + int slot = rb & 0xfff; + ppc_slb_t *slb = &env->slb[slot]; + + if (slot >= env->slb_nr) { + return -1; + } + + *rt = slb->vsid; + return 0; +} +#endif /* defined(TARGET_PPC64) */ + +/* Perform segment based translation */ +static inline int get_segment(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw, int type) +{ + target_phys_addr_t hash; + target_ulong vsid; + int ds, pr, target_page_bits; + int ret, ret2; + + pr = msr_pr; + ctx->eaddr = eaddr; +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + ppc_slb_t *slb; + target_ulong pageaddr; + int segment_bits; + + LOG_MMU("Check SLBs\n"); + slb = slb_lookup(env, eaddr); + if (!slb) { + return -5; + } + + if (slb->vsid & SLB_VSID_B) { + vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T; + segment_bits = 40; + } else { + vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; + segment_bits = 28; + } + + target_page_bits = (slb->vsid & SLB_VSID_L) + ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; + ctx->key = !!(pr ? 
(slb->vsid & SLB_VSID_KP) + : (slb->vsid & SLB_VSID_KS)); + ds = 0; + ctx->nx = !!(slb->vsid & SLB_VSID_N); + + pageaddr = eaddr & ((1ULL << segment_bits) + - (1ULL << target_page_bits)); + if (slb->vsid & SLB_VSID_B) { + hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits); + } else { + hash = vsid ^ (pageaddr >> target_page_bits); + } + /* Only 5 bits of the page index are used in the AVPN */ + ctx->ptem = (slb->vsid & SLB_VSID_PTEM) | + ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80)); + } else +#endif /* defined(TARGET_PPC64) */ + { + target_ulong sr, pgidx; + + sr = env->sr[eaddr >> 28]; + ctx->key = (((sr & 0x20000000) && (pr != 0)) || + ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; + ds = sr & 0x80000000 ? 1 : 0; + ctx->nx = sr & 0x10000000 ? 1 : 0; + vsid = sr & 0x00FFFFFF; + target_page_bits = TARGET_PAGE_BITS; + LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip=" + TARGET_FMT_lx " lr=" TARGET_FMT_lx + " ir=%d dr=%d pr=%d %d t=%d\n", + eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, + (int)msr_dr, pr != 0 ? 1 : 0, rw, type); + pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; + hash = vsid ^ pgidx; + ctx->ptem = (vsid << 7) | (pgidx >> 10); + } + LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", + ctx->key, ds, ctx->nx, vsid); + ret = -1; + if (!ds) { + /* Check if instruction fetch is allowed, if needed */ + if (type != ACCESS_CODE || ctx->nx == 0) { + /* Page address translation */ + LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx + " hash " TARGET_FMT_plx "\n", + env->htab_base, env->htab_mask, hash); + ctx->hash[0] = hash; + ctx->hash[1] = ~hash; + + /* Initialize real address with an invalid value */ + ctx->raddr = (target_phys_addr_t)-1ULL; + if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx || + env->mmu_model == POWERPC_MMU_SOFT_74xx)) { + /* Software TLB search */ + ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); + } else { + LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx + " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx + " hash=" TARGET_FMT_plx "\n", + env->htab_base, env->htab_mask, vsid, ctx->ptem, + ctx->hash[0]); + /* Primary table lookup */ + ret = find_pte(env, ctx, 0, rw, type, target_page_bits); + if (ret < 0) { + /* Secondary table lookup */ + if (eaddr != 0xEFFFFFFF) { + LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx + " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx + " hash=" TARGET_FMT_plx "\n", env->htab_base, + env->htab_mask, vsid, ctx->ptem, ctx->hash[1]); + } + ret2 = find_pte(env, ctx, 1, rw, type, + target_page_bits); + if (ret2 != -1) { + ret = ret2; + } + } + } +#if defined(DUMP_PAGE_TABLES) + if (qemu_log_enabled()) { + target_phys_addr_t curaddr; + uint32_t a0, a1, a2, a3; + + qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx + "\n", sdr, mask + 0x80); + for (curaddr = sdr; curaddr < (sdr + mask + 0x80); + curaddr += 16) { + a0 = ldl_phys(curaddr); + a1 = ldl_phys(curaddr + 4); + a2 = ldl_phys(curaddr + 8); + a3 = ldl_phys(curaddr + 12); + if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { + qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", + curaddr, a0, a1, a2, a3); + } + } + } +#endif + } else { + LOG_MMU("No access allowed\n"); + ret = -3; + } + } else { + target_ulong sr; + + LOG_MMU("direct store...\n"); + /* Direct-store segment : absolutely *BUGGY* for now */ + + /* Direct-store implies a 32-bit MMU. + * Check the Segment Register's bus unit ID (BUID). 
+ */ + sr = env->sr[eaddr >> 28]; + if ((sr & 0x1FF00000) >> 20 == 0x07f) { + /* Memory-forced I/O controller interface access */ + /* If T=1 and BUID=x'07F', the 601 performs a memory access + * to SR[28-31] LA[4-31], bypassing all protection mechanisms. + */ + ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); + ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return 0; + } + + switch (type) { + case ACCESS_INT: + /* Integer load/store : only access allowed */ + break; + case ACCESS_CODE: + /* No code fetch is allowed in direct-store areas */ + return -4; + case ACCESS_FLOAT: + /* Floating point load/store */ + return -4; + case ACCESS_RES: + /* lwarx, ldarx or srwcx. */ + return -4; + case ACCESS_CACHE: + /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ + /* Should make the instruction do no-op. + * As it already do no-op, it's quite easy :-) + */ + ctx->raddr = eaddr; + return 0; + case ACCESS_EXT: + /* eciwx or ecowx */ + return -4; + default: + qemu_log("ERROR: instruction should not need " + "address translation\n"); + return -4; + } + if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { + ctx->raddr = eaddr; + ret = 2; + } else { + ret = -2; + } + } + + return ret; +} + +/* Generic TLB check function for embedded PowerPC implementations */ +int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, + target_phys_addr_t *raddrp, + target_ulong address, uint32_t pid, int ext, + int i) +{ + target_ulong mask; + + /* Check valid flag */ + if (!(tlb->prot & PAGE_VALID)) { + return -1; + } + mask = ~(tlb->size - 1); + LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx + " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN, + mask, (uint32_t)tlb->PID, tlb->prot); + /* Check PID */ + if (tlb->PID != 0 && tlb->PID != pid) { + return -1; + } + /* Check effective address */ + if ((address & mask) != tlb->EPN) { + return -1; + } + *raddrp = (tlb->RPN & mask) | (address & ~mask); +#if (TARGET_PHYS_ADDR_BITS >= 36) + if (ext) { + /* Extend the physical address to 36 bits */ + *raddrp |= (target_phys_addr_t)(tlb->RPN & 0xF) << 32; + } +#endif + + return 0; +} + +/* Generic TLB search function for PowerPC embedded implementations */ +int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid) +{ + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + int i, ret; + + /* Default return value is no match */ + ret = -1; + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) { + ret = i; + break; + } + } + + return ret; +} + +/* Helpers specific to PowerPC 40x implementations */ +static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) +{ + ppcemb_tlb_t *tlb; + int i; + + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + tlb->prot &= ~PAGE_VALID; + } + tlb_flush(env, 1); +} + +static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env, + target_ulong eaddr, uint32_t pid) +{ +#if !defined(FLUSH_ALL_TLBS) + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + target_ulong page, end; + int i; + + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) { + end = tlb->EPN + tlb->size; + for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env, page); + } + tlb->prot &= ~PAGE_VALID; + break; + } + } +#else + ppc4xx_tlb_invalidate_all(env); +#endif +} + +static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong 
address, int rw, + int access_type) +{ + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + int i, ret, zsel, zpr, pr; + + ret = -1; + raddr = (target_phys_addr_t)-1ULL; + pr = msr_pr; + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (ppcemb_tlb_check(env, tlb, &raddr, address, + env->spr[SPR_40x_PID], 0, i) < 0) { + continue; + } + zsel = (tlb->attr >> 4) & 0xF; + zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; + LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n", + __func__, i, zsel, zpr, rw, tlb->attr); + /* Check execute enable bit */ + switch (zpr) { + case 0x2: + if (pr != 0) { + goto check_perms; + } + /* No break here */ + case 0x3: + /* All accesses granted */ + ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + ret = 0; + break; + case 0x0: + if (pr != 0) { + /* Raise Zone protection fault. */ + env->spr[SPR_40x_ESR] = 1 << 22; + ctx->prot = 0; + ret = -2; + break; + } + /* No break here */ + case 0x1: + check_perms: + /* Check from TLB entry */ + ctx->prot = tlb->prot; + ret = check_prot(ctx->prot, rw, access_type); + if (ret == -2) { + env->spr[SPR_40x_ESR] = 0; + } + break; + } + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + return 0; + } + } + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + + return ret; +} + +void store_40x_sler(CPUPPCState *env, uint32_t val) +{ + /* XXX: TO BE FIXED */ + if (val != 0x00000000) { + cpu_abort(env, "Little-endian regions are not supported by now\n"); + } + env->spr[SPR_405_SLER] = val; +} + +static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, + target_phys_addr_t *raddr, int *prot, + target_ulong address, int rw, + int access_type, int i) +{ + int ret, prot2; + + if (ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID], + !env->nb_pids, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1], 0, i) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2], 0, i) >= 0) { + goto found_tlb; + } + + LOG_SWTLB("%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (msr_pr != 0) { + prot2 = tlb->prot & 0xF; + } else { + prot2 = (tlb->prot >> 4) & 0xF; + } + + /* Check the address space */ + if (access_type == ACCESS_CODE) { + if (msr_ir != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if (prot2 & PAGE_EXEC) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); + ret = -3; + } else { + if (msr_dr != (tlb->attr & 1)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { + LOG_SWTLB("%s: found TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); + ret = -2; + } + + return ret; +} + +static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, int rw, + int access_type) +{ + ppcemb_tlb_t *tlb; + target_phys_addr_t raddr; + int i, ret; + + ret = -1; + raddr = (target_phys_addr_t)-1ULL; + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + ret = mmubooke_check_tlb(env, tlb, 
&raddr, &ctx->prot, address, rw, + access_type, i); + if (!ret) { + break; + } + } + + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + } else { + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + } + + return ret; +} + +void booke206_flush_tlb(CPUPPCState *env, int flags, const int check_iprot) +{ + int tlb_size; + int i, j; + ppcmas_tlb_t *tlb = env->tlb.tlbm; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + if (flags & (1 << i)) { + tlb_size = booke206_tlb_size(env, i); + for (j = 0; j < tlb_size; j++) { + if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) { + tlb[j].mas1 &= ~MAS1_VALID; + } + } + } + tlb += booke206_tlb_size(env, i); + } + + tlb_flush(env, 1); +} + +target_phys_addr_t booke206_tlb_to_page_size(CPUPPCState *env, + ppcmas_tlb_t *tlb) +{ + int tlbm_size; + + tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + + return 1024ULL << tlbm_size; +} + +/* TLB check function for MAS based SoftTLBs */ +int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, + target_phys_addr_t *raddrp, + target_ulong address, uint32_t pid) +{ + target_ulong mask; + uint32_t tlb_pid; + + /* Check valid flag */ + if (!(tlb->mas1 & MAS1_VALID)) { + return -1; + } + + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%" + PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n", + __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3, + tlb->mas8); + + /* Check PID */ + tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; + if (tlb_pid != 0 && tlb_pid != pid) { + return -1; + } + + /* Check effective address */ + if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { + return -1; + } + + if (raddrp) { + *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); + } + + return 0; +} + +static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, + target_phys_addr_t *raddr, int *prot, + target_ulong address, int rw, + int access_type) +{ + int ret; + int prot2 = 0; + + if (ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID]) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID1] && + ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID1]) >= 0) { + goto found_tlb; + } + + if (env->spr[SPR_BOOKE_PID2] && + ppcmas_tlb_check(env, tlb, raddr, address, + env->spr[SPR_BOOKE_PID2]) >= 0) { + goto found_tlb; + } + + LOG_SWTLB("%s: TLB entry not found\n", __func__); + return -1; + +found_tlb: + + if (msr_pr != 0) { + if (tlb->mas7_3 & MAS3_UR) { + prot2 |= PAGE_READ; + } + if (tlb->mas7_3 & MAS3_UW) { + prot2 |= PAGE_WRITE; + } + if (tlb->mas7_3 & MAS3_UX) { + prot2 |= PAGE_EXEC; + } + } else { + if (tlb->mas7_3 & MAS3_SR) { + prot2 |= PAGE_READ; + } + if (tlb->mas7_3 & MAS3_SW) { + prot2 |= PAGE_WRITE; + } + if (tlb->mas7_3 & MAS3_SX) { + prot2 |= PAGE_EXEC; + } + } + + /* Check the address space and permissions */ + if (access_type == ACCESS_CODE) { + if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + return -1; + } + + *prot = prot2; + if (prot2 & PAGE_EXEC) { + LOG_SWTLB("%s: good TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2); + ret = -3; + } else { + if (msr_dr != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { + LOG_SWTLB("%s: AS doesn't match\n", __func__); + 
return -1; + } + + *prot = prot2; + if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) { + LOG_SWTLB("%s: found TLB!\n", __func__); + return 0; + } + + LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2); + ret = -2; + } + + return ret; +} + +static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong address, int rw, + int access_type) +{ + ppcmas_tlb_t *tlb; + target_phys_addr_t raddr; + int i, j, ret; + + ret = -1; + raddr = (target_phys_addr_t)-1ULL; + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int ways = booke206_tlb_ways(env, i); + + for (j = 0; j < ways; j++) { + tlb = booke206_get_tlbm(env, i, address, j); + if (!tlb) { + continue; + } + ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, + rw, access_type); + if (ret != -1) { + goto found_tlb; + } + } + } + +found_tlb: + + if (ret >= 0) { + ctx->raddr = raddr; + LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, ctx->raddr, ctx->prot, + ret); + } else { + LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx + " %d %d\n", __func__, address, raddr, ctx->prot, ret); + } + + return ret; +} + +static const char *book3e_tsize_to_str[32] = { + "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", + "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", + "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G", + "1T", "2T" +}; + +static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf, + CPUPPCState *env) +{ + ppcemb_tlb_t *entry; + int i; + + if (kvm_enabled() && !env->kvm_sw_tlb) { + cpu_fprintf(f, "Cannot access KVM TLB\n"); + return; + } + + cpu_fprintf(f, "\nTLB:\n"); + cpu_fprintf(f, "Effective Physical Size PID Prot " + "Attr\n"); + + entry = &env->tlb.tlbe[0]; + for (i = 0; i < env->nb_tlb; i++, entry++) { + target_phys_addr_t ea, pa; + target_ulong mask; + uint64_t size = (uint64_t)entry->size; + char size_buf[20]; + + /* Check valid flag */ + if (!(entry->prot & PAGE_VALID)) { + continue; + } + + mask = ~(entry->size - 1); + ea = entry->EPN & mask; + pa = entry->RPN & mask; +#if (TARGET_PHYS_ADDR_BITS >= 36) + /* Extend the physical address to 36 bits */ + pa |= (target_phys_addr_t)(entry->RPN & 0xF) << 32; +#endif + size /= 1024; + if (size >= 1024) { + snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024); + } else { + snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size); + } + cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n", + (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, + entry->prot, entry->attr); + } + +} + +static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf, + CPUPPCState *env, int tlbn, int offset, + int tlbsize) +{ + ppcmas_tlb_t *entry; + int i; + + cpu_fprintf(f, "\nTLB%d:\n", tlbn); + cpu_fprintf(f, "Effective Physical Size TID TS SRWX" + " URWX WIMGE U0123\n"); + + entry = &env->tlb.tlbm[offset]; + for (i = 0; i < tlbsize; i++, entry++) { + target_phys_addr_t ea, pa, size; + int tsize; + + if (!(entry->mas1 & MAS1_VALID)) { + continue; + } + + tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + size = 1024ULL << tsize; + ea = entry->mas2 & ~(size - 1); + pa = entry->mas7_3 & ~(size - 1); + + cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c" + "U%c%c%c %c%c%c%c%c U%c%c%c%c\n", + (uint64_t)ea, (uint64_t)pa, + book3e_tsize_to_str[tsize], + (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT, + (entry->mas1 & 
MAS1_TS) >> MAS1_TS_SHIFT, + entry->mas7_3 & MAS3_SR ? 'R' : '-', + entry->mas7_3 & MAS3_SW ? 'W' : '-', + entry->mas7_3 & MAS3_SX ? 'X' : '-', + entry->mas7_3 & MAS3_UR ? 'R' : '-', + entry->mas7_3 & MAS3_UW ? 'W' : '-', + entry->mas7_3 & MAS3_UX ? 'X' : '-', + entry->mas2 & MAS2_W ? 'W' : '-', + entry->mas2 & MAS2_I ? 'I' : '-', + entry->mas2 & MAS2_M ? 'M' : '-', + entry->mas2 & MAS2_G ? 'G' : '-', + entry->mas2 & MAS2_E ? 'E' : '-', + entry->mas7_3 & MAS3_U0 ? '0' : '-', + entry->mas7_3 & MAS3_U1 ? '1' : '-', + entry->mas7_3 & MAS3_U2 ? '2' : '-', + entry->mas7_3 & MAS3_U3 ? '3' : '-'); + } +} + +static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf, + CPUPPCState *env) +{ + int offset = 0; + int i; + + if (kvm_enabled() && !env->kvm_sw_tlb) { + cpu_fprintf(f, "Cannot access KVM TLB\n"); + return; + } + + for (i = 0; i < BOOKE206_MAX_TLBN; i++) { + int size = booke206_tlb_size(env, i); + + if (size == 0) { + continue; + } + + mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size); + offset += size; + } +} + +#if defined(TARGET_PPC64) +static void mmubooks_dump_mmu(FILE *f, fprintf_function cpu_fprintf, + CPUPPCState *env) +{ + int i; + uint64_t slbe, slbv; + + cpu_synchronize_state(env); + + cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n"); + for (i = 0; i < env->slb_nr; i++) { + slbe = env->slb[i].esid; + slbv = env->slb[i].vsid; + if (slbe == 0 && slbv == 0) { + continue; + } + cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n", + i, slbe, slbv); + } +} +#endif + +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env) +{ + switch (env->mmu_model) { + case POWERPC_MMU_BOOKE: + mmubooke_dump_mmu(f, cpu_fprintf, env); + break; + case POWERPC_MMU_BOOKE206: + mmubooke206_dump_mmu(f, cpu_fprintf, env); + break; +#if defined(TARGET_PPC64) + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + mmubooks_dump_mmu(f, cpu_fprintf, env); + break; +#endif + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__); + } +} + +static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, int rw) +{ + int in_plb, ret; + + ctx->raddr = eaddr; + ctx->prot = PAGE_READ | PAGE_EXEC; + ret = 0; + switch (env->mmu_model) { + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_REAL: + case POWERPC_MMU_BOOKE: + ctx->prot |= PAGE_WRITE; + break; +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + /* Real address are 60 bits long */ + ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL; + ctx->prot |= PAGE_WRITE; + break; +#endif + case POWERPC_MMU_SOFT_4xx_Z: + if (unlikely(msr_pe != 0)) { + /* 403 family add some particular protections, + * using PBL/PBU registers for accesses with no translation. + */ + in_plb = + /* Check PLB validity */ + (env->pb[0] < env->pb[1] && + /* and address in plb area */ + eaddr >= env->pb[0] && eaddr < env->pb[1]) || + (env->pb[2] < env->pb[3] && + eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 
1 : 0; + if (in_plb ^ msr_px) { + /* Access in protected area */ + if (rw == 1) { + /* Access is not allowed */ + ret = -2; + } + } else { + /* Read-write access is allowed */ + ctx->prot |= PAGE_WRITE; + } + } + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE206: + cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n"); + break; + default: + cpu_abort(env, "Unknown or invalid MMU model\n"); + return -1; + } + + return ret; +} + +int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, + int rw, int access_type) +{ + int ret; + +#if 0 + qemu_log("%s\n", __func__); +#endif + if ((access_type == ACCESS_CODE && msr_ir == 0) || + (access_type != ACCESS_CODE && msr_dr == 0)) { + if (env->mmu_model == POWERPC_MMU_BOOKE) { + /* The BookE MMU always performs address translation. The + IS and DS bits only affect the address space. */ + ret = mmubooke_get_physical_address(env, ctx, eaddr, + rw, access_type); + } else if (env->mmu_model == POWERPC_MMU_BOOKE206) { + ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, + access_type); + } else { + /* No address translation. */ + ret = check_physical(env, ctx, eaddr, rw); + } + } else { + ret = -1; + switch (env->mmu_model) { + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + /* Try to find a BAT */ + if (env->nb_BATs != 0) { + ret = get_bat(env, ctx, eaddr, rw, access_type); + } +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: +#endif + if (ret < 0) { + /* We didn't match any BAT entry or don't have BATs */ + ret = get_segment(env, ctx, eaddr, rw, access_type); + } + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + ret = mmu40x_get_physical_address(env, ctx, eaddr, + rw, access_type); + break; + case POWERPC_MMU_BOOKE: + ret = mmubooke_get_physical_address(env, ctx, eaddr, + rw, access_type); + break; + case POWERPC_MMU_BOOKE206: + ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw, + access_type); + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_REAL: + cpu_abort(env, "PowerPC in real mode do not do any translation\n"); + return -1; + default: + cpu_abort(env, "Unknown or invalid MMU model\n"); + return -1; + } + } +#if 0 + qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n", + __func__, eaddr, ret, ctx->raddr); +#endif + + return ret; +} + +target_phys_addr_t cpu_get_phys_page_debug(CPUPPCState *env, target_ulong addr) +{ + mmu_ctx_t ctx; + + if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) { + return -1; + } + + return ctx.raddr & TARGET_PAGE_MASK; +} + +static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, + int rw) +{ + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS6] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + /* AS */ + if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; + } + + env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; + env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; + + switch (env->spr[SPR_BOOKE_MAS4] & 
MAS4_TIDSELD_PIDZ) { + case MAS4_TIDSELD_PID0: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT; + break; + case MAS4_TIDSELD_PID1: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT; + break; + case MAS4_TIDSELD_PID2: + env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT; + break; + } + + env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +/* Perform address translation */ +int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw, + int mmu_idx) +{ + mmu_ctx_t ctx; + int access_type; + int ret = 0; + + if (rw == 2) { + /* code access */ + rw = 0; + access_type = ACCESS_CODE; + } else { + /* data access */ + access_type = env->access_type; + } + ret = get_physical_address(env, &ctx, address, rw, access_type); + if (ret == 0) { + tlb_set_page(env, address & TARGET_PAGE_MASK, + ctx.raddr & TARGET_PAGE_MASK, ctx.prot, + mmu_idx, TARGET_PAGE_SIZE); + ret = 0; + } else if (ret < 0) { + LOG_MMU_STATE(env); + if (access_type == ACCESS_CODE) { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + env->exception_index = POWERPC_EXCP_IFTLB; + env->error_code = 1 << 18; + env->spr[SPR_IMISS] = address; + env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; + goto tlb_miss; + case POWERPC_MMU_SOFT_74xx: + env->exception_index = POWERPC_EXCP_IFTLB; + goto tlb_miss_74xx; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + env->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = address; + env->spr[SPR_40x_ESR] = 0x00000000; + break; + case POWERPC_MMU_32B: + case POWERPC_MMU_601: +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: +#endif + env->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x40000000; + break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, address, rw); + /* fall through */ + case POWERPC_MMU_BOOKE: + env->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_BOOKE_DEAR] = address; + return -1; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_REAL: + cpu_abort(env, "PowerPC in real mode should never raise " + "any MMU exceptions\n"); + return -1; + default: + cpu_abort(env, "Unknown or invalid MMU model\n"); + return -1; + } + break; + case -2: + /* Access rights violation */ + env->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x08000000; + break; + case -3: + /* No execute protection violation */ + if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->spr[SPR_BOOKE_ESR] = 0x00000000; + } + env->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + break; + case -4: + /* Direct store exception */ + /* No code fetch is allowed in direct-store areas */ + env->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + break; +#if defined(TARGET_PPC64) + case -5: + /* No match in segment table */ + if (env->mmu_model == POWERPC_MMU_620) { + env->exception_index = POWERPC_EXCP_ISI; + /* XXX: this might be incorrect */ + env->error_code = 0x40000000; + } else { + env->exception_index = POWERPC_EXCP_ISEG; + 
env->error_code = 0; + } + break; +#endif + } + } else { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + if (rw == 1) { + env->exception_index = POWERPC_EXCP_DSTLB; + env->error_code = 1 << 16; + } else { + env->exception_index = POWERPC_EXCP_DLTLB; + env->error_code = 0; + } + env->spr[SPR_DMISS] = address; + env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; + tlb_miss: + env->error_code |= ctx.key << 19; + env->spr[SPR_HASH1] = env->htab_base + + get_pteg_offset(env, ctx.hash[0], HASH_PTE_SIZE_32); + env->spr[SPR_HASH2] = env->htab_base + + get_pteg_offset(env, ctx.hash[1], HASH_PTE_SIZE_32); + break; + case POWERPC_MMU_SOFT_74xx: + if (rw == 1) { + env->exception_index = POWERPC_EXCP_DSTLB; + } else { + env->exception_index = POWERPC_EXCP_DLTLB; + } + tlb_miss_74xx: + /* Implement LRU algorithm */ + env->error_code = ctx.key << 19; + env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | + ((env->last_way + 1) & (env->nb_ways - 1)); + env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + env->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = address; + if (rw) { + env->spr[SPR_40x_ESR] = 0x00800000; + } else { + env->spr[SPR_40x_ESR] = 0x00000000; + } + break; + case POWERPC_MMU_32B: + case POWERPC_MMU_601: +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: +#endif + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x42000000; + } else { + env->spr[SPR_DSISR] = 0x40000000; + } + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE206: + booke206_update_mas_tlb_miss(env, address, rw); + /* fall through */ + case POWERPC_MMU_BOOKE: + env->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_BOOKE_DEAR] = address; + env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; + return -1; + case POWERPC_MMU_REAL: + cpu_abort(env, "PowerPC in real mode should never raise " + "any MMU exceptions\n"); + return -1; + default: + cpu_abort(env, "Unknown or invalid MMU model\n"); + return -1; + } + break; + case -2: + /* Access rights violation */ + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + if (env->mmu_model == POWERPC_MMU_SOFT_4xx + || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { + env->spr[SPR_40x_DEAR] = address; + if (rw) { + env->spr[SPR_40x_ESR] |= 0x00800000; + } + } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || + (env->mmu_model == POWERPC_MMU_BOOKE206)) { + env->spr[SPR_BOOKE_DEAR] = address; + env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; + } else { + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x0A000000; + } else { + env->spr[SPR_DSISR] = 0x08000000; + } + } + break; + case -4: + /* Direct store exception */ + switch (access_type) { + case ACCESS_FLOAT: + /* Floating point load/store */ + env->exception_index = POWERPC_EXCP_ALIGN; + env->error_code = POWERPC_EXCP_ALIGN_FP; + env->spr[SPR_DAR] = address; + break; + case ACCESS_RES: + /* lwarx, ldarx or stwcx. 
*/ + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x06000000; + } else { + env->spr[SPR_DSISR] = 0x04000000; + } + break; + case ACCESS_EXT: + /* eciwx or ecowx */ + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + if (rw == 1) { + env->spr[SPR_DSISR] = 0x06100000; + } else { + env->spr[SPR_DSISR] = 0x04100000; + } + break; + default: + printf("DSI: invalid exception (%d)\n", ret); + env->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = + POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; + env->spr[SPR_DAR] = address; + break; + } + break; +#if defined(TARGET_PPC64) + case -5: + /* No match in segment table */ + if (env->mmu_model == POWERPC_MMU_620) { + env->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = address; + /* XXX: this might be incorrect */ + if (rw == 1) { + env->spr[SPR_DSISR] = 0x42000000; + } else { + env->spr[SPR_DSISR] = 0x40000000; + } + } else { + env->exception_index = POWERPC_EXCP_DSEG; + env->error_code = 0; + env->spr[SPR_DAR] = address; + } + break; +#endif + } + } +#if 0 + printf("%s: set exception to %d %02x\n", __func__, + env->exception, env->error_code); +#endif + ret = 1; + } + + return ret; +} + +/*****************************************************************************/ +/* BATs management */ +#if !defined(FLUSH_ALL_TLBS) +static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, + target_ulong mask) +{ + target_ulong base, end, page; + + base = BATu & ~0x0001FFFF; + end = base + mask + 0x00020000; + LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" + TARGET_FMT_lx ")\n", base, end, mask); + for (page = base; page != end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env, page); + } + LOG_BATS("Flush done\n"); +} +#endif + +static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr, + target_ulong value) +{ + LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID, + nr, ul == 0 ? 
'u' : 'l', value, env->nip); +} + +void ppc_store_ibatu(CPUPPCState *env, int nr, target_ulong value) +{ + target_ulong mask; + + dump_store_bat(env, 'I', 0, nr, value); + if (env->IBAT[0][nr] != value) { + mask = (value << 15) & 0x0FFE0000UL; +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#endif + /* When storing valid upper BAT, mask BEPI and BRPN + * and invalidate all TLBs covered by this BAT + */ + mask = (value << 15) & 0x0FFE0000UL; + env->IBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | + (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + tlb_flush(env, 1); +#endif + } +} + +void ppc_store_ibatl(CPUPPCState *env, int nr, target_ulong value) +{ + dump_store_bat(env, 'I', 1, nr, value); + env->IBAT[1][nr] = value; +} + +void ppc_store_dbatu(CPUPPCState *env, int nr, target_ulong value) +{ + target_ulong mask; + + dump_store_bat(env, 'D', 0, nr, value); + if (env->DBAT[0][nr] != value) { + /* When storing valid upper BAT, mask BEPI and BRPN + * and invalidate all TLBs covered by this BAT + */ + mask = (value << 15) & 0x0FFE0000UL; +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->DBAT[0][nr], mask); +#endif + mask = (value << 15) & 0x0FFE0000UL; + env->DBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | + (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->DBAT[0][nr], mask); +#else + tlb_flush(env, 1); +#endif + } +} + +void ppc_store_dbatl(CPUPPCState *env, int nr, target_ulong value) +{ + dump_store_bat(env, 'D', 1, nr, value); + env->DBAT[1][nr] = value; +} + +void ppc_store_ibatu_601(CPUPPCState *env, int nr, target_ulong value) +{ + target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + int do_inval; +#endif + + dump_store_bat(env, 'I', 0, nr, value); + if (env->IBAT[0][nr] != value) { +#if defined(FLUSH_ALL_TLBS) + do_inval = 0; +#endif + mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; + if (env->IBAT[1][nr] & 0x40) { + /* Invalidate BAT only if it is valid */ +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } + /* When storing valid upper BAT, mask BEPI and BRPN + * and invalidate all TLBs covered by this BAT + */ + env->IBAT[0][nr] = (value & 0x00001FFFUL) | + (value & ~0x0001FFFFUL & ~mask); + env->DBAT[0][nr] = env->IBAT[0][nr]; + if (env->IBAT[1][nr] & 0x40) { +#if !defined(FLUSH_ALL_TLBS) + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } +#if defined(FLUSH_ALL_TLBS) + if (do_inval) { + tlb_flush(env, 1); + } +#endif + } +} + +void ppc_store_ibatl_601(CPUPPCState *env, int nr, target_ulong value) +{ + target_ulong mask; +#if defined(FLUSH_ALL_TLBS) + int do_inval; +#endif + + dump_store_bat(env, 'I', 1, nr, value); + if (env->IBAT[1][nr] != value) { +#if defined(FLUSH_ALL_TLBS) + do_inval = 0; +#endif + if (env->IBAT[1][nr] & 0x40) { +#if !defined(FLUSH_ALL_TLBS) + mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL; + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } + if (value & 0x40) { +#if !defined(FLUSH_ALL_TLBS) + mask = (value << 17) & 0x0FFE0000UL; + do_invalidate_BAT(env, env->IBAT[0][nr], mask); +#else + do_inval = 1; +#endif + } + env->IBAT[1][nr] = value; + env->DBAT[1][nr] = value; +#if defined(FLUSH_ALL_TLBS) + if 
(do_inval) { + tlb_flush(env, 1); + } +#endif + } +} + +/*****************************************************************************/ +/* TLB management */ +void ppc_tlb_invalidate_all(CPUPPCState *env) +{ + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + ppc6xx_tlb_invalidate_all(env); + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + ppc4xx_tlb_invalidate_all(env); + break; + case POWERPC_MMU_REAL: + cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n"); + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE: + tlb_flush(env, 1); + break; + case POWERPC_MMU_BOOKE206: + booke206_flush_tlb(env, -1, 0); + break; + case POWERPC_MMU_32B: + case POWERPC_MMU_601: +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: +#endif /* defined(TARGET_PPC64) */ + tlb_flush(env, 1); + break; + default: + /* XXX: TODO */ + cpu_abort(env, "Unknown MMU model\n"); + break; + } +} + +void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) +{ +#if !defined(FLUSH_ALL_TLBS) + addr &= TARGET_PAGE_MASK; + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_6xx: + case POWERPC_MMU_SOFT_74xx: + ppc6xx_tlb_invalidate_virt(env, addr, 0); + if (env->id_tlbs == 1) { + ppc6xx_tlb_invalidate_virt(env, addr, 1); + } + break; + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]); + break; + case POWERPC_MMU_REAL: + cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n"); + break; + case POWERPC_MMU_MPC8xx: + /* XXX: TODO */ + cpu_abort(env, "MPC8xx MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE: + /* XXX: TODO */ + cpu_abort(env, "BookE MMU model is not implemented\n"); + break; + case POWERPC_MMU_BOOKE206: + /* XXX: TODO */ + cpu_abort(env, "BookE 2.06 MMU model is not implemented\n"); + break; + case POWERPC_MMU_32B: + case POWERPC_MMU_601: + /* tlbie invalidate TLBs for all segments */ + addr &= ~((target_ulong)-1ULL << 28); + /* XXX: this case should be optimized, + * giving a mask to tlb_flush_page + */ + tlb_flush_page(env, addr | (0x0 << 28)); + tlb_flush_page(env, addr | (0x1 << 28)); + tlb_flush_page(env, addr | (0x2 << 28)); + tlb_flush_page(env, addr | (0x3 << 28)); + tlb_flush_page(env, addr | (0x4 << 28)); + tlb_flush_page(env, addr | (0x5 << 28)); + tlb_flush_page(env, addr | (0x6 << 28)); + tlb_flush_page(env, addr | (0x7 << 28)); + tlb_flush_page(env, addr | (0x8 << 28)); + tlb_flush_page(env, addr | (0x9 << 28)); + tlb_flush_page(env, addr | (0xA << 28)); + tlb_flush_page(env, addr | (0xB << 28)); + tlb_flush_page(env, addr | (0xC << 28)); + tlb_flush_page(env, addr | (0xD << 28)); + tlb_flush_page(env, addr | (0xE << 28)); + tlb_flush_page(env, addr | (0xF << 28)); + break; +#if defined(TARGET_PPC64) + case POWERPC_MMU_620: + case POWERPC_MMU_64B: + case POWERPC_MMU_2_06: + /* tlbie invalidate TLBs for all segments */ + /* XXX: given the fact that there are too many segments to invalidate, + * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, + * we just invalidate all TLBs + */ + tlb_flush(env, 1); + break; +#endif /* defined(TARGET_PPC64) */ + default: + /* XXX: TODO */ + cpu_abort(env, "Unknown MMU model\n"); + break; + } +#else + ppc_tlb_invalidate_all(env); +#endif +} + +/*****************************************************************************/ +/* Special registers manipulation */ +#if 
defined(TARGET_PPC64) +void ppc_store_asr(CPUPPCState *env, target_ulong value) +{ + if (env->asr != value) { + env->asr = value; + tlb_flush(env, 1); + } +} +#endif + +void ppc_store_sdr1(CPUPPCState *env, target_ulong value) +{ + LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value); + if (env->spr[SPR_SDR1] != value) { + env->spr[SPR_SDR1] = value; +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + target_ulong htabsize = value & SDR_64_HTABSIZE; + + if (htabsize > 28) { + fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx + " stored in SDR1\n", htabsize); + htabsize = 28; + } + env->htab_mask = (1ULL << (htabsize + 18)) - 1; + env->htab_base = value & SDR_64_HTABORG; + } else +#endif /* defined(TARGET_PPC64) */ + { + /* FIXME: Should check for valid HTABMASK values */ + env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF; + env->htab_base = value & SDR_32_HTABORG; + } + tlb_flush(env, 1); + } +} + +#if defined(TARGET_PPC64) +target_ulong ppc_load_sr(CPUPPCState *env, int slb_nr) +{ + /* XXX */ + return 0; +} +#endif + +void ppc_store_sr(CPUPPCState *env, int srnum, target_ulong value) +{ + LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, + srnum, value, env->sr[srnum]); +#if defined(TARGET_PPC64) + if (env->mmu_model & POWERPC_MMU_64) { + uint64_t rb = 0, rs = 0; + + /* ESID = srnum */ + rb |= ((uint32_t)srnum & 0xf) << 28; + /* Set the valid bit */ + rb |= 1 << 27; + /* Index = ESID */ + rb |= (uint32_t)srnum; + + /* VSID = VSID */ + rs |= (value & 0xfffffff) << 12; + /* flags = flags */ + rs |= ((value >> 27) & 0xf) << 8; + + ppc_store_slb(env, rb, rs); + } else +#endif + if (env->sr[srnum] != value) { + env->sr[srnum] = value; +/* Invalidating 256MB of virtual memory in 4kB pages is way longer than + flusing the whole TLB. */ +#if !defined(FLUSH_ALL_TLBS) && 0 + { + target_ulong page, end; + /* Invalidate 256 MB of virtual memory */ + page = (16 << 20) * srnum; + end = page + (16 << 20); + for (; page != end; page += TARGET_PAGE_SIZE) { + tlb_flush_page(env, page); + } + } +#else + tlb_flush(env, 1); +#endif + } +} +#endif /* !defined(CONFIG_USER_ONLY) */ + /*****************************************************************************/ /* SPR accesses */ -- cgit v1.2.1
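
The 32-bit hashed-page-table arithmetic used by get_segment() and get_pteg_offset() in the patch above is compact enough to re-derive in isolation. The following is a minimal standalone sketch, not part of the commit: the segment register value, effective address and hash-table mask are invented for illustration, and the constants assume 4 KiB pages (TARGET_PAGE_BITS == 12) and the minimum 64 KiB hash table (HTABMASK == 0).

    /* Illustrative only: mirrors the 32-bit hash computation in
     * get_segment() and get_pteg_offset(), outside of QEMU's structures. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS        12    /* 4 KiB pages (assumed)              */
    #define HASH_PTE_SIZE_32 8     /* one 32-bit PTE pair is 8 bytes     */

    /* Offset of a PTE group: 8 PTEs of 8 bytes each, masked to the table
     * size, as in get_pteg_offset() for the 32-bit case. */
    static uint32_t pteg_offset32(uint32_t hash, uint32_t htab_mask)
    {
        return (hash * HASH_PTE_SIZE_32 * 8) & htab_mask;
    }

    int main(void)
    {
        /* Hypothetical inputs, chosen only for the example. */
        uint32_t sr    = 0x20001234;   /* segment register for eaddr >> 28 */
        uint32_t eaddr = 0x10345678;   /* effective address to translate   */

        uint32_t vsid  = sr & 0x00FFFFFF;                    /* 24-bit VSID    */
        uint32_t pgidx = (eaddr & 0x0FFFFFFF) >> PAGE_BITS;  /* 16-bit index   */
        uint32_t hash  = vsid ^ pgidx;                       /* primary hash   */
        uint32_t ptem  = (vsid << 7) | (pgidx >> 10);        /* VSID+API match
                                                                value for PTE0 */

        /* Minimum table: htab_mask as set by ppc_store_sdr1() when
         * HTABMASK is zero. */
        uint32_t htab_mask = 0xFFFF;

        printf("primary PTEG offset:   0x%08x\n", pteg_offset32(hash, htab_mask));
        printf("secondary PTEG offset: 0x%08x\n", pteg_offset32(~hash, htab_mask));
        printf("ptem:                  0x%08x\n", ptem);
        return 0;
    }

The secondary lookup simply reuses the same routine with the complemented hash, which is why find_pte2() receives ctx->hash[h] with h selecting between hash and ~hash.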