author      Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2016-04-29 23:25:35 +1000
committer   Michael Ellerman <mpe@ellerman.id.au>                 2016-05-01 18:32:29 +1000
commit      96270b1fc25d527b015c73533119f6c85df2e0ff (patch)
tree        63e49ef4b22593a05986a65221018aef907298f0 /arch/powerpc/include/asm/book3s/64/hash-64k.h
parent      ac29c64089b74d107edb90879e63a2f7a03cd66b (diff)
download    linux-96270b1fc25d527b015c73533119f6c85df2e0ff.tar.gz
powerpc/mm: Remove RPN_SHIFT and RPN_SIZE
PTE_RPN_SHIFT is actually page size dependent. Even though PowerISA 3.0 expects only the lower 12 bits to be zero, we will always find the pages to be PAGE_SHIFT aligned. In the hash config, this also allows us to use the additional 3 bits to track pte-specific information; we need to make sure we use these bits only for hash-specific pte flags. For both the 4K and 64K configs, the pte can now hold a 57-bit address. In order to keep things simpler, drop PTE_RPN_SHIFT and PTE_RPN_SIZE and specify the 57-bit detail explicitly.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
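As a minimal sketch of what "specifying the 57-bit detail explicitly" can look like, assuming only the generic PAGE_SHIFT/PAGE_MASK definitions; the ILLUSTRATIVE_* names below are hypothetical and are not part of this patch:

/* Illustrative only: explicit 57-bit physical-address limit for the RPN. */
#define ILLUSTRATIVE_PA_MAX_BITS	57
#define ILLUSTRATIVE_PTE_RPN_MASK \
	(((1UL << ILLUSTRATIVE_PA_MAX_BITS) - 1) & PAGE_MASK)

/* pfn extraction then becomes page-size independent: mask, then shift. */
#define illustrative_pte_pfn(val) \
	(((val) & ILLUSTRATIVE_PTE_RPN_MASK) >> PAGE_SHIFT)

With the mask anchored to PAGE_MASK rather than a fixed RPN shift, the same expression works for both the 4K and 64K page-size configurations.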
Diffstat (limited to 'arch/powerpc/include/asm/book3s/64/hash-64k.h')
-rw-r--r--   arch/powerpc/include/asm/book3s/64/hash-64k.h   26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index f4650504a61e..ecc7ce0f4baf 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -40,15 +40,6 @@
/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_F_SECOND | \
_PAGE_F_GIX | _PAGE_HASHPTE | _PAGE_COMBO)
-
-/* Shift to put page number into pte.
- *
- * That gives us a max RPN of 41 bits, which means a max of 57 bits
- * of addressable physical space, or 53 bits for the special 4k PFNs.
- */
-#define PTE_RPN_SHIFT (16)
-#define PTE_RPN_SIZE (41)
-
/*
* we support 16 fragments per PTE page of 64K size.
*/
@@ -68,6 +59,7 @@
#define PGD_MASKED_BITS 0xc0000000000000ffUL
#ifndef __ASSEMBLY__
+#include <asm/errno.h>
/*
* With 64K pages on hash table, we have a special PTE format that
@@ -124,10 +116,18 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
#define pte_pagesize_index(mm, addr, pte) \
(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
-#define remap_4k_pfn(vma, addr, pfn, prot) \
- (WARN_ON(((pfn) >= (1UL << PTE_RPN_SIZE))) ? -EINVAL : \
- remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
- __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t prot)
+{
+ if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
+ WARN(1, "remap_4k_pfn called with wrong pfn value\n");
+ return -EINVAL;
+ }
+ return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
+ __pgprot(pgprot_val(prot) | _PAGE_4K_PFN));
+}
#define PTE_TABLE_SIZE PTE_FRAG_SIZE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
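
For context, a hedged usage sketch (not part of this patch) of how a driver's mmap handler could call the new remap_4k_pfn() inline; the example_* names and the driver-owned buffer are assumptions for illustration only:

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/io.h>

/* Hypothetical page-aligned buffer owned by the driver. */
static void *example_buf;

static int example_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = virt_to_phys(example_buf) >> PAGE_SHIFT;

	/* Returns -EINVAL (after a WARN) if pfn exceeds the 57-bit RPN limit. */
	return remap_4k_pfn(vma, vma->vm_start, pfn, vma->vm_page_prot);
}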