author     Paul Mackerras <paulus@samba.org>    2007-10-11 20:37:10 +1000
committer  Paul Mackerras <paulus@samba.org>    2007-10-12 14:05:17 +1000
commit     1189be6508d45183013ddb82b18f4934193de274
tree       58924481b4de56699e4a884dce8dc601e71cf7d1
parent     287e5d6fcccfa38b953cebe307e1ddfd32363355
[POWERPC] Use 1TB segments
This makes the kernel use 1TB segments for all kernel mappings and
for user addresses of 1TB and above, on machines which support them
(currently POWER5+, POWER6 and PA6T).

We detect that the machine supports 1TB segments by looking at the
ibm,processor-segment-sizes property in the device tree.

We don't currently use 1TB segments for user addresses < 1T, since
that would effectively prevent 32-bit processes from using huge pages
unless we also had a way to revert to using 256MB segments. That
would be possible but would involve extra complications (such as
keeping track of which segment size was used when HPTEs were
inserted) and is not addressed here.

Parts of this patch were originally written by Ben Herrenschmidt.

Signed-off-by: Paul Mackerras <paulus@samba.org>
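[Annotation] The ssize value consumed by the hugetlbpage.c hunks below
comes from a per-address segment-size choice this patch introduces in
its header changes. A minimal sketch of that selection logic, assuming
the SID_SHIFT_1T (1TB boundary) and mmu_highuser_ssize names from the
rest of the patch rather than quoting them verbatim:

	/* Sketch: pick a segment size for a user effective address.
	 * Only addresses at or above 1TB use 1T segments, so 32-bit
	 * processes keep 256MB segments and can still use huge pages. */
	static inline int user_segment_size(unsigned long addr)
	{
		if (addr >= (1UL << SID_SHIFT_1T))	/* addr >= 1TB */
			return mmu_highuser_ssize;	/* 1T if supported */
		return MMU_SEGSIZE_256M;
	}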
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
 arch/powerpc/mm/hugetlbpage.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index ba5f12a60467..08f0d9ff7712 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -403,11 +403,12 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	unsigned long va, rflags, pa;
 	long slot;
 	int err = 1;
+	int ssize = user_segment_size(ea);
 
 	ptep = huge_pte_offset(mm, ea);
 
 	/* Search the Linux page table for a match with va */
-	va = (vsid << 28) | (ea & 0x0fffffff);
+	va = hpt_va(ea, vsid, ssize);
 
 	/*
 	 * If no pte found or not present, send the problem up to
@@ -458,19 +459,19 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		/* There MIGHT be an HPTE for this pte */
 		unsigned long hash, slot;
 
-		hash = hpt_hash(va, HPAGE_SHIFT);
+		hash = hpt_hash(va, HPAGE_SHIFT, ssize);
 		if (old_pte & _PAGE_F_SECOND)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
 		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
-					 local) == -1)
+					 ssize, local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
 	if (likely(!(old_pte & _PAGE_HASHPTE))) {
-		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
+		unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
 		unsigned long hpte_group;
 
 		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -489,7 +490,7 @@ repeat:
 
 		/* Insert into the hash table, primary slot */
 		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
-					  mmu_huge_psize);
+					  mmu_huge_psize, ssize);
 
 		/* Primary is full, try the secondary */
 		if (unlikely(slot == -1)) {
@@ -497,7 +498,7 @@ repeat:
 				      HPTES_PER_GROUP) & ~0x7UL;
 			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
 						  HPTE_V_SECONDARY,
-						  mmu_huge_psize);
+						  mmu_huge_psize, ssize);
 			if (slot == -1) {
 				if (mftb() & 0x1)
 					hpte_group = ((hash & htab_hash_mask) *
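[Annotation] The hunks above replace the hard-coded 256MB virtual-address
math, (vsid << 28) | (ea & 0x0fffffff), with segment-size-aware helpers.
A sketch of what hpt_va() and hpt_hash() look like once they take ssize,
assuming the 40-bit 1T segment shift, the MMU_SEGSIZE_256M constant and
the hash mask as the hash-MMU headers of this era defined them; the
constants are illustrative, not quoted from the patch:

	/* Sketch: build the hash-table VA. 256MB segments keep a
	 * 28-bit offset within the segment; 1TB segments use 40 bits. */
	static inline unsigned long hpt_va(unsigned long ea,
					   unsigned long vsid, int ssize)
	{
		if (ssize == MMU_SEGSIZE_256M)
			return (vsid << 28) | (ea & 0xfffffffUL);
		return (vsid << 40) | (ea & 0xffffffffffUL);
	}

	/* Sketch: primary hash. XOR the VSID with the page index inside
	 * the segment; 1T VSIDs get an extra fold (vsid << 25) so the
	 * wider VSID still spreads across the hash bits. */
	static inline unsigned long hpt_hash(unsigned long va,
					     unsigned int shift, int ssize)
	{
		unsigned long hash, vsid;

		if (ssize == MMU_SEGSIZE_256M) {
			hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
		} else {
			vsid = va >> 40;
			hash = vsid ^ (vsid << 25) ^
				((va & 0xffffffffffUL) >> shift);
		}
		return hash & 0x7fffffffffUL;
	}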