author | Paul Mackerras <paulus@samba.org> | 2007-10-11 20:37:10 +1000
committer | Paul Mackerras <paulus@samba.org> | 2007-10-12 14:05:17 +1000
commit | 1189be6508d45183013ddb82b18f4934193de274 (patch)
tree | 58924481b4de56699e4a884dce8dc601e71cf7d1 /arch/powerpc/mm/slb_low.S
parent | 287e5d6fcccfa38b953cebe307e1ddfd32363355 (diff)
[POWERPC] Use 1TB segments
This makes the kernel use 1TB segments for all kernel mappings and for
user addresses of 1TB and above, on machines which support them
(currently POWER5+, POWER6 and PA6T).
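Illustratively, the resulting policy can be summarised as a small predicate. This helper is not from the patch; the kernel-region check and the 1TB cutoff are taken from the description above, and the function name is made up for the example:

```c
#include <stdbool.h>
#include <stdint.h>

#define ONE_TB (1ULL << 40)

/* Hypothetical helper (not part of the patch): returns true when a 1TB
 * segment would be used for a given effective address on a CPU with
 * 1TB-segment support, following the policy described above: all kernel
 * mappings, plus user addresses at or above 1TB. */
static bool uses_1t_segment(uint64_t ea, bool cpu_has_1t_segments)
{
	/* Region IDs 0xc and up are kernel space (linear mapping,
	 * vmalloc/ioremap). */
	bool is_kernel = (ea >> 60) >= 0xc;

	if (!cpu_has_1t_segments)
		return false;
	return is_kernel || ea >= ONE_TB;
}

int main(void)
{
	/* Kernel linear-mapping address: always a 1TB segment when supported. */
	return uses_1t_segment(0xc000000000000000ULL, true) ? 0 : 1;
}
```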
We detect that the machine supports 1TB segments by looking at the
ibm,processor-segment-sizes property in the device tree.
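The detection itself is done outside slb_low.S, so it does not appear in the diff below. As a rough, self-contained sketch of what the check amounts to, assuming the property is a list of 32-bit big-endian cells holding segment-size shifts (0x1c for 256MB, 0x28 for 1TB); the helper names are illustrative, not the kernel's:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Decode one big-endian 32-bit device-tree cell. */
static uint32_t dt_cell(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Return true if the property data advertises 1TB segments, i.e. contains
 * the segment-size shift 0x28 (2^40 bytes); 0x1c (2^28) is the 256MB size. */
static bool has_1t_segments(const uint8_t *prop, size_t len)
{
	for (; len >= 4; len -= 4, prop += 4)
		if (dt_cell(prop) == 0x28)
			return true;
	return false;
}

int main(void)
{
	/* Example property contents: both 256MB and 1TB segments supported. */
	static const uint8_t prop[] = { 0, 0, 0, 0x1c, 0, 0, 0, 0x28 };

	return has_1t_segments(prop, sizeof(prop)) ? 0 : 1;
}
```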
We don't currently use 1TB segments for user addresses < 1T, since
that would effectively prevent 32-bit processes from using huge pages
unless we also had a way to revert to using 256MB segments. That
would be possible but would involve extra complications (such as
keeping track of which segment size was used when HPTEs were inserted)
and is not addressed here.
Parts of this patch were originally written by Ben Herrenschmidt.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/slb_low.S')
-rw-r--r-- | arch/powerpc/mm/slb_low.S | 37
1 file changed, 33 insertions, 4 deletions
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index cd1a93d4948c..1328a81a84aa 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -57,7 +57,10 @@ _GLOBAL(slb_allocate_realmode)
 	 */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
+BEGIN_FTR_SECTION
 	b	slb_finish_load
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+	b	slb_finish_load_1T
 
 1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
@@ -68,13 +71,16 @@ BEGIN_FTR_SECTION
 	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
 	bgt	5f
 	lhz	r11,PACAVMALLOCSLLP(r13)
-	b	slb_finish_load
+	b	6f
 5:
 END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 _GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
+6:
+BEGIN_FTR_SECTION
 	b	slb_finish_load
-
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+	b	slb_finish_load_1T
 0:	/* user address: proto-VSID = context << 15 | ESID. First check
 	 * if the address is within the boundaries of the user region
 	 */
@@ -122,7 +128,13 @@ _GLOBAL(slb_miss_kernel_load_io)
 #endif /* CONFIG_PPC_MM_SLICES */
 
 	ld	r9,PACACONTEXTID(r13)
+BEGIN_FTR_SECTION
+	cmpldi	r10,0x1000
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	rldimi	r10,r9,USER_ESID_BITS,0
+BEGIN_FTR_SECTION
+	bge	slb_finish_load_1T
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
@@ -188,7 +200,7 @@ _GLOBAL(slb_allocate_user)
  * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
-	ASM_VSID_SCRAMBLE(r10,r9)
+	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
 
 	/* r3 = EA, r11 = VSID data */
@@ -213,7 +225,7 @@ BEGIN_FW_FTR_SECTION
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
 
-	ld	r10,PACASTABRR(r13)
+7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
 	/* use a cpu feature mask if we ever change our slb size */
 	cmpldi	r10,SLB_NUM_ENTRIES
@@ -259,3 +271,20 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 	crclr	4*cr0+eq		/* set result to "success" */
 	blr
 
+/*
+ * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
+ * We assume legacy iSeries will never have 1T segments.
+ *
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ */
+slb_finish_load_1T:
+	srdi	r10,r10,40-28		/* get 1T ESID */
+	ASM_VSID_SCRAMBLE(r10,r9,1T)
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+	li	r10,MMU_SEGSIZE_1T
+	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
+
+	/* r3 = EA, r11 = VSID data */
+	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
+	b	7b
+
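To make the new user-address path concrete: at the point of the added `cmpldi r10,0x1000`, r10 holds the 256MB ESID (the effective address shifted right by 28), and 1TB / 256MB = 4096 = 0x1000, so an ESID at or above 0x1000 corresponds to an address of 1TB or more and branches to slb_finish_load_1T, which converts from 256MB to 1TB granularity by shifting right a further 40 - 28 = 12 bits (the `srdi r10,r10,40-28`). A small standalone sketch of that arithmetic, illustrative only and not kernel code:

```c
#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT     28	/* 256MB segments span 2^28 bytes */
#define SID_SHIFT_1T  40	/* 1TB segments span 2^40 bytes */

int main(void)
{
	uint64_t ea = 0x0000010123456789ULL;	/* example user address just above 1TB */

	uint64_t esid_256m = ea >> SID_SHIFT;

	/* The SLB miss path compares the 256MB ESID against 0x1000, since
	 * 1TB / 256MB = 4096 = 0x1000: anything at or above that boundary
	 * is handled as a 1TB segment. */
	if (esid_256m >= 0x1000) {
		/* Equivalent of the "srdi r10,r10,40-28" in slb_finish_load_1T:
		 * drop the extra 12 bits to get the 1TB-granularity ESID. */
		uint64_t esid_1t = esid_256m >> (SID_SHIFT_1T - SID_SHIFT);
		printf("EA %#llx: 1TB segment, ESID %#llx\n",
		       (unsigned long long)ea, (unsigned long long)esid_1t);
	} else {
		printf("EA %#llx: 256MB segment, ESID %#llx\n",
		       (unsigned long long)ea, (unsigned long long)esid_256m);
	}
	return 0;
}
```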