From d5d14ed6f2db7287a5088e1350cf422bf72140b3 Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Thu, 29 Mar 2012 13:58:43 -0400
Subject: arch/tile: Allow tilegx to build with either 16K or 64K page size

This change introduces new flags for the hv_install_context() API that
passes a page table pointer to the hypervisor.  Clients can explicitly
request 4K, 16K, or 64K small pages when they install a new context.
In practice, the page size is fixed at kernel compile time and the
same size is always requested every time a new page table is
installed.

The header changes so that it provides more abstract macros for
managing "page" things like PFNs and page tables.  For example there
is now a HV_DEFAULT_PAGE_SIZE_SMALL instead of the old
HV_PAGE_SIZE_SMALL.  The various PFN routines have been eliminated and
only PA- or PTFN-based ones remain (since PTFNs are always expressed
in fixed 2KB "page" size).  The page-table management macros are
renamed with a leading underscore and take page-size arguments with
the presumption that clients will use those macros in some single
place to provide the "real" macros they will use themselves.

I happened to notice the old hv_set_caching() API was totally broken
(it assumed 4KB pages) so I changed it so it would nominally work
correctly with other page sizes.

Tag modules with the page size so you can't load a module built with
a conflicting page size.  (And add a test for SMP while we're at it.)

Signed-off-by: Chris Metcalf
---
 arch/tile/mm/pgtable.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

(limited to 'arch/tile/mm/pgtable.c')

diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2410aa899b3e..3d7074347e6d 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -289,13 +289,12 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 #define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+                               int order)
 {
         gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
         struct page *p;
-#if L2_USER_PGTABLE_ORDER > 0
         int i;
-#endif
 
 #ifdef CONFIG_HIGHPTE
         flags |= __GFP_HIGHMEM;
@@ -305,17 +304,15 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
         if (p == NULL)
                 return NULL;
 
-#if L2_USER_PGTABLE_ORDER > 0
         /*
          * Make every page have a page_count() of one, not just the first.
          * We don't use __GFP_COMP since it doesn't look like it works
          * correctly with tlb_remove_page().
          */
-        for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+        for (i = 1; i < order; ++i) {
                 init_page_count(p+i);
                 inc_zone_page_state(p+i, NR_PAGETABLE);
         }
-#endif
 
         pgtable_page_ctor(p);
         return p;
@@ -326,28 +323,28 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * process).  We have to correct whatever pte_alloc_one() did before
  * returning the pages to the allocator.
  */
-void pte_free(struct mm_struct *mm, struct page *p)
+void pgtable_free(struct mm_struct *mm, struct page *p, int order)
 {
         int i;
 
         pgtable_page_dtor(p);
         __free_page(p);
 
-        for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+        for (i = 1; i < order; ++i) {
                 __free_page(p+i);
                 dec_zone_page_state(p+i, NR_PAGETABLE);
         }
 }
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-                    unsigned long address)
+void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+                        unsigned long address, int order)
 {
         int i;
 
         pgtable_page_dtor(pte);
         tlb_remove_page(tlb, pte);
 
-        for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+        for (i = 1; i < order; ++i) {
                 tlb_remove_page(tlb, pte + i);
                 dec_zone_page_state(pte + i, NR_PAGETABLE);
         }
@@ -490,7 +487,7 @@ void set_pte(pte_t *ptep, pte_t pte)
 /* Can this mm load a PTE with cached_priority set? */
 static inline int mm_is_priority_cached(struct mm_struct *mm)
 {
-        return mm->context.priority_cached;
+        return mm->context.priority_cached != 0;
 }
 
 /*
@@ -500,8 +497,8 @@ static inline int mm_is_priority_cached(struct mm_struct *mm)
 void start_mm_caching(struct mm_struct *mm)
 {
         if (!mm_is_priority_cached(mm)) {
-                mm->context.priority_cached = -1U;
-                hv_set_caching(-1U);
+                mm->context.priority_cached = -1UL;
+                hv_set_caching(-1UL);
         }
 }
 
@@ -516,7 +513,7 @@ void start_mm_caching(struct mm_struct *mm)
  * Presumably we'll come back later and have more luck and clear
  * the value then; for now we'll just keep the cache marked for priority.
  */
-static unsigned int update_priority_cached(struct mm_struct *mm)
+static unsigned long update_priority_cached(struct mm_struct *mm)
 {
         if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
                 struct vm_area_struct *vm;
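Editor's note: the companion header changes are not part of this excerpt (the cgit view is limited to arch/tile/mm/pgtable.c).  As a hedged sketch of how the new order-taking helpers above are meant to be consumed, a client header could pin the page-table order in one place and provide the "real" pte_* interfaces on top of them.  The wrapper names and the use of L2_USER_PGTABLE_ORDER below are inferred from this diff, not taken from the commit's actual pgalloc.h changes.

/*
 * Hypothetical sketch only: one single place that fixes the
 * compile-time page-table order and wraps the order-taking helpers
 * from arch/tile/mm/pgtable.c behind the generic pte_* hooks.
 */
static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                         unsigned long address)
{
        /* Always allocate at the kernel's compile-time L2 order. */
        return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
        pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
}

#define __pte_free_tlb(tlb, pte, address)                               \
        __pgtable_free_tlb((tlb), (pte), (address), L2_USER_PGTABLE_ORDER)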
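On the "tag modules with the page size" point: the standard kernel mechanism for this kind of check is the per-architecture MODULE_ARCH_VERMAGIC string consumed by linux/vermagic.h, which makes the module loader reject a module whose vermagic does not match the running kernel.  The sketch below shows the general shape under assumed Kconfig symbol names; those names, and the exact strings, are illustrative guesses rather than what the commit actually adds.

/*
 * Hypothetical sketch: fold the configured page size (and SMP-ness)
 * into the module vermagic so a module built with a conflicting page
 * size cannot be loaded.  The CONFIG_PAGE_SIZE_* symbols below are
 * assumptions; only the MODULE_ARCH_VERMAGIC mechanism itself is the
 * standard kernel facility (see linux/vermagic.h).
 */
#if defined(CONFIG_PAGE_SIZE_64KB)
# define MODULE_PGSZ " 64KB-pages"
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define MODULE_PGSZ " 16KB-pages"
#else
# define MODULE_PGSZ ""
#endif

/* We also want to refuse non-SMP modules on an SMP kernel and vice versa. */
#ifdef CONFIG_SMP
# define MODULE_SMP ""
#else
# define MODULE_SMP " nosmp"
#endif

#define MODULE_ARCH_VERMAGIC "tile" MODULE_PGSZ MODULE_SMP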