diff options
Diffstat (limited to 'arch/powerpc/include/asm')
52 files changed, 2372 insertions, 2897 deletions
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/book3s/32/hash.h index 62cfb0c663bb..264b754d65b0 100644 --- a/arch/powerpc/include/asm/pte-hash32.h +++ b/arch/powerpc/include/asm/book3s/32/hash.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_HASH32_H -#define _ASM_POWERPC_PTE_HASH32_H +#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H +#define _ASM_POWERPC_BOOK3S_32_HASH_H #ifdef __KERNEL__ /* @@ -43,4 +43,4 @@ #define PTE_ATOMIC_UPDATES 1 #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_HASH32_H */ +#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h new file mode 100644 index 000000000000..38b33dcfcc9d --- /dev/null +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -0,0 +1,482 @@ +#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H +#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H + +#include <asm-generic/pgtable-nopmd.h> + +#include <asm/book3s/32/hash.h> + +/* And here we include common definitions */ +#include <asm/pte-common.h> + +/* + * The normal case is that PTEs are 32-bits and we have a 1-page + * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus + * + * For any >32-bit physical address platform, we can use the following + * two level page table layout where the pgdir is 8KB and the MS 13 bits + * are an index to the second level table. The combined pgdir/pmd first + * level has 2048 entries and the second level has 512 64-bit PTE entries. + * -Matt + */ /* PGDIR_SHIFT determines what a top-level page table entry can map */ #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#define PTRS_PER_PTE (1 << PTE_SHIFT) +#define PTRS_PER_PMD 1 +#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) + +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +/* + * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary + * value (for now) on others, from where we can start layout kernel + * virtual space that goes below PKMAP and FIXMAP + */ +#ifdef CONFIG_HIGHMEM +#define KVIRT_TOP PKMAP_BASE +#else +#define KVIRT_TOP (0xfe000000UL) /* for now, could be FIXMAP_BASE ? */ +#endif + +/* + * ioremap_bot starts at that address. Early ioremaps move down from there, + * until mem_init() at which point this becomes the top of the vmalloc + * and ioremap space + */ +#ifdef CONFIG_NOT_COHERENT_CACHE +#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK) +#else +#define IOREMAP_TOP KVIRT_TOP +#endif + +/* + * Just any arbitrary offset to the start of the vmalloc VM area: the + * current 16MB value just means that there will be a 64MB "hole" after the + * physical memory until the kernel virtual memory starts. That means that + * any out-of-bounds memory accesses will hopefully be caught. + * The vmalloc() routines leave a hole of 4kB between each vmalloced + * area for the same reason. ;) + * + * We no longer map larger than phys RAM with the BATs so we don't have + * to worry about the VMALLOC_OFFSET causing problems. We do have to worry + * about clashes between our early calls to ioremap() that start growing down + * from ioremap_base being run into the VM area allocations (growing upwards + * from VMALLOC_START). For this reason we have ioremap_bot to check when + * we actually run into our mappings set up in early boot with the VM + * system. This really does become a problem for machines with good amounts + * of RAM.
-- Cort */ #define VMALLOC_OFFSET (0x1000000) /* 16M */ #ifdef PPC_PIN_SIZE #define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) #else #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) #endif #define VMALLOC_END ioremap_bot + +#ifndef __ASSEMBLY__ +#include <linux/sched.h> +#include <linux/threads.h> +#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */ + +extern unsigned long ioremap_bot; + +/* + * entries per page directory level: our page-table tree is two-level, so + * we don't really have any PMD directory. + */ +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT) +#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT)) + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ + (unsigned long long)pte_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) +/* + * Bits in a linux-style PTE. These match the bits in the + * (hardware-defined) PowerPC PTE as closely as possible. + */ + +#define pte_clear(mm, addr, ptep) \ + do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0) + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) +#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + + +/* + * When flushing the tlb entry for a page, we also need to flush the hash + * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. + */ +extern int flush_hash_pages(unsigned context, unsigned long va, + unsigned long pmdval, int count); + +/* Add an HPTE to the hash table */ +extern void add_hash_page(unsigned context, unsigned long va, + unsigned long pmdval); + +/* Flush an entry from the TLB/hash table */ +extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, + unsigned long address); + +/* + * PTE updates. This function is called whenever an existing + * valid PTE is updated. This does -not- include set_pte_at() + * which nowadays only sets a new PTE. + * + * Depending on the type of MMU, we may need to use atomic updates + * and the PTE may be either 32 or 64 bit wide. In the latter case, + * when using atomic updates, only the low part of the PTE is + * accessed atomically. + * + * In addition, on 44x, we also maintain a global flag indicating + * that an executable user mapping was modified, which is needed + * to properly flush the virtually tagged instruction cache of + * those implementations. + */ +#ifndef CONFIG_PTE_64BIT +static inline unsigned long pte_update(pte_t *p, + unsigned long clr, + unsigned long set) +{ + unsigned long old, tmp; + + __asm__ __volatile__("\ +1: lwarx %0,0,%3\n\ + andc %1,%0,%4\n\ + or %1,%1,%5\n" + PPC405_ERR77(0,%3) +" stwcx. %1,0,%3\n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*p) + : "r" (p), "r" (clr), "r" (set), "m" (*p) + : "cc" ); + + return old; +} +#else /* CONFIG_PTE_64BIT */ +static inline unsigned long long pte_update(pte_t *p, + unsigned long clr, + unsigned long set) +{ + unsigned long long old; + unsigned long tmp; + + __asm__ __volatile__("\ +1: lwarx %L0,0,%4\n\ + lwzx %0,0,%3\n\ + andc %1,%L0,%5\n\ + or %1,%1,%6\n" + PPC405_ERR77(0,%3) +" stwcx. %1,0,%4\n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*p) + : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) + : "cc" ); + + return old; +} +#endif /* CONFIG_PTE_64BIT */
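A minimal sketch (not part of the patch) of how pte_update() is meant to be used: callers pass a mask of bits to clear and a mask of bits to set, and get the old PTE value back; returning the old value is what makes test-and-clear style operations race-free. The helper below is purely hypothetical:

	/* Atomically clear _PAGE_DIRTY and report whether it was set. */
	static inline int example_test_and_clear_dirty(pte_t *ptep)
	{
		unsigned long old = pte_update(ptep, _PAGE_DIRTY, 0);
		return (old & _PAGE_DIRTY) != 0;
	}

The lwarx/stwcx. pair retries until the reservation holds, so the read-modify-write of the PTE word is atomic against a concurrent hash-table invalidation.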
+ +/* + * 2.6 calls this without flushing the TLB entry; this is wrong + * for our hash-based implementation, so we fix that up here. + */ +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) +{ + unsigned long old; + old = pte_update(ptep, _PAGE_ACCESSED, 0); + if (old & _PAGE_HASHPTE) { + unsigned long ptephys = __pa(ptep) & PAGE_MASK; + flush_hash_pages(context, addr, ptephys, 1); + } + return (old & _PAGE_ACCESSED) != 0; +} +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ + __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO); +} +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + + +static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) +{ + unsigned long set = pte_val(entry) & + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); + unsigned long clr = ~pte_val(entry) & _PAGE_RO; + + pte_update(ptep, clr, set); +} + +#define __HAVE_ARCH_PTE_SAME +#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) + +/* + * Note that on Book E processors, the pmd contains the kernel virtual + * (lowmem) address of the pte page. The physical address is less useful + * because everything runs with translation enabled (even the TLB miss + * handler). On everything else the pmd contains the physical address + * of the pte page. -- paulus + */ +#ifndef CONFIG_BOOKE +#define pmd_page_vaddr(pmd) \ + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) +#define pmd_page(pmd) \ + pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) +#else +#define pmd_page_vaddr(pmd) \ + ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) +#define pmd_page(pmd) \ + pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT)) +#endif + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* to find an entry in a page-table-directory */ +#define pgd_index(address) ((address) >> PGDIR_SHIFT) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +/* Find an entry in the third-level page table. */ +#define pte_index(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, addr) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr)) +#define pte_offset_map(dir, addr) \ + ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr)) +#define pte_unmap(pte) kunmap_atomic(pte) + +/* + * Encode and decode a swap entry. + * Note that the bits we use in a PTE for representing a swap entry + * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used). + * -- paulus + */ +#define __swp_type(entry) ((entry).val & 0x1f) +#define __swp_offset(entry) ((entry).val >> 5) +#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
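A quick round-trip of this 32-bit swap encoding (a sketch under the definitions above, not from the patch): the type occupies the low 5 bits, the offset sits above it, and the whole value is shifted up by 3 so _PAGE_PRESENT and _PAGE_HASHPTE stay clear in a swap PTE.

	swp_entry_t e = __swp_entry(3, 0x123);	/* type 3, offset 0x123 */
	pte_t p = __swp_entry_to_pte(e);	/* entry shifted above the PTE status bits */
	swp_entry_t back = __pte_to_swp_entry(p);
	/* __swp_type(back) == 3 and __swp_offset(back) == 0x123 again */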
+#ifndef CONFIG_PPC_4K_PAGES +void pgtable_cache_init(void); +#else +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) +#endif + +extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, + pmd_t **pmdp); + +/* Generic accessors to PTE bits */ +static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);} +static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); } +static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } +static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } +static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } +static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * Even if PTEs can be unsigned long long, a PFN is always an unsigned + * long for now. + */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) +{ + return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | + pgprot_val(pgprot)); +} + +static inline unsigned long pte_pfn(pte_t pte) +{ + return pte_val(pte) >> PTE_RPN_SHIFT; +} + +/* Generic modifiers for PTE bits */ +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RW); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_DIRTY); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_ACCESSED); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPECIAL); +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +}
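A short illustration (assumed pfn and protection values, not part of the patch) of how these accessors and modifiers compose:

	pte_t pte = pfn_pte(pfn, PAGE_SHARED);	/* pfn << PTE_RPN_SHIFT | prot bits */
	pte = pte_mkdirty(pte_mkyoung(pte));	/* set _PAGE_DIRTY and _PAGE_ACCESSED */
	if (pte_write(pte))	/* true, PAGE_SHARED carries _PAGE_RW */
		pte = pte_wrprotect(pte);	/* drop _PAGE_RW again */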
+ + +/* This low level function performs the actual PTE insertion + * Setting the PTE depends on the MMU type and other factors. It's + * a horrible mess that I'm not going to try to clean up now but + * I'm keeping it in one place rather than spread around + */ +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, int percpu) +{ +#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) + /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the + * helper pte_update() which does an atomic update. We need to do that + * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a + * per-CPU PTE such as a kmap_atomic, we do a simple update preserving + * the hash bits instead (ie, same as the non-SMP case) + */ + if (percpu) + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + else + pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); + +#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) + /* Second case is 32-bit with 64-bit PTE. In this case, we + * can just store as long as we do the two halves in the right order + * with a barrier in between. This is possible because we take care, + * in the hash code, to pre-invalidate if the PTE was already hashed, + * which synchronizes us with any concurrent invalidation. + * In the percpu case, we also fall back to the simple update preserving + * the hash bits + */ + if (percpu) { + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + return; + } + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); + __asm__ __volatile__("\ + stw%U0%X0 %2,%0\n\ + eieio\n\ + stw%U0%X0 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +#elif defined(CONFIG_PPC_STD_MMU_32) + /* Third case is 32-bit hash table in UP mode, where we need to preserve + * the _PAGE_HASHPTE bit since we may not have invalidated the previous + * translation in the hash yet (done in a subsequent flush_tlb_xxx()) + * and so we need to keep track that this PTE needs invalidating + */ + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + +#else +#error "Not supported " +#endif +} + +/* + * Macro to mark a page protection value as "uncacheable". + */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached pgprot_noncached +static inline pgprot_t pgprot_noncached(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_NO_CACHE | _PAGE_GUARDED); +} + +#define pgprot_noncached_wc pgprot_noncached_wc +static inline pgprot_t pgprot_noncached_wc(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_NO_CACHE); +} + +#define pgprot_cached pgprot_cached +static inline pgprot_t pgprot_cached(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_COHERENT); +} + +#define pgprot_cached_wthru pgprot_cached_wthru +static inline pgprot_t pgprot_cached_wthru(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_COHERENT | _PAGE_WRITETHRU); +} + +#define pgprot_cached_noncoherent pgprot_cached_noncoherent +static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot) +{ + return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL); +} + +#define pgprot_writecombine pgprot_writecombine +static inline pgprot_t pgprot_writecombine(pgprot_t prot) +{ + return pgprot_noncached_wc(prot); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h new file mode 100644 index 000000000000..ea0414d6659e --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -0,0 +1,132 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H +#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H +/* + * Entries per page directory level. The PTE level must use a 64b record + * for each page table entry. The PMD and PGD level use a 32b record for + * each entry by assuming that each entry is page aligned.
+ */ +#define PTE_INDEX_SIZE 9 +#define PMD_INDEX_SIZE 7 +#define PUD_INDEX_SIZE 9 +#define PGD_INDEX_SIZE 9 + +#ifndef __ASSEMBLY__ +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) +#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) +#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) +#endif /* __ASSEMBLY__ */ + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) +#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + +/* PMD_SHIFT determines what a second-level page table entry can map */ +#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* With 4k base page size, hugepage PTEs go at the PMD level */ +#define MIN_HUGEPTE_SHIFT PMD_SHIFT + +/* PUD_SHIFT determines what a third-level page table entry can map */ +#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* PGDIR_SHIFT determines what a fourth-level page table entry can map */ +#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* Bits to mask out from a PMD to get to the PTE page */ +#define PMD_MASKED_BITS 0 +/* Bits to mask out from a PUD to get to the PMD page */ +#define PUD_MASKED_BITS 0 +/* Bits to mask out from a PGD to get to the PUD page */ +#define PGD_MASKED_BITS 0 + +/* PTE flags to conserve for HPTE identification */ +#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ + _PAGE_F_SECOND | _PAGE_F_GIX) + +/* shift to put page number into pte */ +#define PTE_RPN_SHIFT (18) + +#define _PAGE_4K_PFN 0 +#ifndef __ASSEMBLY__ +/* + * 4-level page tables related bits + */ + +#define pgd_none(pgd) (!pgd_val(pgd)) +#define pgd_bad(pgd) (pgd_val(pgd) == 0) +#define pgd_present(pgd) (pgd_val(pgd) != 0) +#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) + +static inline void pgd_clear(pgd_t *pgdp) +{ + *pgdp = __pgd(0); +} + +static inline pte_t pgd_pte(pgd_t pgd) +{ + return __pte(pgd_val(pgd)); +} + +static inline pgd_t pte_pgd(pte_t pte) +{ + return __pgd(pte_val(pte)); +} +extern struct page *pgd_page(pgd_t pgd); + +#define pud_offset(pgdp, addr) \ + (((pud_t *) pgd_page_vaddr(*(pgdp))) + \ + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) + +#define pud_ERROR(e) \ + pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) + +/* + * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ +#define remap_4k_pfn(vma, addr, pfn, prot) \ + remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) + +#ifdef CONFIG_HUGETLB_PAGE +/* + * For 4k page size, we support explicit hugepage via hugepd + */ +static inline int pmd_huge(pmd_t pmd) +{ + return 0; +} + +static inline int pud_huge(pud_t pud) +{ + return 0; +} + +static inline int pgd_huge(pgd_t pgd) +{ + return 0; +} +#define pgd_huge pgd_huge + +static inline int hugepd_ok(hugepd_t hpd) +{ + /* + * if it does not have _PAGE_PTE set but has the hugepd shift mask + * set, then it is a hugepd directory pointer + */ + if (!(hpd.pd & _PAGE_PTE) && + ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)) + return true; + return false; +} +#define is_hugepd(hpd) (hugepd_ok(hpd)) +#endif + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h new file mode 100644 index 000000000000..9e55e3b1fef0 ---
/dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -0,0 +1,312 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H +#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H + +#include <asm-generic/pgtable-nopud.h> + +#define PTE_INDEX_SIZE 8 +#define PMD_INDEX_SIZE 10 +#define PUD_INDEX_SIZE 0 +#define PGD_INDEX_SIZE 12 + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + +/* With 4k base page size, hugepage PTEs go at the PMD level */ +#define MIN_HUGEPTE_SHIFT PAGE_SHIFT + +/* PMD_SHIFT determines what a second-level page table entry can map */ +#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ +#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#define _PAGE_COMBO 0x00040000 /* this is a combo 4k page */ +#define _PAGE_4K_PFN 0x00080000 /* PFN is for a single 4k page */ +/* + * Used to track subpage group valid if _PAGE_COMBO is set + * This overloads _PAGE_F_GIX and _PAGE_F_SECOND + */ +#define _PAGE_COMBO_VALID (_PAGE_F_GIX | _PAGE_F_SECOND) + +/* PTE flags to conserve for HPTE identification */ +#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_F_SECOND | \ + _PAGE_F_GIX | _PAGE_HASHPTE | _PAGE_COMBO) + +/* Shift to put page number into pte. + * + * That gives us a max RPN of 34 bits, which means a max of 50 bits + * of addressable physical space, or 46 bits for the special 4k PFNs. + */ +#define PTE_RPN_SHIFT (30) +/* + * we support 16 fragments per PTE page of 64K size. + */ +#define PTE_FRAG_NR 16 +/* + * We use a 2K PTE page fragment and another 2K for storing + * real_pte_t hash index + */ +#define PTE_FRAG_SIZE_SHIFT 12 +#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) + +/* + * Bits to mask out from a PMD to get to the PTE page + * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned. + */ +#define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1) +/* Bits to mask out from a PGD/PUD to get to the PMD page */ +#define PUD_MASKED_BITS 0x1ff + +#ifndef __ASSEMBLY__ + +/* + * With 64K pages on hash table, we have a special PTE format that + * uses a second "half" of the page table to encode sub-page information + * in order to deal with 64K made of 4K HW pages. Thus we override the + * generic accessors and iterators here + */ +#define __real_pte __real_pte +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) +{ + real_pte_t rpte; + unsigned long *hidxp; + + rpte.pte = pte; + rpte.hidx = 0; + if (pte_val(pte) & _PAGE_COMBO) { + /* + * Make sure we order the hidx load against the _PAGE_COMBO + * check. 
The store side ordering is done in __hash_page_4K + */ + smp_rmb(); + hidxp = (unsigned long *)(ptep + PTRS_PER_PTE); + rpte.hidx = *hidxp; + } + return rpte; +} + +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) +{ + if ((pte_val(rpte.pte) & _PAGE_COMBO)) + return (rpte.hidx >> (index<<2)) & 0xf; + return (pte_val(rpte.pte) >> _PAGE_F_GIX_SHIFT) & 0xf; +} + +#define __rpte_to_pte(r) ((r).pte) +extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index); +/* + * Trick: we set __end to va + 64k, which happens to work for + * a 16M page as well, since we want only one iteration + */ +#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \ + do { \ + unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \ + unsigned __split = (psize == MMU_PAGE_4K || \ + psize == MMU_PAGE_64K_AP); \ + shift = mmu_psize_defs[psize].shift; \ + for (index = 0; vpn < __end; index++, \ + vpn += (1L << (shift - VPN_SHIFT))) { \ + if (!__split || __rpte_sub_valid(rpte, index)) \ + do { + +#define pte_iterate_hashed_end() } while(0); } } while(0) + +#define pte_pagesize_index(mm, addr, pte) \ + (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) + +#define remap_4k_pfn(vma, addr, pfn, prot) \ + (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL : \ + remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ + __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))) + +#define PTE_TABLE_SIZE PTE_FRAG_SIZE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define PMD_TABLE_SIZE ((sizeof(pmd_t) << PMD_INDEX_SIZE) + (sizeof(unsigned long) << PMD_INDEX_SIZE)) +#else +#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) +#endif +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) + +#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) +#define pte_pgd(pte) ((pgd_t)pte_pud(pte))
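To make the subpage encoding used by __real_pte()/__rpte_to_hidx() above concrete, a hypothetical fragment (not part of the patch): the second half of the PTE page holds one 4-bit hash-index nibble per 4K subpage, so for a combo page:

	unsigned long hidx_word = rpte.hidx;	/* loaded by __real_pte() */
	unsigned long subpage = 5;	/* 4K subpage index inside the 64K page */
	unsigned long hidx = (hidx_word >> (subpage << 2)) & 0xf;	/* 4 bits each */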
+ +#ifdef CONFIG_HUGETLB_PAGE +/* + * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have + * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; + * + * Defined in such a way that we can optimize away code block at build time + * if CONFIG_HUGETLB_PAGE=n. + */ +static inline int pmd_huge(pmd_t pmd) +{ + /* + * leaf pte for huge page + */ + return !!(pmd_val(pmd) & _PAGE_PTE); +} + +static inline int pud_huge(pud_t pud) +{ + /* + * leaf pte for huge page + */ + return !!(pud_val(pud) & _PAGE_PTE); +} + +static inline int pgd_huge(pgd_t pgd) +{ + /* + * leaf pte for huge page + */ + return !!(pgd_val(pgd) & _PAGE_PTE); +} +#define pgd_huge pgd_huge + +#ifdef CONFIG_DEBUG_VM +extern int hugepd_ok(hugepd_t hpd); +#define is_hugepd(hpd) (hugepd_ok(hpd)) +#else +/* + * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't + * need to set up a hugepage directory for them. Our pte and page directory format + * allows this. + */ +static inline int hugepd_ok(hugepd_t hpd) +{ + return 0; +} +#define is_hugepd(pdep) 0 +#endif /* CONFIG_DEBUG_VM */ + +#endif /* CONFIG_HUGETLB_PAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern unsigned long pmd_hugepage_update(struct mm_struct *mm, + unsigned long addr, + pmd_t *pmdp, + unsigned long clr, + unsigned long set); +static inline char *get_hpte_slot_array(pmd_t *pmdp) +{ + /* + * The hpte hidx is stored in the pgtable whose address is in the + * second half of the PMD + * + * Order this load with the test for pmd_trans_huge in the caller + */ + smp_rmb(); + return *(char **)(pmdp + PTRS_PER_PMD); + + +} +/* + * The linux hugepage PMD now includes the pmd entries followed by the address + * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits. + * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per + * HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and + * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t. + * + * The last three bits are intentionally left as zero. These memory locations + * are also used as normal page PTE pointers. So if we have any pointers + * left around while we collapse a hugepage, we need to make sure + * the _PAGE_PRESENT bit of that is zero when we look at them + */ +static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index) +{ + return (hpte_slot_array[index] >> 3) & 0x1; +} + +static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array, + int index) +{ + return hpte_slot_array[index] >> 4; +} + +static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, + unsigned int index, unsigned int hidx) +{ + hpte_slot_array[index] = hidx << 4 | 0x1 << 3; +}
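The slot-array byte format described above can be checked with a small sketch (not part of the patch): bit 3 is the valid bit and bits 4-7 hold the 4-bit hash index (the 3-bit group index plus the secondary-hash bit):

	unsigned char slot = 5 << 4 | 1 << 3;	/* what mark_hpte_slot_valid() stores */
	unsigned int valid = (slot >> 3) & 0x1;	/* 1, as hpte_valid() reads it */
	unsigned int hidx = slot >> 4;	/* 5, as hpte_hash_index() reads it */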
+/* + * + * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs + * page. The hugetlbfs page table walking and mangling paths are totally + * separated from the core VM paths and they're differentiated by + * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run. + * + * pmd_trans_huge() is defined as false at build time if + * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build + * time in such case. + * + * For ppc64 we need to differentiate explicit hugepages from THP, because + * for THP we also track the subpage details at the pmd level. We don't do + * that for explicit huge pages. + * + */ +static inline int pmd_trans_huge(pmd_t pmd) +{ + return !!((pmd_val(pmd) & (_PAGE_PTE | _PAGE_THP_HUGE)) == + (_PAGE_PTE | _PAGE_THP_HUGE)); +} + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + if (pmd_trans_huge(pmd)) + return pmd_val(pmd) & _PAGE_SPLITTING; + return 0; +} + +static inline int pmd_large(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_PTE); +} + +static inline pmd_t pmd_mknotpresent(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT); +} + +static inline pmd_t pmd_mksplitting(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) | _PAGE_SPLITTING); +} + +#define __HAVE_ARCH_PMD_SAME +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0); +} + +static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + unsigned long old; + + if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + return 0; + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); + return ((old & _PAGE_ACCESSED) != 0); +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + + if ((pmd_val(*pmdp) & _PAGE_RW) == 0) + return; + + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); +} + +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h new file mode 100644 index 000000000000..2ff8b3df553d --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -0,0 +1,551 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H +#define _ASM_POWERPC_BOOK3S_64_HASH_H +#ifdef __KERNEL__ + +/* + * Common bits between 4K and 64K pages in a linux-style PTE. + * These match the bits in the (hardware-defined) PowerPC PTE as closely + * as possible. Additional bits may be defined in pgtable-hash64-*.h + * + * Note: We only support user read/write permissions. The supervisor always + * has full read/write to pages above PAGE_OFFSET (pages below that + * always use the user access permissions). + * + * We could create separate kernel read-only if we used the 3 PP bits + * combinations that newer processors provide but we currently don't. + */ +#define _PAGE_PTE 0x00001 +#define _PAGE_PRESENT 0x00002 /* software: pte contains a translation */ +#define _PAGE_BIT_SWAP_TYPE 2 +#define _PAGE_USER 0x00004 /* matches one of the PP bits */ +#define _PAGE_EXEC 0x00008 /* No execute on POWER4 and newer (we invert) */ +#define _PAGE_GUARDED 0x00010 +/* We can derive Memory coherence from _PAGE_NO_CACHE */ +#define _PAGE_COHERENT 0x0 +#define _PAGE_NO_CACHE 0x00020 /* I: cache inhibit */ +#define _PAGE_WRITETHRU 0x00040 /* W: cache write-through */ +#define _PAGE_DIRTY 0x00080 /* C: page changed */ +#define _PAGE_ACCESSED 0x00100 /* R: page referenced */ +#define _PAGE_RW 0x00200 /* software: user write access allowed */ +#define _PAGE_HASHPTE 0x00400 /* software: pte has an associated HPTE */ +#define _PAGE_BUSY 0x00800 /* software: PTE & hash are busy */ +#define _PAGE_F_GIX 0x07000 /* full page: hidx bits */ +#define _PAGE_F_GIX_SHIFT 12 +#define _PAGE_F_SECOND 0x08000 /* Whether to use secondary hash or not */ +#define _PAGE_SPECIAL 0x10000 /* software: special page */ + +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _PAGE_SOFT_DIRTY 0x20000 /* software: software dirty tracking */ +#else +#define _PAGE_SOFT_DIRTY 0x00000 +#endif
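As an illustrative aside (not from the patch), the hash-slot fields above pack a 4-bit slot index, the 3-bit group index in _PAGE_F_GIX plus the secondary-hash bit directly above it, so the slot can be recovered with a single shift:

	unsigned long pte = 0;
	pte |= (3UL << _PAGE_F_GIX_SHIFT) & _PAGE_F_GIX;	/* group index 3 */
	pte |= _PAGE_F_SECOND;	/* secondary hash bucket was used */
	unsigned long slot = (pte >> _PAGE_F_GIX_SHIFT) & 0xf;	/* 0xb == 8 | 3 */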
+ +/* + * THP pages can't be special. So we reuse the _PAGE_SPECIAL bit. + */ +#define _PAGE_SPLITTING _PAGE_SPECIAL + +/* + * We need to differentiate between explicit huge page and THP huge + * page, since a THP huge page also needs to track real subpage details + */ +#define _PAGE_THP_HUGE _PAGE_4K_PFN + +/* + * set of bits not changed in pmd_modify. + */ +#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \ + _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \ + _PAGE_THP_HUGE | _PAGE_PTE | _PAGE_SOFT_DIRTY) + +#ifdef CONFIG_PPC_64K_PAGES +#include <asm/book3s/64/hash-64k.h> +#else +#include <asm/book3s/64/hash-4k.h> +#endif + +/* + * Size of EA range mapped by our pagetables. + */ +#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) +#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define PMD_CACHE_INDEX (PMD_INDEX_SIZE + 1) +#else +#define PMD_CACHE_INDEX PMD_INDEX_SIZE +#endif +/* + * Define the address range of the kernel non-linear virtual area + */ +#define KERN_VIRT_START ASM_CONST(0xD000000000000000) +#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000) + +/* + * The vmalloc space starts at the beginning of that region, and + * occupies half of it on hash CPUs and a quarter of it on Book3E + * (we keep a quarter for the virtual memmap) + */ +#define VMALLOC_START KERN_VIRT_START +#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) +#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) + +/* + * Region IDs + */ +#define REGION_SHIFT 60UL +#define REGION_MASK (0xfUL << REGION_SHIFT) +#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) + +#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START)) +#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET)) +#define VMEMMAP_REGION_ID (0xfUL) /* Server only */ +#define USER_REGION_ID (0UL) + +/* + * Defines the address of the vmemmap area, in its own region on + * hash table CPUs. + */ +#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT) + +#ifdef CONFIG_PPC_MM_SLICES +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN +#endif /* CONFIG_PPC_MM_SLICES */
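To illustrate the region arithmetic just defined (a sketch, not part of the patch), REGION_ID() simply extracts the top nibble of an effective address:

	unsigned long ea = 0xD000000000000000UL;	/* an address in vmalloc space */
	unsigned long id = REGION_ID(ea);	/* ea >> 60 == 0xd == VMALLOC_REGION_ID */
	/* likewise REGION_ID(VMEMMAP_BASE) == 0xf and REGION_ID(0x1000) == 0 */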
+ +/* No separate kernel read-only */ +#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ +#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) + +/* Strong Access Ordering */ +#define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) + +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + +/* PTEIDX nibble */ +#define _PTEIDX_SECONDARY 0x8 +#define _PTEIDX_GROUP_IX 0x7 + +/* Hash table based platforms need atomic updates of the linux PTE */ +#define PTE_ATOMIC_UPDATES 1 +#define _PTE_NONE_MASK _PAGE_HPTEFLAGS +/* + * The mask covered by the RPN must be a ULL on 32-bit platforms with + * 64-bit PTEs + */ +#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) +/* + * _PAGE_CHG_MASK masks of bits that are to be preserved across + * pgprot changes + */ +#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \ + _PAGE_SOFT_DIRTY) +/* + * Mask of bits returned by pte_pgprot() + */ +#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU | _PAGE_4K_PFN | \ + _PAGE_USER | _PAGE_ACCESSED | \ + _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC | \ + _PAGE_SOFT_DIRTY) +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled, since + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) + +/* Permission masks used to generate the __P and __S tables, + * + * Note:__pgprot is defined in arch/powerpc/include/asm/page.h + * + * Write permissions imply read permissions for now (we could make write-only + * pages on BookE but we don't bother for now). Execute permission control is + * possible on platforms that define _PAGE_EXEC + * + * Note due to the way vm flags are laid out, the bits are XWR + */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \ + _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER ) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER ) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY_X +#define __P101 PAGE_READONLY_X +#define __P110 PAGE_COPY_X +#define __P111 PAGE_COPY_X + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY_X +#define __S101 PAGE_READONLY_X +#define __S110 PAGE_SHARED_X +#define __S111 PAGE_SHARED_X + +/* Permission masks used for kernel mappings */ +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) +#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ + _PAGE_NO_CACHE) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ + _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) +#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) +#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) + +/* Protection used for kernel text. We want the debuggers to be able to + * set breakpoints anywhere, so don't write protect the kernel text + * on platforms where such control is possible. + */ +#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) +#define PAGE_KERNEL_TEXT PAGE_KERNEL_X +#else +#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX +#endif + +/* Make module code happy.
We don't set RO yet */ +#define PAGE_KERNEL_EXEC PAGE_KERNEL_X +#define PAGE_AGP (PAGE_KERNEL_NC) + +#define PMD_BAD_BITS (PTE_TABLE_SIZE-1) +#define PUD_BAD_BITS (PMD_TABLE_SIZE-1) + +#ifndef __ASSEMBLY__ +#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ + || (pmd_val(pmd) & PMD_BAD_BITS)) +#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) + +#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ + || (pud_val(pud) & PUD_BAD_BITS)) +#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) + +#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) +#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) +#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) + +extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long pte, int huge); +extern unsigned long htab_convert_pte_flags(unsigned long pteflags); +/* Atomic PTE updates */ +static inline unsigned long pte_update(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, unsigned long clr, + unsigned long set, + int huge) +{ + unsigned long old, tmp; + + __asm__ __volatile__( + "1: ldarx %0,0,%3 # pte_update\n\ + andi. %1,%0,%6\n\ + bne- 1b \n\ + andc %1,%0,%4 \n\ + or %1,%1,%7\n\ + stdcx. %1,0,%3 \n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*ptep) + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) + : "cc" ); + /* huge pages use the old page table lock */ + if (!huge) + assert_pte_locked(mm, addr); + + if (old & _PAGE_HASHPTE) + hpte_need_flush(mm, addr, ptep, old, huge); + + return old; +} + +static inline int __ptep_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old; + + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + return 0; + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); + return (old & _PAGE_ACCESSED) != 0; +} +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ +({ \ + int __r; \ + __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ + __r; \ +}) + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); +} + +/* + * We currently remove entries from the hashtable regardless of whether + * the entry was young or dirty. The generic routines only flush if the + * entry was young or dirty which is not good enough. 
+ * + * We should be more intelligent about this but for the moment we override + * these functions and force a tlb flush unconditionally + */ +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define ptep_clear_flush_young(__vma, __address, __ptep) \ +({ \ + int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ + __ptep); \ + __young; \ +}) + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); + return __pte(old); +} + +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t * ptep) +{ + pte_update(mm, addr, ptep, ~0UL, 0, 0); +} + + +/* Set the dirty and/or accessed bits atomically in a linux PTE; this + * function doesn't need to flush the hash entry + */ +static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) +{ + unsigned long bits = pte_val(entry) & + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC | + _PAGE_SOFT_DIRTY); + + unsigned long old, tmp; + + __asm__ __volatile__( + "1: ldarx %0,0,%4\n\ + andi. %1,%0,%6\n\ + bne- 1b \n\ + or %0,%3,%0\n\ + stdcx. %0,0,%4\n\ + bne- 1b" + :"=&r" (old), "=&r" (tmp), "=m" (*ptep) + :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) + :"cc"); +} + +#define __HAVE_ARCH_PTE_SAME +#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) + +/* Generic accessors to PTE bits */ +static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);} +static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); } +static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } +static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } +static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } +static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } + +#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY +static inline bool pte_soft_dirty(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SOFT_DIRTY); +} +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY); +} + +static inline pte_t pte_clear_soft_dirty(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY); +} +#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ + +#ifdef CONFIG_NUMA_BALANCING +/* + * These work without NUMA balancing but the kernel does not care. See the + * comment in include/asm-generic/pgtable.h . On powerpc, this will only + * work for user pages and always return true for kernel pages. + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & + (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; +} +#endif /* CONFIG_NUMA_BALANCING */ + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * Even if PTEs can be unsigned long long, a PFN is always an unsigned + * long for now.
+ */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) +{ + return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | + pgprot_val(pgprot)); +} + +static inline unsigned long pte_pfn(pte_t pte) +{ + return pte_val(pte) >> PTE_RPN_SHIFT; +} + +/* Generic modifiers for PTE bits */ +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RW); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_DIRTY); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_ACCESSED); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPECIAL); +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +/* This low level function performs the actual PTE insertion + * Setting the PTE depends on the MMU type and other factors. It's + * a horrible mess that I'm not going to try to clean up now but + * I'm keeping it in one place rather than spread around + */ +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, int percpu) +{ + /* + * Anything else just stores the PTE normally. That covers all 64-bit + * cases, and 32-bit non-hash with 32-bit PTEs. + */ + *ptep = pte; +} + +/* + * Macro to mark a page protection value as "uncacheable".
+ */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached pgprot_noncached +static inline pgprot_t pgprot_noncached(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_NO_CACHE | _PAGE_GUARDED); +} + +#define pgprot_noncached_wc pgprot_noncached_wc +static inline pgprot_t pgprot_noncached_wc(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_NO_CACHE); +} + +#define pgprot_cached pgprot_cached +static inline pgprot_t pgprot_cached(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_COHERENT); +} + +#define pgprot_cached_wthru pgprot_cached_wthru +static inline pgprot_t pgprot_cached_wthru(pgprot_t prot) +{ + return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | + _PAGE_COHERENT | _PAGE_WRITETHRU); +} + +#define pgprot_cached_noncoherent pgprot_cached_noncoherent +static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot) +{ + return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL); +} + +#define pgprot_writecombine pgprot_writecombine +static inline pgprot_t pgprot_writecombine(pgprot_t prot) +{ + return pgprot_noncached_wc(prot); +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, unsigned long old_pmd); +#else +static inline void hpte_do_hugepage_flush(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp, + unsigned long old_pmd) +{ + WARN(1, "%s called with THP disabled\n", __func__); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h new file mode 100644 index 000000000000..b3a5badab69f --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -0,0 +1,300 @@ +#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ +#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ +/* + * This file contains the functions and defines necessary to modify and use + * the ppc64 hashed page table. 
+ */ + +#include <asm/book3s/64/hash.h> +#include <asm/barrier.h> + +/* + * The second half of the kernel virtual space is used for IO mappings; + * it is itself carved into the PIO region (ISA and PHB IO space) and + * the ioremap space + * + * ISA_IO_BASE = KERN_IO_START, 64K reserved area + * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces + * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE + */ +#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1)) +#define FULL_IO_SIZE 0x80000000ul +#define ISA_IO_BASE (KERN_IO_START) +#define ISA_IO_END (KERN_IO_START + 0x10000ul) +#define PHB_IO_BASE (ISA_IO_END) +#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE) +#define IOREMAP_BASE (PHB_IO_END) +#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE) + +#define vmemmap ((struct page *)VMEMMAP_BASE) + +/* Advertise special mapping type for AGP */ +#define HAVE_PAGE_AGP + +/* Advertise support for _PAGE_SPECIAL */ +#define __HAVE_ARCH_PTE_SPECIAL + +#ifndef __ASSEMBLY__ + +/* + * This is the default implementation of various PTE accessors, it's + * used in all cases except Book3S with 64K pages where we have a + * concept of sub-pages + */ +#ifndef __real_pte + +#ifdef CONFIG_STRICT_MM_TYPECHECKS +#define __real_pte(e,p) ((real_pte_t){(e)}) +#define __rpte_to_pte(r) ((r).pte) +#else +#define __real_pte(e,p) (e) +#define __rpte_to_pte(r) (__pte(r)) +#endif +#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >>_PAGE_F_GIX_SHIFT) + +#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ + do { \ + index = 0; \ + shift = mmu_psize_defs[psize].shift; \ + +#define pte_iterate_hashed_end() } while(0) + +/* + * We expect this to be called only for user addresses or kernel virtual + * addresses other than the linear mapping. + */ +#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K + +#endif /* __real_pte */ + +static inline void pmd_set(pmd_t *pmdp, unsigned long val) +{ + *pmdp = __pmd(val); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_present(pmd) (!pmd_none(pmd)) + +static inline void pud_set(pud_t *pudp, unsigned long val) +{ + *pudp = __pud(val); +} + +static inline void pud_clear(pud_t *pudp) +{ + *pudp = __pud(0); +} + +#define pud_none(pud) (!pud_val(pud)) +#define pud_present(pud) (pud_val(pud) != 0) + +extern struct page *pud_page(pud_t pud); +extern struct page *pmd_page(pmd_t pmd); +static inline pte_t pud_pte(pud_t pud) +{ + return __pte(pud_val(pud)); +} + +static inline pud_t pte_pud(pte_t pte) +{ + return __pud(pte_val(pte)); +} +#define pud_write(pud) pte_write(pud_pte(pud)) +#define pgd_write(pgd) pte_write(pgd_pte(pgd)) +static inline void pgd_set(pgd_t *pgdp, unsigned long val) +{ + *pgdp = __pgd(val); +} + +/* + * Find an entry in a page-table-directory. We combine the address region + * (the high order N bits) and the pgd portion of the address. + */ + +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +#define pmd_offset(pudp,addr) \ + (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr)) + +#define pte_offset_kernel(dir,addr) \ + (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr)) + +#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) +#define pte_unmap(pte) do { } while(0) + +/* to find an entry in a kernel page-table-directory */ +/* This now only contains the vmalloc pages */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
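A minimal sketch of a software page-table walk built from the accessors above (illustrative only; it assumes the 4K layout where all four levels are distinct, and real callers also take the page-table locks and handle huge entries):

	/* Return the PTE pointer for addr in mm, or NULL on any hole. */
	static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}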
+ +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +/* Encode and de-code a swap entry */ +#define MAX_SWAPFILES_CHECK() do { \ + BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \ + /* \ + * Don't have overlapping bits with _PAGE_HPTEFLAGS \ + * We filter HPTEFLAGS on set_pte. \ + */ \ + BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \ + BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \ + } while (0) +/* + * on pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT; + */ +#define SWP_TYPE_BITS 5 +#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \ + & ((1UL << SWP_TYPE_BITS) - 1)) +#define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type) << _PAGE_BIT_SWAP_TYPE) \ + | ((offset) << PTE_RPN_SHIFT) }) +/* + * swp_entry_t must be independent of pte bits. We build a swp_entry_t from + * swap type and offset we get from swap and convert that to pte to find a + * matching pte in linux page table. + * Clear bits not found in swap entries here.
+ */ +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE }) +#define __swp_entry_to_pte(x) __pte((x).val | _PAGE_PTE) + +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _PAGE_SWP_SOFT_DIRTY (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE)) +#else +#define _PAGE_SWP_SOFT_DIRTY 0UL +#endif /* CONFIG_MEM_SOFT_DIRTY */ + +#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY); +} +static inline bool pte_swp_soft_dirty(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY); +} +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY); +} +#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ + +void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); +void pgtable_cache_init(void); + +struct page *realmode_pfn_to_page(unsigned long pfn); + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); +extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); +extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); +extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd); +extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd); +extern int has_transparent_hugepage(void); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +static inline pte_t *pmdp_ptep(pmd_t *pmd) +{ + return (pte_t *)pmd; +} + +#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) +#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) +#define pmd_young(pmd) pte_young(pmd_pte(pmd)) +#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) +#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) +#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) +#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) +#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) + +#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY +#define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd)) +#define pmd_mksoft_dirty(pmd) pte_pmd(pte_mksoft_dirty(pmd_pte(pmd))) +#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd))) +#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ + +#ifdef CONFIG_NUMA_BALANCING +static inline int pmd_protnone(pmd_t pmd) +{ + return pte_protnone(pmd_pte(pmd)); +} +#endif /* CONFIG_NUMA_BALANCING */ + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write(pmd) pte_write(pmd_pte(pmd)) + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE)); +} + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t 
*pmdp); +#define pmdp_collapse_flush pmdp_collapse_flush + +#define __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +#define __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_INVALIDATE +extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); + +#define pmd_move_must_withdraw pmd_move_must_withdraw +struct spinlock; +static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl) +{ + /* + * Archs like ppc64 use pgtable to store per pmd + * specific information. So when we switch the pmd, + * we should also withdraw and deposit the pgtable + */ + return true; +} +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h new file mode 100644 index 000000000000..8b0f4a29259a --- /dev/null +++ b/arch/powerpc/include/asm/book3s/pgtable.h @@ -0,0 +1,29 @@ +#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H +#define _ASM_POWERPC_BOOK3S_PGTABLE_H + +#ifdef CONFIG_PPC64 +#include <asm/book3s/64/pgtable.h> +#else +#include <asm/book3s/32/pgtable.h> +#endif + +#define FIRST_USER_ADDRESS 0UL +#ifndef __ASSEMBLY__ +/* Insert a PTE; the top-level function is out of line. It uses an inline + * low level function in the respective pgtable-* files + */ +extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte); + + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, pte_t entry, int dirty); + +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#define __HAVE_PHYS_MEM_ACCESS_PROT + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index ad6263cffb0f..d1a8d93cccfd 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val) unsigned long prev; __asm__ __volatile__( - PPC_RELEASE_BARRIER + PPC_ATOMIC_ENTRY_BARRIER "1: lwarx %0,0,%2 \n" PPC405_ERR77(0,%2) " stwcx. %3,0,%2 \n\ bne- 1b" - PPC_ACQUIRE_BARRIER + PPC_ATOMIC_EXIT_BARRIER : "=&r" (prev), "+m" (*(volatile unsigned int *)p) : "r" (p), "r" (val) : "cc", "memory"); @@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val) unsigned long prev; __asm__ __volatile__( - PPC_RELEASE_BARRIER + PPC_ATOMIC_ENTRY_BARRIER "1: ldarx %0,0,%2 \n" PPC405_ERR77(0,%2) " stdcx. %3,0,%2 \n\ bne- 1b" - PPC_ACQUIRE_BARRIER + PPC_ATOMIC_EXIT_BARRIER : "=&r" (prev), "+m" (*(volatile unsigned long *)p) : "r" (p), "r" (val) : "cc", "memory"); @@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) unsigned int prev; __asm__ __volatile__ ( - PPC_RELEASE_BARRIER + PPC_ATOMIC_ENTRY_BARRIER "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ cmpw 0,%0,%3\n\ bne- 2f\n" PPC405_ERR77(0,%2) " stwcx.
%4,0,%2\n\ bne- 1b" - PPC_ACQUIRE_BARRIER + PPC_ATOMIC_EXIT_BARRIER "\n\ 2:" : "=&r" (prev), "+m" (*p) @@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) unsigned long prev; __asm__ __volatile__ ( - PPC_RELEASE_BARRIER + PPC_ATOMIC_ENTRY_BARRIER "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ cmpd 0,%0,%3\n\ bne- 2f\n\ stdcx. %4,0,%2\n\ bne- 1b" - PPC_ACQUIRE_BARRIER + PPC_ATOMIC_EXIT_BARRIER "\n\ 2:" : "=&r" (prev), "+m" (*p) diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h index 4398a6cdcf53..2c5c5b476804 100644 --- a/arch/powerpc/include/asm/cpm.h +++ b/arch/powerpc/include/asm/cpm.h @@ -5,6 +5,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/of.h> +#include <soc/fsl/qe/qe.h> /* * SPI Parameter RAM common to QE and CPM. @@ -155,49 +156,6 @@ typedef struct cpm_buf_desc { */ #define BD_I2C_START (0x0400) -int cpm_muram_init(void); - -#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) -unsigned long cpm_muram_alloc(unsigned long size, unsigned long align); -int cpm_muram_free(unsigned long offset); -unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size); -void __iomem *cpm_muram_addr(unsigned long offset); -unsigned long cpm_muram_offset(void __iomem *addr); -dma_addr_t cpm_muram_dma(void __iomem *addr); -#else -static inline unsigned long cpm_muram_alloc(unsigned long size, - unsigned long align) -{ - return -ENOSYS; -} - -static inline int cpm_muram_free(unsigned long offset) -{ - return -ENOSYS; -} - -static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset, - unsigned long size) -{ - return -ENOSYS; -} - -static inline void __iomem *cpm_muram_addr(unsigned long offset) -{ - return NULL; -} - -static inline unsigned long cpm_muram_offset(void __iomem *addr) -{ - return -ENOSYS; -} - -static inline dma_addr_t cpm_muram_dma(void __iomem *addr) -{ - return 0; -} -#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */ - #ifdef CONFIG_CPM int cpm_command(u32 command, u8 opcode); #else diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 77f52b26dad6..93ae809fe5ea 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -130,15 +130,6 @@ BEGIN_FTR_SECTION_NESTED(941) \ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941) /* - * Increase the priority on systems where PPR save/restore is not - * implemented/ supported. - */ -#define HMT_MEDIUM_PPR_DISCARD \ -BEGIN_FTR_SECTION_NESTED(942) \ - HMT_MEDIUM; \ -END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942) /*non P7*/ - -/* * Get an SPR into a register if the CPU has the given feature */ #define OPT_GET_SPR(ra, spr, ftr) \ @@ -263,17 +254,6 @@ do_kvm_##n: \ #define KVM_HANDLER_SKIP(area, h, n) #endif -#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE -#define KVMTEST_PR(n) __KVMTEST(n) -#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n) -#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n) - -#else -#define KVMTEST_PR(n) -#define KVM_HANDLER_PR(area, h, n) -#define KVM_HANDLER_PR_SKIP(area, h, n) -#endif - #define NOTEST(n) /* @@ -353,27 +333,25 @@ do_kvm_##n: \ /* * Exception vectors. */ -#define STD_EXCEPTION_PSERIES(loc, vec, label) \ - . = loc; \ +#define STD_EXCEPTION_PSERIES(vec, label) \ + . 
= vec; \ .globl label##_pSeries; \ label##_pSeries: \ - HMT_MEDIUM_PPR_DISCARD; \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ - EXC_STD, KVMTEST_PR, vec) + EXC_STD, KVMTEST, vec) /* Version of above for when we have to branch out-of-line */ #define STD_EXCEPTION_PSERIES_OOL(vec, label) \ .globl label##_pSeries; \ label##_pSeries: \ - EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \ + EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD) #define STD_EXCEPTION_HV(loc, vec, label) \ . = loc; \ .globl label##_hv; \ label##_hv: \ - HMT_MEDIUM_PPR_DISCARD; \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ EXC_HV, KVMTEST, vec) @@ -389,7 +367,6 @@ label##_hv: \ . = loc; \ .globl label##_relon_pSeries; \ label##_relon_pSeries: \ - HMT_MEDIUM_PPR_DISCARD; \ /* No guest interrupts come through here */ \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ @@ -405,7 +382,6 @@ label##_relon_pSeries: \ . = loc; \ .globl label##_relon_hv; \ label##_relon_hv: \ - HMT_MEDIUM_PPR_DISCARD; \ /* No guest interrupts come through here */ \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ @@ -436,17 +412,13 @@ label##_relon_hv: \ #define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec) #define SOFTEN_TEST_PR(vec) \ - KVMTEST_PR(vec); \ + KVMTEST(vec); \ _SOFTEN_TEST(EXC_STD, vec) #define SOFTEN_TEST_HV(vec) \ KVMTEST(vec); \ _SOFTEN_TEST(EXC_HV, vec) -#define SOFTEN_TEST_HV_201(vec) \ - KVMTEST(vec); \ - _SOFTEN_TEST(EXC_STD, vec) - #define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec) #define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec) @@ -463,7 +435,6 @@ label##_relon_hv: \ . 
= loc; \ .globl label##_pSeries; \ label##_pSeries: \ - HMT_MEDIUM_PPR_DISCARD; \ _MASKABLE_EXCEPTION_PSERIES(vec, label, \ EXC_STD, SOFTEN_TEST_PR) @@ -481,7 +452,6 @@ label##_hv: \ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV); #define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \ - HMT_MEDIUM_PPR_DISCARD; \ SET_SCRATCH0(r13); /* save r13 */ \ EXCEPTION_PROLOG_0(PACA_EXGEN); \ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index e05808a328db..b0629249778b 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -47,12 +47,10 @@ #define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) #define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) #define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000) -#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000) #define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000) #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) -#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000) #ifndef __ASSEMBLY__ @@ -70,8 +68,7 @@ enum { FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, FW_FEATURE_PSERIES_ALWAYS = 0, - FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 | - FW_FEATURE_OPALv3, + FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL, FW_FEATURE_POWERNV_ALWAYS = 0, FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h deleted file mode 100644 index bedbff891423..000000000000 --- a/arch/powerpc/include/asm/immap_qe.h +++ /dev/null @@ -1,491 +0,0 @@ -/* - * QUICC Engine (QE) Internal Memory Map. - * The Internal Memory Map for devices with QE on them. This - * is the superset of all QE devices (8360, etc.). - - * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved. - * - * Authors: Shlomi Gridish <gridish@freescale.com> - * Li Yang <leoli@freescale.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ -#ifndef _ASM_POWERPC_IMMAP_QE_H -#define _ASM_POWERPC_IMMAP_QE_H -#ifdef __KERNEL__ - -#include <linux/kernel.h> -#include <asm/io.h> - -#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */ - -/* QE I-RAM */ -struct qe_iram { - __be32 iadd; /* I-RAM Address Register */ - __be32 idata; /* I-RAM Data Register */ - u8 res0[0x04]; - __be32 iready; /* I-RAM Ready Register */ - u8 res1[0x70]; -} __attribute__ ((packed)); - -/* QE Interrupt Controller */ -struct qe_ic_regs { - __be32 qicr; - __be32 qivec; - __be32 qripnr; - __be32 qipnr; - __be32 qipxcc; - __be32 qipycc; - __be32 qipwcc; - __be32 qipzcc; - __be32 qimr; - __be32 qrimr; - __be32 qicnr; - u8 res0[0x4]; - __be32 qiprta; - __be32 qiprtb; - u8 res1[0x4]; - __be32 qricr; - u8 res2[0x20]; - __be32 qhivec; - u8 res3[0x1C]; -} __attribute__ ((packed)); - -/* Communications Processor */ -struct cp_qe { - __be32 cecr; /* QE command register */ - __be32 ceccr; /* QE controller configuration register */ - __be32 cecdr; /* QE command data register */ - u8 res0[0xA]; - __be16 ceter; /* QE timer event register */ - u8 res1[0x2]; - __be16 cetmr; /* QE timers mask register */ - __be32 cetscr; /* QE time-stamp timer control register */ - __be32 cetsr1; /* QE time-stamp register 1 */ - __be32 cetsr2; /* QE time-stamp register 2 */ - u8 res2[0x8]; - __be32 cevter; /* QE virtual tasks event register */ - __be32 cevtmr; /* QE virtual tasks mask register */ - __be16 cercr; /* QE RAM control register */ - u8 res3[0x2]; - u8 res4[0x24]; - __be16 ceexe1; /* QE external request 1 event register */ - u8 res5[0x2]; - __be16 ceexm1; /* QE external request 1 mask register */ - u8 res6[0x2]; - __be16 ceexe2; /* QE external request 2 event register */ - u8 res7[0x2]; - __be16 ceexm2; /* QE external request 2 mask register */ - u8 res8[0x2]; - __be16 ceexe3; /* QE external request 3 event register */ - u8 res9[0x2]; - __be16 ceexm3; /* QE external request 3 mask register */ - u8 res10[0x2]; - __be16 ceexe4; /* QE external request 4 event register */ - u8 res11[0x2]; - __be16 ceexm4; /* QE external request 4 mask register */ - u8 res12[0x3A]; - __be32 ceurnr; /* QE microcode revision number register */ - u8 res13[0x244]; -} __attribute__ ((packed)); - -/* QE Multiplexer */ -struct qe_mux { - __be32 cmxgcr; /* CMX general clock route register */ - __be32 cmxsi1cr_l; /* CMX SI1 clock route low register */ - __be32 cmxsi1cr_h; /* CMX SI1 clock route high register */ - __be32 cmxsi1syr; /* CMX SI1 SYNC route register */ - __be32 cmxucr[4]; /* CMX UCCx clock route registers */ - __be32 cmxupcr; /* CMX UPC clock route register */ - u8 res0[0x1C]; -} __attribute__ ((packed)); - -/* QE Timers */ -struct qe_timers { - u8 gtcfr1; /* Timer 1 and Timer 2 global config register*/ - u8 res0[0x3]; - u8 gtcfr2; /* Timer 3 and timer 4 global config register*/ - u8 res1[0xB]; - __be16 gtmdr1; /* Timer 1 mode register */ - __be16 gtmdr2; /* Timer 2 mode register */ - __be16 gtrfr1; /* Timer 1 reference register */ - __be16 gtrfr2; /* Timer 2 reference register */ - __be16 gtcpr1; /* Timer 1 capture register */ - __be16 gtcpr2; /* Timer 2 capture register */ - __be16 gtcnr1; /* Timer 1 counter */ - __be16 gtcnr2; /* Timer 2 counter */ - __be16 gtmdr3; /* Timer 3 mode register */ - __be16 gtmdr4; /* Timer 4 mode register */ - __be16 gtrfr3; /* Timer 3 reference register */ - __be16 gtrfr4; /* Timer 4 reference register */ - __be16 gtcpr3; /* Timer 3 capture register */ - __be16 gtcpr4; /* Timer 4 capture register */ - __be16 gtcnr3; /* Timer 3 counter */ - __be16 gtcnr4; /* 
Timer 4 counter */ - __be16 gtevr1; /* Timer 1 event register */ - __be16 gtevr2; /* Timer 2 event register */ - __be16 gtevr3; /* Timer 3 event register */ - __be16 gtevr4; /* Timer 4 event register */ - __be16 gtps; /* Timer 1 prescale register */ - u8 res2[0x46]; -} __attribute__ ((packed)); - -/* BRG */ -struct qe_brg { - __be32 brgc[16]; /* BRG configuration registers */ - u8 res0[0x40]; -} __attribute__ ((packed)); - -/* SPI */ -struct spi { - u8 res0[0x20]; - __be32 spmode; /* SPI mode register */ - u8 res1[0x2]; - u8 spie; /* SPI event register */ - u8 res2[0x1]; - u8 res3[0x2]; - u8 spim; /* SPI mask register */ - u8 res4[0x1]; - u8 res5[0x1]; - u8 spcom; /* SPI command register */ - u8 res6[0x2]; - __be32 spitd; /* SPI transmit data register (cpu mode) */ - __be32 spird; /* SPI receive data register (cpu mode) */ - u8 res7[0x8]; -} __attribute__ ((packed)); - -/* SI */ -struct si1 { - __be16 siamr1; /* SI1 TDMA mode register */ - __be16 sibmr1; /* SI1 TDMB mode register */ - __be16 sicmr1; /* SI1 TDMC mode register */ - __be16 sidmr1; /* SI1 TDMD mode register */ - u8 siglmr1_h; /* SI1 global mode register high */ - u8 res0[0x1]; - u8 sicmdr1_h; /* SI1 command register high */ - u8 res2[0x1]; - u8 sistr1_h; /* SI1 status register high */ - u8 res3[0x1]; - __be16 sirsr1_h; /* SI1 RAM shadow address register high */ - u8 sitarc1; /* SI1 RAM counter Tx TDMA */ - u8 sitbrc1; /* SI1 RAM counter Tx TDMB */ - u8 sitcrc1; /* SI1 RAM counter Tx TDMC */ - u8 sitdrc1; /* SI1 RAM counter Tx TDMD */ - u8 sirarc1; /* SI1 RAM counter Rx TDMA */ - u8 sirbrc1; /* SI1 RAM counter Rx TDMB */ - u8 sircrc1; /* SI1 RAM counter Rx TDMC */ - u8 sirdrc1; /* SI1 RAM counter Rx TDMD */ - u8 res4[0x8]; - __be16 siemr1; /* SI1 TDME mode register 16 bits */ - __be16 sifmr1; /* SI1 TDMF mode register 16 bits */ - __be16 sigmr1; /* SI1 TDMG mode register 16 bits */ - __be16 sihmr1; /* SI1 TDMH mode register 16 bits */ - u8 siglmg1_l; /* SI1 global mode register low 8 bits */ - u8 res5[0x1]; - u8 sicmdr1_l; /* SI1 command register low 8 bits */ - u8 res6[0x1]; - u8 sistr1_l; /* SI1 status register low 8 bits */ - u8 res7[0x1]; - __be16 sirsr1_l; /* SI1 RAM shadow address register low 16 bits*/ - u8 siterc1; /* SI1 RAM counter Tx TDME 8 bits */ - u8 sitfrc1; /* SI1 RAM counter Tx TDMF 8 bits */ - u8 sitgrc1; /* SI1 RAM counter Tx TDMG 8 bits */ - u8 sithrc1; /* SI1 RAM counter Tx TDMH 8 bits */ - u8 sirerc1; /* SI1 RAM counter Rx TDME 8 bits */ - u8 sirfrc1; /* SI1 RAM counter Rx TDMF 8 bits */ - u8 sirgrc1; /* SI1 RAM counter Rx TDMG 8 bits */ - u8 sirhrc1; /* SI1 RAM counter Rx TDMH 8 bits */ - u8 res8[0x8]; - __be32 siml1; /* SI1 multiframe limit register */ - u8 siedm1; /* SI1 extended diagnostic mode register */ - u8 res9[0xBB]; -} __attribute__ ((packed)); - -/* SI Routing Tables */ -struct sir { - u8 tx[0x400]; - u8 rx[0x400]; - u8 res0[0x800]; -} __attribute__ ((packed)); - -/* USB Controller */ -struct qe_usb_ctlr { - u8 usb_usmod; - u8 usb_usadr; - u8 usb_uscom; - u8 res1[1]; - __be16 usb_usep[4]; - u8 res2[4]; - __be16 usb_usber; - u8 res3[2]; - __be16 usb_usbmr; - u8 res4[1]; - u8 usb_usbs; - __be16 usb_ussft; - u8 res5[2]; - __be16 usb_usfrn; - u8 res6[0x22]; -} __attribute__ ((packed)); - -/* MCC */ -struct qe_mcc { - __be32 mcce; /* MCC event register */ - __be32 mccm; /* MCC mask register */ - __be32 mccf; /* MCC configuration register */ - __be32 merl; /* MCC emergency request level register */ - u8 res0[0xF0]; -} __attribute__ ((packed)); - -/* QE UCC Slow */ -struct ucc_slow { - __be32 gumr_l; 
/* UCCx general mode register (low) */ - __be32 gumr_h; /* UCCx general mode register (high) */ - __be16 upsmr; /* UCCx protocol-specific mode register */ - u8 res0[0x2]; - __be16 utodr; /* UCCx transmit on demand register */ - __be16 udsr; /* UCCx data synchronization register */ - __be16 ucce; /* UCCx event register */ - u8 res1[0x2]; - __be16 uccm; /* UCCx mask register */ - u8 res2[0x1]; - u8 uccs; /* UCCx status register */ - u8 res3[0x24]; - __be16 utpt; - u8 res4[0x52]; - u8 guemr; /* UCC general extended mode register */ -} __attribute__ ((packed)); - -/* QE UCC Fast */ -struct ucc_fast { - __be32 gumr; /* UCCx general mode register */ - __be32 upsmr; /* UCCx protocol-specific mode register */ - __be16 utodr; /* UCCx transmit on demand register */ - u8 res0[0x2]; - __be16 udsr; /* UCCx data synchronization register */ - u8 res1[0x2]; - __be32 ucce; /* UCCx event register */ - __be32 uccm; /* UCCx mask register */ - u8 uccs; /* UCCx status register */ - u8 res2[0x7]; - __be32 urfb; /* UCC receive FIFO base */ - __be16 urfs; /* UCC receive FIFO size */ - u8 res3[0x2]; - __be16 urfet; /* UCC receive FIFO emergency threshold */ - __be16 urfset; /* UCC receive FIFO special emergency - threshold */ - __be32 utfb; /* UCC transmit FIFO base */ - __be16 utfs; /* UCC transmit FIFO size */ - u8 res4[0x2]; - __be16 utfet; /* UCC transmit FIFO emergency threshold */ - u8 res5[0x2]; - __be16 utftt; /* UCC transmit FIFO transmit threshold */ - u8 res6[0x2]; - __be16 utpt; /* UCC transmit polling timer */ - u8 res7[0x2]; - __be32 urtry; /* UCC retry counter register */ - u8 res8[0x4C]; - u8 guemr; /* UCC general extended mode register */ -} __attribute__ ((packed)); - -struct ucc { - union { - struct ucc_slow slow; - struct ucc_fast fast; - u8 res[0x200]; /* UCC blocks are 512 bytes each */ - }; -} __attribute__ ((packed)); - -/* MultiPHY UTOPIA POS Controllers (UPC) */ -struct upc { - __be32 upgcr; /* UTOPIA/POS general configuration register */ - __be32 uplpa; /* UTOPIA/POS last PHY address */ - __be32 uphec; /* ATM HEC register */ - __be32 upuc; /* UTOPIA/POS UCC configuration */ - __be32 updc1; /* UTOPIA/POS device 1 configuration */ - __be32 updc2; /* UTOPIA/POS device 2 configuration */ - __be32 updc3; /* UTOPIA/POS device 3 configuration */ - __be32 updc4; /* UTOPIA/POS device 4 configuration */ - __be32 upstpa; /* UTOPIA/POS STPA threshold */ - u8 res0[0xC]; - __be32 updrs1_h; /* UTOPIA/POS device 1 rate select */ - __be32 updrs1_l; /* UTOPIA/POS device 1 rate select */ - __be32 updrs2_h; /* UTOPIA/POS device 2 rate select */ - __be32 updrs2_l; /* UTOPIA/POS device 2 rate select */ - __be32 updrs3_h; /* UTOPIA/POS device 3 rate select */ - __be32 updrs3_l; /* UTOPIA/POS device 3 rate select */ - __be32 updrs4_h; /* UTOPIA/POS device 4 rate select */ - __be32 updrs4_l; /* UTOPIA/POS device 4 rate select */ - __be32 updrp1; /* UTOPIA/POS device 1 receive priority low */ - __be32 updrp2; /* UTOPIA/POS device 2 receive priority low */ - __be32 updrp3; /* UTOPIA/POS device 3 receive priority low */ - __be32 updrp4; /* UTOPIA/POS device 4 receive priority low */ - __be32 upde1; /* UTOPIA/POS device 1 event */ - __be32 upde2; /* UTOPIA/POS device 2 event */ - __be32 upde3; /* UTOPIA/POS device 3 event */ - __be32 upde4; /* UTOPIA/POS device 4 event */ - __be16 uprp1; - __be16 uprp2; - __be16 uprp3; - __be16 uprp4; - u8 res1[0x8]; - __be16 uptirr1_0; /* Device 1 transmit internal rate 0 */ - __be16 uptirr1_1; /* Device 1 transmit internal rate 1 */ - __be16 uptirr1_2; /* Device 1 transmit internal 
rate 2 */ - __be16 uptirr1_3; /* Device 1 transmit internal rate 3 */ - __be16 uptirr2_0; /* Device 2 transmit internal rate 0 */ - __be16 uptirr2_1; /* Device 2 transmit internal rate 1 */ - __be16 uptirr2_2; /* Device 2 transmit internal rate 2 */ - __be16 uptirr2_3; /* Device 2 transmit internal rate 3 */ - __be16 uptirr3_0; /* Device 3 transmit internal rate 0 */ - __be16 uptirr3_1; /* Device 3 transmit internal rate 1 */ - __be16 uptirr3_2; /* Device 3 transmit internal rate 2 */ - __be16 uptirr3_3; /* Device 3 transmit internal rate 3 */ - __be16 uptirr4_0; /* Device 4 transmit internal rate 0 */ - __be16 uptirr4_1; /* Device 4 transmit internal rate 1 */ - __be16 uptirr4_2; /* Device 4 transmit internal rate 2 */ - __be16 uptirr4_3; /* Device 4 transmit internal rate 3 */ - __be32 uper1; /* Device 1 port enable register */ - __be32 uper2; /* Device 2 port enable register */ - __be32 uper3; /* Device 3 port enable register */ - __be32 uper4; /* Device 4 port enable register */ - u8 res2[0x150]; -} __attribute__ ((packed)); - -/* SDMA */ -struct sdma { - __be32 sdsr; /* Serial DMA status register */ - __be32 sdmr; /* Serial DMA mode register */ - __be32 sdtr1; /* SDMA system bus threshold register */ - __be32 sdtr2; /* SDMA secondary bus threshold register */ - __be32 sdhy1; /* SDMA system bus hysteresis register */ - __be32 sdhy2; /* SDMA secondary bus hysteresis register */ - __be32 sdta1; /* SDMA system bus address register */ - __be32 sdta2; /* SDMA secondary bus address register */ - __be32 sdtm1; /* SDMA system bus MSNUM register */ - __be32 sdtm2; /* SDMA secondary bus MSNUM register */ - u8 res0[0x10]; - __be32 sdaqr; /* SDMA address bus qualify register */ - __be32 sdaqmr; /* SDMA address bus qualify mask register */ - u8 res1[0x4]; - __be32 sdebcr; /* SDMA CAM entries base register */ - u8 res2[0x38]; -} __attribute__ ((packed)); - -/* Debug Space */ -struct dbg { - __be32 bpdcr; /* Breakpoint debug command register */ - __be32 bpdsr; /* Breakpoint debug status register */ - __be32 bpdmr; /* Breakpoint debug mask register */ - __be32 bprmrr0; /* Breakpoint request mode risc register 0 */ - __be32 bprmrr1; /* Breakpoint request mode risc register 1 */ - u8 res0[0x8]; - __be32 bprmtr0; /* Breakpoint request mode trb register 0 */ - __be32 bprmtr1; /* Breakpoint request mode trb register 1 */ - u8 res1[0x8]; - __be32 bprmir; /* Breakpoint request mode immediate register */ - __be32 bprmsr; /* Breakpoint request mode serial register */ - __be32 bpemr; /* Breakpoint exit mode register */ - u8 res2[0x48]; -} __attribute__ ((packed)); - -/* - * RISC Special Registers (Trap and Breakpoint). These are described in - * the QE Developer's Handbook. 
- */ -struct rsp { - __be32 tibcr[16]; /* Trap/instruction breakpoint control regs */ - u8 res0[64]; - __be32 ibcr0; - __be32 ibs0; - __be32 ibcnr0; - u8 res1[4]; - __be32 ibcr1; - __be32 ibs1; - __be32 ibcnr1; - __be32 npcr; - __be32 dbcr; - __be32 dbar; - __be32 dbamr; - __be32 dbsr; - __be32 dbcnr; - u8 res2[12]; - __be32 dbdr_h; - __be32 dbdr_l; - __be32 dbdmr_h; - __be32 dbdmr_l; - __be32 bsr; - __be32 bor; - __be32 bior; - u8 res3[4]; - __be32 iatr[4]; - __be32 eccr; /* Exception control configuration register */ - __be32 eicr; - u8 res4[0x100-0xf8]; -} __attribute__ ((packed)); - -struct qe_immap { - struct qe_iram iram; /* I-RAM */ - struct qe_ic_regs ic; /* Interrupt Controller */ - struct cp_qe cp; /* Communications Processor */ - struct qe_mux qmx; /* QE Multiplexer */ - struct qe_timers qet; /* QE Timers */ - struct spi spi[0x2]; /* spi */ - struct qe_mcc mcc; /* mcc */ - struct qe_brg brg; /* brg */ - struct qe_usb_ctlr usb; /* USB */ - struct si1 si1; /* SI */ - u8 res11[0x800]; - struct sir sir; /* SI Routing Tables */ - struct ucc ucc1; /* ucc1 */ - struct ucc ucc3; /* ucc3 */ - struct ucc ucc5; /* ucc5 */ - struct ucc ucc7; /* ucc7 */ - u8 res12[0x600]; - struct upc upc1; /* MultiPHY UTOPIA POS Ctrlr 1*/ - struct ucc ucc2; /* ucc2 */ - struct ucc ucc4; /* ucc4 */ - struct ucc ucc6; /* ucc6 */ - struct ucc ucc8; /* ucc8 */ - u8 res13[0x600]; - struct upc upc2; /* MultiPHY UTOPIA POS Ctrlr 2*/ - struct sdma sdma; /* SDMA */ - struct dbg dbg; /* 0x104080 - 0x1040FF - Debug Space */ - struct rsp rsp[0x2]; /* 0x104100 - 0x1042FF - RISC Special Registers - (Trap and Breakpoint) */ - u8 res14[0x300]; /* 0x104300 - 0x1045FF */ - u8 res15[0x3A00]; /* 0x104600 - 0x107FFF */ - u8 res16[0x8000]; /* 0x108000 - 0x110000 */ - u8 muram[0xC000]; /* 0x110000 - 0x11C000 - Multi-user RAM */ - u8 res17[0x24000]; /* 0x11C000 - 0x140000 */ - u8 res18[0xC0000]; /* 0x140000 - 0x200000 */ -} __attribute__ ((packed)); - -extern struct qe_immap __iomem *qe_immr; -extern phys_addr_t get_qe_base(void); - -/* - * Returns the offset within the QE address space of the given pointer. - * - * Note that the QE does not support 36-bit physical addresses, so if - * get_qe_base() returns a number above 4GB, the caller will probably fail. - */ -static inline phys_addr_t immrbar_virt_to_phys(void *address) -{ - void *q = (void *)qe_immr; - - /* Is it a MURAM address? */ - if ((address >= q) && (address < (q + QE_IMMAP_SIZE))) - return get_qe_base() + (address - q); - - /* It's an address returned by kmalloc */ - return virt_to_phys(address); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_IMMAP_QE_H */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 5879fde56f3c..6c1297ec374c 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -385,6 +385,17 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) { *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v; } + +/* + * Real mode version of the above. stdcix is only supposed to be used + * in hypervisor real mode as per the architecture spec. 
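+ * + * stdcix is a caching-inhibited store and is hypervisor privileged, so + * this helper can only be used by code running with translation off + * (MSR[DR]=0), where "paddr" is a real (physical) address.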
+ */ +static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr) +{ + __asm__ __volatile__("stdcix %0,0,%1" + : : "r" (val), "r" (paddr) : "memory"); +} + #endif /* __powerpc64__ */ /* diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index ba3342bbdbda..7352d3f212df 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -21,7 +21,7 @@ * need for various slices related matters. Note that this isn't the * complete pgtable.h but only a portion of it. */ -#include <asm/pgtable-ppc64.h> +#include <asm/book3s/64/pgtable.h> #include <asm/bug.h> #include <asm/processor.h> diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 9c326565d498..c82cbf52d19e 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PGTABLE_PPC32_H -#define _ASM_POWERPC_PGTABLE_PPC32_H +#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H +#define _ASM_POWERPC_NOHASH_32_PGTABLE_H #include <asm-generic/pgtable-nopmd.h> @@ -106,17 +106,15 @@ extern int icache_44x_need_flush; */ #if defined(CONFIG_40x) -#include <asm/pte-40x.h> +#include <asm/nohash/32/pte-40x.h> #elif defined(CONFIG_44x) -#include <asm/pte-44x.h> +#include <asm/nohash/32/pte-44x.h> #elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) -#include <asm/pte-book3e.h> +#include <asm/nohash/pte-book3e.h> #elif defined(CONFIG_FSL_BOOKE) -#include <asm/pte-fsl-booke.h> +#include <asm/nohash/32/pte-fsl-booke.h> #elif defined(CONFIG_8xx) -#include <asm/pte-8xx.h> -#else /* CONFIG_6xx */ -#include <asm/pte-hash32.h> +#include <asm/nohash/32/pte-8xx.h> #endif /* And here we include common definitions */ @@ -130,7 +128,12 @@ extern int icache_44x_need_flush; #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) -#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + + /* * When flushing the tlb entry for a page, we also need to flush the hash @@ -337,4 +340,4 @@ extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, #endif /* !__ASSEMBLY__ */ -#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h index 486b1ef81338..9624ebdacc47 100644 --- a/arch/powerpc/include/asm/pte-40x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_40x_H -#define _ASM_POWERPC_PTE_40x_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H +#define _ASM_POWERPC_NOHASH_32_PTE_40x_H #ifdef __KERNEL__ /* @@ -61,4 +61,4 @@ #define PTE_ATOMIC_UPDATES 1 #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_40x_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */ diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h index 36f75fab23f5..fdab41c654ef 100644 --- a/arch/powerpc/include/asm/pte-44x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_44x_H -#define _ASM_POWERPC_PTE_44x_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H +#define _ASM_POWERPC_NOHASH_32_PTE_44x_H #ifdef __KERNEL__ /* @@ -94,4 +94,4 @@ #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_44x_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */ diff --git 
a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index a0e2ba960976..3742b1919661 100644 --- a/arch/powerpc/include/asm/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_8xx_H -#define _ASM_POWERPC_PTE_8xx_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H +#define _ASM_POWERPC_NOHASH_32_PTE_8xx_H #ifdef __KERNEL__ /* @@ -62,4 +62,4 @@ _PAGE_HWWRITE | _PAGE_EXEC) #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_8xx_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */ diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h index 9f5c3d04a1a3..5422d00c6145 100644 --- a/arch/powerpc/include/asm/pte-fsl-booke.h +++ b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_FSL_BOOKE_H -#define _ASM_POWERPC_PTE_FSL_BOOKE_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H +#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H #ifdef __KERNEL__ /* PTE bit definitions for Freescale BookE SW loaded TLB MMU based @@ -37,4 +37,4 @@ #define PTE_WIMGE_SHIFT (6) #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h index 132ee1d482c2..fc7d51753f81 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H -#define _ASM_POWERPC_PGTABLE_PPC64_4K_H +#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H +#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H /* * Entries per page directory level. The PTE level must use a 64b record * for each page table entry. 
The PMD and PGD level use a 32b record for @@ -55,11 +55,15 @@ #define pgd_none(pgd) (!pgd_val(pgd)) #define pgd_bad(pgd) (pgd_val(pgd) == 0) #define pgd_present(pgd) (pgd_val(pgd) != 0) -#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) #ifndef __ASSEMBLY__ +static inline void pgd_clear(pgd_t *pgdp) +{ + *pgdp = __pgd(0); +} + static inline pte_t pgd_pte(pgd_t pgd) { return __pte(pgd_val(pgd)); @@ -85,4 +89,4 @@ extern struct page *pgd_page(pgd_t pgd); #define remap_4k_pfn(vma, addr, pfn, prot) \ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) -#endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */ +#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h index 1de35bbd02a6..570fb30be21c 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PGTABLE_PPC64_64K_H -#define _ASM_POWERPC_PGTABLE_PPC64_64K_H +#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H +#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H #include <asm-generic/pgtable-nopud.h> @@ -9,8 +9,19 @@ #define PUD_INDEX_SIZE 0 #define PGD_INDEX_SIZE 12 +/* + * we support 32 fragments per PTE page of 64K size + */ +#define PTE_FRAG_NR 32 +/* + * We use a 2K PTE page fragment and another 2K for storing + * real_pte_t hash index + */ +#define PTE_FRAG_SIZE_SHIFT 11 +#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) + #ifndef __ASSEMBLY__ -#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) +#define PTE_TABLE_SIZE PTE_FRAG_SIZE #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) #endif /* __ASSEMBLY__ */ @@ -32,13 +43,15 @@ #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -/* Bits to mask out from a PMD to get to the PTE page */ -/* PMDs point to PTE table fragments which are 4K aligned. */ -#define PMD_MASKED_BITS 0xfff +/* + * Bits to mask out from a PMD to get to the PTE page + * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned. + */ +#define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1) /* Bits to mask out from a PGD/PUD to get to the PMD page */ #define PUD_MASKED_BITS 0x1ff #define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) #define pte_pgd(pte) ((pgd_t)pte_pud(pte)) -#endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */ +#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */ diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 3245f2d96d4f..b9f734dd5b81 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -1,14 +1,14 @@ -#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_ -#define _ASM_POWERPC_PGTABLE_PPC64_H_ +#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H +#define _ASM_POWERPC_NOHASH_64_PGTABLE_H /* * This file contains the functions and defines necessary to modify and use * the ppc64 hashed page table. */ #ifdef CONFIG_PPC_64K_PAGES -#include <asm/pgtable-ppc64-64k.h> +#include <asm/nohash/64/pgtable-64k.h> #else -#include <asm/pgtable-ppc64-4k.h> +#include <asm/nohash/64/pgtable-4k.h> #endif #include <asm/barrier.h> @@ -18,7 +18,7 @@ * Size of EA range mapped by our pagetables. 
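* This is the sum of the index bits resolved by each page table level, * plus the offset bits within a page.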
*/ #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ - PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -97,11 +97,7 @@ /* * Include the PTE bits definitions */ -#ifdef CONFIG_PPC_BOOK3S -#include <asm/pte-hash64.h> -#else -#include <asm/pte-book3e.h> -#endif +#include <asm/nohash/pte-book3e.h> #include <asm/pte-common.h> #ifdef CONFIG_PPC_MM_SLICES @@ -110,59 +106,47 @@ #endif /* CONFIG_PPC_MM_SLICES */ #ifndef __ASSEMBLY__ - -/* - * This is the default implementation of various PTE accessors, it's - * used in all cases except Book3S with 64K pages where we have a - * concept of sub-pages - */ -#ifndef __real_pte - -#ifdef CONFIG_STRICT_MM_TYPECHECKS -#define __real_pte(e,p) ((real_pte_t){(e)}) -#define __rpte_to_pte(r) ((r).pte) -#else -#define __real_pte(e,p) (e) -#define __rpte_to_pte(r) (__pte(r)) -#endif -#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12) - -#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ - do { \ - index = 0; \ - shift = mmu_psize_defs[psize].shift; \ - -#define pte_iterate_hashed_end() } while(0) - -/* - * We expect this to be called only for user addresses or kernel virtual - * addresses other than the linear mapping. - */ -#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K - -#endif /* __real_pte */ - - /* pte_clear moved to later in this file */ #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) -#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) +static inline void pmd_set(pmd_t *pmdp, unsigned long val) +{ + *pmdp = __pmd(val); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ || (pmd_val(pmd) & PMD_BAD_BITS)) #define pmd_present(pmd) (!pmd_none(pmd)) -#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) extern struct page *pmd_page(pmd_t pmd); -#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) +static inline void pud_set(pud_t *pudp, unsigned long val) +{ + *pudp = __pud(val); +} + +static inline void pud_clear(pud_t *pudp) +{ + *pudp = __pud(0); +} + #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ || (pud_val(pud) & PUD_BAD_BITS)) #define pud_present(pud) (pud_val(pud) != 0) -#define pud_clear(pudp) (pud_val(*(pudp)) = 0) #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) extern struct page *pud_page(pud_t pud); @@ -177,9 +161,13 @@ static inline pud_t pte_pud(pte_t pte) return __pud(pte_val(pte)); } #define pud_write(pud) pte_write(pud_pte(pud)) -#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) #define pgd_write(pgd) pte_write(pgd_pte(pgd)) +static inline void pgd_set(pgd_t *pgdp, unsigned long val) +{ + *pgdp = __pgd(val); +} + /* * Find an entry in a page-table-directory. We combine the address region * (the high order N bits) and the pgd portion of the address. @@ -373,254 +361,4 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); void pgtable_cache_init(void); #endif /* __ASSEMBLY__ */ -/* - * THP pages can't be special. 
So use the _PAGE_SPECIAL - */ -#define _PAGE_SPLITTING _PAGE_SPECIAL - -/* - * We need to differentiate between explicit huge page and THP huge - * page, since THP huge page also need to track real subpage details - */ -#define _PAGE_THP_HUGE _PAGE_4K_PFN - -/* - * set of bits not changed in pmd_modify. - */ -#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \ - _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \ - _PAGE_THP_HUGE) - -#ifndef __ASSEMBLY__ -/* - * The linux hugepage PMD now include the pmd entries followed by the address - * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits. - * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per - * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and - * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t. - * - * The last three bits are intentionally left to zero. This memory location - * are also used as normal page PTE pointers. So if we have any pointers - * left around while we collapse a hugepage, we need to make sure - * _PAGE_PRESENT bit of that is zero when we look at them - */ -static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index) -{ - return (hpte_slot_array[index] >> 3) & 0x1; -} - -static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array, - int index) -{ - return hpte_slot_array[index] >> 4; -} - -static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, - unsigned int index, unsigned int hidx) -{ - hpte_slot_array[index] = hidx << 4 | 0x1 << 3; -} - -struct page *realmode_pfn_to_page(unsigned long pfn); - -static inline char *get_hpte_slot_array(pmd_t *pmdp) -{ - /* - * The hpte hindex is stored in the pgtable whose address is in the - * second half of the PMD - * - * Order this load with the test for pmd_trans_huge in the caller - */ - smp_rmb(); - return *(char **)(pmdp + PTRS_PER_PMD); - - -} - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, unsigned long old_pmd); -extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); -extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); -extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); -extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, pmd_t pmd); -extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd); -/* - * - * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs - * page. The hugetlbfs page table walking and mangling paths are totally - * separated form the core VM paths and they're differentiated by - * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run. - * - * pmd_trans_huge() is defined as false at build time if - * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build - * time in such case. - * - * For ppc64 we need to differntiate from explicit hugepages from THP, because - * for THP we also track the subpage details at the pmd level. We don't do - * that for explicit huge pages. 
- * - */ -static inline int pmd_trans_huge(pmd_t pmd) -{ - /* - * leaf pte for huge page, bottom two bits != 00 - */ - return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE); -} - -static inline int pmd_trans_splitting(pmd_t pmd) -{ - if (pmd_trans_huge(pmd)) - return pmd_val(pmd) & _PAGE_SPLITTING; - return 0; -} - -extern int has_transparent_hugepage(void); -#else -static inline void hpte_do_hugepage_flush(struct mm_struct *mm, - unsigned long addr, pmd_t *pmdp, - unsigned long old_pmd) -{ - - WARN(1, "%s called with THP disabled\n", __func__); -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - -static inline int pmd_large(pmd_t pmd) -{ - /* - * leaf pte for huge page, bottom two bits != 00 - */ - return ((pmd_val(pmd) & 0x3) != 0x0); -} - -static inline pte_t pmd_pte(pmd_t pmd) -{ - return __pte(pmd_val(pmd)); -} - -static inline pmd_t pte_pmd(pte_t pte) -{ - return __pmd(pte_val(pte)); -} - -static inline pte_t *pmdp_ptep(pmd_t *pmd) -{ - return (pte_t *)pmd; -} - -#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) -#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) -#define pmd_young(pmd) pte_young(pmd_pte(pmd)) -#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) -#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) -#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) -#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) -#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) - -#define __HAVE_ARCH_PMD_WRITE -#define pmd_write(pmd) pte_write(pmd_pte(pmd)) - -static inline pmd_t pmd_mkhuge(pmd_t pmd) -{ - /* Do nothing, mk_pmd() does this part. */ - return pmd; -} - -static inline pmd_t pmd_mknotpresent(pmd_t pmd) -{ - pmd_val(pmd) &= ~_PAGE_PRESENT; - return pmd; -} - -static inline pmd_t pmd_mksplitting(pmd_t pmd) -{ - pmd_val(pmd) |= _PAGE_SPLITTING; - return pmd; -} - -#define __HAVE_ARCH_PMD_SAME -static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) -{ - return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0); -} - -#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS -extern int pmdp_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp, - pmd_t entry, int dirty); - -extern unsigned long pmd_hugepage_update(struct mm_struct *mm, - unsigned long addr, - pmd_t *pmdp, - unsigned long clr, - unsigned long set); - -static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, - unsigned long addr, pmd_t *pmdp) -{ - unsigned long old; - - if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) - return 0; - old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); - return ((old & _PAGE_ACCESSED) != 0); -} - -#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG -extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); -#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH -extern int pmdp_clear_flush_young(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); - -#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR -extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, - unsigned long addr, pmd_t *pmdp); - -#define __HAVE_ARCH_PMDP_SET_WRPROTECT -static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp) -{ - - if ((pmd_val(*pmdp) & _PAGE_RW) == 0) - return; - - pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); -} - -#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH -extern void pmdp_splitting_flush(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); - -extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, - unsigned long 
address, pmd_t *pmdp); -#define pmdp_collapse_flush pmdp_collapse_flush - -#define __HAVE_ARCH_PGTABLE_DEPOSIT -extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, - pgtable_t pgtable); -#define __HAVE_ARCH_PGTABLE_WITHDRAW -extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); - -#define __HAVE_ARCH_PMDP_INVALIDATE -extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmdp); - -#define pmd_move_must_withdraw pmd_move_must_withdraw -struct spinlock; -static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, - struct spinlock *old_pmd_ptl) -{ - /* - * Archs like ppc64 use pgtable to store per pmd - * specific information. So when we switch the pmd, - * we should also withdraw and deposit the pgtable - */ - return true; -} -#endif /* __ASSEMBLY__ */ -#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ +#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h new file mode 100644 index 000000000000..1263c22d60d8 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -0,0 +1,252 @@ +#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H +#define _ASM_POWERPC_NOHASH_PGTABLE_H + +#if defined(CONFIG_PPC64) +#include <asm/nohash/64/pgtable.h> +#else +#include <asm/nohash/32/pgtable.h> +#endif + +#ifndef __ASSEMBLY__ + +/* Generic accessors to PTE bits */ +static inline int pte_write(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; +} +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } +static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } +static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } + +#ifdef CONFIG_NUMA_BALANCING +/* + * These work without NUMA balancing but the kernel does not care. See the + * comment in include/asm-generic/pgtable.h . On powerpc, this will only + * work for user pages and always return true for kernel pages. + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & + (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return pte_protnone(pmd_pte(pmd)); +} +#endif /* CONFIG_NUMA_BALANCING */ + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * Even if PTEs can be unsigned long long, a PFN is always an unsigned + * long for now. 
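+ * The PFN is shifted into the RPN field by PTE_RPN_SHIFT and OR-ed with + * the protection bits; pte_pfn() below just undoes the shift.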
+ */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { + return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | + pgprot_val(pgprot)); } +static inline unsigned long pte_pfn(pte_t pte) { + return pte_val(pte) >> PTE_RPN_SHIFT; } +/* Generic modifiers for PTE bits */ +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_basic_t ptev; + + ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE); + ptev |= _PAGE_RO; + return __pte(ptev); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE)); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_ACCESSED); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_basic_t ptev; + + ptev = pte_val(pte) & ~_PAGE_RO; + ptev |= _PAGE_RW; + return __pte(ptev); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPECIAL); +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +/* Insert a PTE; the top-level function is out of line. It uses an inline + * low level function in the respective pgtable-* files. + */ +extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte); + +/* This low level function performs the actual PTE insertion. + * Setting the PTE depends on the MMU type and other factors. It's + * a horrible mess that I'm not going to try to clean up now but + * I'm keeping it in one place rather than spreading it around + */ +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, int percpu) +{ +#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) + /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the + * helper pte_update() which does an atomic update. We need to do that + * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a + * per-CPU PTE such as a kmap_atomic, we do a simple update preserving + * the hash bits instead (ie, same as the non-SMP case) + */ + if (percpu) + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + else + pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); + +#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) + /* Second case is 32-bit with 64-bit PTE. In this case, we + * can just store as long as we do the two halves in the right order + * with a barrier in between. This is possible because we take care, + * in the hash code, to pre-invalidate if the PTE was already hashed, + * which synchronizes us with any concurrent invalidation. 
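+ * The two word stores below are issued most-significant word first, + * with an eieio in between, so the word that carries the valid bits + * becomes visible last.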
+ * In the percpu case, we also fallback to the simple update preserving + * the hash bits + */ + if (percpu) { + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + return; + } +#if _PAGE_HASHPTE != 0 + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); +#endif + __asm__ __volatile__("\ + stw%U0%X0 %2,%0\n\ + eieio\n\ + stw%U0%X0 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +#elif defined(CONFIG_PPC_STD_MMU_32) + /* Third case is 32-bit hash table in UP mode, we need to preserve + * the _PAGE_HASHPTE bit since we may not have invalidated the previous + * translation in the hash yet (done in a subsequent flush_tlb_xxx()) + * and see we need to keep track that this PTE needs invalidating + */ + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) + | (pte_val(pte) & ~_PAGE_HASHPTE)); + +#else + /* Anything else just stores the PTE normally. That covers all 64-bit + * cases, and 32-bit non-hash with 32-bit PTEs. + */ + *ptep = pte; + +#ifdef CONFIG_PPC_BOOK3E_64 + /* + * With hardware tablewalk, a sync is needed to ensure that + * subsequent accesses see the PTE we just wrote. Unlike userspace + * mappings, we can't tolerate spurious faults, so make sure + * the new PTE will be seen the first time. + */ + if (is_kernel_addr(addr)) + mb(); +#endif +#endif +} + + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, pte_t entry, int dirty); + +/* + * Macro to mark a page protection value as "uncacheable". + */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE | _PAGE_GUARDED)) + +#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE)) + +#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT)) + +#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT | _PAGE_WRITETHRU)) + +#define pgprot_cached_noncoherent(prot) \ + (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL)) + +#define pgprot_writecombine pgprot_noncached_wc + +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#define __HAVE_PHYS_MEM_ACCESS_PROT + +#ifdef CONFIG_HUGETLB_PAGE +static inline int hugepd_ok(hugepd_t hpd) +{ + return (hpd.pd > 0); +} + +static inline int pmd_huge(pmd_t pmd) +{ + return 0; +} + +static inline int pud_huge(pud_t pud) +{ + return 0; +} + +static inline int pgd_huge(pgd_t pgd) +{ + return 0; +} +#define pgd_huge pgd_huge + +#define is_hugepd(hpd) (hugepd_ok(hpd)) +#endif + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h index 8d8473278d91..e16807b78edf 100644 --- a/arch/powerpc/include/asm/pte-book3e.h +++ b/arch/powerpc/include/asm/nohash/pte-book3e.h @@ -1,5 +1,5 @@ -#ifndef _ASM_POWERPC_PTE_BOOK3E_H -#define _ASM_POWERPC_PTE_BOOK3E_H +#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H +#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H #ifdef __KERNEL__ /* PTE bit definitions for processors compliant to the Book3E @@ -84,4 +84,4 @@ #endif #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */ +#endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */ diff --git a/arch/powerpc/include/asm/opal-api.h 
b/arch/powerpc/include/asm/opal-api.h index 8374afed9d0a..f8faaaeeca1e 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -157,7 +157,8 @@ #define OPAL_LEDS_GET_INDICATOR 114 #define OPAL_LEDS_SET_INDICATOR 115 #define OPAL_CEC_REBOOT2 116 -#define OPAL_LAST 116 +#define OPAL_CONSOLE_FLUSH 117 +#define OPAL_LAST 117 /* Device tree flags */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 800115910e43..07a99e638449 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -35,6 +35,7 @@ int64_t opal_console_read(int64_t term_number, __be64 *length, uint8_t *buffer); int64_t opal_console_write_buffer_space(int64_t term_number, __be64 *length); +int64_t opal_console_flush(int64_t term_number); int64_t opal_rtc_read(__be32 *year_month_day, __be64 *hour_minute_second_millisecond); int64_t opal_rtc_write(uint32_t year_month_day, @@ -262,6 +263,8 @@ extern int opal_resync_timebase(void); extern void opal_lpc_init(void); +extern void opal_kmsg_init(void); + extern int opal_event_request(unsigned int opal_event_nr); struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 70bd4381f8e6..546540b91095 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -16,6 +16,7 @@ #ifdef CONFIG_PPC64 +#include <linux/string.h> #include <asm/types.h> #include <asm/lppaca.h> #include <asm/mmu.h> @@ -131,7 +132,16 @@ struct paca_struct { struct tlb_core_data tcd; #endif /* CONFIG_PPC_BOOK3E */ - mm_context_t context; +#ifdef CONFIG_PPC_BOOK3S + mm_context_id_t mm_ctx_id; +#ifdef CONFIG_PPC_MM_SLICES + u64 mm_ctx_low_slices_psize; + unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE]; +#else + u16 mm_ctx_user_psize; + u16 mm_ctx_sllp; +#endif +#endif /* * then miscellaneous read-write fields @@ -194,6 +204,23 @@ struct paca_struct { #endif }; +#ifdef CONFIG_PPC_BOOK3S +static inline void copy_mm_to_paca(mm_context_t *context) +{ + get_paca()->mm_ctx_id = context->id; +#ifdef CONFIG_PPC_MM_SLICES + get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize; + memcpy(&get_paca()->mm_ctx_high_slices_psize, + &context->high_slices_psize, SLICE_ARRAY_SIZE); +#else + get_paca()->mm_ctx_user_psize = context->user_psize; + get_paca()->mm_ctx_sllp = context->sllp; +#endif +} +#else +static inline void copy_mm_to_paca(mm_context_t *context){} +#endif + extern struct paca_struct *paca; extern void initialise_paca(struct paca_struct *new_paca, int cpu); extern void setup_paca(struct paca_struct *new_paca); diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 3140c19c448c..e34124f6fbf2 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -286,8 +286,11 @@ extern long long virt_phys_offset; /* PTE level */ typedef struct { pte_basic_t pte; } pte_t; -#define pte_val(x) ((x).pte) #define __pte(x) ((pte_t) { (x) }) +static inline pte_basic_t pte_val(pte_t x) +{ + return x.pte; +} /* 64k pages additionally define a bigger "real PTE" type that gathers * the "second half" part of the PTE for pseudo 64k pages */ @@ -301,21 +304,30 @@ typedef struct { pte_t pte; } real_pte_t; /* PMD level */ #ifdef CONFIG_PPC64 typedef struct { unsigned long pmd; } pmd_t; -#define pmd_val(x) ((x).pmd) #define __pmd(x) ((pmd_t) { (x) }) +static inline unsigned long pmd_val(pmd_t x) +{ + return x.pmd; +} /* PUD level exists only on 4k pages */ #ifndef 
CONFIG_PPC_64K_PAGES typedef struct { unsigned long pud; } pud_t; -#define pud_val(x) ((x).pud) #define __pud(x) ((pud_t) { (x) }) +static inline unsigned long pud_val(pud_t x) +{ + return x.pud; +} #endif /* !CONFIG_PPC_64K_PAGES */ #endif /* CONFIG_PPC64 */ /* PGD level */ typedef struct { unsigned long pgd; } pgd_t; -#define pgd_val(x) ((x).pgd) #define __pgd(x) ((pgd_t) { (x) }) +static inline unsigned long pgd_val(pgd_t x) +{ + return x.pgd; +} /* Page protection bits */ typedef struct { unsigned long pgprot; } pgprot_t; @@ -329,8 +341,11 @@ typedef struct { unsigned long pgprot; } pgprot_t; */ typedef pte_basic_t pte_t; -#define pte_val(x) (x) #define __pte(x) (x) +static inline pte_basic_t pte_val(pte_t pte) +{ + return pte; +} #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; @@ -341,67 +356,42 @@ typedef pte_t real_pte_t; #ifdef CONFIG_PPC64 typedef unsigned long pmd_t; -#define pmd_val(x) (x) #define __pmd(x) (x) +static inline unsigned long pmd_val(pmd_t pmd) +{ + return pmd; +} #ifndef CONFIG_PPC_64K_PAGES typedef unsigned long pud_t; -#define pud_val(x) (x) #define __pud(x) (x) +static inline unsigned long pud_val(pud_t pud) +{ + return pud; +} #endif /* !CONFIG_PPC_64K_PAGES */ #endif /* CONFIG_PPC64 */ typedef unsigned long pgd_t; -#define pgd_val(x) (x) -#define pgprot_val(x) (x) +#define __pgd(x) (x) +static inline unsigned long pgd_val(pgd_t pgd) +{ + return pgd; +} typedef unsigned long pgprot_t; -#define __pgd(x) (x) +#define pgprot_val(x) (x) #define __pgprot(x) (x) #endif typedef struct { signed long pd; } hugepd_t; -#ifdef CONFIG_HUGETLB_PAGE -#ifdef CONFIG_PPC_BOOK3S_64 -#ifdef CONFIG_PPC_64K_PAGES -/* - * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't - * need to setup hugepage directory for them. Our pte and page directory format - * enable us to have this enabled. But to avoid errors when implementing new - * features disable hugepd for 64K. We enable a debug version here, So we catch - * wrong usage. 
- */ -#ifdef CONFIG_DEBUG_VM -extern int hugepd_ok(hugepd_t hpd); -#else -#define hugepd_ok(x) (0) -#endif -#else -static inline int hugepd_ok(hugepd_t hpd) -{ - /* - * hugepd pointer, bottom two bits == 00 and next 4 bits - * indicate size of table - */ - return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); -} -#endif -#else -static inline int hugepd_ok(hugepd_t hpd) -{ - return (hpd.pd > 0); -} -#endif - -#define is_hugepd(hpd) (hugepd_ok(hpd)) -#define pgd_huge pgd_huge -int pgd_huge(pgd_t pgd); -#else /* CONFIG_HUGETLB_PAGE */ -#define is_hugepd(pdep) 0 -#define pgd_huge(pgd) 0 +#ifndef CONFIG_HUGETLB_PAGE +#define is_hugepd(pdep) (0) +#define pgd_huge(pgd) (0) #endif /* CONFIG_HUGETLB_PAGE */ + #define __hugepd(x) ((hugepd_t) { (x) }) struct page; diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 37fc53587bb4..54843ca5fa2b 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -205,6 +205,7 @@ struct pci_dn { int pci_ext_config_space; /* for pci devices */ + struct pci_dev *pcidev; /* back-pointer to the pci device */ #ifdef CONFIG_EEH struct eeh_dev *edev; /* eeh device */ #endif diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 3453bd8dc18f..6f8065a7d487 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -149,4 +149,8 @@ extern void pcibios_setup_phb_io_space(struct pci_controller *hose); extern void pcibios_scan_phb(struct pci_controller *hose); #endif /* __KERNEL__ */ + +extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev); +extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index); + #endif /* __ASM_POWERPC_PCI_H */ diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 842846c1b711..76d6b9e0c8a9 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -21,16 +21,34 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); /* #define pgd_populate(mm, pmd, pte) BUG() */ #ifndef CONFIG_BOOKE -#define pmd_populate_kernel(mm, pmd, pte) \ - (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT) -#define pmd_populate(mm, pmd, pte) \ - (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT) + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, + pte_t *pte) +{ + *pmdp = __pmd(__pa(pte) | _PMD_PRESENT); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pte_page) +{ + *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT); +} + #define pmd_pgtable(pmd) pmd_page(pmd) #else -#define pmd_populate_kernel(mm, pmd, pte) \ - (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT) -#define pmd_populate(mm, pmd, pte) \ - (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT) + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, + pte_t *pte) +{ + *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pte_page) +{ + *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT); +} + #define pmd_pgtable(pmd) pmd_page(pmd) #endif diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 4b0be20fcbfd..69ef28a81733 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -53,7 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, 
pgd_t *pgd) #ifndef CONFIG_PPC_64K_PAGES -#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) +#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { @@ -71,9 +71,18 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_set(pud, (unsigned long)pmd); } -#define pmd_populate(mm, pmd, pte_page) \ - pmd_populate_kernel(mm, pmd, page_address(pte_page)) -#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + pmd_set(pmd, (unsigned long)pte); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte_page) +{ + pmd_set(pmd, (unsigned long)page_address(pte_page)); +} + #define pmd_pgtable(pmd) pmd_page(pmd) static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, @@ -154,16 +163,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, } #else /* if CONFIG_PPC_64K_PAGES */ -/* - * we support 16 fragments per PTE page. - */ -#define PTE_FRAG_NR 16 -/* - * We use a 2K PTE page fragment and another 2K for storing - * real_pte_t hash index - */ -#define PTE_FRAG_SIZE_SHIFT 12 -#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t)) extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int); extern void page_table_free(struct mm_struct *, unsigned long *, int); diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index b64b4212b71f..ac9fb114e25d 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -1,6 +1,5 @@ #ifndef _ASM_POWERPC_PGTABLE_H #define _ASM_POWERPC_PGTABLE_H -#ifdef __KERNEL__ #ifndef __ASSEMBLY__ #include <linux/mmdebug.h> @@ -13,210 +12,20 @@ struct mm_struct; #endif /* !__ASSEMBLY__ */ -#if defined(CONFIG_PPC64) -# include <asm/pgtable-ppc64.h> +#ifdef CONFIG_PPC_BOOK3S +#include <asm/book3s/pgtable.h> #else -# include <asm/pgtable-ppc32.h> -#endif - -/* - * We save the slot number & secondary bit in the second half of the - * PTE page. We use the 8 bytes per each pte entry. - */ -#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8) +#include <asm/nohash/pgtable.h> +#endif /* !CONFIG_PPC_BOOK3S */ #ifndef __ASSEMBLY__ #include <asm/tlbflush.h> -/* Generic accessors to PTE bits */ -static inline int pte_write(pte_t pte) -{ return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; } -static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } -static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } -static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } -static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } - -#ifdef CONFIG_NUMA_BALANCING -/* - * These work without NUMA balancing but the kernel does not care. See the - * comment in include/asm-generic/pgtable.h . On powerpc, this will only - * work for user pages and always return true for kernel pages. 
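/*
 * As the comment notes, a PROT_NONE pte under this scheme is one that is
 * marked present but lacks the user-access bit, so hardware faults on any
 * user access while the kernel still sees a valid translation.  A minimal
 * sketch of the pte_protnone() test deleted just below, with stand-in bit
 * values (the real _PAGE_* values vary by MMU family):
 */
#include <assert.h>

#define SK_PRESENT 0x001	/* stand-in for _PAGE_PRESENT */
#define SK_USER    0x002	/* stand-in for _PAGE_USER */

static int pte_protnone_sketch(unsigned long pte)
{
	return (pte & (SK_PRESENT | SK_USER)) == SK_PRESENT;
}

int main(void)
{
	assert(pte_protnone_sketch(SK_PRESENT));		/* present, no user access */
	assert(!pte_protnone_sketch(SK_PRESENT | SK_USER));	/* ordinary user mapping */
	return 0;
}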
- */ -static inline int pte_protnone(pte_t pte) -{ - return (pte_val(pte) & - (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; -} - -static inline int pmd_protnone(pmd_t pmd) -{ - return pte_protnone(pmd_pte(pmd)); -} -#endif /* CONFIG_NUMA_BALANCING */ - -static inline int pte_present(pte_t pte) -{ - return pte_val(pte) & _PAGE_PRESENT; -} - -/* Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - * - * Even if PTEs can be unsigned long long, a PFN is always an unsigned - * long for now. - */ -static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { - return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | - pgprot_val(pgprot)); } -static inline unsigned long pte_pfn(pte_t pte) { - return pte_val(pte) >> PTE_RPN_SHIFT; } - /* Keep these as a macros to avoid include dependency mess */ #define pte_page(x) pfn_to_page(pte_pfn(x)) #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) -/* Generic modifiers for PTE bits */ -static inline pte_t pte_wrprotect(pte_t pte) { - pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); - pte_val(pte) |= _PAGE_RO; return pte; } -static inline pte_t pte_mkclean(pte_t pte) { - pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } -static inline pte_t pte_mkold(pte_t pte) { - pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkwrite(pte_t pte) { - pte_val(pte) &= ~_PAGE_RO; - pte_val(pte) |= _PAGE_RW; return pte; } -static inline pte_t pte_mkdirty(pte_t pte) { - pte_val(pte) |= _PAGE_DIRTY; return pte; } -static inline pte_t pte_mkyoung(pte_t pte) { - pte_val(pte) |= _PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkspecial(pte_t pte) { - pte_val(pte) |= _PAGE_SPECIAL; return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { - return pte; } -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ - pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); - return pte; -} - - -/* Insert a PTE, top-level function is out of line. It uses an inline - * low level function in the respective pgtable-* files - */ -extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - pte_t pte); - -/* This low level function performs the actual PTE insertion - * Setting the PTE depends on the MMU type and other factors. It's - * an horrible mess that I'm not going to try to clean up now but - * I'm keeping it in one place rather than spread around - */ -static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, int percpu) -{ -#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) - /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the - * helper pte_update() which does an atomic update. We need to do that - * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a - * per-CPU PTE such as a kmap_atomic, we do a simple update preserving - * the hash bits instead (ie, same as the non-SMP case) - */ - if (percpu) - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - else - pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); - -#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) - /* Second case is 32-bit with 64-bit PTE. In this case, we - * can just store as long as we do the two halves in the right order - * with a barrier in between. 
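/*
 * The "two halves in the right order with a barrier in between" store can
 * be pictured with C11 atomics standing in for the stw/eieio/stw sequence
 * shown below; an illustration of the ordering idea only, not the kernel
 * code:
 */
#include <stdatomic.h>
#include <stdint.h>

struct split_entry {
	_Atomic uint32_t first;		/* half stored before the barrier */
	_Atomic uint32_t second;	/* half a concurrent reader keys off */
};

static void set_split_entry(struct split_entry *e, uint64_t val)
{
	atomic_store_explicit(&e->first, (uint32_t)val, memory_order_relaxed);
	/* the release fence plays eieio's role: order the two stores */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&e->second, (uint32_t)(val >> 32),
			      memory_order_relaxed);
}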
This is possible because we take care, - * in the hash code, to pre-invalidate if the PTE was already hashed, - * which synchronizes us with any concurrent invalidation. - * In the percpu case, we also fallback to the simple update preserving - * the hash bits - */ - if (percpu) { - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - return; - } -#if _PAGE_HASHPTE != 0 - if (pte_val(*ptep) & _PAGE_HASHPTE) - flush_hash_entry(mm, ptep, addr); -#endif - __asm__ __volatile__("\ - stw%U0%X0 %2,%0\n\ - eieio\n\ - stw%U0%X0 %L2,%1" - : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) - : "r" (pte) : "memory"); - -#elif defined(CONFIG_PPC_STD_MMU_32) - /* Third case is 32-bit hash table in UP mode, we need to preserve - * the _PAGE_HASHPTE bit since we may not have invalidated the previous - * translation in the hash yet (done in a subsequent flush_tlb_xxx()) - * and see we need to keep track that this PTE needs invalidating - */ - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) - | (pte_val(pte) & ~_PAGE_HASHPTE)); - -#else - /* Anything else just stores the PTE normally. That covers all 64-bit - * cases, and 32-bit non-hash with 32-bit PTEs. - */ - *ptep = pte; - -#ifdef CONFIG_PPC_BOOK3E_64 - /* - * With hardware tablewalk, a sync is needed to ensure that - * subsequent accesses see the PTE we just wrote. Unlike userspace - * mappings, we can't tolerate spurious faults, so make sure - * the new PTE will be seen the first time. - */ - if (is_kernel_addr(addr)) - mb(); -#endif -#endif -} - - -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, - pte_t *ptep, pte_t entry, int dirty); - -/* - * Macro to mark a page protection value as "uncacheable". - */ - -#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ - _PAGE_WRITETHRU) - -#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ - _PAGE_NO_CACHE | _PAGE_GUARDED)) - -#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ - _PAGE_NO_CACHE)) - -#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ - _PAGE_COHERENT)) - -#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ - _PAGE_COHERENT | _PAGE_WRITETHRU)) - -#define pgprot_cached_noncoherent(prot) \ - (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL)) - -#define pgprot_writecombine pgprot_noncached_wc - -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#define __HAVE_PHYS_MEM_ACCESS_PROT - /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. 
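/*
 * The pgprot_*() helpers deleted above all follow one pattern: clear every
 * cache-control bit, then set exactly the combination wanted, leaving the
 * unrelated protection bits alone.  A stand-alone sketch of that
 * mask-then-set idiom, with invented bit values:
 */
#include <assert.h>

#define P_COHERENT   0x1
#define P_GUARDED    0x2
#define P_NO_CACHE   0x4
#define P_WRITETHRU  0x8
#define P_CACHE_CTL  (P_COHERENT | P_GUARDED | P_NO_CACHE | P_WRITETHRU)

/* uncacheable and guarded: the pgprot_noncached() recipe */
static unsigned long prot_noncached(unsigned long prot)
{
	return (prot & ~P_CACHE_CTL) | P_NO_CACHE | P_GUARDED;
}

int main(void)
{
	/* starting from write-through, only NO_CACHE|GUARDED survive;
	 * the unrelated 0x100 bit passes through untouched */
	unsigned long p = prot_noncached(P_WRITETHRU | 0x100);

	assert(p == (P_NO_CACHE | P_GUARDED | 0x100));
	return 0;
}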
@@ -271,5 +80,4 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, } #endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 67859edbf8fd..1b394247afc2 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -202,6 +202,23 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex, } /* + * ptes must be 8*sizeof(unsigned long) + */ +static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex, + unsigned long *ptes) + +{ + long rc; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + + rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex); + + memcpy(ptes, retbuf, 8*sizeof(unsigned long)); + + return rc; +} + +/* * plpar_pte_read_4_raw can be called in real mode. * ptes must be 8*sizeof(unsigned long) */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index dd0fc18d8103..499d9f89435a 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -413,24 +413,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) FTR_SECTION_ELSE_NESTED(848); \ mtocrf (FXM), RS; \ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848) - -/* - * PPR restore macros used in entry_64.S - * Used for P7 or later processors - */ -#define HMT_MEDIUM_LOW_HAS_PPR \ -BEGIN_FTR_SECTION_NESTED(944) \ - HMT_MEDIUM_LOW; \ -END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944) - -#define SET_DEFAULT_THREAD_PPR(ra, rb) \ -BEGIN_FTR_SECTION_NESTED(945) \ - lis ra,INIT_PPR@highest; /* default ppr=3 */ \ - ld rb,PACACURRENT(r13); \ - sldi ra,ra,32; /* 11- 13 bits are used for ppr */ \ - std ra,TASKTHREADPPR(rb); \ -END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945) - #endif /* diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 5afea361beaa..ac2330820b9a 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -88,12 +88,6 @@ struct task_struct; void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); void release_thread(struct task_struct *); -/* Lazy FPU handling on uni-processor */ -extern struct task_struct *last_task_used_math; -extern struct task_struct *last_task_used_altivec; -extern struct task_struct *last_task_used_vsx; -extern struct task_struct *last_task_used_spe; - #ifdef CONFIG_PPC32 #if CONFIG_TASK_SIZE > CONFIG_KERNEL_START @@ -294,6 +288,7 @@ struct thread_struct { #endif #ifdef CONFIG_PPC64 unsigned long dscr; + unsigned long fscr; /* * This member element dscr_inherit indicates that the process * has explicitly attempted and changed the DSCR register value @@ -385,8 +380,6 @@ extern int set_endian(struct task_struct *tsk, unsigned int val); extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); -extern void fp_enable(void); -extern void vec_enable(void); extern void load_fp_state(struct thread_fp_state *fp); extern void store_fp_state(struct thread_fp_state *fp); extern void load_vr_state(struct thread_vr_state *vr); diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 71537a319fc8..1ec67b043065 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -40,6 +40,11 @@ #else #define _PAGE_RW 0 #endif + +#ifndef _PAGE_PTE 
+#define _PAGE_PTE 0 +#endif + #ifndef _PMD_PRESENT_MASK #define _PMD_PRESENT_MASK _PMD_PRESENT #endif diff --git a/arch/powerpc/include/asm/pte-hash64-4k.h b/arch/powerpc/include/asm/pte-hash64-4k.h deleted file mode 100644 index c134e809aac3..000000000000 --- a/arch/powerpc/include/asm/pte-hash64-4k.h +++ /dev/null @@ -1,17 +0,0 @@ -/* To be include by pgtable-hash64.h only */ - -/* PTE bits */ -#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ -#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ -#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ -#define _PAGE_F_SECOND _PAGE_SECONDARY -#define _PAGE_F_GIX _PAGE_GROUP_IX -#define _PAGE_SPECIAL 0x10000 /* software: special page */ - -/* PTE flags to conserve for HPTE identification */ -#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ - _PAGE_SECONDARY | _PAGE_GROUP_IX) - -/* shift to put page number into pte */ -#define PTE_RPN_SHIFT (17) - diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h deleted file mode 100644 index 4f4ec2ab45c9..000000000000 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ /dev/null @@ -1,102 +0,0 @@ -/* To be include by pgtable-hash64.h only */ - -/* Additional PTE bits (don't change without checking asm in hash_low.S) */ -#define _PAGE_SPECIAL 0x00000400 /* software: special page */ -#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ -#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ -#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ -#define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ - -/* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead, - * we set that to be the whole sub-bits mask. The C code will only - * test this, so a multi-bit mask will work. For combo pages, this - * is equivalent as effectively, the old _PAGE_HASHPTE was an OR of - * all the sub bits. For real 64k pages, we now have the assembly set - * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap - * that mask. This is fine as long as the HIDX bits are never set on - * a PTE that isn't hashed, which is the case today. - * - * A little nit is for the huge page C code, which does the hashing - * in C, we need to provide which bit to use. - */ -#define _PAGE_HASHPTE _PAGE_HPTE_SUB - -/* Note the full page bits must be in the same location as for normal - * 4k pages as the same assembly will be used to insert 64K pages - * whether the kernel has CONFIG_PPC_64K_PAGES or not - */ -#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ -#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ - -/* PTE flags to conserve for HPTE identification */ -#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO) - -/* Shift to put page number into pte. - * - * That gives us a max RPN of 34 bits, which means a max of 50 bits - * of addressable physical space, or 46 bits for the special 4k PFNs. - */ -#define PTE_RPN_SHIFT (30) - -#ifndef __ASSEMBLY__ - -/* - * With 64K pages on hash table, we have a special PTE format that - * uses a second "half" of the page table to encode sub-page information - * in order to deal with 64K made of 4K HW pages. 
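/*
 * Both deleted headers keep the low PTE bits for software and hardware
 * flags and place the physical page number above PTE_RPN_SHIFT: 17 in the
 * 4K header, 30 in the 64K header, both quoted above.  A sketch of the
 * resulting pfn/pte conversion using the 4K value:
 */
#include <assert.h>
#include <stdint.h>

#define SK_PTE_RPN_SHIFT 17

static uint64_t pfn_pte_sketch(uint64_t pfn, uint64_t prot)
{
	return (pfn << SK_PTE_RPN_SHIFT) | prot;
}

static uint64_t pte_pfn_sketch(uint64_t pte)
{
	return pte >> SK_PTE_RPN_SHIFT;
}

int main(void)
{
	uint64_t pte = pfn_pte_sketch(0x12345, 0x0181);	/* flags live below bit 17 */

	assert(pte_pfn_sketch(pte) == 0x12345);
	return 0;
}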
Thus we override the - * generic accessors and iterators here - */ -#define __real_pte __real_pte -static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) -{ - real_pte_t rpte; - - rpte.pte = pte; - rpte.hidx = 0; - if (pte_val(pte) & _PAGE_COMBO) { - /* - * Make sure we order the hidx load against the _PAGE_COMBO - * check. The store side ordering is done in __hash_page_4K - */ - smp_rmb(); - rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE)); - } - return rpte; -} - -static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) -{ - if ((pte_val(rpte.pte) & _PAGE_COMBO)) - return (rpte.hidx >> (index<<2)) & 0xf; - return (pte_val(rpte.pte) >> 12) & 0xf; -} - -#define __rpte_to_pte(r) ((r).pte) -#define __rpte_sub_valid(rpte, index) \ - (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index))) - -/* Trick: we set __end to va + 64k, which happens works for - * a 16M page as well as we want only one iteration - */ -#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \ - do { \ - unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \ - unsigned __split = (psize == MMU_PAGE_4K || \ - psize == MMU_PAGE_64K_AP); \ - shift = mmu_psize_defs[psize].shift; \ - for (index = 0; vpn < __end; index++, \ - vpn += (1L << (shift - VPN_SHIFT))) { \ - if (!__split || __rpte_sub_valid(rpte, index)) \ - do { - -#define pte_iterate_hashed_end() } while(0); } } while(0) - -#define pte_pagesize_index(mm, addr, pte) \ - (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) - -#define remap_4k_pfn(vma, addr, pfn, prot) \ - (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL : \ - remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ - __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))) - -#endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h deleted file mode 100644 index ef612c160da7..000000000000 --- a/arch/powerpc/include/asm/pte-hash64.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef _ASM_POWERPC_PTE_HASH64_H -#define _ASM_POWERPC_PTE_HASH64_H -#ifdef __KERNEL__ - -/* - * Common bits between 4K and 64K pages in a linux-style PTE. - * These match the bits in the (hardware-defined) PowerPC PTE as closely - * as possible. Additional bits may be defined in pgtable-hash64-*.h - * - * Note: We only support user read/write permissions. Supervisor always - * have full read/write to pages above PAGE_OFFSET (pages below that - * always use the user access permissions). - * - * We could create separate kernel read-only if we used the 3 PP bits - * combinations that newer processors provide but we currently don't. 
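/*
 * __rpte_to_hidx() above packs sixteen 4-bit hash-slot indices into one
 * unsigned long, one nibble per 4K sub-page of the 64K page.  The
 * extraction is plain shift-and-mask; a stand-alone sketch:
 */
#include <assert.h>

static unsigned long hidx_get(unsigned long hidx, unsigned long index)
{
	return (hidx >> (index << 2)) & 0xf;	/* index << 2 == index * 4 bits */
}

int main(void)
{
	unsigned long hidx = 0;

	hidx |= 0x7UL << (3 << 2);	/* record slot 7 for sub-page 3 */
	assert(hidx_get(hidx, 3) == 0x7);
	assert(hidx_get(hidx, 0) == 0x0);
	return 0;
}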
- */ -#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ -#define _PAGE_USER 0x0002 /* matches one of the PP bits */ -#define _PAGE_BIT_SWAP_TYPE 2 -#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ -#define _PAGE_GUARDED 0x0008 -/* We can derive Memory coherence from _PAGE_NO_CACHE */ -#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ -#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ -#define _PAGE_DIRTY 0x0080 /* C: page changed */ -#define _PAGE_ACCESSED 0x0100 /* R: page referenced */ -#define _PAGE_RW 0x0200 /* software: user write access allowed */ -#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ - -/* No separate kernel read-only */ -#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ -#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW - -/* Strong Access Ordering */ -#define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) - -/* No page size encoding in the linux PTE */ -#define _PAGE_PSIZE 0 - -/* PTEIDX nibble */ -#define _PTEIDX_SECONDARY 0x8 -#define _PTEIDX_GROUP_IX 0x7 - -/* Hash table based platforms need atomic updates of the linux PTE */ -#define PTE_ATOMIC_UPDATES 1 - -#ifdef CONFIG_PPC_64K_PAGES -#include <asm/pte-hash64-64k.h> -#else -#include <asm/pte-hash64-4k.h> -#endif - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PTE_HASH64_H */ diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h deleted file mode 100644 index 32b9bfa0c9bd..000000000000 --- a/arch/powerpc/include/asm/qe.h +++ /dev/null @@ -1,740 +0,0 @@ -/* - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. - * - * Authors: Shlomi Gridish <gridish@freescale.com> - * Li Yang <leoli@freescale.com> - * - * Description: - * QUICC Engine (QE) external definitions and structure. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
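/*
 * PTE_ATOMIC_UPDATES above means generic code must change a linux PTE
 * with an atomic read-modify-write, because the hash-fault path can set
 * bits in the same entry concurrently.  A C11 sketch of such a
 * pte_update(); the kernel uses ldarx/stdcx. retry loops instead:
 */
#include <assert.h>
#include <stdatomic.h>

static unsigned long pte_update_sketch(_Atomic unsigned long *ptep,
				       unsigned long clr, unsigned long set)
{
	unsigned long old = atomic_load(ptep);

	/* retry until no other CPU changed the entry under us */
	while (!atomic_compare_exchange_weak(ptep, &old, (old & ~clr) | set))
		;
	return old;	/* callers usually want the previous value */
}

int main(void)
{
	_Atomic unsigned long pte = 0x0101;
	unsigned long old = pte_update_sketch(&pte, 0x0001, 0x0080);

	assert(old == 0x0101 && atomic_load(&pte) == 0x0180);
	return 0;
}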
- */ -#ifndef _ASM_POWERPC_QE_H -#define _ASM_POWERPC_QE_H -#ifdef __KERNEL__ - -#include <linux/spinlock.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <asm/cpm.h> -#include <asm/immap_qe.h> - -#define QE_NUM_OF_SNUM 256 /* There are 256 serial number in QE */ -#define QE_NUM_OF_BRGS 16 -#define QE_NUM_OF_PORTS 1024 - -/* Memory partitions -*/ -#define MEM_PART_SYSTEM 0 -#define MEM_PART_SECONDARY 1 -#define MEM_PART_MURAM 2 - -/* Clocks and BRGs */ -enum qe_clock { - QE_CLK_NONE = 0, - QE_BRG1, /* Baud Rate Generator 1 */ - QE_BRG2, /* Baud Rate Generator 2 */ - QE_BRG3, /* Baud Rate Generator 3 */ - QE_BRG4, /* Baud Rate Generator 4 */ - QE_BRG5, /* Baud Rate Generator 5 */ - QE_BRG6, /* Baud Rate Generator 6 */ - QE_BRG7, /* Baud Rate Generator 7 */ - QE_BRG8, /* Baud Rate Generator 8 */ - QE_BRG9, /* Baud Rate Generator 9 */ - QE_BRG10, /* Baud Rate Generator 10 */ - QE_BRG11, /* Baud Rate Generator 11 */ - QE_BRG12, /* Baud Rate Generator 12 */ - QE_BRG13, /* Baud Rate Generator 13 */ - QE_BRG14, /* Baud Rate Generator 14 */ - QE_BRG15, /* Baud Rate Generator 15 */ - QE_BRG16, /* Baud Rate Generator 16 */ - QE_CLK1, /* Clock 1 */ - QE_CLK2, /* Clock 2 */ - QE_CLK3, /* Clock 3 */ - QE_CLK4, /* Clock 4 */ - QE_CLK5, /* Clock 5 */ - QE_CLK6, /* Clock 6 */ - QE_CLK7, /* Clock 7 */ - QE_CLK8, /* Clock 8 */ - QE_CLK9, /* Clock 9 */ - QE_CLK10, /* Clock 10 */ - QE_CLK11, /* Clock 11 */ - QE_CLK12, /* Clock 12 */ - QE_CLK13, /* Clock 13 */ - QE_CLK14, /* Clock 14 */ - QE_CLK15, /* Clock 15 */ - QE_CLK16, /* Clock 16 */ - QE_CLK17, /* Clock 17 */ - QE_CLK18, /* Clock 18 */ - QE_CLK19, /* Clock 19 */ - QE_CLK20, /* Clock 20 */ - QE_CLK21, /* Clock 21 */ - QE_CLK22, /* Clock 22 */ - QE_CLK23, /* Clock 23 */ - QE_CLK24, /* Clock 24 */ - QE_CLK_DUMMY -}; - -static inline bool qe_clock_is_brg(enum qe_clock clk) -{ - return clk >= QE_BRG1 && clk <= QE_BRG16; -} - -extern spinlock_t cmxgcr_lock; - -/* Export QE common operations */ -#ifdef CONFIG_QUICC_ENGINE -extern void qe_reset(void); -#else -static inline void qe_reset(void) {} -#endif - -/* QE PIO */ -#define QE_PIO_PINS 32 - -struct qe_pio_regs { - __be32 cpodr; /* Open drain register */ - __be32 cpdata; /* Data register */ - __be32 cpdir1; /* Direction register */ - __be32 cpdir2; /* Direction register */ - __be32 cppar1; /* Pin assignment register */ - __be32 cppar2; /* Pin assignment register */ -#ifdef CONFIG_PPC_85xx - u8 pad[8]; -#endif -}; - -#define QE_PIO_DIR_IN 2 -#define QE_PIO_DIR_OUT 1 -extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, - int dir, int open_drain, int assignment, - int has_irq); -#ifdef CONFIG_QUICC_ENGINE -extern int par_io_init(struct device_node *np); -extern int par_io_of_config(struct device_node *np); -extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, - int assignment, int has_irq); -extern int par_io_data_set(u8 port, u8 pin, u8 val); -#else -static inline int par_io_init(struct device_node *np) { return -ENOSYS; } -static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; } -static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, - int assignment, int has_irq) { return -ENOSYS; } -static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; } -#endif /* CONFIG_QUICC_ENGINE */ - -/* - * Pin multiplexing functions. 
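/*
 * The par_io_*() declarations above, and the qe_pin functions that
 * follow, use a common kernel idiom: when the driver is configured out,
 * every public entry point gets a static inline stub returning -ENOSYS
 * (or doing nothing), so callers never need their own #ifdefs.  The
 * pattern in miniature, with an invented config symbol:
 */
#include <errno.h>

#ifdef CONFIG_EXAMPLE_FEATURE
int feature_init(void);			/* real version, defined in the driver */
#else
static inline int feature_init(void)
{
	return -ENOSYS;			/* feature compiled out: fail politely */
}
#endif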
- */ -struct qe_pin; -#ifdef CONFIG_QE_GPIO -extern struct qe_pin *qe_pin_request(struct device_node *np, int index); -extern void qe_pin_free(struct qe_pin *qe_pin); -extern void qe_pin_set_gpio(struct qe_pin *qe_pin); -extern void qe_pin_set_dedicated(struct qe_pin *pin); -#else -static inline struct qe_pin *qe_pin_request(struct device_node *np, int index) -{ - return ERR_PTR(-ENOSYS); -} -static inline void qe_pin_free(struct qe_pin *qe_pin) {} -static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {} -static inline void qe_pin_set_dedicated(struct qe_pin *pin) {} -#endif /* CONFIG_QE_GPIO */ - -#ifdef CONFIG_QUICC_ENGINE -int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input); -#else -static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, - u32 cmd_input) -{ - return -ENOSYS; -} -#endif /* CONFIG_QUICC_ENGINE */ - -/* QE internal API */ -enum qe_clock qe_clock_source(const char *source); -unsigned int qe_get_brg_clk(void); -int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); -int qe_get_snum(void); -void qe_put_snum(u8 snum); -unsigned int qe_get_num_of_risc(void); -unsigned int qe_get_num_of_snums(void); - -static inline int qe_alive_during_sleep(void) -{ - /* - * MPC8568E reference manual says: - * - * "...power down sequence waits for all I/O interfaces to become idle. - * In some applications this may happen eventually without actively - * shutting down interfaces, but most likely, software will have to - * take steps to shut down the eTSEC, QUICC Engine Block, and PCI - * interfaces before issuing the command (either the write to the core - * MSR[WE] as described above or writing to POWMGTCSR) to put the - * device into sleep state." - * - * MPC8569E reference manual has a similar paragraph. - */ -#ifdef CONFIG_PPC_85xx - return 0; -#else - return 1; -#endif -} - -/* we actually use cpm_muram implementation, define this for convenience */ -#define qe_muram_init cpm_muram_init -#define qe_muram_alloc cpm_muram_alloc -#define qe_muram_alloc_fixed cpm_muram_alloc_fixed -#define qe_muram_free cpm_muram_free -#define qe_muram_addr cpm_muram_addr -#define qe_muram_offset cpm_muram_offset - -/* Structure that defines QE firmware binary files. - * - * See Documentation/powerpc/qe_firmware.txt for a description of these - * fields. - */ -struct qe_firmware { - struct qe_header { - __be32 length; /* Length of the entire structure, in bytes */ - u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */ - u8 version; /* Version of this layout. 
First ver is '1' */ - } header; - u8 id[62]; /* Null-terminated identifier string */ - u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */ - u8 count; /* Number of microcode[] structures */ - struct { - __be16 model; /* The SOC model */ - u8 major; /* The SOC revision major */ - u8 minor; /* The SOC revision minor */ - } __attribute__ ((packed)) soc; - u8 padding[4]; /* Reserved, for alignment */ - __be64 extended_modes; /* Extended modes */ - __be32 vtraps[8]; /* Virtual trap addresses */ - u8 reserved[4]; /* Reserved, for future expansion */ - struct qe_microcode { - u8 id[32]; /* Null-terminated identifier */ - __be32 traps[16]; /* Trap addresses, 0 == ignore */ - __be32 eccr; /* The value for the ECCR register */ - __be32 iram_offset; /* Offset into I-RAM for the code */ - __be32 count; /* Number of 32-bit words of the code */ - __be32 code_offset; /* Offset of the actual microcode */ - u8 major; /* The microcode version major */ - u8 minor; /* The microcode version minor */ - u8 revision; /* The microcode version revision */ - u8 padding; /* Reserved, for alignment */ - u8 reserved[4]; /* Reserved, for future expansion */ - } __attribute__ ((packed)) microcode[1]; - /* All microcode binaries should be located here */ - /* CRC32 should be located here, after the microcode binaries */ -} __attribute__ ((packed)); - -struct qe_firmware_info { - char id[64]; /* Firmware name */ - u32 vtraps[8]; /* Virtual trap addresses */ - u64 extended_modes; /* Extended modes */ -}; - -#ifdef CONFIG_QUICC_ENGINE -/* Upload a firmware to the QE */ -int qe_upload_firmware(const struct qe_firmware *firmware); -#else -static inline int qe_upload_firmware(const struct qe_firmware *firmware) -{ - return -ENOSYS; -} -#endif /* CONFIG_QUICC_ENGINE */ - -/* Obtain information on the uploaded firmware */ -struct qe_firmware_info *qe_get_firmware_info(void); - -/* QE USB */ -int qe_usb_clock_set(enum qe_clock clk, int rate); - -/* Buffer descriptors */ -struct qe_bd { - __be16 status; - __be16 length; - __be32 buf; -} __attribute__ ((packed)); - -#define BD_STATUS_MASK 0xffff0000 -#define BD_LENGTH_MASK 0x0000ffff - -/* Alignment */ -#define QE_INTR_TABLE_ALIGN 16 /* ??? 
*/ -#define QE_ALIGNMENT_OF_BD 8 -#define QE_ALIGNMENT_OF_PRAM 64 - -/* RISC allocation */ -#define QE_RISC_ALLOCATION_RISC1 0x1 /* RISC 1 */ -#define QE_RISC_ALLOCATION_RISC2 0x2 /* RISC 2 */ -#define QE_RISC_ALLOCATION_RISC3 0x4 /* RISC 3 */ -#define QE_RISC_ALLOCATION_RISC4 0x8 /* RISC 4 */ -#define QE_RISC_ALLOCATION_RISC1_AND_RISC2 (QE_RISC_ALLOCATION_RISC1 | \ - QE_RISC_ALLOCATION_RISC2) -#define QE_RISC_ALLOCATION_FOUR_RISCS (QE_RISC_ALLOCATION_RISC1 | \ - QE_RISC_ALLOCATION_RISC2 | \ - QE_RISC_ALLOCATION_RISC3 | \ - QE_RISC_ALLOCATION_RISC4) - -/* QE extended filtering Table Lookup Key Size */ -enum qe_fltr_tbl_lookup_key_size { - QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES - = 0x3f, /* LookupKey parsed by the Generate LookupKey - CMD is truncated to 8 bytes */ - QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES - = 0x5f, /* LookupKey parsed by the Generate LookupKey - CMD is truncated to 16 bytes */ -}; - -/* QE FLTR extended filtering Largest External Table Lookup Key Size */ -enum qe_fltr_largest_external_tbl_lookup_key_size { - QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE - = 0x0,/* not used */ - QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES - = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES, /* 8 bytes */ - QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES - = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES, /* 16 bytes */ -}; - -/* structure representing QE parameter RAM */ -struct qe_timer_tables { - u16 tm_base; /* QE timer table base adr */ - u16 tm_ptr; /* QE timer table pointer */ - u16 r_tmr; /* QE timer mode register */ - u16 r_tmv; /* QE timer valid register */ - u32 tm_cmd; /* QE timer cmd register */ - u32 tm_cnt; /* QE timer internal cnt */ -} __attribute__ ((packed)); - -#define QE_FLTR_TAD_SIZE 8 - -/* QE extended filtering Termination Action Descriptor (TAD) */ -struct qe_fltr_tad { - u8 serialized[QE_FLTR_TAD_SIZE]; -} __attribute__ ((packed)); - -/* Communication Direction */ -enum comm_dir { - COMM_DIR_NONE = 0, - COMM_DIR_RX = 1, - COMM_DIR_TX = 2, - COMM_DIR_RX_AND_TX = 3 -}; - -/* QE CMXUCR Registers. - * There are two UCCs represented in each of the four CMXUCR registers. - * These values are for the UCC in the LSBs - */ -#define QE_CMXUCR_MII_ENET_MNG 0x00007000 -#define QE_CMXUCR_MII_ENET_MNG_SHIFT 12 -#define QE_CMXUCR_GRANT 0x00008000 -#define QE_CMXUCR_TSA 0x00004000 -#define QE_CMXUCR_BKPT 0x00000100 -#define QE_CMXUCR_TX_CLK_SRC_MASK 0x0000000F - -/* QE CMXGCR Registers. -*/ -#define QE_CMXGCR_MII_ENET_MNG 0x00007000 -#define QE_CMXGCR_MII_ENET_MNG_SHIFT 12 -#define QE_CMXGCR_USBCS 0x0000000f -#define QE_CMXGCR_USBCS_CLK3 0x1 -#define QE_CMXGCR_USBCS_CLK5 0x2 -#define QE_CMXGCR_USBCS_CLK7 0x3 -#define QE_CMXGCR_USBCS_CLK9 0x4 -#define QE_CMXGCR_USBCS_CLK13 0x5 -#define QE_CMXGCR_USBCS_CLK17 0x6 -#define QE_CMXGCR_USBCS_CLK19 0x7 -#define QE_CMXGCR_USBCS_CLK21 0x8 -#define QE_CMXGCR_USBCS_BRG9 0x9 -#define QE_CMXGCR_USBCS_BRG10 0xa - -/* QE CECR Commands. 
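/*
 * A loader consuming the qe_firmware layout above would sanity-check the
 * fixed header before touching the microcode: verify the "QEF" magic and
 * that the declared length at least covers the header.  Illustrative
 * sketch only; a full check would also verify the trailing CRC32 the
 * layout comment reserves:
 */
#include <stdint.h>
#include <string.h>

struct qe_header_sketch {
	uint32_t length;	/* big-endian on the wire; assume already converted */
	uint8_t  magic[3];	/* 'Q', 'E', 'F' */
	uint8_t  version;
};

static int qe_firmware_valid(const struct qe_header_sketch *hdr)
{
	if (memcmp(hdr->magic, "QEF", 3) != 0)
		return 0;			/* wrong magic */
	return hdr->length >= sizeof(*hdr);	/* must cover the header */
}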
-*/ -#define QE_CR_FLG 0x00010000 -#define QE_RESET 0x80000000 -#define QE_INIT_TX_RX 0x00000000 -#define QE_INIT_RX 0x00000001 -#define QE_INIT_TX 0x00000002 -#define QE_ENTER_HUNT_MODE 0x00000003 -#define QE_STOP_TX 0x00000004 -#define QE_GRACEFUL_STOP_TX 0x00000005 -#define QE_RESTART_TX 0x00000006 -#define QE_CLOSE_RX_BD 0x00000007 -#define QE_SWITCH_COMMAND 0x00000007 -#define QE_SET_GROUP_ADDRESS 0x00000008 -#define QE_START_IDMA 0x00000009 -#define QE_MCC_STOP_RX 0x00000009 -#define QE_ATM_TRANSMIT 0x0000000a -#define QE_HPAC_CLEAR_ALL 0x0000000b -#define QE_GRACEFUL_STOP_RX 0x0000001a -#define QE_RESTART_RX 0x0000001b -#define QE_HPAC_SET_PRIORITY 0x0000010b -#define QE_HPAC_STOP_TX 0x0000020b -#define QE_HPAC_STOP_RX 0x0000030b -#define QE_HPAC_GRACEFUL_STOP_TX 0x0000040b -#define QE_HPAC_GRACEFUL_STOP_RX 0x0000050b -#define QE_HPAC_START_TX 0x0000060b -#define QE_HPAC_START_RX 0x0000070b -#define QE_USB_STOP_TX 0x0000000a -#define QE_USB_RESTART_TX 0x0000000c -#define QE_QMC_STOP_TX 0x0000000c -#define QE_QMC_STOP_RX 0x0000000d -#define QE_SS7_SU_FIL_RESET 0x0000000e -/* jonathbr added from here down for 83xx */ -#define QE_RESET_BCS 0x0000000a -#define QE_MCC_INIT_TX_RX_16 0x00000003 -#define QE_MCC_STOP_TX 0x00000004 -#define QE_MCC_INIT_TX_1 0x00000005 -#define QE_MCC_INIT_RX_1 0x00000006 -#define QE_MCC_RESET 0x00000007 -#define QE_SET_TIMER 0x00000008 -#define QE_RANDOM_NUMBER 0x0000000c -#define QE_ATM_MULTI_THREAD_INIT 0x00000011 -#define QE_ASSIGN_PAGE 0x00000012 -#define QE_ADD_REMOVE_HASH_ENTRY 0x00000013 -#define QE_START_FLOW_CONTROL 0x00000014 -#define QE_STOP_FLOW_CONTROL 0x00000015 -#define QE_ASSIGN_PAGE_TO_DEVICE 0x00000016 - -#define QE_ASSIGN_RISC 0x00000010 -#define QE_CR_MCN_NORMAL_SHIFT 6 -#define QE_CR_MCN_USB_SHIFT 4 -#define QE_CR_MCN_RISC_ASSIGN_SHIFT 8 -#define QE_CR_SNUM_SHIFT 17 - -/* QE CECR Sub Block - sub block of QE command. 
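/*
 * A CECR command is one 32-bit word: an opcode from the list above OR-ed
 * with a sub-block id (defined just below), a protocol in the MCN field,
 * and QE_CR_FLG, which software sets and the QE clears on completion.  A
 * hedged sketch of the composition, with the register faked by a plain
 * variable:
 */
#include <stdint.h>

#define SK_QE_CR_FLG	 0x00010000
#define SK_QE_MCN_SHIFT	 6		/* QE_CR_MCN_NORMAL_SHIFT above */

static volatile uint32_t fake_cecr;	/* stand-in for the memory-mapped CECR */

static void qe_issue_cmd_sketch(uint32_t cmd, uint32_t subblock,
				uint32_t protocol)
{
	fake_cecr = cmd | SK_QE_CR_FLG | (protocol << SK_QE_MCN_SHIFT) | subblock;
	/* real code would now poll until the QE clears the FLG bit */
}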
-*/ -#define QE_CR_SUBBLOCK_INVALID 0x00000000 -#define QE_CR_SUBBLOCK_USB 0x03200000 -#define QE_CR_SUBBLOCK_UCCFAST1 0x02000000 -#define QE_CR_SUBBLOCK_UCCFAST2 0x02200000 -#define QE_CR_SUBBLOCK_UCCFAST3 0x02400000 -#define QE_CR_SUBBLOCK_UCCFAST4 0x02600000 -#define QE_CR_SUBBLOCK_UCCFAST5 0x02800000 -#define QE_CR_SUBBLOCK_UCCFAST6 0x02a00000 -#define QE_CR_SUBBLOCK_UCCFAST7 0x02c00000 -#define QE_CR_SUBBLOCK_UCCFAST8 0x02e00000 -#define QE_CR_SUBBLOCK_UCCSLOW1 0x00000000 -#define QE_CR_SUBBLOCK_UCCSLOW2 0x00200000 -#define QE_CR_SUBBLOCK_UCCSLOW3 0x00400000 -#define QE_CR_SUBBLOCK_UCCSLOW4 0x00600000 -#define QE_CR_SUBBLOCK_UCCSLOW5 0x00800000 -#define QE_CR_SUBBLOCK_UCCSLOW6 0x00a00000 -#define QE_CR_SUBBLOCK_UCCSLOW7 0x00c00000 -#define QE_CR_SUBBLOCK_UCCSLOW8 0x00e00000 -#define QE_CR_SUBBLOCK_MCC1 0x03800000 -#define QE_CR_SUBBLOCK_MCC2 0x03a00000 -#define QE_CR_SUBBLOCK_MCC3 0x03000000 -#define QE_CR_SUBBLOCK_IDMA1 0x02800000 -#define QE_CR_SUBBLOCK_IDMA2 0x02a00000 -#define QE_CR_SUBBLOCK_IDMA3 0x02c00000 -#define QE_CR_SUBBLOCK_IDMA4 0x02e00000 -#define QE_CR_SUBBLOCK_HPAC 0x01e00000 -#define QE_CR_SUBBLOCK_SPI1 0x01400000 -#define QE_CR_SUBBLOCK_SPI2 0x01600000 -#define QE_CR_SUBBLOCK_RAND 0x01c00000 -#define QE_CR_SUBBLOCK_TIMER 0x01e00000 -#define QE_CR_SUBBLOCK_GENERAL 0x03c00000 - -/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command */ -#define QE_CR_PROTOCOL_UNSPECIFIED 0x00 /* For all other protocols */ -#define QE_CR_PROTOCOL_HDLC_TRANSPARENT 0x00 -#define QE_CR_PROTOCOL_QMC 0x02 -#define QE_CR_PROTOCOL_UART 0x04 -#define QE_CR_PROTOCOL_ATM_POS 0x0A -#define QE_CR_PROTOCOL_ETHERNET 0x0C -#define QE_CR_PROTOCOL_L2_SWITCH 0x0D - -/* BRG configuration register */ -#define QE_BRGC_ENABLE 0x00010000 -#define QE_BRGC_DIVISOR_SHIFT 1 -#define QE_BRGC_DIVISOR_MAX 0xFFF -#define QE_BRGC_DIV16 1 - -/* QE Timers registers */ -#define QE_GTCFR1_PCAS 0x80 -#define QE_GTCFR1_STP2 0x20 -#define QE_GTCFR1_RST2 0x10 -#define QE_GTCFR1_GM2 0x08 -#define QE_GTCFR1_GM1 0x04 -#define QE_GTCFR1_STP1 0x02 -#define QE_GTCFR1_RST1 0x01 - -/* SDMA registers */ -#define QE_SDSR_BER1 0x02000000 -#define QE_SDSR_BER2 0x01000000 - -#define QE_SDMR_GLB_1_MSK 0x80000000 -#define QE_SDMR_ADR_SEL 0x20000000 -#define QE_SDMR_BER1_MSK 0x02000000 -#define QE_SDMR_BER2_MSK 0x01000000 -#define QE_SDMR_EB1_MSK 0x00800000 -#define QE_SDMR_ER1_MSK 0x00080000 -#define QE_SDMR_ER2_MSK 0x00040000 -#define QE_SDMR_CEN_MASK 0x0000E000 -#define QE_SDMR_SBER_1 0x00000200 -#define QE_SDMR_SBER_2 0x00000200 -#define QE_SDMR_EB1_PR_MASK 0x000000C0 -#define QE_SDMR_ER1_PR 0x00000008 - -#define QE_SDMR_CEN_SHIFT 13 -#define QE_SDMR_EB1_PR_SHIFT 6 - -#define QE_SDTM_MSNUM_SHIFT 24 - -#define QE_SDEBCR_BA_MASK 0x01FFFFFF - -/* Communication Processor */ -#define QE_CP_CERCR_MEE 0x8000 /* Multi-user RAM ECC enable */ -#define QE_CP_CERCR_IEE 0x4000 /* Instruction RAM ECC enable */ -#define QE_CP_CERCR_CIR 0x0800 /* Common instruction RAM */ - -/* I-RAM */ -#define QE_IRAM_IADD_AIE 0x80000000 /* Auto Increment Enable */ -#define QE_IRAM_IADD_BADDR 0x00080000 /* Base Address */ -#define QE_IRAM_READY 0x80000000 /* Ready */ - -/* UPC */ -#define UPGCR_PROTOCOL 0x80000000 /* protocol ul2 or pl2 */ -#define UPGCR_TMS 0x40000000 /* Transmit master/slave mode */ -#define UPGCR_RMS 0x20000000 /* Receive master/slave mode */ -#define UPGCR_ADDR 0x10000000 /* Master MPHY Addr multiplexing */ -#define UPGCR_DIAG 0x01000000 /* Diagnostic mode */ - -/* UCC GUEMR register */ -#define UCC_GUEMR_MODE_MASK_RX 0x02 -#define 
UCC_GUEMR_MODE_FAST_RX 0x02 -#define UCC_GUEMR_MODE_SLOW_RX 0x00 -#define UCC_GUEMR_MODE_MASK_TX 0x01 -#define UCC_GUEMR_MODE_FAST_TX 0x01 -#define UCC_GUEMR_MODE_SLOW_TX 0x00 -#define UCC_GUEMR_MODE_MASK (UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX) -#define UCC_GUEMR_SET_RESERVED3 0x10 /* Bit 3 in the guemr is reserved but - must be set 1 */ - -/* structure representing UCC SLOW parameter RAM */ -struct ucc_slow_pram { - __be16 rbase; /* RX BD base address */ - __be16 tbase; /* TX BD base address */ - u8 rbmr; /* RX bus mode register (same as CPM's RFCR) */ - u8 tbmr; /* TX bus mode register (same as CPM's TFCR) */ - __be16 mrblr; /* Rx buffer length */ - __be32 rstate; /* Rx internal state */ - __be32 rptr; /* Rx internal data pointer */ - __be16 rbptr; /* rb BD Pointer */ - __be16 rcount; /* Rx internal byte count */ - __be32 rtemp; /* Rx temp */ - __be32 tstate; /* Tx internal state */ - __be32 tptr; /* Tx internal data pointer */ - __be16 tbptr; /* Tx BD pointer */ - __be16 tcount; /* Tx byte count */ - __be32 ttemp; /* Tx temp */ - __be32 rcrc; /* temp receive CRC */ - __be32 tcrc; /* temp transmit CRC */ -} __attribute__ ((packed)); - -/* General UCC SLOW Mode Register (GUMRH & GUMRL) */ -#define UCC_SLOW_GUMR_H_SAM_QMC 0x00000000 -#define UCC_SLOW_GUMR_H_SAM_SATM 0x00008000 -#define UCC_SLOW_GUMR_H_REVD 0x00002000 -#define UCC_SLOW_GUMR_H_TRX 0x00001000 -#define UCC_SLOW_GUMR_H_TTX 0x00000800 -#define UCC_SLOW_GUMR_H_CDP 0x00000400 -#define UCC_SLOW_GUMR_H_CTSP 0x00000200 -#define UCC_SLOW_GUMR_H_CDS 0x00000100 -#define UCC_SLOW_GUMR_H_CTSS 0x00000080 -#define UCC_SLOW_GUMR_H_TFL 0x00000040 -#define UCC_SLOW_GUMR_H_RFW 0x00000020 -#define UCC_SLOW_GUMR_H_TXSY 0x00000010 -#define UCC_SLOW_GUMR_H_4SYNC 0x00000004 -#define UCC_SLOW_GUMR_H_8SYNC 0x00000008 -#define UCC_SLOW_GUMR_H_16SYNC 0x0000000c -#define UCC_SLOW_GUMR_H_RTSM 0x00000002 -#define UCC_SLOW_GUMR_H_RSYN 0x00000001 - -#define UCC_SLOW_GUMR_L_TCI 0x10000000 -#define UCC_SLOW_GUMR_L_RINV 0x02000000 -#define UCC_SLOW_GUMR_L_TINV 0x01000000 -#define UCC_SLOW_GUMR_L_TEND 0x00040000 -#define UCC_SLOW_GUMR_L_TDCR_MASK 0x00030000 -#define UCC_SLOW_GUMR_L_TDCR_32 0x00030000 -#define UCC_SLOW_GUMR_L_TDCR_16 0x00020000 -#define UCC_SLOW_GUMR_L_TDCR_8 0x00010000 -#define UCC_SLOW_GUMR_L_TDCR_1 0x00000000 -#define UCC_SLOW_GUMR_L_RDCR_MASK 0x0000c000 -#define UCC_SLOW_GUMR_L_RDCR_32 0x0000c000 -#define UCC_SLOW_GUMR_L_RDCR_16 0x00008000 -#define UCC_SLOW_GUMR_L_RDCR_8 0x00004000 -#define UCC_SLOW_GUMR_L_RDCR_1 0x00000000 -#define UCC_SLOW_GUMR_L_RENC_NRZI 0x00000800 -#define UCC_SLOW_GUMR_L_RENC_NRZ 0x00000000 -#define UCC_SLOW_GUMR_L_TENC_NRZI 0x00000100 -#define UCC_SLOW_GUMR_L_TENC_NRZ 0x00000000 -#define UCC_SLOW_GUMR_L_DIAG_MASK 0x000000c0 -#define UCC_SLOW_GUMR_L_DIAG_LE 0x000000c0 -#define UCC_SLOW_GUMR_L_DIAG_ECHO 0x00000080 -#define UCC_SLOW_GUMR_L_DIAG_LOOP 0x00000040 -#define UCC_SLOW_GUMR_L_DIAG_NORM 0x00000000 -#define UCC_SLOW_GUMR_L_ENR 0x00000020 -#define UCC_SLOW_GUMR_L_ENT 0x00000010 -#define UCC_SLOW_GUMR_L_MODE_MASK 0x0000000F -#define UCC_SLOW_GUMR_L_MODE_BISYNC 0x00000008 -#define UCC_SLOW_GUMR_L_MODE_AHDLC 0x00000006 -#define UCC_SLOW_GUMR_L_MODE_UART 0x00000004 -#define UCC_SLOW_GUMR_L_MODE_QMC 0x00000002 - -/* General UCC FAST Mode Register */ -#define UCC_FAST_GUMR_TCI 0x20000000 -#define UCC_FAST_GUMR_TRX 0x10000000 -#define UCC_FAST_GUMR_TTX 0x08000000 -#define UCC_FAST_GUMR_CDP 0x04000000 -#define UCC_FAST_GUMR_CTSP 0x02000000 -#define UCC_FAST_GUMR_CDS 0x01000000 -#define UCC_FAST_GUMR_CTSS 
0x00800000 -#define UCC_FAST_GUMR_TXSY 0x00020000 -#define UCC_FAST_GUMR_RSYN 0x00010000 -#define UCC_FAST_GUMR_RTSM 0x00002000 -#define UCC_FAST_GUMR_REVD 0x00000400 -#define UCC_FAST_GUMR_ENR 0x00000020 -#define UCC_FAST_GUMR_ENT 0x00000010 - -/* UART Slow UCC Event Register (UCCE) */ -#define UCC_UART_UCCE_AB 0x0200 -#define UCC_UART_UCCE_IDLE 0x0100 -#define UCC_UART_UCCE_GRA 0x0080 -#define UCC_UART_UCCE_BRKE 0x0040 -#define UCC_UART_UCCE_BRKS 0x0020 -#define UCC_UART_UCCE_CCR 0x0008 -#define UCC_UART_UCCE_BSY 0x0004 -#define UCC_UART_UCCE_TX 0x0002 -#define UCC_UART_UCCE_RX 0x0001 - -/* HDLC Slow UCC Event Register (UCCE) */ -#define UCC_HDLC_UCCE_GLR 0x1000 -#define UCC_HDLC_UCCE_GLT 0x0800 -#define UCC_HDLC_UCCE_IDLE 0x0100 -#define UCC_HDLC_UCCE_BRKE 0x0040 -#define UCC_HDLC_UCCE_BRKS 0x0020 -#define UCC_HDLC_UCCE_TXE 0x0010 -#define UCC_HDLC_UCCE_RXF 0x0008 -#define UCC_HDLC_UCCE_BSY 0x0004 -#define UCC_HDLC_UCCE_TXB 0x0002 -#define UCC_HDLC_UCCE_RXB 0x0001 - -/* BISYNC Slow UCC Event Register (UCCE) */ -#define UCC_BISYNC_UCCE_GRA 0x0080 -#define UCC_BISYNC_UCCE_TXE 0x0010 -#define UCC_BISYNC_UCCE_RCH 0x0008 -#define UCC_BISYNC_UCCE_BSY 0x0004 -#define UCC_BISYNC_UCCE_TXB 0x0002 -#define UCC_BISYNC_UCCE_RXB 0x0001 - -/* Gigabit Ethernet Fast UCC Event Register (UCCE) */ -#define UCC_GETH_UCCE_MPD 0x80000000 -#define UCC_GETH_UCCE_SCAR 0x40000000 -#define UCC_GETH_UCCE_GRA 0x20000000 -#define UCC_GETH_UCCE_CBPR 0x10000000 -#define UCC_GETH_UCCE_BSY 0x08000000 -#define UCC_GETH_UCCE_RXC 0x04000000 -#define UCC_GETH_UCCE_TXC 0x02000000 -#define UCC_GETH_UCCE_TXE 0x01000000 -#define UCC_GETH_UCCE_TXB7 0x00800000 -#define UCC_GETH_UCCE_TXB6 0x00400000 -#define UCC_GETH_UCCE_TXB5 0x00200000 -#define UCC_GETH_UCCE_TXB4 0x00100000 -#define UCC_GETH_UCCE_TXB3 0x00080000 -#define UCC_GETH_UCCE_TXB2 0x00040000 -#define UCC_GETH_UCCE_TXB1 0x00020000 -#define UCC_GETH_UCCE_TXB0 0x00010000 -#define UCC_GETH_UCCE_RXB7 0x00008000 -#define UCC_GETH_UCCE_RXB6 0x00004000 -#define UCC_GETH_UCCE_RXB5 0x00002000 -#define UCC_GETH_UCCE_RXB4 0x00001000 -#define UCC_GETH_UCCE_RXB3 0x00000800 -#define UCC_GETH_UCCE_RXB2 0x00000400 -#define UCC_GETH_UCCE_RXB1 0x00000200 -#define UCC_GETH_UCCE_RXB0 0x00000100 -#define UCC_GETH_UCCE_RXF7 0x00000080 -#define UCC_GETH_UCCE_RXF6 0x00000040 -#define UCC_GETH_UCCE_RXF5 0x00000020 -#define UCC_GETH_UCCE_RXF4 0x00000010 -#define UCC_GETH_UCCE_RXF3 0x00000008 -#define UCC_GETH_UCCE_RXF2 0x00000004 -#define UCC_GETH_UCCE_RXF1 0x00000002 -#define UCC_GETH_UCCE_RXF0 0x00000001 - -/* UCC Protocol Specific Mode Register (UPSMR), when used for UART */ -#define UCC_UART_UPSMR_FLC 0x8000 -#define UCC_UART_UPSMR_SL 0x4000 -#define UCC_UART_UPSMR_CL_MASK 0x3000 -#define UCC_UART_UPSMR_CL_8 0x3000 -#define UCC_UART_UPSMR_CL_7 0x2000 -#define UCC_UART_UPSMR_CL_6 0x1000 -#define UCC_UART_UPSMR_CL_5 0x0000 -#define UCC_UART_UPSMR_UM_MASK 0x0c00 -#define UCC_UART_UPSMR_UM_NORMAL 0x0000 -#define UCC_UART_UPSMR_UM_MAN_MULTI 0x0400 -#define UCC_UART_UPSMR_UM_AUTO_MULTI 0x0c00 -#define UCC_UART_UPSMR_FRZ 0x0200 -#define UCC_UART_UPSMR_RZS 0x0100 -#define UCC_UART_UPSMR_SYN 0x0080 -#define UCC_UART_UPSMR_DRT 0x0040 -#define UCC_UART_UPSMR_PEN 0x0010 -#define UCC_UART_UPSMR_RPM_MASK 0x000c -#define UCC_UART_UPSMR_RPM_ODD 0x0000 -#define UCC_UART_UPSMR_RPM_LOW 0x0004 -#define UCC_UART_UPSMR_RPM_EVEN 0x0008 -#define UCC_UART_UPSMR_RPM_HIGH 0x000C -#define UCC_UART_UPSMR_TPM_MASK 0x0003 -#define UCC_UART_UPSMR_TPM_ODD 0x0000 -#define UCC_UART_UPSMR_TPM_LOW 0x0001 -#define 
UCC_UART_UPSMR_TPM_EVEN 0x0002 -#define UCC_UART_UPSMR_TPM_HIGH 0x0003 - -/* UCC Protocol Specific Mode Register (UPSMR), when used for Ethernet */ -#define UCC_GETH_UPSMR_FTFE 0x80000000 -#define UCC_GETH_UPSMR_PTPE 0x40000000 -#define UCC_GETH_UPSMR_ECM 0x04000000 -#define UCC_GETH_UPSMR_HSE 0x02000000 -#define UCC_GETH_UPSMR_PRO 0x00400000 -#define UCC_GETH_UPSMR_CAP 0x00200000 -#define UCC_GETH_UPSMR_RSH 0x00100000 -#define UCC_GETH_UPSMR_RPM 0x00080000 -#define UCC_GETH_UPSMR_R10M 0x00040000 -#define UCC_GETH_UPSMR_RLPB 0x00020000 -#define UCC_GETH_UPSMR_TBIM 0x00010000 -#define UCC_GETH_UPSMR_RES1 0x00002000 -#define UCC_GETH_UPSMR_RMM 0x00001000 -#define UCC_GETH_UPSMR_CAM 0x00000400 -#define UCC_GETH_UPSMR_BRO 0x00000200 -#define UCC_GETH_UPSMR_SMM 0x00000080 -#define UCC_GETH_UPSMR_SGMM 0x00000020 - -/* UCC Transmit On Demand Register (UTODR) */ -#define UCC_SLOW_TOD 0x8000 -#define UCC_FAST_TOD 0x8000 - -/* UCC Bus Mode Register masks */ -/* Not to be confused with the Bundle Mode Register */ -#define UCC_BMR_GBL 0x20 -#define UCC_BMR_BO_BE 0x10 -#define UCC_BMR_CETM 0x04 -#define UCC_BMR_DTB 0x02 -#define UCC_BMR_BDB 0x01 - -/* Function code masks */ -#define FC_GBL 0x20 -#define FC_DTB_LCL 0x02 -#define UCC_FAST_FUNCTION_CODE_GBL 0x20 -#define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02 -#define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01 - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_QE_H */ diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h deleted file mode 100644 index 1e155ca6d33c..000000000000 --- a/arch/powerpc/include/asm/qe_ic.h +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. - * - * Authors: Shlomi Gridish <gridish@freescale.com> - * Li Yang <leoli@freescale.com> - * - * Description: - * QE IC external definitions and structure. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ -#ifndef _ASM_POWERPC_QE_IC_H -#define _ASM_POWERPC_QE_IC_H - -#include <linux/irq.h> - -struct device_node; -struct qe_ic; - -#define NUM_OF_QE_IC_GROUPS 6 - -/* Flags when we init the QE IC */ -#define QE_IC_SPREADMODE_GRP_W 0x00000001 -#define QE_IC_SPREADMODE_GRP_X 0x00000002 -#define QE_IC_SPREADMODE_GRP_Y 0x00000004 -#define QE_IC_SPREADMODE_GRP_Z 0x00000008 -#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010 -#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020 - -#define QE_IC_LOW_SIGNAL 0x00000100 -#define QE_IC_HIGH_SIGNAL 0x00000200 - -#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000 -#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000 -#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000 -#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000 -#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000 -#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000 -#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000 -#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000 -#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000 -#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000 -#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000 -#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000 -#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12) - -/* QE interrupt sources groups */ -enum qe_ic_grp_id { - QE_IC_GRP_W = 0, /* QE interrupt controller group W */ - QE_IC_GRP_X, /* QE interrupt controller group X */ - QE_IC_GRP_Y, /* QE interrupt controller group Y */ - QE_IC_GRP_Z, /* QE interrupt controller group Z */ - QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */ - QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */ -}; - -#ifdef CONFIG_QUICC_ENGINE -void qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(struct irq_desc *desc), - void (*high_handler)(struct irq_desc *desc)); -unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); -unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); -#else -static inline void qe_ic_init(struct device_node *node, unsigned int flags, - void (*low_handler)(struct irq_desc *desc), - void (*high_handler)(struct irq_desc *desc)) -{} -static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) -{ return 0; } -static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) -{ return 0; } -#endif /* CONFIG_QUICC_ENGINE */ - -void qe_ic_set_highest_priority(unsigned int virq, int high); -int qe_ic_set_priority(unsigned int virq, unsigned int priority); -int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); - -static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc) -{ - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); - unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); - - if (cascade_irq != NO_IRQ) - generic_handle_irq(cascade_irq); -} - -static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc) -{ - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); - unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); - - if (cascade_irq != NO_IRQ) - generic_handle_irq(cascade_irq); -} - -static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc) -{ - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); - unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); - struct irq_chip *chip = irq_desc_get_chip(desc); - - if (cascade_irq != NO_IRQ) - generic_handle_irq(cascade_irq); - - chip->irq_eoi(&desc->irq_data); -} - -static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc) -{ - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); - unsigned int cascade_irq = 
qe_ic_get_high_irq(qe_ic); - struct irq_chip *chip = irq_desc_get_chip(desc); - - if (cascade_irq != NO_IRQ) - generic_handle_irq(cascade_irq); - - chip->irq_eoi(&desc->irq_data); -} - -static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) -{ - struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); - unsigned int cascade_irq; - struct irq_chip *chip = irq_desc_get_chip(desc); - - cascade_irq = qe_ic_get_high_irq(qe_ic); - if (cascade_irq == NO_IRQ) - cascade_irq = qe_ic_get_low_irq(qe_ic); - - if (cascade_irq != NO_IRQ) - generic_handle_irq(cascade_irq); - - chip->irq_eoi(&desc->irq_data); -} - -#endif /* _ASM_POWERPC_QE_IC_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 2220f7a60def..c4cb2ffc624e 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1194,12 +1194,20 @@ #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ : : "r" (v) : "memory") #define mtmsr(v) __mtmsrd((v), 0) +#define __MTMSR "mtmsrd" #else #define mtmsr(v) asm volatile("mtmsr %0" : \ : "r" ((unsigned long)(v)) \ : "memory") +#define __MTMSR "mtmsr" #endif +static inline void mtmsr_isync(unsigned long val) +{ + asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : : + "r" (val), "i" (CPU_FTR_ARCH_206) : "memory"); +} + #define mfspr(rn) ({unsigned long rval; \ asm volatile("mfspr %0," __stringify(rn) \ : "=r" (rval)); rval;}) @@ -1207,6 +1215,15 @@ : "r" ((unsigned long)(v)) \ : "memory") +extern void msr_check_and_set(unsigned long bits); +extern bool strict_msr_control; +extern void __msr_check_and_clear(unsigned long bits); +static inline void msr_check_and_clear(unsigned long bits) +{ + if (strict_msr_control) + __msr_check_and_clear(bits); +} + static inline unsigned long mfvtb (void) { #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index b77ef369c0f0..51400baa8d48 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -334,10 +334,11 @@ extern void (*rtas_flash_term_hook)(int); extern struct rtas_t rtas; -extern void enter_rtas(unsigned long); extern int rtas_token(const char *service); extern int rtas_service_present(const char *service); extern int rtas_call(int token, int, int, int *, ...); +void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, + int nret, ...); extern void rtas_restart(char *cmd); extern void rtas_power_off(void); extern void rtas_halt(void); diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 15cca17cba4b..5b268b6be74c 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -4,6 +4,8 @@ #ifndef _ASM_POWERPC_SWITCH_TO_H #define _ASM_POWERPC_SWITCH_TO_H +#include <asm/reg.h> + struct thread_struct; struct task_struct; struct pt_regs; @@ -12,74 +14,59 @@ extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) -struct thread_struct; extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); -#ifdef CONFIG_PPC_BOOK3S_64 -static inline void save_early_sprs(struct thread_struct *prev) -{ - if (cpu_has_feature(CPU_FTR_ARCH_207S)) - prev->tar = mfspr(SPRN_TAR); - if (cpu_has_feature(CPU_FTR_DSCR)) - prev->dscr = mfspr(SPRN_DSCR); -} -#else -static inline void save_early_sprs(struct thread_struct *prev) {} -#endif -extern void enable_kernel_fp(void); -extern void 
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 15cca17cba4b..5b268b6be74c 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -4,6 +4,8 @@
 #ifndef _ASM_POWERPC_SWITCH_TO_H
 #define _ASM_POWERPC_SWITCH_TO_H
 
+#include <asm/reg.h>
+
 struct thread_struct;
 struct task_struct;
 struct pt_regs;
@@ -12,74 +14,59 @@ extern struct task_struct *__switch_to(struct task_struct *,
 	struct task_struct *);
 #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
 
-struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
-#ifdef CONFIG_PPC_BOOK3S_64
-static inline void save_early_sprs(struct thread_struct *prev)
-{
-	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		prev->tar = mfspr(SPRN_TAR);
-	if (cpu_has_feature(CPU_FTR_DSCR))
-		prev->dscr = mfspr(SPRN_DSCR);
-}
-#else
-static inline void save_early_sprs(struct thread_struct *prev) {}
-#endif
 
-extern void enable_kernel_fp(void);
-extern void enable_kernel_altivec(void);
-extern void enable_kernel_vsx(void);
-extern int emulate_altivec(struct pt_regs *);
-extern void __giveup_vsx(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
-extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
-#ifndef CONFIG_SMP
-extern void discard_lazy_cpu_state(void);
-#else
-static inline void discard_lazy_cpu_state(void)
-{
-}
-#endif
+extern int emulate_altivec(struct pt_regs *);
+
+extern void flush_all_to_thread(struct task_struct *);
+extern void giveup_all(struct task_struct *);
 
 #ifdef CONFIG_PPC_FPU
+extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
+extern void __giveup_fpu(struct task_struct *);
+static inline void disable_kernel_fp(void)
+{
+	msr_check_and_clear(MSR_FP);
+}
 #else
 static inline void flush_fp_to_thread(struct task_struct *t) { }
-static inline void giveup_fpu(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_ALTIVEC
+extern void enable_kernel_altivec(void);
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
-extern void giveup_altivec_notask(void);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-static inline void giveup_altivec(struct task_struct *t)
+extern void __giveup_altivec(struct task_struct *);
+static inline void disable_kernel_altivec(void)
 {
+	msr_check_and_clear(MSR_VEC);
 }
 #endif
 
 #ifdef CONFIG_VSX
+extern void enable_kernel_vsx(void);
 extern void flush_vsx_to_thread(struct task_struct *);
-#else
-static inline void flush_vsx_to_thread(struct task_struct *t)
+extern void giveup_vsx(struct task_struct *);
+extern void __giveup_vsx(struct task_struct *);
+static inline void disable_kernel_vsx(void)
 {
+	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
 #endif
 
 #ifdef CONFIG_SPE
+extern void enable_kernel_spe(void);
 extern void flush_spe_to_thread(struct task_struct *);
-#else
-static inline void flush_spe_to_thread(struct task_struct *t)
+extern void giveup_spe(struct task_struct *);
+extern void __giveup_spe(struct task_struct *);
+static inline void disable_kernel_spe(void)
 {
+	msr_check_and_clear(MSR_SPE);
 }
 #endif
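The reworked header pairs every enable_kernel_*() with a disable_kernel_*() built on msr_check_and_clear() from the reg.h hunk above, so kernel users of FP/VMX/VSX/SPE now bracket their use explicitly. A minimal sketch of that pattern, assuming the body is a placeholder and the non-preemptible bracket is the caller's responsibility:

/* Sketch of the enable/disable bracket implied by the new API;
 * the FP section body is a placeholder.
 */
#include <linux/preempt.h>

static void example_fp_section(void)
{
	preempt_disable();	/* FP state must not migrate mid-section */
	enable_kernel_fp();	/* msr_check_and_set(MSR_FP) under the hood */

	/* ...FP-using kernel code goes here... */

	disable_kernel_fp();	/* msr_check_and_clear(MSR_FP) */
	preempt_enable();
}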
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index e682a7143edb..c50868681f9e 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -44,7 +44,7 @@ static inline void isync(void)
 	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
 #define PPC_ACQUIRE_BARRIER	"\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
 #define PPC_RELEASE_BARRIER	stringify_in_c(LWSYNC) "\n"
-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
 #define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 10fc784a2ad4..2d7109a8d296 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -27,7 +27,6 @@ extern struct clock_event_device decrementer_clockevent;
 
 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
-extern void GregorianDay(struct rtc_time *tm);
 extern void tick_broadcast_ipi_handler(void);
 
 extern void generic_calibrate_decr(void);
diff --git a/arch/powerpc/include/asm/ucc.h b/arch/powerpc/include/asm/ucc.h
deleted file mode 100644
index 6927ac26516e..000000000000
--- a/arch/powerpc/include/asm/ucc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors: Shlomi Gridish <gridish@freescale.com>
- *	    Li Yang <leoli@freescale.com>
- *
- * Description:
- * Internal header file for UCC unit routines.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#ifndef __UCC_H__
-#define __UCC_H__
-
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#define STATISTICS
-
-#define UCC_MAX_NUM	8
-
-/* Slow or fast type for UCCs.
-*/
-enum ucc_speed_type {
-	UCC_SPEED_TYPE_FAST = UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX,
-	UCC_SPEED_TYPE_SLOW = UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX
-};
-
-/* ucc_set_type
- * Sets UCC to slow or fast mode.
- *
- * ucc_num - (In) number of UCC (0-7).
- * speed   - (In) slow or fast mode for UCC.
- */
-int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed);
-
-int ucc_set_qe_mux_mii_mng(unsigned int ucc_num);
-
-int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
-	enum comm_dir mode);
-
-int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask);
-
-/* QE MUX clock routing for UCC
-*/
-static inline int ucc_set_qe_mux_grant(unsigned int ucc_num, int set)
-{
-	return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT);
-}
-
-static inline int ucc_set_qe_mux_tsa(unsigned int ucc_num, int set)
-{
-	return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA);
-}
-
-static inline int ucc_set_qe_mux_bkpt(unsigned int ucc_num, int set)
-{
-	return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT);
-}
-
-#endif /* __UCC_H__ */
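Taken together, the routines in the header above give the usual two-step UCC bring-up: pick the speed class, then route Rx/Tx clocks through the QE MUX. A hedged sketch; the UCC number and clock selections are illustrative, only the calls themselves come from the header:

/* Sketch only: UCC number and clock choices are assumptions. */
static int example_setup_ucc(void)
{
	int ret;

	ret = ucc_set_type(0, UCC_SPEED_TYPE_FAST);	/* UCC1: ucc_num is 0-based */
	if (ret)
		return ret;

	ret = ucc_set_qe_mux_rxtx(0, QE_CLK9, COMM_DIR_RX);
	if (ret)
		return ret;

	return ucc_set_qe_mux_rxtx(0, QE_CLK10, COMM_DIR_TX);
}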
diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h
deleted file mode 100644
index 72ea9bab07df..000000000000
--- a/arch/powerpc/include/asm/ucc_fast.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Internal header file for UCC FAST unit routines.
- *
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors: Shlomi Gridish <gridish@freescale.com>
- *	    Li Yang <leoli@freescale.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#ifndef __UCC_FAST_H__
-#define __UCC_FAST_H__
-
-#include <linux/kernel.h>
-
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#include <asm/ucc.h>
-
-/* Receive BD's status */
-#define R_E	0x80000000	/* buffer empty */
-#define R_W	0x20000000	/* wrap bit */
-#define R_I	0x10000000	/* interrupt on reception */
-#define R_L	0x08000000	/* last */
-#define R_F	0x04000000	/* first */
-
-/* transmit BD's status */
-#define T_R	0x80000000	/* ready bit */
-#define T_W	0x20000000	/* wrap bit */
-#define T_I	0x10000000	/* interrupt on completion */
-#define T_L	0x08000000	/* last */
-
-/* Rx Data buffer must be 4 bytes aligned in most cases */
-#define UCC_FAST_RX_ALIGN			4
-#define UCC_FAST_MRBLR_ALIGNMENT		4
-#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT	8
-
-/* Sizes */
-#define UCC_FAST_URFS_MIN_VAL				0x88
-#define UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR	8
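These masks apply to the first 32-bit word of a QE buffer descriptor, where status and length share the word, which is why the flag values all sit in the upper half. A hedged sketch of an Rx ring walk against these bits; struct qe_bd is assumed from asm/qe.h, and the re-arm policy (length cleared, R_E and R_I set back) is an assumption of the sketch, not something this header specifies:

/* Sketch only: treats the BD's status+length as one big-endian
 * 32-bit word, as the mask values above imply.
 */
static void example_rx_walk(struct qe_bd __iomem *base)
{
	struct qe_bd __iomem *bd = base;
	u32 status;

	while (!((status = in_be32((u32 __iomem *)bd)) & R_E)) {
		if ((status & (R_F | R_L)) == (R_F | R_L))
			;	/* whole frame in one BD: pass the buffer up */

		/* hand the BD back to hardware, preserving only the wrap bit */
		out_be32((u32 __iomem *)bd, (status & R_W) | R_E | R_I);
		bd = (status & R_W) ? base : bd + 1;
	}
}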
-
-/* ucc_fast_channel_protocol_mode - UCC FAST mode */
-enum ucc_fast_channel_protocol_mode {
-	UCC_FAST_PROTOCOL_MODE_HDLC = 0x00000000,
-	UCC_FAST_PROTOCOL_MODE_RESERVED01 = 0x00000001,
-	UCC_FAST_PROTOCOL_MODE_RESERVED_QMC = 0x00000002,
-	UCC_FAST_PROTOCOL_MODE_RESERVED02 = 0x00000003,
-	UCC_FAST_PROTOCOL_MODE_RESERVED_UART = 0x00000004,
-	UCC_FAST_PROTOCOL_MODE_RESERVED03 = 0x00000005,
-	UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_1 = 0x00000006,
-	UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_2 = 0x00000007,
-	UCC_FAST_PROTOCOL_MODE_RESERVED_BISYNC = 0x00000008,
-	UCC_FAST_PROTOCOL_MODE_RESERVED04 = 0x00000009,
-	UCC_FAST_PROTOCOL_MODE_ATM = 0x0000000A,
-	UCC_FAST_PROTOCOL_MODE_RESERVED05 = 0x0000000B,
-	UCC_FAST_PROTOCOL_MODE_ETHERNET = 0x0000000C,
-	UCC_FAST_PROTOCOL_MODE_RESERVED06 = 0x0000000D,
-	UCC_FAST_PROTOCOL_MODE_POS = 0x0000000E,
-	UCC_FAST_PROTOCOL_MODE_RESERVED07 = 0x0000000F
-};
-
-/* ucc_fast_transparent_txrx - UCC Fast Transparent TX & RX */
-enum ucc_fast_transparent_txrx {
-	UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL = 0x00000000,
-	UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_TRANSPARENT = 0x18000000
-};
-
-/* UCC fast diagnostic mode */
-enum ucc_fast_diag_mode {
-	UCC_FAST_DIAGNOSTIC_NORMAL = 0x0,
-	UCC_FAST_DIAGNOSTIC_LOCAL_LOOP_BACK = 0x40000000,
-	UCC_FAST_DIAGNOSTIC_AUTO_ECHO = 0x80000000,
-	UCC_FAST_DIAGNOSTIC_LOOP_BACK_AND_ECHO = 0xC0000000
-};
-
-/* UCC fast Sync length (transparent mode only) */
-enum ucc_fast_sync_len {
-	UCC_FAST_SYNC_LEN_NOT_USED = 0x0,
-	UCC_FAST_SYNC_LEN_AUTOMATIC = 0x00004000,
-	UCC_FAST_SYNC_LEN_8_BIT = 0x00008000,
-	UCC_FAST_SYNC_LEN_16_BIT = 0x0000C000
-};
-
-/* UCC fast RTS mode */
-enum ucc_fast_ready_to_send {
-	UCC_FAST_SEND_IDLES_BETWEEN_FRAMES = 0x00000000,
-	UCC_FAST_SEND_FLAGS_BETWEEN_FRAMES = 0x00002000
-};
-
-/* UCC fast receiver decoding mode */
-enum ucc_fast_rx_decoding_method {
-	UCC_FAST_RX_ENCODING_NRZ = 0x00000000,
-	UCC_FAST_RX_ENCODING_NRZI = 0x00000800,
-	UCC_FAST_RX_ENCODING_RESERVED0 = 0x00001000,
-	UCC_FAST_RX_ENCODING_RESERVED1 = 0x00001800
-};
-
-/* UCC fast transmitter encoding mode */
-enum ucc_fast_tx_encoding_method {
-	UCC_FAST_TX_ENCODING_NRZ = 0x00000000,
-	UCC_FAST_TX_ENCODING_NRZI = 0x00000100,
-	UCC_FAST_TX_ENCODING_RESERVED0 = 0x00000200,
-	UCC_FAST_TX_ENCODING_RESERVED1 = 0x00000300
-};
-
-/* UCC fast CRC length */
-enum ucc_fast_transparent_tcrc {
-	UCC_FAST_16_BIT_CRC = 0x00000000,
-	UCC_FAST_CRC_RESERVED0 = 0x00000040,
-	UCC_FAST_32_BIT_CRC = 0x00000080,
-	UCC_FAST_CRC_RESERVED1 = 0x000000C0
-};
-
-/* Fast UCC initialization structure */
-struct ucc_fast_info {
-	int ucc_num;
-	enum qe_clock rx_clock;
-	enum qe_clock tx_clock;
-	u32 regs;
-	int irq;
-	u32 uccm_mask;
-	int bd_mem_part;
-	int brkpt_support;
-	int grant_support;
-	int tsa;
-	int cdp;
-	int cds;
-	int ctsp;
-	int ctss;
-	int tci;
-	int txsy;
-	int rtsm;
-	int revd;
-	int rsyn;
-	u16 max_rx_buf_length;
-	u16 urfs;
-	u16 urfet;
-	u16 urfset;
-	u16 utfs;
-	u16 utfet;
-	u16 utftt;
-	u16 ufpt;
-	enum ucc_fast_channel_protocol_mode mode;
-	enum ucc_fast_transparent_txrx ttx_trx;
-	enum ucc_fast_tx_encoding_method tenc;
-	enum ucc_fast_rx_decoding_method renc;
-	enum ucc_fast_transparent_tcrc tcrc;
-	enum ucc_fast_sync_len synl;
-};
-
-struct ucc_fast_private {
-	struct ucc_fast_info *uf_info;
-	struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
-	u32 __iomem *p_ucce;	/* a pointer to the event register in memory. */
-	u32 __iomem *p_uccm;	/* a pointer to the mask register in memory. */
-#ifdef CONFIG_UGETH_TX_ON_DEMAND
-	u16 __iomem *p_utodr;	/* pointer to the transmit on demand register */
-#endif
-	int enabled_tx;		/* Whether channel is enabled for Tx (ENT) */
-	int enabled_rx;		/* Whether channel is enabled for Rx (ENR) */
-	int stopped_tx;		/* Whether channel has been stopped for Tx
-				   (STOP_TX, etc.) */
-	int stopped_rx;		/* Whether channel has been stopped for Rx */
-	u32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
-						    virtual fifo */
-	u32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
-						    virtual fifo */
-#ifdef STATISTICS
-	u32 tx_frames;		/* Transmitted frames counter. */
-	u32 rx_frames;		/* Received frames counter (only frames
-				   passed to application). */
-	u32 tx_discarded;	/* Discarded tx frames counter (frames that
-				   were discarded by the driver due to errors). */
-	u32 rx_discarded;	/* Discarded rx frames counter (frames that
-				   were discarded by the driver due to errors). */
-#endif /* STATISTICS */
-	u16 mrblr;		/* maximum receive buffer length */
-};
-
-/* ucc_fast_init
- * Initializes Fast UCC according to user provided parameters.
- *
- * uf_info  - (In) pointer to the fast UCC info structure.
- * uccf_ret - (Out) pointer to the fast UCC structure.
- */
-int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret);
-
-/* ucc_fast_free
- * Frees all resources for fast UCC.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_free(struct ucc_fast_private * uccf);
-
-/* ucc_fast_enable
- * Enables a fast UCC port.
- * This routine enables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccf - (In) pointer to the fast UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode);
-
-/* ucc_fast_disable
- * Disables a fast UCC port.
- * This routine disables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccf - (In) pointer to the fast UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode);
-
-/* ucc_fast_irq
- * Handles interrupts on fast UCC.
- * Called from the general interrupt routine to handle interrupts on fast UCC.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_irq(struct ucc_fast_private * uccf);
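The declarations above imply a fixed bring-up order: populate a ucc_fast_info, let ucc_fast_init() allocate the private state, then enable the directions you need. A minimal sketch with illustrative field values (a real configuration would also need the FIFO and MURAM parameters filled in):

/* Sketch only: field values are placeholders. Note that uf_info
 * must stay alive, since ucc_fast_private keeps a pointer to it.
 */
static struct ucc_fast_private *example_uccf_bringup(void)
{
	static struct ucc_fast_info uf_info = {
		.ucc_num  = 0,
		.rx_clock = QE_CLK9,
		.tx_clock = QE_CLK10,
		.mode     = UCC_FAST_PROTOCOL_MODE_ETHERNET,
	};
	struct ucc_fast_private *uccf;

	if (ucc_fast_init(&uf_info, &uccf))
		return NULL;

	ucc_fast_enable(uccf, COMM_DIR_RX_AND_TX);
	return uccf;
}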
-
-/* ucc_fast_transmit_on_demand
- * Immediately forces a poll of the transmitter for data to be sent.
- * Typically, the hardware performs a periodic poll for data that the
- * transmit routine has set up to be transmitted. In cases where
- * this polling cycle is not soon enough, this optional routine can
- * be invoked to force a poll right away, instead. Proper use for
- * each transmission for which this functionality is desired is to
- * call the transmit routine and then this routine right after.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf);
-
-u32 ucc_fast_get_qe_cr_subblock(int uccf_num);
-
-void ucc_fast_dump_regs(struct ucc_fast_private * uccf);
-
-#endif /* __UCC_FAST_H__ */
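The comment above prescribes a specific call order; a sketch of what that looks like in a driver, where example_queue_tx_bd() is an illustrative stand-in for the driver's normal transmit path:

/* Sketch of the prescribed order: queue the frame, then force
 * an immediate Tx poll.
 */
static void example_queue_tx_bd(struct ucc_fast_private *uccf,
				struct sk_buff *skb);	/* hypothetical */

static void example_xmit(struct ucc_fast_private *uccf, struct sk_buff *skb)
{
	example_queue_tx_bd(uccf, skb);		/* set up the Tx BD first */
	ucc_fast_transmit_on_demand(uccf);	/* then poll right away */
}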
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
deleted file mode 100644
index 233ef5fe5fde..000000000000
--- a/arch/powerpc/include/asm/ucc_slow.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors: Shlomi Gridish <gridish@freescale.com>
- *	    Li Yang <leoli@freescale.com>
- *
- * Description:
- * Internal header file for UCC SLOW unit routines.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#ifndef __UCC_SLOW_H__
-#define __UCC_SLOW_H__
-
-#include <linux/kernel.h>
-
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#include <asm/ucc.h>
-
-/* transmit BD's status */
-#define T_R	0x80000000	/* ready bit */
-#define T_PAD	0x40000000	/* add pads to short frames */
-#define T_W	0x20000000	/* wrap bit */
-#define T_I	0x10000000	/* interrupt on completion */
-#define T_L	0x08000000	/* last */
-
-#define T_A	0x04000000	/* Address - the data transmitted as address
-				   chars */
-#define T_TC	0x04000000	/* transmit CRC */
-#define T_CM	0x02000000	/* continuous mode */
-#define T_DEF	0x02000000	/* collision on previous attempt to transmit */
-#define T_P	0x01000000	/* Preamble - send Preamble sequence before
-				   data */
-#define T_HB	0x01000000	/* heartbeat */
-#define T_NS	0x00800000	/* No Stop */
-#define T_LC	0x00800000	/* late collision */
-#define T_RL	0x00400000	/* retransmission limit */
-#define T_UN	0x00020000	/* underrun */
-#define T_CT	0x00010000	/* CTS lost */
-#define T_CSL	0x00010000	/* carrier sense lost */
-#define T_RC	0x003c0000	/* retry count */
-
-/* Receive BD's status */
-#define R_E	0x80000000	/* buffer empty */
-#define R_W	0x20000000	/* wrap bit */
-#define R_I	0x10000000	/* interrupt on reception */
-#define R_L	0x08000000	/* last */
-#define R_C	0x08000000	/* the last byte in this buffer is a cntl
-				   char */
-#define R_F	0x04000000	/* first */
-#define R_A	0x04000000	/* the first byte in this buffer is address
-				   byte */
-#define R_CM	0x02000000	/* continuous mode */
-#define R_ID	0x01000000	/* buffer close on reception of idles */
-#define R_M	0x01000000	/* Frame received because of promiscuous
-				   mode */
-#define R_AM	0x00800000	/* Address match */
-#define R_DE	0x00800000	/* Address match */
-#define R_LG	0x00200000	/* Break received */
-#define R_BR	0x00200000	/* Frame length violation */
-#define R_NO	0x00100000	/* Rx Non Octet Aligned Packet */
-#define R_FR	0x00100000	/* Framing Error (no stop bit) character
-				   received */
-#define R_PR	0x00080000	/* Parity Error character received */
-#define R_AB	0x00080000	/* Frame Aborted */
-#define R_SH	0x00080000	/* frame is too short */
-#define R_CR	0x00040000	/* CRC Error */
-#define R_OV	0x00020000	/* Overrun */
-#define R_CD	0x00010000	/* CD lost */
-#define R_CL	0x00010000	/* this frame is closed because of a
-				   collision */
-
-/* Rx Data buffer must be 4 bytes aligned in most cases.*/
-#define UCC_SLOW_RX_ALIGN		4
-#define UCC_SLOW_MRBLR_ALIGNMENT	4
-#define UCC_SLOW_PRAM_SIZE		0x100
-#define ALIGNMENT_OF_UCC_SLOW_PRAM	64
-
-/* UCC Slow Channel Protocol Mode */
-enum ucc_slow_channel_protocol_mode {
-	UCC_SLOW_CHANNEL_PROTOCOL_MODE_QMC = 0x00000002,
-	UCC_SLOW_CHANNEL_PROTOCOL_MODE_UART = 0x00000004,
-	UCC_SLOW_CHANNEL_PROTOCOL_MODE_BISYNC = 0x00000008,
-};
-
-/* UCC Slow Transparent Transmit CRC (TCRC) */
-enum ucc_slow_transparent_tcrc {
-	/* 16-bit CCITT CRC (HDLC). (X16 + X12 + X5 + 1) */
-	UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC16 = 0x00000000,
-	/* CRC16 (BISYNC). (X16 + X15 + X2 + 1) */
-	UCC_SLOW_TRANSPARENT_TCRC_CRC16 = 0x00004000,
-	/* 32-bit CCITT CRC (Ethernet and HDLC) */
-	UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC32 = 0x00008000,
-};
-
-/* UCC Slow oversampling rate for transmitter (TDCR) */
-enum ucc_slow_tx_oversampling_rate {
-	/* 1x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_1 = 0x00000000,
-	/* 8x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_8 = 0x00010000,
-	/* 16x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_16 = 0x00020000,
-	/* 32x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_32 = 0x00030000,
-};
-
-/* UCC Slow Oversampling rate for receiver (RDCR)
-*/
-enum ucc_slow_rx_oversampling_rate {
-	/* 1x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_1 = 0x00000000,
-	/* 8x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_8 = 0x00004000,
-	/* 16x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_16 = 0x00008000,
-	/* 32x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_32 = 0x0000c000,
-};
-
-/* UCC Slow Transmitter encoding method (TENC)
-*/
-enum ucc_slow_tx_encoding_method {
-	UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZ = 0x00000000,
-	UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZI = 0x00000100
-};
-
-/* UCC Slow Receiver decoding method (RENC)
-*/
-enum ucc_slow_rx_decoding_method {
-	UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZ = 0x00000000,
-	UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZI = 0x00000800
-};
-
-/* UCC Slow Diagnostic mode (DIAG)
-*/
-enum ucc_slow_diag_mode {
-	UCC_SLOW_DIAG_MODE_NORMAL = 0x00000000,
-	UCC_SLOW_DIAG_MODE_LOOPBACK = 0x00000040,
-	UCC_SLOW_DIAG_MODE_ECHO = 0x00000080,
-	UCC_SLOW_DIAG_MODE_LOOPBACK_ECHO = 0x000000c0
-};
-
-struct ucc_slow_info {
-	int ucc_num;
-	int protocol;			/* QE_CR_PROTOCOL_xxx */
-	enum qe_clock rx_clock;
-	enum qe_clock tx_clock;
-	phys_addr_t regs;
-	int irq;
-	u16 uccm_mask;
-	int data_mem_part;
-	int init_tx;
-	int init_rx;
-	u32 tx_bd_ring_len;
-	u32 rx_bd_ring_len;
-	int rx_interrupts;
-	int brkpt_support;
-	int grant_support;
-	int tsa;
-	int cdp;
-	int cds;
-	int ctsp;
-	int ctss;
-	int rinv;
-	int tinv;
-	int rtsm;
-	int rfw;
-	int tci;
-	int tend;
-	int tfl;
-	int txsy;
-	u16 max_rx_buf_length;
-	enum ucc_slow_transparent_tcrc tcrc;
-	enum ucc_slow_channel_protocol_mode mode;
-	enum ucc_slow_diag_mode diag;
-	enum ucc_slow_tx_oversampling_rate tdcr;
-	enum ucc_slow_rx_oversampling_rate rdcr;
-	enum ucc_slow_tx_encoding_method tenc;
-	enum ucc_slow_rx_decoding_method renc;
-};
-
-struct ucc_slow_private {
-	struct ucc_slow_info *us_info;
-	struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
-	struct ucc_slow_pram *us_pram;	/* a pointer to the parameter RAM */
-	u32 us_pram_offset;
-	int enabled_tx;		/* Whether channel is enabled for Tx (ENT) */
-	int enabled_rx;		/* Whether channel is enabled for Rx (ENR) */
-	int stopped_tx;		/* Whether channel has been stopped for Tx
-				   (STOP_TX, etc.) */
-	int stopped_rx;		/* Whether channel has been stopped for Rx */
-	struct list_head confQ;	/* frames passed to chip waiting for tx */
-	u32 first_tx_bd_mask;	/* mask is used in Tx routine to save status
-				   and length for first BD in a frame */
-	u32 tx_base_offset;	/* first BD in Tx BD table offset (In MURAM) */
-	u32 rx_base_offset;	/* first BD in Rx BD table offset (In MURAM) */
-	struct qe_bd *confBd;	/* next BD for confirm after Tx */
-	struct qe_bd *tx_bd;	/* next BD for new Tx request */
-	struct qe_bd *rx_bd;	/* next BD to collect after Rx */
-	void *p_rx_frame;	/* accumulating receive frame */
-	u16 *p_ucce;		/* a pointer to the event register in memory */
-	u16 *p_uccm;		/* a pointer to the mask register in memory */
-	u16 saved_uccm;		/* a saved mask for the RX Interrupt bits */
-#ifdef STATISTICS
-	u32 tx_frames;		/* Transmitted frames counters */
-	u32 rx_frames;		/* Received frames counters (only frames
-				   passed to application) */
-	u32 rx_discarded;	/* Discarded frames counters (frames that
-				   were discarded by the driver due to
-				   errors) */
-#endif /* STATISTICS */
-};
-
-/* ucc_slow_init
- * Initializes Slow UCC according to provided parameters.
- *
- * us_info  - (In) pointer to the slow UCC info structure.
- * uccs_ret - (Out) pointer to the slow UCC structure.
- */
-int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret);
-
-/* ucc_slow_free
- * Frees all resources for slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_free(struct ucc_slow_private * uccs);
-
-/* ucc_slow_enable
- * Enables a slow UCC port.
- * This routine enables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccs - (In) pointer to the slow UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode);
-
-/* ucc_slow_disable
- * Disables a slow UCC port.
- * This routine disables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccs - (In) pointer to the slow UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode);
-
-/* ucc_slow_graceful_stop_tx
- * Smoothly stops transmission on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs);
-
-/* ucc_slow_stop_tx
- * Stops transmission on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_stop_tx(struct ucc_slow_private * uccs);
-
-/* ucc_slow_restart_tx
- * Restarts transmitting on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_restart_tx(struct ucc_slow_private *uccs);
-
-u32 ucc_slow_get_qe_cr_subblock(int uccs_num);
-
-#endif /* __UCC_SLOW_H__ */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4b6b8ace18e0..6a5ace5fa0c8 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,10 +12,9 @@
 
 #include <uapi/asm/unistd.h>
 
-#define __NR_syscalls	379
+#define NR_syscalls	379
 
 #define __NR__exit __NR_exit
 
-#define NR_syscalls	__NR_syscalls
 
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index b73a8199f161..1afe90ade595 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -41,7 +41,7 @@
 #include <linux/unistd.h>
 #include <linux/time.h>
 
-#define SYSCALL_MAP_SIZE      ((__NR_syscalls + 31) / 32)
+#define SYSCALL_MAP_SIZE      ((NR_syscalls + 31) / 32)
 
 /*
  * So here is the ppc64 backward compatible version
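A closing note on the arithmetic: SYSCALL_MAP_SIZE rounds the syscall count up to whole 32-bit words, so with NR_syscalls = 379 the map is (379 + 31) / 32 = 12 words, covering bits 0 through 383. A hedged sketch of the lookup such a bitmap supports, assuming the MSB-first bit numbering the kernel uses when filling the vdso syscall map; the helper name is illustrative:

/* Sketch only: MSB-first bit order within each word is assumed. */
static inline int example_syscall_mapped(const u32 *map, unsigned int nr)
{
	return (map[nr / 32] >> (31 - (nr & 31))) & 1;
}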