Diffstat (limited to 'arch/parisc/kernel/cache.c')
-rw-r--r--  arch/parisc/kernel/cache.c | 221
1 file changed, 186 insertions(+), 35 deletions(-)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 48e16dc20102..4b12890642eb 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -267,9 +267,11 @@ static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long physaddr)
{
+ preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
flush_icache_page_asm(physaddr, vmaddr);
+ preempt_enable();
}
void flush_dcache_page(struct page *page)
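The preempt_disable()/preempt_enable() pair added here is the point of the hunk: flush_dcache_page_asm() and flush_icache_page_asm() flush through a per-CPU tmpalias mapping, so a migration between the two calls would leave the icache flush running against another CPU's alias. A minimal sketch of the pattern, with flush_through_tmpalias() as a hypothetical wrapper (not part of the patch):

    /* Any sequence that uses a CPU-local alias must keep the task
     * on that CPU for the whole sequence. */
    static void flush_through_tmpalias(unsigned long phys, unsigned long virt)
    {
            preempt_disable();              /* pin to this CPU's alias */
            flush_dcache_page_asm(phys, virt);
            preempt_enable();
    }
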
@@ -329,17 +331,6 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
-void clear_user_page_asm(void *page, unsigned long vaddr)
-{
- unsigned long flags;
- /* This function is implemented in assembly in pacache.S */
- extern void __clear_user_page_asm(void *page, unsigned long vaddr);
-
- purge_tlb_start(flags);
- __clear_user_page_asm(page, vaddr);
- purge_tlb_end(flags);
-}
-
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
@@ -373,20 +364,9 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}
-extern void purge_kernel_dcache_page(unsigned long);
-extern void clear_user_page_asm(void *page, unsigned long vaddr);
-
-void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
-{
- unsigned long flags;
-
- purge_kernel_dcache_page((unsigned long)page);
- purge_tlb_start(flags);
- pdtlb_kernel(page);
- purge_tlb_end(flags);
- clear_user_page_asm(page, vaddr);
-}
-EXPORT_SYMBOL(clear_user_page);
+extern void purge_kernel_dcache_page_asm(unsigned long);
+extern void clear_user_page_asm(void *, unsigned long);
+extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(void *addr)
{
@@ -399,11 +379,26 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
+{
+ clear_page_asm(vto);
+ if (!parisc_requires_coherency())
+ flush_kernel_dcache_page_asm(vto);
+}
+EXPORT_SYMBOL(clear_user_page);
+
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg)
+ struct page *pg)
{
- /* no coherency needed (all in kmap/kunmap) */
- copy_user_page_asm(vto, vfrom);
+ /* Copy using kernel mapping. No coherency is needed
+ (all in kmap/kunmap) on machines that don't support
+ non-equivalent aliasing. However, the `from' page
+ needs to be flushed before it can be accessed through
+ the kernel mapping. */
+ preempt_disable();
+ flush_dcache_page_asm(__pa(vfrom), vaddr);
+ preempt_enable();
+ copy_page_asm(vto, vfrom);
if (!parisc_requires_coherency())
flush_kernel_dcache_page_asm(vto);
}
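The comment above carries the reasoning for the new copy_user_page(): on a VIPT-aliasing machine the user's latest stores to the `from' page may still sit in cache lines indexed by the user virtual address, invisible through the differently indexed kernel mapping, so they must be written back first. The required ordering, as an illustrative comment only:

    /*
     * flush user alias of `from'   ->  dirty lines reach memory
     * copy_page_asm(vto, vfrom)    ->  safe to read via kernel mapping
     * flush kernel alias of `vto'  ->  result visible at every alias
     *
     * Skip the first step and the copy can read stale data.
     */
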
@@ -419,6 +414,24 @@ void kunmap_parisc(void *addr)
EXPORT_SYMBOL(kunmap_parisc);
#endif
+void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long flags;
+
+ /* Note: purge_tlb_entries can be called at startup with
+ no context. */
+
+ /* Disable preemption while we play with %sr1. */
+ preempt_disable();
+ mtsp(mm->context, 1);
+ purge_tlb_start(flags);
+ pdtlb(addr);
+ pitlb(addr);
+ purge_tlb_end(flags);
+ preempt_enable();
+}
+EXPORT_SYMBOL(purge_tlb_entries);
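purge_tlb_entries() evicts both the data and instruction TLB entries for one user address, temporarily loading the mm's space id into %sr1 (hence the preemption guard around mtsp()). A plausible call site, sketched with the hypothetical helper update_pte() (the real callers live in the arch's pgtable headers, not in this file):

    /* After a live PTE changes, the stale translation must leave
     * the TLB before the new one can be relied on. */
    static void update_pte(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval)
    {
            set_pte(ptep, pteval);          /* install new translation */
            purge_tlb_entries(mm, addr);    /* evict any cached old one */
    }
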
+
void __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
{
@@ -458,8 +471,66 @@ void flush_cache_all(void)
on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}
+static inline unsigned long mm_total_size(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ unsigned long usize = 0;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ usize += vma->vm_end - vma->vm_start;
+ return usize;
+}
+
+static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
+{
+ pte_t *ptep = NULL;
+
+ if (!pgd_none(*pgd)) {
+ pud_t *pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud)) {
+ pmd_t *pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd))
+ ptep = pte_offset_map(pmd, addr);
+ }
+ }
+ return ptep;
+}
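get_ptep() is an open-coded walk down the paging hierarchy (pgd, pud, pmd, pte) that bails out with NULL at the first empty level, so the flush loops below can simply skip unmapped addresses. Usage sketch, assuming the caller already holds whatever locks keep the tables stable, as the flush paths here do:

    pte_t *ptep = get_ptep(mm->pgd, addr);
    if (ptep != NULL) {
            /* resolve the user address to its physical page */
            unsigned long phys = page_to_phys(pte_page(*ptep));
            /* ... flush or inspect the page ... */
    }
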
+
void flush_cache_mm(struct mm_struct *mm)
{
+ /* Flushing the whole cache on each cpu takes forever on
+ rp3440, etc. So, avoid it if the mm isn't too big. */
+ if (mm_total_size(mm) < parisc_cache_flush_threshold) {
+ struct vm_area_struct *vma;
+
+ if (mm->context == mfsp(3)) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ flush_user_dcache_range_asm(vma->vm_start,
+ vma->vm_end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(
+ vma->vm_start, vma->vm_end);
+ }
+ } else {
+ pgd_t *pgd = mm->pgd;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (ptep != NULL) {
+ pte_t pte = *ptep;
+ __flush_cache_page(vma, addr,
+ page_to_phys(pte_page(pte)));
+ }
+ }
+ }
+ }
+ return;
+ }
+
#ifdef CONFIG_SMP
flush_cache_all();
#else
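flush_cache_mm() now avoids the full flush whenever the address space is small enough: below parisc_cache_flush_threshold it flushes per VMA, using the fast asm range flushes when the mm is the one live in %sr3 and a page-by-page table walk otherwise. Condensed decision structure (a sketch, not the literal code):

    if (mm_total_size(mm) < parisc_cache_flush_threshold) {
            if (mm->context == mfsp(3)) {
                    /* mm is current: asm range flush per VMA */
            } else {
                    /* foreign mm: walk page tables, flush per page */
            }
    } else {
            flush_cache_all();      /* large mm: full flush is cheaper */
    }
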
@@ -485,20 +556,36 @@ flush_user_icache_range(unsigned long start, unsigned long end)
flush_instruction_cache();
}
-
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- int sr3;
-
BUG_ON(!vma->vm_mm->context);
- sr3 = mfsp(3);
- if (vma->vm_mm->context == sr3) {
- flush_user_dcache_range(start,end);
- flush_user_icache_range(start,end);
+ if ((end - start) < parisc_cache_flush_threshold) {
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ } else {
+ unsigned long addr;
+ pgd_t *pgd = vma->vm_mm->pgd;
+
+ for (addr = start & PAGE_MASK; addr < end;
+ addr += PAGE_SIZE) {
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (ptep != NULL) {
+ pte_t pte = *ptep;
+ flush_cache_page(vma,
+ addr, pte_pfn(pte));
+ }
+ }
+ }
} else {
+#ifdef CONFIG_SMP
flush_cache_all();
+#else
+ flush_cache_all_local();
+#endif
}
}
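flush_cache_range() mirrors the flush_cache_mm() logic: same threshold, same %sr3 test, same page-table walk for a non-current mm. One detail worth noting is the PAGE_MASK alignment at the top of the walk, shown here assuming an illustrative 4 KiB page size:

    /* With 4 KiB pages PAGE_MASK is ~0xfffUL, so e.g.
     * 0x12345 & PAGE_MASK == 0x12000: the walk starts at the page
     * containing `start' and every get_ptep() lookup is page-aligned. */
    unsigned long addr = start & PAGE_MASK;
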
@@ -511,3 +598,67 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}
+
+#ifdef CONFIG_PARISC_TMPALIAS
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *vto;
+ unsigned long flags;
+
+ /* Clear using TMPALIAS region. The page doesn't need to
+ be flushed but the kernel mapping needs to be purged. */
+
+ vto = kmap_atomic(page, KM_USER0);
+
+ /* The PA-RISC 2.0 Architecture book states on page F-6:
+ "Before a write-capable translation is enabled, *all*
+ non-equivalently-aliased translations must be removed
+ from the page table and purged from the TLB. (Note
+ that the caches are not required to be flushed at this
+ time.) Before any non-equivalent aliased translation
+ is re-enabled, the virtual address range for the writeable
+ page (the entire page) must be flushed from the cache,
+ and the write-capable translation removed from the page
+ table and purged from the TLB." */
+
+ purge_kernel_dcache_page_asm((unsigned long)vto);
+ purge_tlb_start(flags);
+ pdtlb_kernel(vto);
+ purge_tlb_end(flags);
+ preempt_disable();
+ clear_user_page_asm(vto, vaddr);
+ preempt_enable();
+
+ pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
+}
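The long quotation from the PA-RISC 2.0 book is what fixes the order of operations in clear_user_highpage(). As a checklist mirroring the calls above (sketch):

    /* TMPALIAS write sequence per PA-RISC 2.0, p. F-6:
     * 1. purge_kernel_dcache_page_asm()  - drop the kernel alias from cache
     * 2. pdtlb_kernel()                  - purge its TLB entry
     * 3. clear_user_page_asm()           - write via the tmpalias mapping,
     *    with preemption off, since the alias is per-CPU
     */
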
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ void *vfrom, *vto;
+ unsigned long flags;
+
+ /* Copy using TMPALIAS region. This has the advantage
+ that the `from' page doesn't need to be flushed. However,
+ the `to' page must be flushed in copy_user_page_asm since
+ it can be used to bring in executable code. */
+
+ vfrom = kmap_atomic(from, KM_USER0);
+ vto = kmap_atomic(to, KM_USER1);
+
+ purge_kernel_dcache_page_asm((unsigned long)vto);
+ purge_tlb_start(flags);
+ pdtlb_kernel(vto);
+ pdtlb_kernel(vfrom);
+ purge_tlb_end(flags);
+ preempt_disable();
+ copy_user_page_asm(vto, vfrom, vaddr);
+ flush_dcache_page_asm(__pa(vto), vaddr);
+ preempt_enable();
+
+ pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */
+ pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
+}
+
+#endif /* CONFIG_PARISC_TMPALIAS */
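
copy_user_highpage() applies the same recipe to two pages: both kernel aliases are purged up front (the `to' page per the F-6 rule, the `from' page so no stale kernel-alias lines shadow it), and the destination is flushed afterwards because it may be mapped executable. The bare pagefault_enable() calls stand in for kunmap_atomic(), which on this kernel's parisc reduces to a pagefault toggle; the pairing, sketched:

    void *vfrom = kmap_atomic(from, KM_USER0);  /* pagefault_disable() */
    void *vto   = kmap_atomic(to, KM_USER1);
    /* ... purge, copy, flush ... */
    pagefault_enable();     /* in place of kunmap_atomic(vto, KM_USER1) */
    pagefault_enable();     /* in place of kunmap_atomic(vfrom, KM_USER0) */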