| author | Jeff Garzik <jgarzik@pobox.com> | 2005-10-26 01:08:05 -0400 |
|---|---|---|
| committer | Jeff Garzik <jgarzik@pobox.com> | 2005-10-26 01:08:05 -0400 |
| commit | ccd7bc2f67fdfa9c47ceae64f1117d1fb6cb8737 (patch) | |
| tree | 33b13cd5582c419f8ae098edb24ca355974a7ccf /mm/hugetlb.c | |
| parent | 6f0ef4fa57b9ba27d7b1c330bad041f1665501fe (diff) | |
| parent | 6693e74a16ef563960764bd963f1048392135c3c (diff) | |
Merge branch 'master'
Diffstat (limited to 'mm/hugetlb.c')
| -rw-r--r-- | mm/hugetlb.c | 22 |
1 files changed, 22 insertions, 0 deletions
```diff
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459e..61d380678030 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,6 +394,28 @@ out:
 	return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully.  Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	int ret = VM_FAULT_SIGBUS;
+	pte_t *pte;
+
+	spin_lock(&mm->page_table_lock);
+	pte = huge_pte_offset(mm, address);
+	if (pte && !pte_none(*pte))
+		ret = VM_FAULT_MINOR;
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
```
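For context, the new hugetlb_fault() is only useful once the generic fault path routes hugetlb VMAs to it instead of failing them outright. Below is a minimal, illustrative sketch (not part of this diff) of how such a dispatch could look, assuming an is_vm_hugetlb_page()-style check; the helper name fault_dispatch() is hypothetical and chosen only for illustration.

```c
#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Hypothetical sketch: hand faults inside a hugetlb VMA to
 * hugetlb_fault() rather than the normal pte fault path.  In the
 * real kernel this kind of check lives in the generic fault code;
 * this helper exists only to show the intended call shape.
 */
static inline int fault_dispatch(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long address, int write_access)
{
	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, write_access);

	/* ... normal page fault handling for regular VMAs ... */
	return VM_FAULT_MINOR;
}
```

With that shape, a stale-TLB minor fault on a still-mapped huge page returns VM_FAULT_MINOR, while a fault on a hugetlb page truncated since mmap still reports VM_FAULT_SIGBUS, matching the behaviour described in the comment above.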
