Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	60
1 file changed, 25 insertions(+), 35 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index df3c17603a0d..51a04e5e9373 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1022,7 +1022,9 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
((node = hstate_next_node_to_free(hs, mask)) || 1); \
nr_nodes--)
-#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
+#if (defined(CONFIG_X86_64) || defined(CONFIG_S390)) && \
+ ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
+ defined(CONFIG_CMA))
static void destroy_compound_gigantic_page(struct page *page,
unsigned int order)
{
@@ -3177,7 +3179,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page)
{
- int force_flush = 0;
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *ptep;
@@ -3196,19 +3197,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
tlb_start_vma(tlb, vma);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
address = start;
-again:
for (; address < end; address += sz) {
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
ptl = huge_pte_lock(h, mm, ptep);
- if (huge_pmd_unshare(mm, &address, ptep))
- goto unlock;
+ if (huge_pmd_unshare(mm, &address, ptep)) {
+ spin_unlock(ptl);
+ continue;
+ }
pte = huge_ptep_get(ptep);
- if (huge_pte_none(pte))
- goto unlock;
+ if (huge_pte_none(pte)) {
+ spin_unlock(ptl);
+ continue;
+ }
/*
* Migrating hugepage or HWPoisoned hugepage is already
@@ -3216,7 +3220,8 @@ again:
*/
if (unlikely(!pte_present(pte))) {
huge_pte_clear(mm, address, ptep);
- goto unlock;
+ spin_unlock(ptl);
+ continue;
}
page = pte_page(pte);
@@ -3226,9 +3231,10 @@ again:
* are about to unmap is the actual page of interest.
*/
if (ref_page) {
- if (page != ref_page)
- goto unlock;
-
+ if (page != ref_page) {
+ spin_unlock(ptl);
+ continue;
+ }
/*
* Mark the VMA as having unmapped its page so that
* future faults in this VMA will fail rather than
@@ -3244,30 +3250,14 @@ again:
hugetlb_count_sub(pages_per_huge_page(h), mm);
page_remove_rmap(page, true);
- force_flush = !__tlb_remove_page(tlb, page);
- if (force_flush) {
- address += sz;
- spin_unlock(ptl);
- break;
- }
- /* Bail out after unmapping reference page if supplied */
- if (ref_page) {
- spin_unlock(ptl);
- break;
- }
-unlock:
+
spin_unlock(ptl);
- }
- /*
- * mmu_gather ran out of room to batch pages, we break out of
- * the PTE lock to avoid doing the potential expensive TLB invalidate
- * and page-free while holding it.
- */
- if (force_flush) {
- force_flush = 0;
- tlb_flush_mmu(tlb);
- if (address < end && !ref_page)
- goto again;
+ tlb_remove_page_size(tlb, page, huge_page_size(h));
+ /*
+ * Bail out after unmapping reference page if supplied
+ */
+ if (ref_page)
+ break;
}
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
tlb_end_vma(tlb, vma);
@@ -3383,7 +3373,7 @@ retry_avoidcopy:
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
- page_move_anon_rmap(old_page, vma, address);
+ page_move_anon_rmap(old_page, vma);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}