From 3b50a6e536d2d843857ffe5f923eff7be4222afe Mon Sep 17 00:00:00 2001
From: Ralph Campbell
Date: Wed, 1 Jul 2020 15:53:49 -0700
Subject: mm/hmm: provide the page mapping order in hmm_range_fault()

hmm_range_fault() returns an array of page frame numbers and flags for
how the pages are mapped in the requested process' page tables. The PFN
can be used to get the struct page with hmm_pfn_to_page() and the page
size order can be determined with compound_order(page).

However, if the page is larger than order 0 (PAGE_SIZE), there is no
indication that a compound page is mapped by the CPU using a larger
page size. Without this information, the caller can't safely use a
large device PTE to map the compound page because the CPU might be
using smaller PTEs with different read/write permissions.

Add a new function hmm_pfn_to_map_order() to return the mapping size
order so that callers know the pages are being mapped with consistent
permissions and a large device page table mapping can be used if one is
available.

This will allow devices to optimize mapping the page into HW by
avoiding or batching work for huge pages. For instance, the dma_map can
be done with a high order directly.

Link: https://lore.kernel.org/r/20200701225352.9649-3-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell
Signed-off-by: Jason Gunthorpe
---
 mm/hmm.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

(limited to 'mm')

diff --git a/mm/hmm.c b/mm/hmm.c
index e9a545751108..0809baee49d0 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -165,12 +165,19 @@ static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
 	return hmm_pfns_fill(addr, end, range, 0);
 }
 
+static inline unsigned long hmm_pfn_flags_order(unsigned long order)
+{
+	return order << HMM_PFN_ORDER_SHIFT;
+}
+
 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
 						 pmd_t pmd)
 {
 	if (pmd_protnone(pmd))
 		return 0;
-	return pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
+	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+				 HMM_PFN_VALID) |
+	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -389,7 +396,9 @@ static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
 {
 	if (!pud_present(pud))
 		return 0;
-	return pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
+	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+				 HMM_PFN_VALID) |
+	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
 }
 
 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -474,7 +483,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 
 	i = (start - range->start) >> PAGE_SHIFT;
 	pfn_req_flags = range->hmm_pfns[i];
-	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
+		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
 	required_fault =
 		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
 	if (required_fault) {
--
cgit v1.2.3
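As a rough illustration of how a caller might consume the new mapping
order, the sketch below walks the PFN array filled in by
hmm_range_fault() and mirrors huge CPU mappings with one large device
mapping. This is only a hedged example: dev_map_pages() and
dev_mirror_range() are hypothetical driver helpers, not part of this
patch; hmm_pfn_to_map_order(), hmm_pfn_to_page() and the HMM_PFN_*
flags are the interfaces described above.

#include <linux/hmm.h>
#include <linux/mm.h>

/* Hypothetical driver helper: create device PTEs for npages at addr. */
static void dev_map_pages(unsigned long addr, struct page *page,
			  unsigned long npages, bool writable);

static int dev_mirror_range(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < npages;) {
		unsigned long hmm_pfn = range->hmm_pfns[i];
		unsigned long addr = range->start + (i << PAGE_SHIFT);
		unsigned int order = hmm_pfn_to_map_order(hmm_pfn);

		if (!(hmm_pfn & HMM_PFN_VALID)) {
			i++;
			continue;
		}

		/*
		 * A non-zero order means the CPU maps this page with a
		 * larger entry and consistent permissions, so the device
		 * can mirror it with one large PTE (and, for example,
		 * dma_map the whole compound page at once) when the
		 * address is aligned to the mapping size.
		 */
		if (order && IS_ALIGNED(addr, PAGE_SIZE << order)) {
			dev_map_pages(addr, hmm_pfn_to_page(hmm_pfn),
				      1UL << order, hmm_pfn & HMM_PFN_WRITE);
			i += 1UL << order;
		} else {
			dev_map_pages(addr, hmm_pfn_to_page(hmm_pfn), 1,
				      hmm_pfn & HMM_PFN_WRITE);
			i++;
		}
	}
	return 0;
}

Whether to batch at PMD or PUD size is a device capability decision; the
point is only that the reported order guarantees uniform CPU permissions
across the whole range being covered.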
From 5143192cd410c4fc83be09a2e73423765aee072b Mon Sep 17 00:00:00 2001
From: Ralph Campbell
Date: Thu, 23 Jul 2020 15:30:00 -0700
Subject: mm/migrate: add a flags parameter to migrate_vma

The src_owner field in struct migrate_vma is being used for two
purposes: it acts as a selection filter for which types of pages are to
be migrated, and it identifies device private pages owned by the
caller. Split this into separate parameters so the src_owner field can
be used just to identify device private pages owned by the caller of
migrate_vma_setup().

Rename the src_owner field to pgmap_owner to reflect it is now used
only to identify which device private pages to migrate.

Link: https://lore.kernel.org/r/20200723223004.9586-3-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell
Reviewed-by: Bharata B Rao
Signed-off-by: Jason Gunthorpe
---
 mm/migrate.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/migrate.c b/mm/migrate.c
index f37729673558..e3ea68e3a08b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2287,7 +2287,9 @@ again:
 				goto next;
 
 			page = device_private_entry_to_page(entry);
-			if (page->pgmap->owner != migrate->src_owner)
+			if (!(migrate->flags &
+			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
+			    page->pgmap->owner != migrate->pgmap_owner)
 				goto next;
 
 			mpfn = migrate_pfn(page_to_pfn(page)) |
@@ -2295,7 +2297,7 @@ again:
 			if (is_write_device_private_entry(entry))
 				mpfn |= MIGRATE_PFN_WRITE;
 		} else {
-			if (migrate->src_owner)
+			if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
 				goto next;
 			pfn = pte_pfn(pte);
 			if (is_zero_pfn(pfn)) {
--
cgit v1.2.3

From 998427b3ad2c769082853880cf353557ec0ec77d Mon Sep 17 00:00:00 2001
From: Ralph Campbell
Date: Thu, 23 Jul 2020 15:30:01 -0700
Subject: mm/notifier: add migration invalidation type

Currently migrate_vma_setup() calls mmu_notifier_invalidate_range_start()
which flushes all device private page mappings whether or not a page is
being migrated to/from device private memory. In order to not disrupt
device mappings that are not being migrated, shift the responsibility
for clearing device private mappings to the device driver and leave CPU
page table unmapping to be handled by migrate_vma_setup().

To support this, the caller of migrate_vma_setup() should always set
struct migrate_vma::pgmap_owner to a non-NULL value that matches the
device private page->pgmap->owner. This value is then passed to the
struct mmu_notifier_range with a new event type which the driver's
invalidation function can use to avoid device MMU invalidations.

Link: https://lore.kernel.org/r/20200723223004.9586-4-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell
Signed-off-by: Jason Gunthorpe
---
 mm/migrate.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/migrate.c b/mm/migrate.c
index e3ea68e3a08b..96e1f41a991e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2392,8 +2392,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 {
 	struct mmu_notifier_range range;
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL,
+	/*
+	 * Note that the pgmap_owner is passed to the mmu notifier callback so
+	 * that the registered device driver can skip invalidating device
+	 * private page mappings that won't be migrated.
+	 */
+	mmu_notifier_range_init(&range, MMU_NOTIFY_MIGRATE, 0, migrate->vma,
 			migrate->vma->vm_mm, migrate->start, migrate->end);
+	range.migrate_pgmap_owner = migrate->pgmap_owner;
 	mmu_notifier_invalidate_range_start(&range);
 
 	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
--
cgit v1.2.3
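To make the intended driver-side use of these two migrate changes
concrete, here is a hedged sketch. Only struct migrate_vma, the
MIGRATE_VMA_SELECT_* flags, MMU_NOTIFY_MIGRATE and migrate_pgmap_owner
come from the patches above; struct my_device, my_migrate_one_page()
and my_invalidate_range_start() are hypothetical driver code, and the
data copy plus most error handling are elided.

#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Hypothetical per-device state: a pagemap for device private memory and
 * an mmu notifier registered against the mirrored process.
 */
struct my_device {
	struct dev_pagemap pagemap;	/* pagemap.owner identifies this driver */
	struct mmu_notifier notifier;
};

/* Migrate one of this driver's device private pages back to system RAM. */
static int my_migrate_one_page(struct my_device *mydev,
			       struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		/* Select only device private pages owned by this driver. */
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.pgmap_owner	= mydev->pagemap.owner,
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret || !args.cpages)
		return ret ? ret : -ENOENT;

	/* ... allocate a system page, copy the data, fill dst ... */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}

/*
 * CPU invalidation callback: the new MMU_NOTIFY_MIGRATE event carries the
 * pgmap_owner set above, so an invalidation triggered by this driver's own
 * migration does not need to tear down its device mappings a second time.
 */
static int my_invalidate_range_start(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	struct my_device *mydev = container_of(mn, struct my_device, notifier);

	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->migrate_pgmap_owner == mydev->pagemap.owner)
		return 0;

	if (!mmu_notifier_range_blockable(range))
		return -EAGAIN;

	/* ... invalidate device page table entries covering [start, end) ... */
	return 0;
}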