author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-02-04 07:21:04 +0000
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-02-04 07:21:04 +0000
commit | 9717c1cea16e3eae81ca226f4c3670bb799b61ad (patch)
tree | 80137d0ab55ed25cce3d7ccb8dde8dc841ee7bdb /mm
parent | 79703e014ba02ad86af4bcdf0c16e4b208cca406 (diff)
parent | b45f1b3b585e195a7daead16d914e164310b1df6 (diff)
Merge tag 'drm-next-2020-02-04' of git://anongit.freedesktop.org/drm/drm
Pull drm ttm/mm updates from Dave Airlie:
"Thomas Hellstrom has some more changes to the TTM layer that needed a
patch to the mm subsystem.
This adds a new mm API vmf_insert_mixed_prot to avoid an ugly hack
that has limitations in the TTM layer"
* tag 'drm-next-2020-02-04' of git://anongit.freedesktop.org/drm/drm:
mm, drm/ttm: Fix vm page protection handling
mm: Add a vmf_insert_mixed_prot() function
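For context, a driver fault handler would use the new helper roughly as in the sketch below. This is a minimal illustration, not the TTM code from this pull: my_bo, my_bo_vm_fault and the is_write_combined flag are made-up names, and the pfn calculation assumes a simple linearly mapped buffer. The point is that the caching mode is decided per page at fault time instead of being baked into vma->vm_page_prot at mmap() time.

#include <linux/mm.h>
#include <linux/pfn_t.h>

struct my_bo {				/* hypothetical buffer object */
	unsigned long first_pfn;	/* start of the backing pages */
	bool is_write_combined;		/* placement known only after mmap() */
};

static vm_fault_t my_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct my_bo *bo = vma->vm_private_data;
	unsigned long page_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	unsigned long pfn = bo->first_pfn + page_off;
	pgprot_t prot = vma->vm_page_prot;

	/* Override only the caching bits; everything else stays as the
	 * core vm set it up for this vma. */
	if (bo->is_write_combined)
		prot = pgprot_writecombine(prot);

	return vmf_insert_mixed_prot(vma, vmf->address, pfn_to_pfn_t(pfn), prot);
}

static const struct vm_operations_struct my_bo_vm_ops = {
	.fault = my_bo_vm_fault,
};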
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 44
1 file changed, 40 insertions, 4 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 1c4be871a237..0bccc622e482 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1664,6 +1664,9 @@ out_unlock:
  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
  * impractical.
  *
+ * See vmf_insert_mixed_prot() for a discussion of the implication of using
+ * a value of @pgprot different from that of @vma->vm_page_prot.
+ *
  * Context: Process context. May allocate using %GFP_KERNEL.
  * Return: vm_fault_t value.
  */
@@ -1737,9 +1740,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
 }
 
 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-			unsigned long addr, pfn_t pfn, bool mkwrite)
+			unsigned long addr, pfn_t pfn, pgprot_t pgprot,
+			bool mkwrite)
 {
-	pgprot_t pgprot = vma->vm_page_prot;
 	int err;
 
 	BUG_ON(!vm_mixed_ok(vma, pfn));
@@ -1782,10 +1785,43 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
+/**
+ * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vmf_insert_mixed(), except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * Typically this function should be used by drivers to set caching- and
+ * encryption bits different than those of @vma->vm_page_prot, because
+ * the caching- or encryption mode may not be known at mmap() time.
+ * This is ok as long as @vma->vm_page_prot is not used by the core vm
+ * to set caching and encryption bits for those vmas (except for COW pages).
+ * This is ensured by core vm only modifying these page table entries using
+ * functions that don't touch caching- or encryption bits, using pte_modify()
+ * if needed. (See for example mprotect()).
+ * Also when new page-table entries are created, this is only done using the
+ * fault() callback, and never using the value of vma->vm_page_prot,
+ * except for page-table entries that point to anonymous pages as the result
+ * of COW.
+ *
+ * Context: Process context. May allocate using %GFP_KERNEL.
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+				 pfn_t pfn, pgprot_t pgprot)
+{
+	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
+}
+EXPORT_SYMBOL(vmf_insert_mixed_prot);
+
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 		pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, false);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
 }
 EXPORT_SYMBOL(vmf_insert_mixed);
 
@@ -1797,7 +1833,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 		unsigned long addr, pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, true);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
 }
 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
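For comparison, callers that are fine with the VMA's own protection keep using vmf_insert_mixed(), while the new helper takes an explicit pgprot. A rough sketch of the two call shapes (the decrypted-page case is only an illustrative assumption, not a user added by this patch):

	/* previous behaviour: protection taken from the vma itself */
	ret = vmf_insert_mixed(vma, vmf->address, pfn_to_pfn_t(pfn));

	/* new helper: e.g. a page that must be mapped unencrypted */
	ret = vmf_insert_mixed_prot(vma, vmf->address, pfn_to_pfn_t(pfn),
				    pgprot_decrypted(vma->vm_page_prot));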