summaryrefslogtreecommitdiff
path: root/include/linux/highmem.h
diff options
context:
space:
mode:
authorMaxime Ripard <mripard@kernel.org>2026-02-23 10:09:45 +0100
committerMaxime Ripard <mripard@kernel.org>2026-02-23 10:09:45 +0100
commitc17ee635fd3a482b2ad2bf5e269755c2eae5f25e (patch)
treee3f147462d8a9fd0cf2312c8cd3c5a94da15c3e4 /include/linux/highmem.h
parent803ec1faf7c1823e6e3b1f2aaa81be18528c9436 (diff)
parent6de23f81a5e08be8fbf5e8d7e9febc72a5b5f27f (diff)
Merge drm/drm-fixes into drm-misc-fixes
7.0-rc1 was just released, let's merge it to kick the new release cycle. Signed-off-by: Maxime Ripard <mripard@kernel.org>
Diffstat (limited to 'include/linux/highmem.h')
-rw-r--r--include/linux/highmem.h98
1 files changed, 97 insertions, 1 deletions
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index abc20f9810fd..af03db851a1d 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -197,15 +197,111 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
}
#endif
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
+#ifndef clear_user_page
+/**
+ * clear_user_page() - clear a page to be mapped to user space
+ * @addr: the address of the page
+ * @vaddr: the address of the user mapping
+ * @page: the page
+ *
+ * We condition the definition of clear_user_page() on the architecture
+ * not having a custom clear_user_highpage(). That's because if there
+ * is some special flushing needed for clear_user_highpage() then it
+ * is likely that clear_user_page() also needs some magic. And, since
+ * our only caller is the generic clear_user_highpage(), not defining
+ * it is not much of a loss.
+ */
+static inline void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
+{
+ clear_page(addr);
+}
+#endif
+
+/**
+ * clear_user_pages() - clear a page range to be mapped to user space
+ * @addr: start address
+ * @vaddr: start address of the user mapping
+ * @page: start page
+ * @npages: number of pages
+ *
+ * Assumes that the region (@addr, +@npages) has been validated
+ * already so this does no exception handling.
+ *
+ * If the architecture provides a clear_user_page(), use that;
+ * otherwise, we can safely use clear_pages().
+ */
+static inline void clear_user_pages(void *addr, unsigned long vaddr,
+ struct page *page, unsigned int npages)
+{
+
+#ifdef clear_user_page
+ do {
+ clear_user_page(addr, vaddr, page);
+ addr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ page++;
+ } while (--npages);
+#else
+ /*
+ * Prefer clear_pages() to allow for architectural optimizations
+ * when operating on contiguous page ranges.
+ */
+ clear_pages(addr, npages);
+#endif
+}
+
+/**
+ * clear_user_highpage() - clear a page to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ *
+ * With !CONFIG_HIGHMEM this (and the copy_user_highpage() below) will
+ * be plain clear_user_page() (and copy_user_page()).
+ */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_local_page(page);
clear_user_page(addr, vaddr, page);
kunmap_local(addr);
}
+#endif /* clear_user_highpage */
+
+/**
+ * clear_user_highpages() - clear a page range to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ * @npages: number of pages
+ *
+ * Assumes that all the pages in the region (@page, +@npages) are valid
+ * so this does no exception handling.
+ */
+static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
+ unsigned int npages)
+{
+
+#if defined(clear_user_highpage) || defined(CONFIG_HIGHMEM)
+ /*
+	 * An architecture-defined clear_user_highpage() implies special
+	 * handling is needed.
+ *
+	 * So we use that, or the generic variant if CONFIG_HIGHMEM is
+	 * enabled.
+ */
+ do {
+ clear_user_highpage(page, vaddr);
+ vaddr += PAGE_SIZE;
+ page++;
+ } while (--npages);
+#else
+
+ /*
+ * Prefer clear_user_pages() to allow for architectural optimizations
+ * when operating on contiguous page ranges.
+ */
+ clear_user_pages(page_address(page), vaddr, page, npages);
#endif
+}
#ifndef vma_alloc_zeroed_movable_folio
/**