author	Andreas Gruenbacher <agruenba@redhat.com>	2022-04-15 06:28:39 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-05-01 17:22:28 +0200
commit	923f05a660e60ef22952e09acdd6e37e17ddf084 (patch)
tree	67077858191bf79c664e000e3f9b93d0830e8839 /mm
parent	19cbd78fb26a2622714183d400b9af2659fa5221 (diff)
gup: Turn fault_in_pages_{readable,writeable} into fault_in_{readable,writeable}
commit bb523b406c849eef8f265a07cd7f320f1f177743 upstream

Turn fault_in_pages_{readable,writeable} into versions that return the
number of bytes not faulted in, similar to copy_to_user, instead of
returning a non-zero value when any of the requested pages couldn't be
faulted in.  This supports the existing users that require all pages to
be faulted in as well as new users that are happy if any pages can be
faulted in.

Rename the functions to fault_in_{readable,writeable} to make sure
this change doesn't silently break things.

Neither of these functions is entirely trivial and it doesn't seem
useful to inline them, so move them to mm/gup.c.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
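For illustration only, a minimal sketch of how the two kinds of users
described above would consume the new return value. Neither caller is
part of this commit; want_all() and want_some() are hypothetical names:

/*
 * Hypothetical callers (not from this commit) showing the new
 * convention: the helpers now return the number of bytes NOT
 * faulted in, with 0 meaning the whole range is accessible.
 */

/* Existing-style user: needs the entire range faulted in. */
static int want_all(char __user *buf, size_t len)
{
	if (fault_in_writeable(buf, len) != 0)
		return -EFAULT;		/* part of the range is inaccessible */
	return 0;
}

/* New-style user: content with whatever leading bytes are usable. */
static ssize_t want_some(const char __user *buf, size_t len)
{
	size_t not_in = fault_in_readable(buf, len);

	if (not_in == len)
		return -EFAULT;		/* nothing could be faulted in */
	return len - not_in;		/* leading bytes now accessible */
}

The first pattern is what the existing all-or-nothing callers reduce
to; the second is the partial-progress style this change enables.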
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	2
-rw-r--r--	mm/gup.c	72
2 files changed, 73 insertions, 1 deletion
diff --git a/mm/filemap.c b/mm/filemap.c
index 1293c3409e42..d697b3446a4a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -90,7 +90,7 @@
 *      ->lock_page		(filemap_fault, access_process_vm)
 *
 *  ->i_rwsem			(generic_perform_write)
- *    ->mmap_lock		(fault_in_pages_readable->do_page_fault)
+ *    ->mmap_lock		(fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
diff --git a/mm/gup.c b/mm/gup.c
index 52f08e3177e9..e063cb2bb187 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1682,6 +1682,78 @@ finish_or_fault:
#endif /* !CONFIG_MMU */
+/**
+ * fault_in_writeable - fault in userspace address range for writing
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_writeable(char __user *uaddr, size_t size)
+{
+	char __user *start = uaddr, *end;
+
+	if (unlikely(size == 0))
+		return 0;
+	if (!PAGE_ALIGNED(uaddr)) {
+		if (unlikely(__put_user(0, uaddr) != 0))
+			return size;
+		uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
+	}
+	end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
+	if (unlikely(end < start))
+		end = NULL;
+	while (uaddr != end) {
+		if (unlikely(__put_user(0, uaddr) != 0))
+			goto out;
+		uaddr += PAGE_SIZE;
+	}
+
+out:
+	if (size > uaddr - start)
+		return size - (uaddr - start);
+	return 0;
+}
+EXPORT_SYMBOL(fault_in_writeable);
+
+/**
+ * fault_in_readable - fault in userspace address range for reading
+ * @uaddr: start of user address range
+ * @size: size of user address range
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_readable(const char __user *uaddr, size_t size)
+{
+	const char __user *start = uaddr, *end;
+	volatile char c;
+
+	if (unlikely(size == 0))
+		return 0;
+	if (!PAGE_ALIGNED(uaddr)) {
+		if (unlikely(__get_user(c, uaddr) != 0))
+			return size;
+		uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
+	}
+	end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
+	if (unlikely(end < start))
+		end = NULL;
+	while (uaddr != end) {
+		if (unlikely(__get_user(c, uaddr) != 0))
+			goto out;
+		uaddr += PAGE_SIZE;
+	}
+
+out:
+	(void)c;
+	if (size > uaddr - start)
+		return size - (uaddr - start);
+	return 0;
+}
+EXPORT_SYMBOL(fault_in_readable);
+
/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
*
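Two details of the helpers above are easy to miss. First, they probe a
single byte per page with __put_user()/__get_user() rather than touching
every byte, since one access is enough to fault the page in. Second, the
end < start test handles start + size wrapping past the top of the
address space: PAGE_ALIGN() then overflows to a value below start, so
end is set to NULL and the loop keeps stepping until uaddr itself wraps
to NULL. Below is a standalone userspace sketch of just that
page-stepping arithmetic, assuming 4096-byte pages; the PAGE_* macros
are redefined locally and count_probes() is an illustrative stand-in,
not kernel code:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_ALIGNED(x)	(((uintptr_t)(x) & (PAGE_SIZE - 1)) == 0)

/* Count how many one-byte probes the loop in mm/gup.c would issue. */
static size_t count_probes(uintptr_t uaddr, size_t size)
{
	uintptr_t start = uaddr, end;
	size_t probes = 0;

	if (size == 0)
		return 0;
	if (!PAGE_ALIGNED(uaddr)) {
		probes++;			/* probe in the partial first page */
		uaddr = PAGE_ALIGN(uaddr);
	}
	end = PAGE_ALIGN(start + size);
	if (end < start)			/* start + size wrapped past the top */
		end = 0;			/* loop until uaddr itself wraps */
	while (uaddr != end) {
		probes++;			/* one probe per page */
		uaddr += PAGE_SIZE;
	}
	return probes;
}

int main(void)
{
	/* 10 bytes starting mid-page touch one page: one probe. */
	printf("%zu\n", count_probes(0x1234, 10));	/* prints 1 */
	/* 8192 bytes from a page boundary touch two pages: two probes. */
	printf("%zu\n", count_probes(0x2000, 8192));	/* prints 2 */
	return 0;
}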