-rw-r--r--   fs/ceph/file.c            3
-rw-r--r--   fs/cifs/file.c            7
-rw-r--r--   include/linux/uio.h       2
-rw-r--r--   mm/iov_iter.c            27
-rw-r--r--   mm/process_vm_access.c    6
5 files changed, 5 insertions, 40 deletions
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 88a6df4cbe6d..ef9115e4a6fa 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -737,13 +737,12 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
 	left = len;
 	for (n = 0; n < num_pages; n++) {
 		size_t plen = min_t(size_t, left, PAGE_SIZE);
-		ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
+		ret = copy_page_from_iter(pages[n], 0, plen, &i);
 		if (ret != plen) {
 			ret = -EFAULT;
 			break;
 		}
 		left -= ret;
-		iov_iter_advance(&i, ret);
 	}
 
 	if (ret < 0) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5ed03e0b8b40..2900d150654e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2444,11 +2444,10 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 
 		save_len = cur_len;
 		for (i = 0; i < nr_pages; i++) {
-			bytes = min_t(const size_t, cur_len, PAGE_SIZE);
-			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
-							 0, bytes);
+			bytes = min_t(size_t, cur_len, PAGE_SIZE);
+			copied = copy_page_from_iter(wdata->pages[i], 0, bytes,
+						     &it);
 			cur_len -= copied;
-			iov_iter_advance(&it, copied);
 			/*
 			 * If we didn't copy as much as we expected, then that
 			 * may mean we trod into an unmapped area. Stop copying
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 199bcc34241b..abbe83ded630 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -62,8 +62,6 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index 10e46cd721de..22ec1ef068a8 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -129,33 +129,6 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-					i->iov, i->iov_offset, bytes);
-	}
-	kunmap(page);
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
 void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
 	BUG_ON(i->count < bytes);
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 8505c9262b35..f32b1fbbfe69 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -46,11 +46,7 @@ static int process_vm_rw_pages(struct page **pages,
 			copy = len;
 
 		if (vm_write) {
-			if (copy > iov_iter_count(iter))
-				copy = iov_iter_count(iter);
-			copied = iov_iter_copy_from_user(page, iter,
-							 offset, copy);
-			iov_iter_advance(iter, copied);
+			copied = copy_page_from_iter(page, offset, copy, iter);
 			set_page_dirty_lock(page);
 		} else {
 			copied = copy_page_to_iter(page, offset, copy, iter);
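
For context, a minimal caller-side sketch of the pattern this diff applies at each site (the helper name fill_page_from_user is hypothetical, used only for illustration): copy_page_from_iter() copies into the page and advances the iterator itself, so the separate iov_iter_advance() call that the old iov_iter_copy_from_user() pattern required goes away.

    #include <linux/mm.h>
    #include <linux/uio.h>

    /*
     * Hypothetical caller, for illustration only: copy up to 'bytes' of
     * user data into 'page'. Returns the number of bytes actually copied.
     */
    static size_t fill_page_from_user(struct page *page, size_t bytes,
                                      struct iov_iter *iter)
    {
            size_t copied;

            /*
             * Old pattern removed by this diff:
             *
             *      copied = iov_iter_copy_from_user(page, iter, 0, bytes);
             *      iov_iter_advance(iter, copied);
             */

            /* New pattern: one call copies and advances the iterator. */
            copied = copy_page_from_iter(page, 0, bytes, iter);

            return copied;
    }

As in the converted callers above, a short return (copied < bytes) still indicates a fault in the user buffer and is handled the same way as before (ceph returns -EFAULT, cifs and process_vm_access stop copying).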