mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
kill iov_iter_copy_from_user()

all callers can use copy_page_from_iter() and it actually simplifies them.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

commit e7c24607b5, parent f6c0a1920e
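The caller-side simplification the message refers to: copy_page_from_iter() both copies into the page and advances the iterator, so callers drop the explicit iov_iter_advance() after each copy (and, in process_vm_rw_pages(), the manual clamp to iov_iter_count()). A minimal before/after sketch of the pattern, adapted from the fs/ceph/file.c hunk below; the variables are from that caller, not new API:

	/* before: copy, then advance the iterator by hand */
	ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
	if (ret != plen) {
		ret = -EFAULT;
		break;
	}
	left -= ret;
	iov_iter_advance(&i, ret);

	/*
	 * after: one call copies and advances; note the argument order
	 * is (page, offset, bytes, iter), not (page, iter, offset, bytes)
	 */
	ret = copy_page_from_iter(pages[n], 0, plen, &i);
	if (ret != plen) {
		ret = -EFAULT;
		break;
	}
	left -= ret;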
fs/ceph/file.c
@@ -737,13 +737,12 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
 	left = len;
 	for (n = 0; n < num_pages; n++) {
 		size_t plen = min_t(size_t, left, PAGE_SIZE);
-		ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
+		ret = copy_page_from_iter(pages[n], 0, plen, &i);
 		if (ret != plen) {
 			ret = -EFAULT;
 			break;
 		}
 		left -= ret;
-		iov_iter_advance(&i, ret);
 	}
 
 	if (ret < 0) {
fs/cifs/file.c
@@ -2444,11 +2444,10 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 
 		save_len = cur_len;
 		for (i = 0; i < nr_pages; i++) {
-			bytes = min_t(const size_t, cur_len, PAGE_SIZE);
-			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
-							 0, bytes);
+			bytes = min_t(size_t, cur_len, PAGE_SIZE);
+			copied = copy_page_from_iter(wdata->pages[i], 0, bytes,
+						     &it);
 			cur_len -= copied;
-			iov_iter_advance(&it, copied);
 			/*
 			 * If we didn't copy as much as we expected, then that
 			 * may mean we trod into an unmapped area. Stop copying
include/linux/uio.h
@@ -62,8 +62,6 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
mm/iov_iter.c
@@ -129,33 +129,6 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap(page);
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
 void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
 	BUG_ON(i->count < bytes);
mm/process_vm_access.c
@@ -46,11 +46,7 @@ static int process_vm_rw_pages(struct page **pages,
 			copy = len;
 
 		if (vm_write) {
-			if (copy > iov_iter_count(iter))
-				copy = iov_iter_count(iter);
-			copied = iov_iter_copy_from_user(page, iter,
-							 offset, copy);
-			iov_iter_advance(iter, copied);
+			copied = copy_page_from_iter(page, offset, copy, iter);
 			set_page_dirty_lock(page);
 		} else {
 			copied = copy_page_to_iter(page, offset, copy, iter);