iov_iter, net: Move csum_and_copy_to/from_iter() to net/
Move csum_and_copy_to/from_iter() to net code now that the iteration
framework can be #included.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20230925120309.1731676-10-dhowells@redhat.com
cc: Alexander Viro <viro@zeniv.linux.org.uk>
cc: Jens Axboe <axboe@kernel.dk>
cc: Christoph Hellwig <hch@lst.de>
cc: Christian Brauner <christian@brauner.io>
cc: Matthew Wilcox <willy@infradead.org>
cc: Linus Torvalds <torvalds@linux-foundation.org>
cc: David Laight <David.Laight@ACULAB.COM>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: linux-block@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
cc: netdev@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
parent c9eec08bac
commit 6d0d419914
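For readers skimming the diff below: the helpers being moved checksum data while copying it between a kernel buffer and a user-supplied iov_iter. The following is an illustrative sketch only, not part of this commit; example_recv_copy() and its error handling are hypothetical, but the csum_and_copy_from_iter_full() call matches the declaration this patch adds to include/linux/skbuff.h.

/*
 * Illustrative sketch only (not part of this commit).  A protocol's
 * receive path might use the moved helper like this; example_recv_copy()
 * is a hypothetical caller.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>	/* csum_and_copy_from_iter_full() after this patch */
#include <linux/uio.h>		/* struct iov_iter */

static int example_recv_copy(void *kbuf, size_t len, struct iov_iter *from)
{
	__wsum csum = 0;

	/*
	 * Copy @len bytes from the iterator into @kbuf, folding the copied
	 * data into @csum.  On a short copy the helper reverts the iterator
	 * and returns false.
	 */
	if (!csum_and_copy_from_iter_full(kbuf, len, &csum, from))
		return -EFAULT;

	/* @csum now holds the partial checksum of everything copied. */
	return 0;
}

The send-side counterpart, csum_and_copy_to_iter(), instead takes a struct csum_state { __wsum csum; size_t off; } so the running checksum can be combined at the correct byte offset; after this patch it becomes a static helper in net/core/datagram.c.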
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3679,6 +3679,31 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
 	return __skb_put_padto(skb, len, true);
 }
 
+static inline __wsum csum_and_memcpy(void *to, const void *from, size_t len,
+				     __wsum sum, size_t off)
+{
+	__wsum next = csum_partial_copy_nocheck(from, to, len);
+	return csum_block_add(sum, next, off);
+}
+
+struct csum_state {
+	__wsum csum;
+	size_t off;
+};
+
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+
+static __always_inline __must_check
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+				  __wsum *csum, struct iov_iter *i)
+{
+	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
+	if (likely(copied == bytes))
+		return true;
+	iov_iter_revert(i, copied);
+	return false;
+}
+
 static inline int skb_add_data(struct sk_buff *skb,
 			       struct iov_iter *from, int copy)
 {
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -338,24 +338,6 @@ iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
 	return npages;
 }
 
-struct csum_state {
-	__wsum csum;
-	size_t off;
-};
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-
-static __always_inline __must_check
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
-				  __wsum *csum, struct iov_iter *i)
-{
-	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
-	if (likely(copied == bytes))
-		return true;
-	iov_iter_revert(i, copied);
-	return false;
-}
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 			     struct iov_iter *i);
 
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -10,7 +10,6 @@
 #include <linux/vmalloc.h>
 #include <linux/splice.h>
 #include <linux/compat.h>
-#include <net/checksum.h>
 #include <linux/scatterlist.h>
 #include <linux/instrumented.h>
 #include <linux/iov_iter.h>
@@ -179,13 +178,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
 }
 EXPORT_SYMBOL(iov_iter_init);
 
-static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
-			      __wsum sum, size_t off)
-{
-	__wsum next = csum_partial_copy_nocheck(from, to, len);
-	return csum_block_add(sum, next, off);
-}
-
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	if (WARN_ON_ONCE(i->data_source))
@@ -1097,87 +1089,6 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
 }
 EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
 
-static __always_inline
-size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
-				size_t len, void *to, void *priv2)
-{
-	__wsum next, *csum = priv2;
-
-	next = csum_and_copy_from_user(iter_from, to + progress, len);
-	*csum = csum_block_add(*csum, next, progress);
-	return next ? 0 : len;
-}
-
-static __always_inline
-size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
-			     size_t len, void *to, void *priv2)
-{
-	__wsum *csum = priv2;
-
-	*csum = csum_and_memcpy(to + progress, iter_from, len, *csum, progress);
-	return 0;
-}
-
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
-			       struct iov_iter *i)
-{
-	if (WARN_ON_ONCE(!i->data_source))
-		return 0;
-	return iterate_and_advance2(i, bytes, addr, csum,
-				    copy_from_user_iter_csum,
-				    memcpy_from_iter_csum);
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter);
-
-static __always_inline
-size_t copy_to_user_iter_csum(void __user *iter_to, size_t progress,
-			      size_t len, void *from, void *priv2)
-{
-	__wsum next, *csum = priv2;
-
-	next = csum_and_copy_to_user(from + progress, iter_to, len);
-	*csum = csum_block_add(*csum, next, progress);
-	return next ? 0 : len;
-}
-
-static __always_inline
-size_t memcpy_to_iter_csum(void *iter_to, size_t progress,
-			   size_t len, void *from, void *priv2)
-{
-	__wsum *csum = priv2;
-
-	*csum = csum_and_memcpy(iter_to, from + progress, len, *csum, progress);
-	return 0;
-}
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
-			     struct iov_iter *i)
-{
-	struct csum_state *csstate = _csstate;
-	__wsum sum;
-
-	if (WARN_ON_ONCE(i->data_source))
-		return 0;
-	if (unlikely(iov_iter_is_discard(i))) {
-		// can't use csum_memcpy() for that one - data is not copied
-		csstate->csum = csum_block_add(csstate->csum,
-					       csum_partial(addr, bytes, 0),
-					       csstate->off);
-		csstate->off += bytes;
-		return bytes;
-	}
-
-	sum = csum_shift(csstate->csum, csstate->off);
-
-	bytes = iterate_and_advance2(i, bytes, (void *)addr, &sum,
-				     copy_to_user_iter_csum,
-				     memcpy_to_iter_csum);
-	csstate->csum = csum_shift(sum, csstate->off);
-	csstate->off += bytes;
-	return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_to_iter);
-
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 			     struct iov_iter *i)
 {
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -50,7 +50,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
-#include <linux/uio.h>
+#include <linux/iov_iter.h>
 #include <linux/indirect_call_wrapper.h>
 
 #include <net/protocol.h>
@@ -716,6 +716,54 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
 }
 EXPORT_SYMBOL(zerocopy_sg_from_iter);
 
+static __always_inline
+size_t copy_to_user_iter_csum(void __user *iter_to, size_t progress,
+			      size_t len, void *from, void *priv2)
+{
+	__wsum next, *csum = priv2;
+
+	next = csum_and_copy_to_user(from + progress, iter_to, len);
+	*csum = csum_block_add(*csum, next, progress);
+	return next ? 0 : len;
+}
+
+static __always_inline
+size_t memcpy_to_iter_csum(void *iter_to, size_t progress,
+			   size_t len, void *from, void *priv2)
+{
+	__wsum *csum = priv2;
+
+	*csum = csum_and_memcpy(iter_to, from + progress, len, *csum, progress);
+	return 0;
+}
+
+static size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
+				    struct iov_iter *i)
+{
+	struct csum_state *csstate = _csstate;
+	__wsum sum;
+
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
+	if (unlikely(iov_iter_is_discard(i))) {
+		// can't use csum_memcpy() for that one - data is not copied
+		csstate->csum = csum_block_add(csstate->csum,
+					       csum_partial(addr, bytes, 0),
+					       csstate->off);
+		csstate->off += bytes;
+		return bytes;
+	}
+
+	sum = csum_shift(csstate->csum, csstate->off);
+
+	bytes = iterate_and_advance2(i, bytes, (void *)addr, &sum,
+				     copy_to_user_iter_csum,
+				     memcpy_to_iter_csum);
+	csstate->csum = csum_shift(sum, csstate->off);
+	csstate->off += bytes;
+	return bytes;
+}
+
 /**
  * skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
  *			and update a checksum.
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,6 +62,7 @@
 #include <linux/if_vlan.h>
 #include <linux/mpls.h>
 #include <linux/kcov.h>
+#include <linux/iov_iter.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -6931,3 +6932,35 @@ out:
 	return spliced ?: ret;
 }
 EXPORT_SYMBOL(skb_splice_from_iter);
+
+static __always_inline
+size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
+			     size_t len, void *to, void *priv2)
+{
+	__wsum *csum = priv2;
+
+	*csum = csum_and_memcpy(to + progress, iter_from, len, *csum, progress);
+	return 0;
+}
+
+static __always_inline
+size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
+				size_t len, void *to, void *priv2)
+{
+	__wsum next, *csum = priv2;
+
+	next = csum_and_copy_from_user(iter_from, to + progress, len);
+	*csum = csum_block_add(*csum, next, progress);
+	return next ? 0 : len;
+}
+
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
+			       struct iov_iter *i)
+{
+	if (WARN_ON_ONCE(!i->data_source))
+		return 0;
+	return iterate_and_advance2(i, bytes, addr, csum,
+				    copy_from_user_iter_csum,
+				    memcpy_from_iter_csum);
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter);