sk_buff: add skb extension infrastructure
This adds an optional extension infrastructure, with ipsec (xfrm) and
bridge netfilter as first users. objdiff shows no changes if the kernel
is built without xfrm and br_netfilter support.

The third (planned future) user is Multipath TCP, which is still
out-of-tree. MPTCP needs to map logical mptcp sequence numbers to the
tcp sequence numbers used by individual subflows. This DSS mapping is
read from tcp option space on receive and written to tcp option space
on transmitted tcp packets that are part of an MPTCP connection.

Extending skb_shared_info or adding a private data field to skb fclones
doesn't work for incoming skbs, so a different DSS propagation method
would be required for the receive side.

mptcp has the same requirements as secpath/bridge netfilter:

1. extension memory is released when the sk_buff is freed.
2. data is shared after cloning an skb (the clone inherits the extension).
3. adding an extension to an skb will COW the extension buffer if needed.

The "MPTCP upstreaming" effort adds an SKB_EXT_MPTCP extension to store
the mapping for tx and rx processing.

Two new members are added to sk_buff:

1. An 'active_extensions' byte (filling a hole), telling which extensions
   are available for this skb. This has two purposes:
   a) it avoids the need to initialize the pointer.
   b) it allows "deleting" an extension by clearing its bit value in
      ->active_extensions.

   While it would be possible to store the active_extensions byte in the
   extension struct instead of sk_buff, there is one problem with this:
   when an extension has to be disabled, we can always clear the bit in
   skb->active_extensions. But if it were stored in the extension buffer
   itself, we might have to COW it first if we are dealing with a cloned
   skb. On kmalloc failure we would be unable to turn an extension off.

2. An extension pointer, located at the end of the sk_buff. If the
   active_extensions byte is 0, the pointer is undefined; it is not
   initialized on skb allocation.

This adds extra code to the skb clone and free paths (to deal with
refcount/free of the extension area), but it replaces similar code that
manages skb->nf_bridge and skb->sp structs in the followup patches of
the series.

It is possible to add support for extensions that are not preserved on
clones/copies. To do this, one would need to define a bitmask of all
extensions that need copy/cow semantics, change __skb_ext_copy() to
check ->active_extensions & SKB_EXT_PRESERVE_ON_CLONE, and then just set
->active_extensions to 0 on the new clone. This isn't done here because
all extensions added here need the copy/cow semantics.

v2: Allocate the entire extension space using a kmem_cache. The upside
is that this allows better tracking of used memory; the downside is that
we will allocate more space than strictly needed in most cases (it's
unlikely that all extensions are active/needed at the same time for the
same skb). The allocated memory (except the small extension header) is
not cleared, so there is no additional overhead aside from memory usage.

Avoid the atomic_dec_and_test operation in skb_ext_put() by using the
same trick kfree_skbmem() uses with fclone_ref: if the refcount is 1,
there is no concurrent user and we can free right away.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
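To make the intended call pattern concrete, here is a minimal kernel-style
sketch of how a future user (such as the planned MPTCP extension) would
consume this API. Only skb_ext_add(), skb_ext_find() and skb_ext_del()
come from this patch; SKB_EXT_EXAMPLE, struct skb_example_ext and the
mapping fields are hypothetical, and a real user would also have to
register its id in enum skb_ext_id and its size in skb_ext_type_len[].

/* Hypothetical consumer of the new extension API (not part of this patch). */
struct skb_example_ext {
        u32 map_seq;    /* e.g. a per-skb logical sequence number */
        u16 map_len;
};

static int example_store_mapping(struct sk_buff *skb, u32 seq, u16 len)
{
        struct skb_example_ext *ext;

        /* Allocates the extension area (or COWs it if the skb is cloned)
         * and returns a pointer to this extension's slot inside it.
         */
        ext = skb_ext_add(skb, SKB_EXT_EXAMPLE);
        if (!ext)
                return -ENOMEM;

        ext->map_seq = seq;
        ext->map_len = len;
        return 0;
}

static void example_read_mapping(struct sk_buff *skb)
{
        /* Cheap lookup: a bit test on skb->active_extensions plus an
         * offset addition when the extension is present.
         */
        struct skb_example_ext *ext = skb_ext_find(skb, SKB_EXT_EXAMPLE);

        if (!ext)
                return;

        /* ... use ext->map_seq / ext->map_len ... */

        /* Dropping the extension clears its bit; the backing memory is
         * released once no extension bit remains set.
         */
        skb_ext_del(skb, SKB_EXT_EXAMPLE);
}

Note that deleting the last active extension is what ultimately returns the
kmem_cache slot, via __skb_ext_del()/__skb_ext_put() added below.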
@@ -245,6 +245,7 @@ struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
@@ -636,6 +637,7 @@ typedef unsigned char *sk_buff_data_t;
 * @queue_mapping: Queue mapping for multiqueue devices
 * @xmit_more: More SKBs are pending for this queue
 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 * @active_extensions: active extensions (skb_ext_id types)
 * @ndisc_nodetype: router type (from link layer)
 * @ooo_okay: allow the mapping of a socket to a queue to be changed
 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -665,6 +667,7 @@ typedef unsigned char *sk_buff_data_t;
 * @data: Data head pointer
 * @truesize: Buffer size
 * @users: User count - see {datagram,tcp}.c
 * @extensions: allocated extensions, valid if active_extensions is nonzero
 */

struct sk_buff {
@@ -747,7 +750,9 @@ struct sk_buff {
                                head_frag:1,
                                xmit_more:1,
                                pfmemalloc:1;

#ifdef CONFIG_SKB_EXTENSIONS
        __u8                    active_extensions;
#endif
        /* fields enclosed in headers_start/headers_end are copied
         * using a single memcpy() in __copy_skb_header()
         */
@@ -869,6 +874,11 @@ struct sk_buff {
                                *data;
        unsigned int            truesize;
        refcount_t              users;

#ifdef CONFIG_SKB_EXTENSIONS
        /* only useable after checking ->active_extensions != 0 */
        struct skb_ext          *extensions;
#endif
};

#ifdef __KERNEL__
@@ -3896,6 +3906,105 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
        atomic_inc(&nfct->use);
}
#endif

#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        SKB_EXT_BRIDGE_NF,
#endif
        SKB_EXT_NUM, /* must be last */
};

/**
 * struct skb_ext - sk_buff extensions
 * @refcnt: 1 on allocation, deallocated on 0
 * @offset: offset to add to @data to obtain extension address
 * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
 * @data: start of extension data, variable sized
 *
 * Note: offsets/lengths are stored in chunks of 8 bytes, this allows
 * to use 'u8' types while allowing up to 2kb worth of extension data.
 */
struct skb_ext {
        refcount_t refcnt;
        u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
        u8 chunks;              /* same */
        char data[0] __aligned(8);
};

void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);

static inline void skb_ext_put(struct sk_buff *skb)
{
        if (skb->active_extensions)
                __skb_ext_put(skb->extensions);
}

static inline void skb_ext_get(struct sk_buff *skb)
{
        if (skb->active_extensions) {
                struct skb_ext *ext = skb->extensions;

                if (ext)
                        refcount_inc(&ext->refcnt);
        }
}

static inline void __skb_ext_copy(struct sk_buff *dst,
                                  const struct sk_buff *src)
{
        dst->active_extensions = src->active_extensions;

        if (src->active_extensions) {
                struct skb_ext *ext = src->extensions;

                refcount_inc(&ext->refcnt);
                dst->extensions = ext;
        }
}

static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
{
        skb_ext_put(dst);
        __skb_ext_copy(dst, src);
}

static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
        return !!ext->offset[i];
}

static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
        return skb->active_extensions & (1 << id);
}

static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
        if (skb_ext_exist(skb, id))
                __skb_ext_del(skb, id);
}

static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
        if (skb_ext_exist(skb, id)) {
                struct skb_ext *ext = skb->extensions;

                return (void *)ext + (ext->offset[id] << 3);
        }

        return NULL;
}
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_get(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
#endif /* CONFIG_SKB_EXTENSIONS */

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
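The struct skb_ext kerneldoc above stores offsets and lengths in 8-byte
chunks so that u8 fields can cover roughly 2 KB of extension space. The
short standalone userspace program below (an illustration only, not kernel
code; the 8-byte header and 20-byte payload sizes are made up) walks
through that arithmetic, including the base + (offset << 3) address
computation used by skb_ext_find().

#include <stdio.h>

#define ALIGN8(x)        (((x) + 7u) & ~7u)
#define CHUNKS_OF(bytes) (ALIGN8(bytes) / 8u)  /* mirrors SKB_EXT_CHUNKSIZEOF */

int main(void)
{
        /* Assume an 8-byte header and a 20-byte extension payload;
         * the payload rounds up to 3 chunks (24 bytes).
         */
        unsigned int hdr_chunks = CHUNKS_OF(8);
        unsigned int ext_chunks = CHUNKS_OF(20);

        unsigned int offset = hdr_chunks;              /* stored in a u8 in the kernel */
        unsigned int total  = hdr_chunks + ext_chunks; /* corresponds to ->chunks */

        char slot[2048];                       /* stand-in for the kmem_cache slot */
        char *ext_addr = slot + (offset << 3); /* same math as offset[id] << 3 */

        printf("offset = %u chunks (%u bytes), total = %u chunks (%u bytes)\n",
               offset, offset * 8, total, total * 8);
        printf("extension payload starts %td bytes into the slot\n",
               ext_addr - slot);
        printf("a u8 offset can address at most %u bytes\n", 255u * 8u);
        return 0;
}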
@@ -51,6 +51,9 @@ config NET_INGRESS
config NET_EGRESS
        bool

config SKB_EXTENSIONS
        bool

menu "Networking options"

source "net/packet/Kconfig"
@@ -79,6 +79,9 @@
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

@@ -617,6 +620,7 @@ void skb_release_head_state(struct sk_buff *skb)
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(skb->nf_bridge);
#endif
        skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
@@ -796,6 +800,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->dev = old->dev;
        memcpy(new->cb, old->cb, sizeof(old->cb));
        skb_dst_copy(new, old);
        __skb_ext_copy(new, old);
#ifdef CONFIG_XFRM
        new->sp = secpath_get(old->sp);
#endif
@@ -3902,6 +3907,40 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(skb_gro_receive);

#ifdef CONFIG_SKB_EXTENSIONS
#define SKB_EXT_ALIGN_VALUE 8
#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)

static const u8 skb_ext_type_len[] = {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
#endif
};

static __always_inline unsigned int skb_ext_total_length(void)
{
        return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
#endif
                0;
}

static void skb_extensions_init(void)
{
        BUILD_BUG_ON(SKB_EXT_NUM >= 8);
        BUILD_BUG_ON(skb_ext_total_length() > 255);

        skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
                                             SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
                                             0,
                                             SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                             NULL);
}
#else
static void skb_extensions_init(void) {}
#endif

void __init skb_init(void)
{
        skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
@@ -3916,6 +3955,7 @@ void __init skb_init(void)
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                                NULL);
        skb_extensions_init();
}

static int
@@ -5554,3 +5594,118 @@ void skb_condense(struct sk_buff *skb)
         */
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}

#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
        return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

static struct skb_ext *skb_ext_alloc(void)
{
        struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);

        if (new) {
                memset(new->offset, 0, sizeof(new->offset));
                refcount_set(&new->refcnt, 1);
        }

        return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old)
{
        struct skb_ext *new;

        if (refcount_read(&old->refcnt) == 1)
                return old;

        new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
        if (!new)
                return NULL;

        memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
        refcount_set(&new->refcnt, 1);

        __skb_ext_put(old);
        return new;
}

/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of clones buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
        struct skb_ext *new, *old = NULL;
        unsigned int newlen, newoff;

        if (skb->active_extensions) {
                old = skb->extensions;

                new = skb_ext_maybe_cow(old);
                if (!new)
                        return NULL;

                if (__skb_ext_exist(old, id)) {
                        if (old != new)
                                skb->extensions = new;
                        goto set_active;
                }

                newoff = old->chunks;
        } else {
                newoff = SKB_EXT_CHUNKSIZEOF(*new);

                new = skb_ext_alloc();
                if (!new)
                        return NULL;
        }

        newlen = newoff + skb_ext_type_len[id];
        new->chunks = newlen;
        new->offset[id] = newoff;
        skb->extensions = new;
set_active:
        skb->active_extensions |= 1 << id;
        return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);

void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
        struct skb_ext *ext = skb->extensions;

        skb->active_extensions &= ~(1 << id);
        if (skb->active_extensions == 0) {
                skb->extensions = NULL;
                __skb_ext_put(ext);
        }
}
EXPORT_SYMBOL(__skb_ext_del);

void __skb_ext_put(struct skb_ext *ext)
{
        /* If this is last clone, nothing can increment
         * it after check passes. Avoids one atomic op.
         */
        if (refcount_read(&ext->refcnt) == 1)
                goto free_now;

        if (!refcount_dec_and_test(&ext->refcnt))
                return;
free_now:
        kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */
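__skb_ext_put() above skips the atomic read-modify-write in the common
single-owner case, the same trick the commit message credits to
kfree_skbmem() and fclone_ref. Below is a minimal userspace model of that
pattern (an illustration only, using C11 atomics rather than the kernel's
refcount_t; struct obj and obj_put() are made up).

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_uint refcnt;
        /* ... payload ... */
};

static void obj_put(struct obj *o)
{
        /* Holding the last reference means no other thread can legally
         * create a new one (it would need an existing reference to copy),
         * so a plain load suffices and the atomic RMW is skipped.
         */
        if (atomic_load_explicit(&o->refcnt, memory_order_acquire) == 1)
                goto free_now;

        /* Slow path: several holders, fall back to a real dec-and-test. */
        if (atomic_fetch_sub_explicit(&o->refcnt, 1,
                                      memory_order_acq_rel) != 1)
                return;
free_now:
        free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refcnt, 1);
        obj_put(o);     /* takes the fast path: refcnt == 1, freed directly */
        return 0;
}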
@@ -533,6 +533,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
        skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
        to->ipvs_property = from->ipvs_property;
#endif
@@ -581,6 +581,7 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
        skb_ext_copy(to, from);
        skb_copy_secmark(to, from);
}