mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
[NET]: Get rid of alloc_skb_from_cache
Since this was added originally for Xen, and Xen has recently (~2.6.18) stopped using this function, we can safely get rid of it. Good timing too since this function has started to bit rot.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b4dfa0b1fb
parent 6f29e35e2d
@@ -514,7 +514,6 @@
 	*(.text.dentry_open)
 	*(.text.dentry_iput)
 	*(.text.bio_alloc)
-	*(.text.alloc_skb_from_cache)
 	*(.text.wait_on_page_bit)
 	*(.text.vfs_readdir)
 	*(.text.vfs_lstat)
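The hunk above drops the function's entry from a linker-script function-ordering list: each *(.text.<function>) pattern places one function's own text section, so a deleted function must also lose its ordering entry. A minimal illustration (hypothetical file and function names, not part of this patch), assuming the objects are built with -ffunction-sections so every function is emitted into its own .text.<name> section:

/* foo.c -- illustrative only, not from this commit.
 * Compile with:  gcc -ffunction-sections -c foo.c
 * The function below then lands in its own section, ".text.alloc_widget",
 * which a linker script can position with a *(.text.alloc_widget) entry,
 * just like the *(.text.alloc_skb_from_cache) line removed above.
 */
int alloc_widget(void)
{
	return 42;
}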
@@ -346,9 +346,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, 1, -1);
 }
 
-extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
-					    unsigned int size,
-					    gfp_t priority);
 extern void kfree_skbmem(struct sk_buff *skb);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
 				 gfp_t priority);
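With the declaration gone, the generic allocators that remain in skbuff.h cover the same need: alloc_skb() and alloc_skb_fclone() are thin wrappers around __alloc_skb(), as the hunk context shows, and the data area simply comes from kmalloc rather than from a caller-supplied kmem_cache. A hedged sketch of what a former caller would do instead (function and parameter names here are hypothetical, not from the patch):

#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Sketch only: allocate a buffer the generic way instead of via the
 * removed alloc_skb_from_cache(). alloc_skb() kmallocs the data area;
 * skb_reserve() opens whatever headroom the caller needs.
 */
static struct sk_buff *example_alloc_buf(unsigned int size,
					 unsigned int headroom)
{
	struct sk_buff *skb;

	skb = alloc_skb(size + headroom, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);	/* leave room for protocol headers */
	return skb;
}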
@@ -196,61 +196,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	goto out;
 }
 
-/**
- *	alloc_skb_from_cache	-	allocate a network buffer
- *	@cp: kmem_cache from which to allocate the data area
- *		(object size must be big enough for @size bytes + skb overheads)
- *	@size: size to allocate
- *	@gfp_mask: allocation mask
- *
- *	Allocate a new &sk_buff. The returned buffer has no headroom and
- *	tail room of size bytes. The object has a reference count of one.
- *	The return is the buffer. On a failure the return is %NULL.
- *
- *	Buffers may only be allocated from interrupts using a @gfp_mask of
- *	%GFP_ATOMIC.
- */
-struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
-				     unsigned int size,
-				     gfp_t gfp_mask)
-{
-	struct sk_buff *skb;
-	u8 *data;
-
-	/* Get the HEAD */
-	skb = kmem_cache_alloc(skbuff_head_cache,
-			       gfp_mask & ~__GFP_DMA);
-	if (!skb)
-		goto out;
-
-	/* Get the DATA. */
-	size = SKB_DATA_ALIGN(size);
-	data = kmem_cache_alloc(cp, gfp_mask);
-	if (!data)
-		goto nodata;
-
-	memset(skb, 0, offsetof(struct sk_buff, truesize));
-	skb->truesize = size + sizeof(struct sk_buff);
-	atomic_set(&skb->users, 1);
-	skb->head = data;
-	skb->data = data;
-	skb->tail = data;
-	skb->end  = data + size;
-
-	atomic_set(&(skb_shinfo(skb)->dataref), 1);
-	skb_shinfo(skb)->nr_frags = 0;
-	skb_shinfo(skb)->gso_size = 0;
-	skb_shinfo(skb)->gso_segs = 0;
-	skb_shinfo(skb)->gso_type = 0;
-	skb_shinfo(skb)->frag_list = NULL;
-out:
-	return skb;
-nodata:
-	kmem_cache_free(skbuff_head_cache, skb);
-	skb = NULL;
-	goto out;
-}
-
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
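The surviving context at the end of the hunk is the kernel-doc for __netdev_alloc_skb(), the helper drivers use for RX buffers. As a brief, illustrative usage sketch (driver and variable names are hypothetical, not from this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: refill one RX buffer with netdev_alloc_skb(), the
 * GFP_ATOMIC wrapper around __netdev_alloc_skb(). NET_IP_ALIGN keeps
 * the IP header aligned once the Ethernet header is in place.
 */
static struct sk_buff *example_rx_refill(struct net_device *dev,
					 unsigned int buf_len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}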