net: introduce struct net_hotdata
Instead of spreading networking critical fields all over the places,
add a custom net_hotdata structure so that we can precisely control
its layout.

In this first patch, move:

- gro_normal_batch used in rx (GRO stack)
- offload_base used in rx and tx (GRO and TSO stacks)

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20240306160031.874438-2-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 2658b5a8a4
parent d3423ed9b8
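
The consolidation the commit message describes (grouping read-mostly fast-path fields into one explicitly aligned structure instead of scattered globals) can be sketched outside the kernel. Below is a minimal userspace C illustration of that layout pattern, not the kernel code: the struct and field names mirror the patch, while the list stub, the GCC aligned attribute, and the 64-byte cache-line size are assumptions of the sketch (the actual patch uses struct list_head, LIST_HEAD_INIT and __cacheline_aligned, as the diff below shows).

/*
 * Minimal userspace sketch of the layout pattern; NOT the kernel code.
 * The list stub, the aligned attribute and the 64-byte cache-line size
 * are illustrative assumptions.
 */
#include <stdio.h>
#include <stddef.h>

struct list_stub { struct list_stub *next, *prev; };

struct net_hotdata {
	struct list_stub offload_base;	/* rx/tx: GRO and TSO offload lookups */
	int gro_normal_batch;		/* rx: GRO list-RX batching limit */
} __attribute__((aligned(64)));		/* keep both hot fields on one known cache line */

static struct net_hotdata net_hotdata = {
	.offload_base = { &net_hotdata.offload_base, &net_hotdata.offload_base },
	.gro_normal_batch = 8,
};

int main(void)
{
	/* Both fields now live at fixed offsets inside one aligned object. */
	printf("offload_base at offset %zu, gro_normal_batch at offset %zu\n",
	       offsetof(struct net_hotdata, offload_base),
	       offsetof(struct net_hotdata, gro_normal_batch));
	printf("sizeof = %zu, alignment = %zu\n",
	       sizeof(struct net_hotdata), _Alignof(struct net_hotdata));
	return 0;
}

Built with e.g. gcc -std=c11, it just prints the field offsets, showing that the two hot fields share a single, predictable cache line instead of landing wherever the linker placed two unrelated globals.
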
include/linux/netdevice.h
@@ -4796,7 +4796,6 @@ void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
 extern int		netdev_max_backlog;
 extern int		dev_rx_weight;
 extern int		dev_tx_weight;
-extern int		gro_normal_batch;
 
 enum {
 	NESTED_SYNC_IMM_BIT,
include/net/gro.h
@@ -9,6 +9,7 @@
 #include <net/ip6_checksum.h>
 #include <linux/skbuff.h>
 #include <net/udp.h>
+#include <net/hotdata.h>
 
 struct napi_gro_cb {
 	union {
@@ -446,7 +447,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
 {
 	list_add_tail(&skb->list, &napi->rx_list);
 	napi->rx_count += segs;
-	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
+	if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
 		gro_normal_list(napi);
 }
 
@@ -493,6 +494,4 @@ static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *
 #endif
 }
 
-extern struct list_head offload_base;
-
 #endif /* _NET_IPV6_GRO_H */
include/net/hotdata.h (new file, 15 lines)
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_HOTDATA_H
+#define _NET_HOTDATA_H
+
+#include <linux/types.h>
+
+/* Read mostly data used in network fast paths. */
+struct net_hotdata {
+	struct list_head	offload_base;
+	int			gro_normal_batch;
+};
+
+extern struct net_hotdata net_hotdata;
+
+#endif /* _NET_HOTDATA_H */
net/core/Makefile
@@ -18,6 +18,7 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
 obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
 
 obj-y += net-sysfs.o
+obj-y += hotdata.o
 obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
 obj-$(CONFIG_PROC_FS) += net-procfs.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
net/core/gro.c
@@ -10,9 +10,6 @@
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
 
 static DEFINE_SPINLOCK(offload_lock);
-struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
-/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
-int gro_normal_batch __read_mostly = 8;
 
 /**
  * dev_add_offload - register offload handlers
@@ -31,7 +28,7 @@ void dev_add_offload(struct packet_offload *po)
 	struct packet_offload *elem;
 
 	spin_lock(&offload_lock);
-	list_for_each_entry(elem, &offload_base, list) {
+	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
 		if (po->priority < elem->priority)
 			break;
 	}
@@ -55,7 +52,7 @@ EXPORT_SYMBOL(dev_add_offload);
  */
 static void __dev_remove_offload(struct packet_offload *po)
 {
-	struct list_head *head = &offload_base;
+	struct list_head *head = &net_hotdata.offload_base;
 	struct packet_offload *po1;
 
 	spin_lock(&offload_lock);
@@ -235,9 +232,9 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 
 static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 {
+	struct list_head *head = &net_hotdata.offload_base;
 	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
-	struct list_head *head = &offload_base;
 	int err = -ENOENT;
 
 	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
@@ -444,7 +441,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 {
 	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
 	struct gro_list *gro_list = &napi->gro_hash[bucket];
-	struct list_head *head = &offload_base;
+	struct list_head *head = &net_hotdata.offload_base;
 	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
 	struct sk_buff *pp = NULL;
@@ -550,7 +547,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
 struct packet_offload *gro_find_receive_by_type(__be16 type)
 {
-	struct list_head *offload_head = &offload_base;
+	struct list_head *offload_head = &net_hotdata.offload_base;
 	struct packet_offload *ptype;
 
 	list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -564,7 +561,7 @@ EXPORT_SYMBOL(gro_find_receive_by_type);
 
 struct packet_offload *gro_find_complete_by_type(__be16 type)
 {
-	struct list_head *offload_head = &offload_base;
+	struct list_head *offload_head = &net_hotdata.offload_base;
 	struct packet_offload *ptype;
 
 	list_for_each_entry_rcu(ptype, offload_head, list) {
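
A side note on the dev_add_offload() hunk above: a handler is linked into net_hotdata.offload_base in ascending priority order, so the GRO/GSO lookups try lower priority values first. The sketch below is a hedged userspace approximation of that insertion order: the singly linked list, the name field and the absence of locking are simplifications of the sketch, while the real code walks a struct list_head under offload_lock and readers use RCU.

/*
 * Userspace sketch of priority-ordered offload registration; NOT the
 * kernel list API.  Fields and locking are simplified.
 */
#include <stdio.h>

struct packet_offload {
	int priority;
	const char *name;		/* illustrative only */
	struct packet_offload *next;
};

static struct packet_offload *offload_base;	/* stands in for the list head */

static void dev_add_offload_sketch(struct packet_offload *po)
{
	struct packet_offload **link = &offload_base;

	/* Skip entries whose priority is <= the new one, insert before the rest. */
	while (*link && (*link)->priority <= po->priority)
		link = &(*link)->next;
	po->next = *link;
	*link = po;
}

int main(void)
{
	struct packet_offload ip = { .priority = 0, .name = "ipv4" };
	struct packet_offload vlan = { .priority = 10, .name = "vlan" };
	struct packet_offload tun = { .priority = 5, .name = "tunnel" };

	dev_add_offload_sketch(&vlan);
	dev_add_offload_sketch(&ip);
	dev_add_offload_sketch(&tun);

	for (struct packet_offload *p = offload_base; p; p = p->next)
		printf("%s (prio %d)\n", p->name, p->priority);
	/* prints ipv4, tunnel, vlan: lowest priority value first */
	return 0;
}
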
net/core/gso.c
@@ -17,7 +17,7 @@ struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
 	struct packet_offload *ptype;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ptype, &offload_base, list) {
+	list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
 		if (ptype->type == type && ptype->callbacks.gso_segment) {
 			segs = ptype->callbacks.gso_segment(skb, features);
 			break;
@@ -48,7 +48,7 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 	__skb_pull(skb, vlan_depth);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ptype, &offload_base, list) {
+	list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
 		if (ptype->type == type && ptype->callbacks.gso_segment) {
 			segs = ptype->callbacks.gso_segment(skb, features);
 			break;
net/core/hotdata.c (new file, 9 lines)
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/hotdata.h>
+#include <linux/cache.h>
+#include <linux/list.h>
+
+struct net_hotdata net_hotdata __cacheline_aligned = {
+	.offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
+	.gro_normal_batch = 8,
+};
net/core/sysctl_net_core.c
@@ -23,6 +23,7 @@
 #include <net/net_ratelimit.h>
 #include <net/busy_poll.h>
 #include <net/pkt_sched.h>
+#include <net/hotdata.h>
 
 #include "dev.h"
 
@@ -632,7 +633,7 @@ static struct ctl_table net_core_table[] = {
 	},
 	{
 		.procname	= "gro_normal_batch",
-		.data		= &gro_normal_batch,
+		.data		= &net_hotdata.gro_normal_batch,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,