net/udp: Add 4-tuple hash list basis

Add a new hash list, hash4, to the UDP table. It will be used to
implement a 4-tuple hash for connected UDP sockets. This patch adds the
hlist to the table and implements the helpers and initialization; the
4-tuple hash itself is implemented in the following patch.

hash4 uses hlist_nulls so that a lookup which is wrongly moved onto
another hlist by a concurrent rehash can detect it and restart, because
rehash() can run concurrently with lookup().
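
For reference, the usual nulls-list lookup pattern looks roughly like
the sketch below (hypothetical names, not code from this series). Each
chain ends in a "nulls" marker carrying its slot index, which is why
every hash4 head is initialised with its slot number:

#include <linux/rculist_nulls.h>
#include <linux/types.h>

struct obj {
	u32 key;
	struct hlist_nulls_node node;
};

/* Minimal sketch of an RCU nulls-list lookup: a reader that a concurrent
 * rehash moved onto another chain ends on a nulls marker with the wrong
 * slot index and simply restarts.
 */
static struct obj *obj_lookup(struct hlist_nulls_head *slots,
			      unsigned int slot, u32 key)
{
	struct hlist_nulls_node *node;
	struct obj *obj;

begin:
	hlist_nulls_for_each_entry_rcu(obj, node, &slots[slot], node) {
		if (obj->key == key)
			return obj;	/* caller revalidates under RCU */
	}
	if (get_nulls_value(node) != slot)
		goto begin;		/* ended on the wrong chain: retry */
	return NULL;
}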

Co-developed-by: Cambda Zhu <cambda@linux.alibaba.com>
Signed-off-by: Cambda Zhu <cambda@linux.alibaba.com>
Co-developed-by: Fred Chen <fred.cc@alibaba-inc.com>
Signed-off-by: Fred Chen <fred.cc@alibaba-inc.com>
Co-developed-by: Yubing Qiu <yubing.qiuyubing@alibaba-inc.com>
Signed-off-by: Yubing Qiu <yubing.qiuyubing@alibaba-inc.com>
Signed-off-by: Philo Lu <lulie@linux.alibaba.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Philo Lu, 2024-11-14 18:52:05 +08:00, committed by David S. Miller
parent accdd51dc7
commit dab78a1745
3 changed files with 97 additions and 5 deletions

@@ -56,6 +56,12 @@ struct udp_sock {
int pending; /* Any pending frames ? */
__u8 encap_type; /* Is this an Encapsulation socket? */
#if !IS_ENABLED(CONFIG_BASE_SMALL)
/* For UDP 4-tuple hash */
__u16 udp_lrpa_hash;
struct hlist_nulls_node udp_lrpa_node;
#endif
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -206,6 +212,11 @@ static inline void udp_allow_gso(struct sock *sk)
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
#if !IS_ENABLED(CONFIG_BASE_SMALL)
#define udp_lrpa_for_each_entry_rcu(__up, node, list) \
hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
#endif
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
#endif /* _LINUX_UDP_H */
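
To illustrate the intent of the new iterator, a hypothetical sketch of a
hash4 slot walk follows (the real 4-tuple lookup only lands in the
follow-up patch; the function below and its matching logic are not part
of this commit):

#include <linux/udp.h>
#include <net/inet_sock.h>

/* Hypothetical sketch: walk one hash4 slot under RCU and match a
 * connected socket against the 4-tuple of an incoming packet.
 */
static struct sock *udp_lookup4_sketch(struct hlist_nulls_head *nulls_head,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, __be16 dport)
{
	struct hlist_nulls_node *node;
	struct udp_sock *up;

	udp_lrpa_for_each_entry_rcu(up, node, nulls_head) {
		struct sock *sk = (struct sock *)up;
		const struct inet_sock *inet = inet_sk(sk);

		if (inet->inet_rcv_saddr == daddr &&	/* local addr  */
		    inet->inet_num == ntohs(dport) &&	/* local port  */
		    inet->inet_daddr == saddr &&	/* remote addr */
		    inet->inet_dport == sport)		/* remote port */
			return sk;
	}
	/* A complete lookup must also check get_nulls_value(node) and
	 * restart if a concurrent rehash moved it onto another chain.
	 */
	return NULL;
}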

@@ -50,14 +50,21 @@ struct udp_skb_cb {
#define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
/**
* struct udp_hslot - UDP hash slot used by udp_table.hash
* struct udp_hslot - UDP hash slot used by udp_table.hash/hash4
*
* @head: head of list of sockets
* @nulls_head: head of list of sockets, only used by hash4
* @count: number of sockets in 'head' list
* @lock: spinlock protecting changes to head/count
*/
struct udp_hslot {
struct hlist_head head;
union {
struct hlist_head head;
/* hash4 uses hlist_nulls to avoid moving wrongly onto another
* hlist, because rehash() can happen with lookup().
*/
struct hlist_nulls_head nulls_head;
};
int count;
spinlock_t lock;
} __aligned(2 * sizeof(long));
@@ -82,12 +89,17 @@ struct udp_hslot_main {
*
* @hash: hash table, sockets are hashed on (local port)
* @hash2: hash table, sockets are hashed on (local port, local address)
* @hash4: hash table, connected sockets are hashed on
* (local port, local address, remote port, remote address)
* @mask: number of slots in hash tables, minus 1
* @log: log2(number of slots in hash table)
*/
struct udp_table {
struct udp_hslot *hash;
struct udp_hslot_main *hash2;
#if !IS_ENABLED(CONFIG_BASE_SMALL)
struct udp_hslot *hash4;
#endif
unsigned int mask;
unsigned int log;
};
@@ -114,13 +126,80 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
static inline void udp_table_hash4_init(struct udp_table *table)
{
}
static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
unsigned int hash)
{
BUILD_BUG();
return NULL;
}
static inline bool udp_hashed4(const struct sock *sk)
{
return false;
}
static inline unsigned int udp_hash4_slot_size(void)
{
return 0;
}
static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
{
return false;
}
static inline void udp_hash4_inc(struct udp_hslot *hslot2)
{
}
static inline void udp_hash4_dec(struct udp_hslot *hslot2)
{
}
#else /* !CONFIG_BASE_SMALL */
/* Must be called with table->hash2 initialized */
static inline void udp_table_hash4_init(struct udp_table *table)
{
table->hash4 = (void *)(table->hash2 + (table->mask + 1));
for (int i = 0; i <= table->mask; i++) {
table->hash2[i].hash4_cnt = 0;
INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i);
table->hash4[i].count = 0;
spin_lock_init(&table->hash4[i].lock);
}
}
static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
unsigned int hash)
{
return &table->hash4[hash & table->mask];
}
static inline bool udp_hashed4(const struct sock *sk)
{
return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node);
}
static inline unsigned int udp_hash4_slot_size(void)
{
return sizeof(struct udp_hslot);
}
static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
{
return UDP_HSLOT_MAIN(hslot2)->hash4_cnt;
}
static inline void udp_hash4_inc(struct udp_hslot *hslot2)
{
UDP_HSLOT_MAIN(hslot2)->hash4_cnt++;
}
static inline void udp_hash4_dec(struct udp_hslot *hslot2)
{
UDP_HSLOT_MAIN(hslot2)->hash4_cnt--;
}
#endif /* CONFIG_BASE_SMALL */
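
For orientation, a sketch of how these helpers are expected to cooperate
(hypothetical callers with simplified locking; the real users arrive in
the follow-up patch): connect() rehashes the socket onto a hash4 chain
and bumps hash4_cnt on its hash2 slot, and the receive path only pays
for a hash4 walk when that counter is non-zero:

/* Hypothetical sketch, locking simplified: put a newly connected socket
 * on its hash4 chain and let the hash2 slot advertise that fact.
 */
static void udp_connect_sketch(struct udp_table *table, struct sock *sk,
			       unsigned int hash2, unsigned int hash4)
{
	struct udp_hslot *hslot2 = udp_hashslot2(table, hash2);
	struct udp_hslot *hslot4 = udp_hashslot4(table, hash4);

	spin_lock_bh(&hslot4->lock);
	if (!udp_hashed4(sk)) {		/* not on a hash4 chain yet */
		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
					 &hslot4->nulls_head);
		hslot4->count++;
		udp_hash4_inc(hslot2);	/* receive path will now try hash4 */
	}
	spin_unlock_bh(&hslot4->lock);
}

static bool udp_rx_should_try_hash4(struct udp_table *table, unsigned int hash2)
{
	/* hash4_cnt lives in the hash2 slot the lookup already touches,
	 * so slots holding only unconnected sockets skip the extra walk.
	 */
	return udp_has_hash4(udp_hashslot2(table, hash2));
}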

@@ -3427,7 +3427,8 @@ void __init udp_table_init(struct udp_table *table, const char *name)
{
unsigned int i, slot_size;
slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main);
slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
udp_hash4_slot_size();
table->hash = alloc_large_system_hash(name,
slot_size,
uhash_entries,
@@ -3482,7 +3483,8 @@ static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_ent
if (!udptable)
goto out;
slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main);
slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
udp_hash4_slot_size();
udptable->hash = vmalloc_huge(hash_entries * slot_size,
GFP_KERNEL_ACCOUNT);
if (!udptable->hash)
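
Putting the two call sites together, a rough sketch (not verbatim kernel
code) of how the enlarged allocation is carved up afterwards: every slot
index gets one hash slot, one hash2 slot and, when CONFIG_BASE_SMALL is
disabled, one hash4 slot, all taken from the single table->hash buffer
sized with the slot_size above:

static void udp_table_carve_sketch(struct udp_table *table)
{
	unsigned int slots = table->mask + 1;

	/* hash2 follows the hash array (pre-existing behaviour) */
	table->hash2 = (void *)(table->hash + slots);
	/* hash4 follows the hash2 array; udp_table_hash4_init() also
	 * zeroes hash4_cnt and seeds each nulls head with its slot index.
	 */
	udp_table_hash4_init(table);
}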