mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-09 06:43:09 +00:00
Merge branch 'raw-ping-fix-locking-in-proc-net-raw-icmp'
Kuniyuki Iwashima says: ==================== raw/ping: Fix locking in /proc/net/{raw,icmp}. The first patch fixes a NULL deref for /proc/net/raw, and the second one fixes the same issue for ping sockets. The first patch also converts hlist_nulls to hlist, because the current code uses sk_nulls_for_each() for lockless readers instead of sk_nulls_for_each_rcu(), which adds a memory barrier, and raw sockets do not use the nulls marker nor SLAB_TYPESAFE_BY_RCU in the first place. OTOH, the ping sockets already use sk_nulls_for_each_rcu(), and such a conversion can be posted later for net-next. ==================== Link: https://lore.kernel.org/r/20230403194959.48928-1-kuniyu@amazon.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
95fac54004
@ -37,7 +37,7 @@ int raw_rcv(struct sock *, struct sk_buff *);
|
||||
struct raw_hashinfo {
|
||||
spinlock_t lock;
|
||||
|
||||
struct hlist_nulls_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
|
||||
struct hlist_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
|
||||
};
|
||||
|
||||
static inline u32 raw_hashfunc(const struct net *net, u32 proto)
|
||||
@ -51,7 +51,7 @@ static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
|
||||
|
||||
spin_lock_init(&hashinfo->lock);
|
||||
for (i = 0; i < RAW_HTABLE_SIZE; i++)
|
||||
INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i);
|
||||
INIT_HLIST_HEAD(&hashinfo->ht[i]);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
|
@ -1089,13 +1089,13 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
|
||||
}
|
||||
|
||||
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
|
||||
__acquires(RCU)
|
||||
__acquires(ping_table.lock)
|
||||
{
|
||||
struct ping_iter_state *state = seq->private;
|
||||
state->bucket = 0;
|
||||
state->family = family;
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock(&ping_table.lock);
|
||||
|
||||
return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
|
||||
}
|
||||
@ -1121,9 +1121,9 @@ void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
||||
EXPORT_SYMBOL_GPL(ping_seq_next);
|
||||
|
||||
void ping_seq_stop(struct seq_file *seq, void *v)
|
||||
__releases(RCU)
|
||||
__releases(ping_table.lock)
|
||||
{
|
||||
rcu_read_unlock();
|
||||
spin_unlock(&ping_table.lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ping_seq_stop);
|
||||
|
||||
|
@ -91,12 +91,12 @@ EXPORT_SYMBOL_GPL(raw_v4_hashinfo);
|
||||
int raw_hash_sk(struct sock *sk)
|
||||
{
|
||||
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_head *hlist;
|
||||
|
||||
hlist = &h->ht[raw_hashfunc(sock_net(sk), inet_sk(sk)->inet_num)];
|
||||
|
||||
spin_lock(&h->lock);
|
||||
__sk_nulls_add_node_rcu(sk, hlist);
|
||||
sk_add_node_rcu(sk, hlist);
|
||||
sock_set_flag(sk, SOCK_RCU_FREE);
|
||||
spin_unlock(&h->lock);
|
||||
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
|
||||
@ -110,7 +110,7 @@ void raw_unhash_sk(struct sock *sk)
|
||||
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
|
||||
|
||||
spin_lock(&h->lock);
|
||||
if (__sk_nulls_del_node_init_rcu(sk))
|
||||
if (sk_del_node_init_rcu(sk))
|
||||
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
|
||||
spin_unlock(&h->lock);
|
||||
}
|
||||
@ -163,16 +163,15 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
|
||||
static int raw_v4_input(struct net *net, struct sk_buff *skb,
|
||||
const struct iphdr *iph, int hash)
|
||||
{
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
int sdif = inet_sdif(skb);
|
||||
struct hlist_head *hlist;
|
||||
int dif = inet_iif(skb);
|
||||
int delivered = 0;
|
||||
struct sock *sk;
|
||||
|
||||
hlist = &raw_v4_hashinfo.ht[hash];
|
||||
rcu_read_lock();
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
if (!raw_v4_match(net, sk, iph->protocol,
|
||||
iph->saddr, iph->daddr, dif, sdif))
|
||||
continue;
|
||||
@ -264,10 +263,9 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
|
||||
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
|
||||
{
|
||||
struct net *net = dev_net(skb->dev);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
int dif = skb->dev->ifindex;
|
||||
int sdif = inet_sdif(skb);
|
||||
struct hlist_head *hlist;
|
||||
const struct iphdr *iph;
|
||||
struct sock *sk;
|
||||
int hash;
|
||||
@ -276,7 +274,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
|
||||
hlist = &raw_v4_hashinfo.ht[hash];
|
||||
|
||||
rcu_read_lock();
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
iph = (const struct iphdr *)skb->data;
|
||||
if (!raw_v4_match(net, sk, iph->protocol,
|
||||
iph->daddr, iph->saddr, dif, sdif))
|
||||
@ -950,14 +948,13 @@ static struct sock *raw_get_first(struct seq_file *seq, int bucket)
|
||||
{
|
||||
struct raw_hashinfo *h = pde_data(file_inode(seq->file));
|
||||
struct raw_iter_state *state = raw_seq_private(seq);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk;
|
||||
|
||||
for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
|
||||
++state->bucket) {
|
||||
hlist = &h->ht[state->bucket];
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each(sk, hlist) {
|
||||
if (sock_net(sk) == seq_file_net(seq))
|
||||
return sk;
|
||||
}
|
||||
@ -970,7 +967,7 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
|
||||
struct raw_iter_state *state = raw_seq_private(seq);
|
||||
|
||||
do {
|
||||
sk = sk_nulls_next(sk);
|
||||
sk = sk_next(sk);
|
||||
} while (sk && sock_net(sk) != seq_file_net(seq));
|
||||
|
||||
if (!sk)
|
||||
@ -989,9 +986,12 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
|
||||
}
|
||||
|
||||
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
|
||||
__acquires(RCU)
|
||||
__acquires(&h->lock)
|
||||
{
|
||||
rcu_read_lock();
|
||||
struct raw_hashinfo *h = pde_data(file_inode(seq->file));
|
||||
|
||||
spin_lock(&h->lock);
|
||||
|
||||
return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(raw_seq_start);
|
||||
@ -1010,9 +1010,11 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
||||
EXPORT_SYMBOL_GPL(raw_seq_next);
|
||||
|
||||
void raw_seq_stop(struct seq_file *seq, void *v)
|
||||
__releases(RCU)
|
||||
__releases(&h->lock)
|
||||
{
|
||||
rcu_read_unlock();
|
||||
struct raw_hashinfo *h = pde_data(file_inode(seq->file));
|
||||
|
||||
spin_unlock(&h->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(raw_seq_stop);
|
||||
|
||||
|
@ -57,8 +57,7 @@ static bool raw_lookup(struct net *net, struct sock *sk,
|
||||
static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
|
||||
{
|
||||
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk;
|
||||
int slot;
|
||||
|
||||
@ -68,7 +67,7 @@ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2
|
||||
rcu_read_lock();
|
||||
for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
|
||||
hlist = &hashinfo->ht[slot];
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
if (raw_lookup(net, sk, r)) {
|
||||
/*
|
||||
* Grab it and keep until we fill
|
||||
@ -142,9 +141,8 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
|
||||
struct net *net = sock_net(skb->sk);
|
||||
struct inet_diag_dump_data *cb_data;
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
int num, s_num, slot, s_slot;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk = NULL;
|
||||
struct nlattr *bc;
|
||||
|
||||
@ -161,7 +159,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
num = 0;
|
||||
|
||||
hlist = &hashinfo->ht[slot];
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
struct inet_sock *inet = inet_sk(sk);
|
||||
|
||||
if (!net_eq(sock_net(sk), net))
|
||||
|
@ -141,10 +141,9 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
|
||||
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
|
||||
{
|
||||
struct net *net = dev_net(skb->dev);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
const struct in6_addr *saddr;
|
||||
const struct in6_addr *daddr;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk;
|
||||
bool delivered = false;
|
||||
__u8 hash;
|
||||
@ -155,7 +154,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
|
||||
hash = raw_hashfunc(net, nexthdr);
|
||||
hlist = &raw_v6_hashinfo.ht[hash];
|
||||
rcu_read_lock();
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
int filtered;
|
||||
|
||||
if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
|
||||
@ -333,15 +332,14 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
|
||||
u8 type, u8 code, int inner_offset, __be32 info)
|
||||
{
|
||||
struct net *net = dev_net(skb->dev);
|
||||
struct hlist_nulls_head *hlist;
|
||||
struct hlist_nulls_node *hnode;
|
||||
struct hlist_head *hlist;
|
||||
struct sock *sk;
|
||||
int hash;
|
||||
|
||||
hash = raw_hashfunc(net, nexthdr);
|
||||
hlist = &raw_v6_hashinfo.ht[hash];
|
||||
rcu_read_lock();
|
||||
sk_nulls_for_each(sk, hnode, hlist) {
|
||||
sk_for_each_rcu(sk, hlist) {
|
||||
/* Note: ipv6_hdr(skb) != skb->data */
|
||||
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user