mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2024-12-29 09:13:38 +00:00
gro: remove rcu_read_lock/rcu_read_unlock from gro_complete handlers
All gro_complete() handlers are called from napi_gro_complete() while rcu_read_lock() has been called. There is no point stacking more rcu_read_lock() calls.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
fc1ca3348a
commit
627b94f75b
@ -545,13 +545,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
|
||||
gh_len = geneve_hlen(gh);
|
||||
type = gh->proto_type;
|
||||
|
||||
rcu_read_lock();
|
||||
ptype = gro_find_complete_by_type(type);
|
||||
if (ptype)
|
||||
err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
skb_set_inner_mac_header(skb, nhoff + gh_len);
|
||||
|
||||
return err;
|
||||
|
@ -513,14 +513,12 @@ static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
|
||||
struct packet_offload *ptype;
|
||||
int err = -ENOENT;
|
||||
|
||||
rcu_read_lock();
|
||||
ptype = gro_find_complete_by_type(type);
|
||||
if (ptype)
|
||||
err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
|
||||
ipv6_gro_complete, inet_gro_complete,
|
||||
skb, nhoff + sizeof(*vhdr));
|
||||
|
||||
rcu_read_unlock();
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -466,14 +466,12 @@ int eth_gro_complete(struct sk_buff *skb, int nhoff)
|
||||
if (skb->encapsulation)
|
||||
skb_set_inner_mac_header(skb, nhoff);
|
||||
|
||||
rcu_read_lock();
|
||||
ptype = gro_find_complete_by_type(type);
|
||||
if (ptype != NULL)
|
||||
err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
|
||||
ipv6_gro_complete, inet_gro_complete,
|
||||
skb, nhoff + sizeof(*eh));
|
||||
|
||||
rcu_read_unlock();
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(eth_gro_complete);
|
||||
|
@ -1612,10 +1612,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
|
||||
csum_replace2(&iph->check, iph->tot_len, newlen);
|
||||
iph->tot_len = newlen;
|
||||
|
||||
rcu_read_lock();
|
||||
ops = rcu_dereference(inet_offloads[proto]);
|
||||
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
|
||||
goto out_unlock;
|
||||
goto out;
|
||||
|
||||
/* Only need to add sizeof(*iph) to get to the next hdr below
|
||||
* because any hdr with option will have been flushed in
|
||||
@ -1625,9 +1624,7 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
|
||||
tcp4_gro_complete, udp4_gro_complete,
|
||||
skb, nhoff + sizeof(*iph));
|
||||
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -266,19 +266,16 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
|
||||
const struct net_offload *ops;
|
||||
int err = -ENOSYS;
|
||||
|
||||
rcu_read_lock();
|
||||
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
|
||||
ops = rcu_dereference(offloads[proto]);
|
||||
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
|
||||
goto out_unlock;
|
||||
goto out;
|
||||
|
||||
err = ops->callbacks.gro_complete(skb, nhoff);
|
||||
|
||||
skb_set_inner_mac_header(skb, nhoff);
|
||||
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -480,18 +477,16 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
|
||||
return err;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
|
||||
ops = rcu_dereference(offloads[proto]);
|
||||
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
|
||||
goto out_unlock;
|
||||
goto out;
|
||||
|
||||
err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
|
||||
|
||||
skb_set_inner_mac_header(skb, nhoff + guehlen);
|
||||
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -253,13 +253,10 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
|
||||
if (greh->flags & GRE_CSUM)
|
||||
grehlen += GRE_HEADER_SECTION;
|
||||
|
||||
rcu_read_lock();
|
||||
ptype = gro_find_complete_by_type(type);
|
||||
if (ptype)
|
||||
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
skb_set_inner_mac_header(skb, nhoff + grehlen);
|
||||
|
||||
return err;
|
||||
|
@ -667,7 +667,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
|
||||
|
||||
uh->len = newlen;
|
||||
|
||||
rcu_read_lock();
|
||||
sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
|
||||
udp4_lib_lookup_skb, skb, uh->source, uh->dest);
|
||||
if (sk && udp_sk(sk)->gro_complete) {
|
||||
@ -688,7 +687,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
|
||||
} else {
|
||||
err = udp_gro_complete_segment(skb);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (skb->remcsum_offload)
|
||||
skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
|
||||
|
@ -327,18 +327,14 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
|
||||
|
||||
iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
|
||||
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
|
||||
goto out_unlock;
|
||||
goto out;
|
||||
|
||||
err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
|
||||
udp6_gro_complete, skb, nhoff);
|
||||
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user