mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-29 09:16:33 +00:00)
sock: Use sock_owned_by_user_nocheck() instead of sk_lock.owned.
This patch moves sock_release_ownership() down in include/net/sock.h
and replaces some sk_lock.owned tests with sock_owned_by_user_nocheck().

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
Link: https://lore.kernel.org/r/20211208062158.54132-1-kuniyu@amazon.co.jp
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent be3158290d
commit 33d60fbd21
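For context, sock_owned_by_user_nocheck() is a thin accessor over sk->sk_lock.owned (its one-line body appears as context in the second hunk below), so the call-site changes here are mechanical. A minimal standalone sketch of the refactor, plain C rather than kernel code, with all *_model names invented for illustration:

/* Sketch only: models replacing an open-coded flag test with an
 * accessor, so a later change (e.g. a lockdep annotation) touches one
 * function instead of every call site.
 */
#include <stdbool.h>
#include <stdio.h>

struct sk_lock_model { int owned; };                  /* models sk->sk_lock */
struct sock_model { struct sk_lock_model sk_lock; };  /* models struct sock */

/* Models sock_owned_by_user_nocheck(): a bare read, no lockdep check. */
static bool owned_nocheck_model(const struct sock_model *sk)
{
	return sk->sk_lock.owned;
}

int main(void)
{
	struct sock_model sk = { .sk_lock = { .owned = 1 } };

	if (sk.sk_lock.owned)           /* before: open-coded field test */
		puts("owned (open-coded)");
	if (owned_nocheck_model(&sk))   /* after: via the accessor */
		puts("owned (accessor)");
	return 0;
}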
include/net/sock.h
@@ -1635,16 +1635,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 		__sk_mem_reclaim(sk, SK_RECLAIM_CHUNK);
 }
 
-static inline void sock_release_ownership(struct sock *sk)
-{
-	if (sk->sk_lock.owned) {
-		sk->sk_lock.owned = 0;
-
-		/* The sk_lock has mutex_unlock() semantics: */
-		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
-	}
-}
-
 /*
  * Macro so as to not evaluate some arguments when
  * lockdep is not enabled.
@@ -1771,12 +1761,23 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
 	return sk->sk_lock.owned;
 }
 
+static inline void sock_release_ownership(struct sock *sk)
+{
+	if (sock_owned_by_user_nocheck(sk)) {
+		sk->sk_lock.owned = 0;
+
+		/* The sk_lock has mutex_unlock() semantics: */
+		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
+	}
+}
+
 /* no reclassification while locks are held */
 static inline bool sock_allow_reclassification(const struct sock *csk)
 {
 	struct sock *sk = (struct sock *)csk;
 
-	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+	return !sock_owned_by_user_nocheck(sk) &&
+	       !spin_is_locked(&sk->sk_lock.slock);
 }
 
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
net/core/sock.c
@@ -3292,7 +3292,7 @@ void lock_sock_nested(struct sock *sk, int subclass)
 
 	might_sleep();
 	spin_lock_bh(&sk->sk_lock.slock);
-	if (sk->sk_lock.owned)
+	if (sock_owned_by_user_nocheck(sk))
 		__lock_sock(sk);
 	sk->sk_lock.owned = 1;
 	spin_unlock_bh(&sk->sk_lock.slock);
@@ -3323,7 +3323,7 @@ bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
 	might_sleep();
 	spin_lock_bh(&sk->sk_lock.slock);
 
-	if (!sk->sk_lock.owned) {
+	if (!sock_owned_by_user_nocheck(sk)) {
 		/*
 		 * Fast path return with bottom halves disabled and
 		 * sock::sk_lock.slock held.
net/llc/llc_proc.c
@@ -195,7 +195,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v)
 		   timer_pending(&llc->pf_cycle_timer.timer),
 		   timer_pending(&llc->rej_sent_timer.timer),
 		   timer_pending(&llc->busy_state_timer.timer),
-		   !!sk->sk_backlog.tail, !!sk->sk_lock.owned);
+		   !!sk->sk_backlog.tail, sock_owned_by_user_nocheck(sk));
 out:
 	return 0;
 }
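The net/core/sock.c hunks above are the subtler callers: lock_sock_nested() and __lock_sock_fast() test ownership while already holding sk_lock.slock, which is why the helper must stay a bare read with no lockdep check. A rough userspace model of that fast-path test, assuming pthread spinlocks in place of the kernel's spin_lock_bh() and with all *_model names invented here:

/* Userspace sketch of the socket lock's fast path: a spinlock guards
 * an "owned" flag; if nobody owns the sock, the caller keeps the
 * spinlock (fast path). The kernel's slow path, which sleeps until the
 * owner releases the sock, is elided.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sock_model {
	pthread_spinlock_t slock;	/* models sk_lock.slock */
	int owned;			/* models sk_lock.owned */
};

static bool owned_nocheck_model(const struct sock_model *sk)
{
	return sk->owned;
}

/* Models the shape of __lock_sock_fast(): returns true with the
 * spinlock still held on the fast path, false when the slow path
 * would have to run.
 */
static bool lock_sock_fast_model(struct sock_model *sk)
{
	pthread_spin_lock(&sk->slock);
	if (!owned_nocheck_model(sk))
		return true;		/* fast path: slock stays held */
	pthread_spin_unlock(&sk->slock);
	return false;			/* slow path elided in this sketch */
}

int main(void)
{
	struct sock_model sk = { .owned = 0 };

	pthread_spin_init(&sk.slock, PTHREAD_PROCESS_PRIVATE);
	if (lock_sock_fast_model(&sk)) {
		puts("fast path: lock acquired");
		pthread_spin_unlock(&sk.slock);
	}
	pthread_spin_destroy(&sk.slock);
	return 0;
}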