net:rds: Fix possible deadlock in rds_message_put
Functions rds_still_queued and rds_clear_recv_queue lock a given socket in order to safely iterate over the incoming RDS messages. However, calling rds_inc_put while under this lock creates a potential deadlock: rds_inc_put may eventually call rds_message_purge, which will lock m_rs_lock. This is the incorrect locking order, since m_rs_lock is meant to be locked before the socket.

To fix this, we move the message item to a local list or variable that won't need rs_recv_lock protection. Then we can safely call rds_inc_put on any item stored locally after rs_recv_lock is released.

Fixes: bdbe6fbc6a2f ("RDS: recv.c")
Reported-by: syzbot+f9db6ff27b9bfdcfeca0@syzkaller.appspotmail.com
Reported-by: syzbot+dcd73ff9291e6d34b3ab@syzkaller.appspotmail.com
Signed-off-by: Allison Henderson <allison.henderson@oracle.com>
Link: https://lore.kernel.org/r/20240209022854.200292-1-allison.henderson@oracle.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
parent 9f30831390
commit f1acf1ac84
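The deadlock described above is a lock-ordering inversion: rds_still_queued() and rds_clear_recv_queue() hold rs_recv_lock while dropping what may be the last reference to a message, and that final put can reach rds_message_purge(), which takes m_rs_lock, the lock that is supposed to be acquired first. The fix therefore defers every rds_inc_put() until rs_recv_lock has been released. As an illustration of that pattern only, here is a minimal self-contained userspace sketch (plain C with pthreads; msg, msg_put(), dequeue_one(), queue_lock and refs_lock are hypothetical stand-ins, not RDS symbols):

/*
 * Minimal userspace sketch of the "defer the final put until the queue lock
 * is released" pattern used by the fix. All names are hypothetical; this is
 * not RDS code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg {
	int refcount;          /* protected by refs_lock */
	struct msg *next;      /* queue linkage, protected by queue_lock */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t refs_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of m_rs_lock */
static struct msg *queue_head;

/* Dropping the last reference takes refs_lock; doing that while queue_lock
 * is still held would nest the two locks in the wrong order. */
static void msg_put(struct msg *m)
{
	int last;

	pthread_mutex_lock(&refs_lock);
	last = (--m->refcount == 0);
	pthread_mutex_unlock(&refs_lock);

	if (last)
		free(m);
}

/* Unlink under the lock, remember the victim locally, put it after unlock. */
static void dequeue_one(void)
{
	struct msg *to_drop = NULL;

	pthread_mutex_lock(&queue_lock);
	if (queue_head) {
		to_drop = queue_head;
		queue_head = to_drop->next;
	}
	pthread_mutex_unlock(&queue_lock);

	if (to_drop)
		msg_put(to_drop);   /* safe: queue_lock is no longer held */
}

int main(void)
{
	struct msg *m = calloc(1, sizeof(*m));

	m->refcount = 1;
	queue_head = m;
	dequeue_one();
	printf("queue drained, no locks held across the put\n");
	return 0;
}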
@@ -425,6 +425,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
 	struct sock *sk = rds_rs_to_sk(rs);
 	int ret = 0;
 	unsigned long flags;
+	struct rds_incoming *to_drop = NULL;
 
 	write_lock_irqsave(&rs->rs_recv_lock, flags);
 	if (!list_empty(&inc->i_item)) {
@@ -435,11 +436,14 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
 					      -be32_to_cpu(inc->i_hdr.h_len),
 					      inc->i_hdr.h_dport);
 			list_del_init(&inc->i_item);
-			rds_inc_put(inc);
+			to_drop = inc;
 		}
 	}
 	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
 
+	if (to_drop)
+		rds_inc_put(to_drop);
+
 	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
 	return ret;
 }
@@ -758,16 +762,21 @@ void rds_clear_recv_queue(struct rds_sock *rs)
 	struct sock *sk = rds_rs_to_sk(rs);
 	struct rds_incoming *inc, *tmp;
 	unsigned long flags;
+	LIST_HEAD(to_drop);
 
 	write_lock_irqsave(&rs->rs_recv_lock, flags);
 	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
 		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
 				      -be32_to_cpu(inc->i_hdr.h_len),
 				      inc->i_hdr.h_dport);
+		list_move(&inc->i_item, &to_drop);
+	}
+	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+
+	list_for_each_entry_safe(inc, tmp, &to_drop, i_item) {
 		list_del_init(&inc->i_item);
 		rds_inc_put(inc);
 	}
-	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
 }
 
 /*
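The second half of the patch applies the same idea to the whole receive queue: rds_clear_recv_queue() now moves each entry onto a local to_drop list while rs_recv_lock is held, then walks that local list and drops the references after the lock is released. Below is a hedged userspace sketch of that drain pattern under the same assumptions as before (plain C, hypothetical names, a bare singly linked list standing in for the kernel's list_head):

/*
 * Sketch of draining a shared queue onto a local list under the lock and
 * doing the reference drops afterwards. Hypothetical example, not RDS code.
 */
#include <pthread.h>
#include <stdlib.h>

struct msg {
	int refcount;
	struct msg *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *queue_head;

static void msg_put(struct msg *m)
{
	/* In the kernel this is where the purge path would take its own lock;
	 * here the last put simply frees the message. */
	if (--m->refcount == 0)
		free(m);
}

static void clear_queue(void)
{
	struct msg *to_drop, *next;

	/* Detach every queued message while the lock is held... */
	pthread_mutex_lock(&queue_lock);
	to_drop = queue_head;
	queue_head = NULL;
	pthread_mutex_unlock(&queue_lock);

	/* ...and drop the references only after the lock is released. */
	for (; to_drop; to_drop = next) {
		next = to_drop->next;
		msg_put(to_drop);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct msg *m = calloc(1, sizeof(*m));

		m->refcount = 1;
		m->next = queue_head;
		queue_head = m;
	}
	clear_queue();
	return 0;
}

The patch achieves the same effect with list_move() into a local LIST_HEAD, entry by entry; the sketch detaches the whole chain in one step, but in both cases no reference is dropped while the queue lock is held.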
|
Loading…
x
Reference in New Issue
Block a user