mptcp: fix shutdown vs fallback race
If the MPTCP socket shutdown happens before a fallback to TCP, and all
the pending data have already been spooled, we never close the TCP
connection.

Address the issue by explicitly checking for the critical condition at
fallback time.

Fixes: 1e39e5a32a ("mptcp: infinite mapping sending")
Fixes: 0348c690ed ("mptcp: add the fallback check")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 76a13b3157
commit d51991e2e3
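For context, the race the commit message describes can be sketched from userspace as follows. This is a hedged illustration only, not a reproducer from this series: the server address and port, and the peer-induced fallback (e.g. via an infinite mapping), are assumptions.

/* sketch: MPTCP-level shutdown happens before a fallback to TCP */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* from linux/in.h, for older libc headers */
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),	/* assumed test server */
	};
	char buf[16];
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0 || inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr) != 1 ||
	    connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("setup");
		return 1;
	}

	write(fd, "ping", 4);

	/* MPTCP-level shutdown: queues a DATA_FIN. If the connection
	 * falls back to plain TCP after this point, with all pending
	 * data already spooled, the TCP FIN was never sent before this
	 * fix, so the connection never closed.
	 */
	shutdown(fd, SHUT_WR);

	/* wait for the peer to close; hangs on affected kernels */
	while (read(fd, buf, sizeof(buf)) > 0)
		;
	close(fd);
	return 0;
}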
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
@@ -967,7 +967,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
 			goto reset;
 		subflow->mp_capable = 0;
 		pr_fallback(msk);
-		__mptcp_do_fallback(msk);
+		mptcp_do_fallback(ssk);
 		return false;
 	}
 
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
@@ -1245,7 +1245,7 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
 	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
 	pr_fallback(msk);
-	__mptcp_do_fallback(msk);
+	mptcp_do_fallback(ssk);
 }
 
 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
@@ -927,12 +927,25 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
 }
 
-static inline void mptcp_do_fallback(struct sock *sk)
+static inline void mptcp_do_fallback(struct sock *ssk)
 {
-	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	struct sock *sk = subflow->conn;
+	struct mptcp_sock *msk;
 
+	msk = mptcp_sk(sk);
 	__mptcp_do_fallback(msk);
+	if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
+		gfp_t saved_allocation = ssk->sk_allocation;
+
+		/* we are in a atomic (BH) scope, override ssk default for data
+		 * fin allocation
+		 */
+		ssk->sk_allocation = GFP_ATOMIC;
+		ssk->sk_shutdown |= SEND_SHUTDOWN;
+		tcp_shutdown(ssk, SEND_SHUTDOWN);
+		ssk->sk_allocation = saved_allocation;
+	}
 }
 
 #define pr_fallback(a)	pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
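The protocol.h hunk carries the actual fix: mptcp_do_fallback() now takes the subflow socket and, after marking the msk as fallen back, checks whether an MPTCP-level shutdown is already pending (snd_data_fin_enable) while the subflow has not yet sent a FIN. In that case the DATA_FIN that can no longer be delivered at the MPTCP level is converted into a plain TCP FIN on the subflow via tcp_shutdown(). Because fallback can be reached in atomic (BH) context, ssk->sk_allocation is temporarily overridden to GFP_ATOMIC around the call, as the in-code comment notes.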
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
@@ -1279,7 +1279,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
 			return false;
 		}
 
-		__mptcp_do_fallback(msk);
+		mptcp_do_fallback(ssk);
 	}
 
 	skb = skb_peek(&ssk->sk_receive_queue);
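Taken together, the options.c, protocol.c and subflow.c hunks make every fallback path go through mptcp_do_fallback(ssk) instead of calling __mptcp_do_fallback(msk) directly, so the shutdown check above is applied no matter which code path triggers the fallback.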