mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-06 05:02:31 +00:00
mptcp: drop last_snd and MPTCP_RESET_SCHEDULER

Since the burst check conditions have moved out of the function
mptcp_subflow_get_send(), it makes all msk->last_snd useless. This patch
drops them as well as the macro MPTCP_RESET_SCHEDULER.

Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
Signed-off-by: Mat Martineau <martineau@kernel.org>
Link: https://lore.kernel.org/r/20230821-upstream-net-next-20230818-v1-2-0c860fb256a8@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
c5b4297dee
commit
ebc1e08f01
@@ -299,15 +299,8 @@ void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
 
 	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
 	msk = mptcp_sk(sk);
-	if (subflow->backup != bkup) {
+	if (subflow->backup != bkup)
 		subflow->backup = bkup;
-		mptcp_data_lock(sk);
-		if (!sock_owned_by_user(sk))
-			msk->last_snd = NULL;
-		else
-			__set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags);
-		mptcp_data_unlock(sk);
-	}
 
 	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
 }
@@ -472,9 +472,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
 
 	slow = lock_sock_fast(ssk);
 	if (prio) {
-		if (subflow->backup != backup)
-			msk->last_snd = NULL;
-
 		subflow->send_mp_prio = 1;
 		subflow->backup = backup;
 		subflow->request_bkup = backup;
@@ -1438,16 +1438,13 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 
 	burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
 	wmem = READ_ONCE(ssk->sk_wmem_queued);
-	if (!burst) {
-		msk->last_snd = NULL;
+	if (!burst)
 		return ssk;
-	}
 
 	subflow = mptcp_subflow_ctx(ssk);
 	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
 					   READ_ONCE(ssk->sk_pacing_rate) * burst,
 					   burst + wmem);
-	msk->last_snd = ssk;
 	msk->snd_burst = burst;
 	return ssk;
 }
@@ -2379,9 +2376,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 		WRITE_ONCE(msk->first, NULL);
 
 out:
-	if (ssk == msk->last_snd)
-		msk->last_snd = NULL;
-
 	if (need_push)
 		__mptcp_push_pending(sk, 0);
 }
@@ -3046,7 +3040,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 	 * subflow
 	 */
 	mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
-	msk->last_snd = NULL;
 	WRITE_ONCE(msk->flags, 0);
 	msk->cb_flags = 0;
 	msk->push_pending = 0;
@@ -3316,8 +3309,6 @@ static void mptcp_release_cb(struct sock *sk)
 			__mptcp_set_connected(sk);
 		if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
 			__mptcp_error_report(sk);
-		if (__test_and_clear_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags))
-			msk->last_snd = NULL;
 	}
 
 	__mptcp_update_rmem(sk);
@@ -123,7 +123,6 @@
 #define MPTCP_RETRANSMIT	4
 #define MPTCP_FLUSH_JOIN_LIST	5
 #define MPTCP_CONNECTED		6
-#define MPTCP_RESET_SCHEDULER	7
 
 struct mptcp_skb_cb {
 	u64 map_seq;
@@ -269,7 +268,6 @@ struct mptcp_sock {
 	u64	rcv_data_fin_seq;
 	u64	bytes_retrans;
 	int	rmem_fwd_alloc;
-	struct sock	*last_snd;
 	int	snd_burst;
 	int	old_wspace;
 	u64	recovery_snd_nxt;	/* in recovery mode accept up to this seq;
Loading…
Reference in New Issue
Block a user