Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
tcp: kill eff_sacks "cache", the sole user can calculate itself
Also fixes an insignificant bug that would cause sending of a stale SACK block (would occur in some corner cases).

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit cabeccbd17
parent 758ce5c8d1
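The sole remaining user of the cached value is tcp_established_options(), which now derives the effective SACK count as num_sacks + dsack and clamps the number of blocks to the option space still free in the segment. A minimal userspace sketch of that sizing logic follows; the helper name sack_blocks_to_send() and the standalone constant definitions are illustrative only (the real values live in the kernel headers), not code from this patch.

/*
 * Sketch of the sizing logic tcp_established_options() now performs on the
 * fly instead of reading a cached rx_opt.eff_sacks.
 */
#include <stdio.h>

#define MAX_TCP_OPTION_SPACE      40  /* bytes of TCP option space per segment */
#define TCPOLEN_TSTAMP_ALIGNED    12  /* timestamp option, padded to 4 bytes   */
#define TCPOLEN_SACK_BASE_ALIGNED  4  /* SACK kind+len plus NOP padding        */
#define TCPOLEN_SACK_PERBLOCK      8  /* each SACK block: two 32-bit seqs      */

static unsigned int sack_blocks_to_send(unsigned int num_sacks,
					unsigned int dsack,
					unsigned int size_used)
{
	unsigned int eff_sacks = num_sacks + dsack;   /* was tp->rx_opt.eff_sacks */
	unsigned int remaining = MAX_TCP_OPTION_SPACE - size_used;
	unsigned int fit = (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			   TCPOLEN_SACK_PERBLOCK;

	return eff_sacks < fit ? eff_sacks : fit;     /* min_t() in the kernel */
}

int main(void)
{
	/* With timestamps on, 28 bytes remain: at most (28 - 4) / 8 = 3 blocks. */
	printf("%u\n", sack_blocks_to_send(4, 0, TCPOLEN_TSTAMP_ALIGNED)); /* 3 */
	printf("%u\n", sack_blocks_to_send(1, 1, TCPOLEN_TSTAMP_ALIGNED)); /* 2 */
	return 0;
}

Because the count is recomputed at the point of use, it can never go stale the way the cached eff_sacks could in the corner cases mentioned above.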
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
@@ -218,7 +218,6 @@ struct tcp_options_received {
 		snd_wscale : 4,	/* Window scaling received from sender	*/
 		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
 /*	SACKs data	*/
-	u8	eff_sacks;	/* Size of SACK array to send with next packet */
 	u8	num_sacks;	/* Number of SACK blocks		*/
 	u16	user_mss;	/* mss requested by user in ioctl	*/
 	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
diff --git a/include/net/tcp.h b/include/net/tcp.h
@@ -926,7 +926,6 @@ extern void tcp_done(struct sock *sk);
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
 	rx_opt->dsack = 0;
-	rx_opt->eff_sacks = 0;
 	rx_opt->num_sacks = 0;
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
@@ -4099,7 +4099,6 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
 		tp->duplicate_sack[0].end_seq = end_seq;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
 	}
 }
 
@@ -4154,8 +4153,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 			 * Decrease num_sacks.
 			 */
 			tp->rx_opt.num_sacks--;
-			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-					       tp->rx_opt.dsack;
 			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
 				sp[i] = sp[i + 1];
 			continue;
@@ -4218,7 +4215,6 @@ new_sack:
 	sp->start_seq = seq;
 	sp->end_seq = end_seq;
 	tp->rx_opt.num_sacks++;
-	tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -4232,7 +4228,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
 	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
-		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
 	}
 
@@ -4253,11 +4248,8 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 		this_sack++;
 		sp++;
 	}
-	if (num_sacks != tp->rx_opt.num_sacks) {
+	if (num_sacks != tp->rx_opt.num_sacks)
 		tp->rx_opt.num_sacks = num_sacks;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-				       tp->rx_opt.dsack;
-	}
 }
 
 /* This one checks to see if we can put data from the
@@ -4333,10 +4325,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
 	TCP_ECN_accept_cwr(tp, skb);
 
-	if (tp->rx_opt.dsack) {
+	if (tp->rx_opt.dsack)
 		tp->rx_opt.dsack = 0;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-	}
 
 	/* Queue data for delivery to the user.
 	 * Packets in sequence go to the receive queue.
@@ -4456,7 +4446,6 @@ drop:
 		if (tcp_is_sack(tp)) {
 			tp->rx_opt.num_sacks = 1;
 			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks = 1;
 			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
 			tp->selective_acks[0].end_seq =
 						TCP_SKB_CB(skb)->end_seq;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
@@ -434,9 +434,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->rx_opt.saw_tstamp = 0;
 
 		newtp->rx_opt.dsack = 0;
-		newtp->rx_opt.eff_sacks = 0;
-
 		newtp->rx_opt.num_sacks = 0;
+
 		newtp->urg_data = 0;
 
 		if (sock_flag(newsk, SOCK_KEEPOPEN))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
@@ -441,10 +441,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 			*ptr++ = htonl(sp[this_sack].end_seq);
 		}
 
-		if (tp->rx_opt.dsack) {
+		if (tp->rx_opt.dsack)
 			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-		}
 	}
 }
 
@@ -550,6 +548,7 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned size = 0;
+	unsigned int eff_sacks;
 
 #ifdef CONFIG_TCP_MD5SIG
 	*md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -568,10 +567,11 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
 
-	if (unlikely(tp->rx_opt.eff_sacks)) {
+	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
+	if (unlikely(eff_sacks)) {
 		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
 		opts->num_sack_blocks =
-			min_t(unsigned, tp->rx_opt.eff_sacks,
+			min_t(unsigned, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
 		size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -1418,7 +1418,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	    icsk->icsk_mtup.probe_size ||
 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
 	    tp->snd_cwnd < 11 ||
-	    tp->rx_opt.eff_sacks)
+	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
 		return -1;
 
 	/* Very simple search strategy: just double the MSS. */
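For reference, the constants used in the tcp_established_options() hunk come straight from the SACK option's wire format (RFC 2018): kind 5, length 2 + 8*n, with each block carrying two 32-bit sequence numbers. The layout sketch below is illustrative only; the kernel emits the option with raw pointer writes in tcp_options_write(), not via a struct like this.

/*
 * Illustrative wire layout behind TCPOLEN_SACK_BASE (2),
 * TCPOLEN_SACK_BASE_ALIGNED (4, after NOP padding) and
 * TCPOLEN_SACK_PERBLOCK (8).
 */
#include <stdint.h>

struct sack_block_wire {
	uint32_t start_seq;	/* left edge of the SACKed range  */
	uint32_t end_seq;	/* right edge of the SACKed range */
};				/* 8 bytes == TCPOLEN_SACK_PERBLOCK */

struct sack_option_wire {
	uint8_t kind;		/* 5 = SACK                        */
	uint8_t length;		/* 2 + 8 * number of blocks        */
	struct sack_block_wire blocks[];
};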