author     Joerg Roedel <jroedel@suse.de>  2023-10-26 17:05:58 +0200
committer  Joerg Roedel <jroedel@suse.de>  2023-10-26 17:05:58 +0200
commit     3613047280ec42a4e1350fdc1a6dd161ff4008cc (patch)
tree       6cf2f03f518537f3229e6066a8b2638755f0bb48 /net/ipv4/tcp_input.c
parent     bbc70e0aec287e164344b1a071bd46466a4f29b3 (diff)
parent     05d3ef8bba77c1b5f98d941d8b2d4aeab8118ef1 (diff)
Merge tag 'v6.6-rc7' into core
Linux 6.6-rc7
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  13
1 file changed, 13 insertions, 0 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 06fe1cf645d5..8afb0950a697 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -253,6 +253,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 		if (unlikely(len > icsk->icsk_ack.rcv_mss +
 			     MAX_TCP_OPTION_SPACE))
 			tcp_gro_dev_warn(sk, skb, len);
+		/* If the skb has a len of exactly 1*MSS and has the PSH bit
+		 * set then it is likely the end of an application write. So
+		 * more data may not be arriving soon, and yet the data sender
+		 * may be waiting for an ACK if cwnd-bound or using TX zero
+		 * copy. So we set ICSK_ACK_PUSHED here so that
+		 * tcp_cleanup_rbuf() will send an ACK immediately if the app
+		 * reads all of the data and is not ping-pong. If len > MSS
+		 * then this logic does not matter (and does not hurt) because
+		 * tcp_cleanup_rbuf() will always ACK immediately if the app
+		 * reads data and there is more than an MSS of unACKed data.
+		 */
+		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
+			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 	} else {
 		/* Otherwise, we make more careful check taking into account,
 		 * that SACKs block is variable.
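For readers without the kernel sources at hand, here is a minimal user-space sketch of the behaviour the comment in the hunk describes: the receive path notes that a PSH-flagged, MSS-sized segment probably ends an application write, and the read-side cleanup hook then ACKs immediately once the application has drained the receive queue on a non-ping-pong connection. The struct, constants, and function names below are illustrative stand-ins chosen for this sketch, not the kernel's own; only ICSK_ACK_PUSHED, TCPHDR_PSH, tcp_measure_rcv_mss() and tcp_cleanup_rbuf() in the hunk above are real identifiers.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins that model the kernel state involved. */
#define TOY_ACK_PUSHED  0x1   /* models ICSK_ACK_PUSHED */
#define TOY_PSH_FLAG    0x08  /* models TCPHDR_PSH      */

struct toy_sock {
	unsigned int rcv_mss;     /* receiver's estimate of sender MSS */
	unsigned int ack_pending; /* models icsk->icsk_ack.pending     */
	unsigned int unread;      /* bytes still queued for the app    */
	bool ping_pong;           /* interactive (ping-pong) session?  */
};

/* Models the receive path: an MSS-sized segment carrying PSH likely
 * ends an application write, so remember that an ACK is wanted.
 */
static void toy_measure_rcv_mss(struct toy_sock *sk, unsigned int len,
				unsigned int tcp_flags)
{
	if (len >= sk->rcv_mss && (tcp_flags & TOY_PSH_FLAG))
		sk->ack_pending |= TOY_ACK_PUSHED;
}

/* Models tcp_cleanup_rbuf(): after the app reads, ACK immediately if
 * the pushed data has been fully consumed on a non-ping-pong socket.
 */
static void toy_cleanup_rbuf(struct toy_sock *sk)
{
	if ((sk->ack_pending & TOY_ACK_PUSHED) && sk->unread == 0 &&
	    !sk->ping_pong) {
		printf("sending immediate ACK\n");
		sk->ack_pending &= ~TOY_ACK_PUSHED;
	}
}

int main(void)
{
	struct toy_sock sk = { .rcv_mss = 1460, .ping_pong = false };

	/* A full-MSS segment with PSH arrives: mark the pending ACK. */
	sk.unread += 1460;
	toy_measure_rcv_mss(&sk, 1460, TOY_PSH_FLAG);

	/* The application reads everything; the cleanup hook then
	 * sends the ACK right away instead of waiting for the
	 * delayed-ACK timer.
	 */
	sk.unread = 0;
	toy_cleanup_rbuf(&sk);
	return 0;
}
```

In the tree itself, the drain and ping-pong checks live on the tcp_cleanup_rbuf() side in net/ipv4/tcp.c; the hunk above only adds the marking side in tcp_measure_rcv_mss().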