tcp: lack of available data can also cause TSO defer
author      Eric Dumazet <edumazet@google.com>
            Thu, 6 Dec 2018 17:58:24 +0000 (09:58 -0800)
committer   Sultan Alsawaf <sultan.alsawaf@canonical.com>
            Wed, 24 Jul 2019 15:45:10 +0000 (09:45 -0600)
BugLink: https://bugs.launchpad.net/bugs/1837161
commit f9bfe4e6a9d08d405fe7b081ee9a13e649c97ecf upstream.

tcp_tso_should_defer() can return true in three different cases:

 1) We are cwnd-limited
 2) We are rwnd-limited
 3) We are application limited.

Neal pointed out that my recent fix went too far, since it assumed
that if we were not in case 1), we must be rwnd-limited.

Fix this by properly populating the is_cwnd_limited and
is_rwnd_limited booleans.

After this change, we can finally move the silly check for the FIN
flag so that it applies only in the application-limited case.
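
To make the new classification concrete, here is a minimal standalone
sketch (simplified names, hypothetical should_defer() helper, not the
kernel function itself) of how the tail of tcp_tso_should_defer() now
maps the decision onto the two booleans, with the FIN check applying
only once neither window is the bottleneck:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified model of the new deferral tail: report
     * *why* we defer instead of letting the caller guess "not cwnd => rwnd".
     */
    static bool should_defer(uint32_t send_win, uint32_t cong_win,
                             uint32_t skb_len, bool skb_has_fin,
                             bool *is_cwnd_limited, bool *is_rwnd_limited)
    {
            if (cong_win < send_win) {
                    /* Case 1: the congestion window is the tighter limit. */
                    if (cong_win <= skb_len) {
                            *is_cwnd_limited = true;
                            return true;            /* defer */
                    }
            } else {
                    /* Case 2: the receive window is the tighter limit. */
                    if (send_win <= skb_len) {
                            *is_rwnd_limited = true;
                            return true;            /* defer */
                    }
            }

            /* Case 3: application limited.  A FIN means this skb cannot
             * grow any further, so deferring buys nothing: send now.
             */
            if (skb_has_fin)
                    return false;                   /* send now */

            return true;                            /* defer */
    }

    int main(void)
    {
            bool cwnd_limited = false, rwnd_limited = false;

            /* Both windows comfortably exceed the skb: application
             * limited.  Neither flag is set, whereas the old caller
             * would have labeled this as rwnd-limited.
             */
            bool defer = should_defer(100000, 80000, 3000, false,
                                      &cwnd_limited, &rwnd_limited);

            printf("defer=%d cwnd_limited=%d rwnd_limited=%d\n",
                   defer, cwnd_limited, rwnd_limited);
            return 0;
    }

The key point is that case 3 sets neither flag, so the caller no longer
has to infer "rwnd-limited" from the absence of "cwnd-limited".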

The same move for the EOR bit will be handled in net-next,
since commit 1c09f7d073b1 ("tcp: do not try to defer skbs
with eor mark (MSG_EOR)") is scheduled for linux-4.21.

Tested by running 200 concurrent netperf -t TCP_RR -- -r 60000,100
and checking that none of them was rwnd_limited in the chrono_stat
output of the "ss -ti" command.
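
(Background on where that statistic comes from: later in tcp_write_xmit(),
outside the hunks shown below, the is_rwnd_limited flag feeds the
pre-existing chrono accounting, roughly as follows (abbreviated, kernel
context, not part of this patch):

    if (is_rwnd_limited)
            tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
    else
            tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);

and that accumulated time is what "ss -ti" reports as rwnd_limited.)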

Fixes: 41727549de3e ("tcp: Do not underestimate rwnd_limited")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
net/ipv4/tcp_output.c

index 0fb345c40ac14649e8e814477250d23ba590422d..6e846ae265793c3469d2c48b83664821db79f252 100644
@@ -1940,7 +1940,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-                                bool *is_cwnd_limited, u32 max_segs)
+                                bool *is_cwnd_limited,
+                                bool *is_rwnd_limited,
+                                u32 max_segs)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 age, send_win, cong_win, limit, in_flight;
@@ -1948,9 +1950,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        struct sk_buff *head;
        int win_divisor;
 
-       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-               goto send_now;
-
        if (icsk->icsk_ca_state >= TCP_CA_Recovery)
                goto send_now;
 
@@ -2008,10 +2007,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        if (age < (tp->srtt_us >> 4))
                goto send_now;
 
-       /* Ok, it looks like it is advisable to defer. */
+       /* Ok, it looks like it is advisable to defer.
+        * Three cases are tracked :
+        * 1) We are cwnd-limited
+        * 2) We are rwnd-limited
+        * 3) We are application limited.
+        */
+       if (cong_win < send_win) {
+               if (cong_win <= skb->len) {
+                       *is_cwnd_limited = true;
+                       return true;
+               }
+       } else {
+               if (send_win <= skb->len) {
+                       *is_rwnd_limited = true;
+                       return true;
+               }
+       }
 
-       if (cong_win < send_win && cong_win <= skb->len)
-               *is_cwnd_limited = true;
+       /* If this packet won't get more data, do not wait. */
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               goto send_now;
 
        return true;
 
@@ -2385,11 +2401,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                } else {
                        if (!push_one &&
                            tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-                                                max_segs)) {
-                               if (!is_cwnd_limited)
-                                       is_rwnd_limited = true;
+                                                &is_rwnd_limited, max_segs))
                                break;
-                       }
                }
 
                limit = mss_now;