]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
tcp: pause Fast Open globally after third consecutive timeout
authorYuchung Cheng <ycheng@google.com>
Tue, 12 Dec 2017 21:10:40 +0000 (13:10 -0800)
committerDavid S. Miller <davem@davemloft.net>
Wed, 13 Dec 2017 20:51:12 +0000 (15:51 -0500)
Prior to this patch, active Fast Open is paused on a specific
destination IP address if the previous connections to the
IP address have experienced recurring timeouts. But recent
experiments by Microsoft (https://goo.gl/cykmn7) and Mozilla
browsers indicate the issue is often caused by broken middle-boxes
sitting close to the client. Therefore it is much better user
experience if Fast Open is disabled outright globally to avoid
experiencing further timeouts on connections toward other
destinations.

This patch changes the destination-IP disablement to global
disablement when a connection experiences recurring timeouts
or aborts due to timeout.  Repeated incidents would still
exponentially increase the pause time, starting from an hour.
This is extremely conservative but an unfortunate compromise to
minimize bad experience due to broken middle-boxes.

Reported-by: Dragana Damjanovic <ddamjanovic@mozilla.com>
Reported-by: Patrick McManus <mcmanus@ducksong.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Reviewed-by: Wei Wang <weiwan@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Documentation/networking/ip-sysctl.txt
include/net/tcp.h
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_timer.c

index 46c7e1085efcccc178a644917e5e80fa736781fd..3f2c40d8e6aa895452d6cf15977b080f4d79360e 100644 (file)
@@ -606,6 +606,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
        This time period will grow exponentially when more blackhole issues
        get detected right after Fastopen is re-enabled and will reset to
        initial value when the blackhole issue goes away.
+       0 to disable the blackhole detection.
        By default, it is set to 1hr.
 
 tcp_syn_retries - INTEGER
index 3c3744e52cd15e353a477a29fcf558c9f6931aeb..6939e69d3c373d073791d9a6191b05ea94e8512f 100644 (file)
@@ -1507,8 +1507,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 
 /* From tcp_fastopen.c */
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-                           struct tcp_fastopen_cookie *cookie, int *syn_loss,
-                           unsigned long *last_syn_loss);
+                           struct tcp_fastopen_cookie *cookie);
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost,
                            u16 try_exp);
@@ -1546,7 +1545,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
-void tcp_fastopen_active_timeout_reset(void);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
 
 /* Latencies incurred by various limits for a sender. They are
  * chronograph-like stats that are mutually exclusive.
index 78c192ee03a4fad5a43f91f693354e0889283632..018a484773551f8e67fe20be97b04f6d02104a56 100644 (file)
@@ -379,18 +379,9 @@ fastopen:
 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
 {
-       unsigned long last_syn_loss = 0;
        const struct dst_entry *dst;
-       int syn_loss = 0;
 
-       tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);
-
-       /* Recurring FO SYN losses: no cookie or data in SYN */
-       if (syn_loss > 1 &&
-           time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
-               cookie->len = -1;
-               return false;
-       }
+       tcp_fastopen_cache_get(sk, mss, cookie);
 
        /* Firewall blackhole issue check */
        if (tcp_fastopen_active_should_disable(sk)) {
@@ -448,6 +439,8 @@ EXPORT_SYMBOL(tcp_fastopen_defer_connect);
  * following circumstances:
  *   1. client side TFO socket receives out of order FIN
  *   2. client side TFO socket receives out of order RST
+ *   3. client side TFO socket has timed out three times consecutively during
+ *      or after handshake
  * We disable active side TFO globally for 1hr at first. Then if it
  * happens again, we disable it for 2h, then 4h, 8h, ...
  * And we reset the timeout back to 1hr when we see a successful active
@@ -524,3 +517,20 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
                dst_release(dst);
        }
 }
+
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
+{
+       u32 timeouts = inet_csk(sk)->icsk_retransmits;
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       /* Broken middle-boxes may black-hole Fast Open connection during or
+        * even after the handshake. Be extremely conservative and pause
+        * Fast Open globally after hitting the third consecutive timeout or
+        * exceeding the configured timeout limit.
+        */
+       if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
+           (timeouts == 2 || (timeouts < 2 && expired))) {
+               tcp_fastopen_active_disable(sk);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+       }
+}
index 7097f92d16e5645b35f19d9434d309cf92b337d5..759e6bc8327b1e6122fd6485cdbbe756d81ed608 100644 (file)
@@ -546,8 +546,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 static DEFINE_SEQLOCK(fastopen_seqlock);
 
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-                           struct tcp_fastopen_cookie *cookie,
-                           int *syn_loss, unsigned long *last_syn_loss)
+                           struct tcp_fastopen_cookie *cookie)
 {
        struct tcp_metrics_block *tm;
 
@@ -564,8 +563,6 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                        *cookie = tfom->cookie;
                        if (cookie->len <= 0 && tfom->try_exp == 1)
                                cookie->exp = true;
-                       *syn_loss = tfom->syn_loss;
-                       *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
index 16df6dd44b988a128d97df3a7953437499a216e8..c9a63417af48dad1e5de39719e83c73f5d633bbc 100644 (file)
@@ -183,11 +183,6 @@ static int tcp_write_timeout(struct sock *sk)
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits) {
                        dst_negative_advice(sk);
-                       if (tp->syn_fastopen || tp->syn_data)
-                               tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-                       if (tp->syn_data && icsk->icsk_retransmits == 1)
-                               NET_INC_STATS(sock_net(sk),
-                                             LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                } else if (!tp->syn_data && !tp->syn_fastopen) {
                        sk_rethink_txhash(sk);
                }
@@ -195,17 +190,6 @@ static int tcp_write_timeout(struct sock *sk)
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
                if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
-                       /* Some middle-boxes may black-hole Fast Open _after_
-                        * the handshake. Therefore we conservatively disable
-                        * Fast Open on this path on recurring timeouts after
-                        * successful Fast Open.
-                        */
-                       if (tp->syn_data_acked) {
-                               tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-                               if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-                                       NET_INC_STATS(sock_net(sk),
-                                                     LINUX_MIB_TCPFASTOPENACTIVEFAIL);
-                       }
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
@@ -228,6 +212,7 @@ static int tcp_write_timeout(struct sock *sk)
                expired = retransmits_timed_out(sk, retry_until,
                                                icsk->icsk_user_timeout);
        }
+       tcp_fastopen_active_detect_blackhole(sk, expired);
        if (expired) {
                /* Has it gone just too far? */
                tcp_write_err(sk);