Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 699fb102e9710694f342951cf194facd153f7d37..694711a140d46fe8877c493df407fe946fbade1d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -878,15 +878,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        BUG_ON(!skb || !tcp_skb_pcount(skb));
 
        if (clone_it) {
-               const struct sk_buff *fclone = skb + 1;
-
                skb_mstamp_get(&skb->skb_mstamp);
 
-               if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
-                            fclone->fclone == SKB_FCLONE_CLONE))
-                       NET_INC_STATS(sock_net(sk),
-                                     LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
-
                if (unlikely(skb_cloned(skb)))
                        skb = pskb_copy(skb, gfp_mask);
                else
@@ -981,7 +974,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
                              tcp_skb_pcount(skb));
 
-       err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
+       err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
        if (likely(err <= 0))
                return err;
 
@@ -1387,12 +1380,35 @@ unsigned int tcp_current_mss(struct sock *sk)
        return mss_now;
 }
 
-/* Congestion window validation. (RFC2861) */
-static void tcp_cwnd_validate(struct sock *sk)
+/* RFC 2861, slow part. Adjust cwnd when it has not been full for one RTO.
+ * As additional protection, we do not touch cwnd in retransmission phases,
+ * or if the application hit its sndbuf limit recently.
+ */
+static void tcp_cwnd_application_limited(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
+           sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
+               /* Limited by application or receiver window. */
+               u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
+               u32 win_used = max(tp->snd_cwnd_used, init_win);
+               if (win_used < tp->snd_cwnd) {
+                       tp->snd_ssthresh = tcp_current_ssthresh(sk);
+                       tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
+               }
+               tp->snd_cwnd_used = 0;
+       }
+       tp->snd_cwnd_stamp = tcp_time_stamp;
+}
+
+static void tcp_cwnd_validate(struct sock *sk, u32 unsent_segs)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tp->packets_out >= tp->snd_cwnd) {
+       tp->lsnd_pending = tp->packets_out + unsent_segs;
+
+       if (tcp_is_cwnd_limited(sk)) {
                /* Network is fed fully. */
                tp->snd_cwnd_used = 0;
                tp->snd_cwnd_stamp = tcp_time_stamp;
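
The adjustment above decays cwnd halfway toward the window the sender
actually used, and never below the initial window. A minimal user-space
sketch of that arithmetic with hypothetical values (this is not kernel
code):

#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 100;		/* hypothetical current cwnd (segments) */
	unsigned int snd_cwnd_used = 20;	/* peak in-flight while app-limited */
	unsigned int init_win = 10;		/* assumed initial window */
	unsigned int win_used;

	/* Never decay below the initial window. */
	win_used = snd_cwnd_used > init_win ? snd_cwnd_used : init_win;

	/* RFC 2861: move cwnd halfway toward what was actually used. */
	if (win_used < snd_cwnd)
		snd_cwnd = (snd_cwnd + win_used) >> 1;

	printf("cwnd after application-limited rto: %u\n", snd_cwnd);	/* 60 */
	return 0;
}
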
@@ -1865,7 +1881,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       unsigned int tso_segs, sent_pkts;
+       unsigned int tso_segs, sent_pkts, unsent_segs = 0;
        int cwnd_quota;
        int result;
 
@@ -1909,7 +1925,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                                break;
                } else {
                        if (!push_one && tcp_tso_should_defer(sk, skb))
-                               break;
+                               goto compute_unsent_segs;
                }
 
                /* TCP Small Queues :
@@ -1934,8 +1950,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                         * there is no smp_mb__after_set_bit() yet
                         */
                        smp_mb__after_clear_bit();
-                       if (atomic_read(&sk->sk_wmem_alloc) > limit)
+                       if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+                               u32 unsent_bytes;
+
+compute_unsent_segs:
+                               unsent_bytes = tp->write_seq - tp->snd_nxt;
+                               unsent_segs = DIV_ROUND_UP(unsent_bytes, mss_now);
                                break;
+                       }
                }
 
                limit = mss_now;
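
The bookkeeping above rounds the not-yet-sent byte range up to whole
MSS-sized segments before feeding it to tcp_cwnd_validate(). The same
computation in a standalone user-space sketch, with DIV_ROUND_UP()
expanded and hypothetical sequence numbers:

#include <stdio.h>

/* The rounding the kernel's DIV_ROUND_UP() macro performs. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int write_seq = 150001;	/* hypothetical: last byte queued + 1 */
	unsigned int snd_nxt = 147106;		/* hypothetical: next seq to send */
	unsigned int mss_now = 1448;		/* hypothetical current MSS */

	unsigned int unsent_bytes = write_seq - snd_nxt;		/* 2895 */
	unsigned int unsent_segs = DIV_ROUND_UP(unsent_bytes, mss_now);	/* 2 */

	printf("%u unsent bytes -> %u segment(s)\n", unsent_bytes, unsent_segs);
	return 0;
}
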
@@ -1975,7 +1997,7 @@ repair:
                /* Send one loss probe per tail loss episode. */
                if (push_one != 2)
                        tcp_schedule_loss_probe(sk);
-               tcp_cwnd_validate(sk);
+               tcp_cwnd_validate(sk, unsent_segs);
                return false;
        }
        return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
@@ -2039,6 +2061,25 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        return true;
 }
 
+/* Thanks to skb fast clones, we can detect if a prior transmit of
+ * a packet is still in a qdisc or driver queue.
+ * In this case, there is very little point in doing a retransmit!
+ * Note: This is called from BH context only.
+ */
+static bool skb_still_in_host_queue(const struct sock *sk,
+                                   const struct sk_buff *skb)
+{
+       const struct sk_buff *fclone = skb + 1;
+
+       if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
+                    fclone->fclone == SKB_FCLONE_CLONE)) {
+               NET_INC_STATS_BH(sock_net(sk),
+                                LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+               return true;
+       }
+       return false;
+}
+
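
The skb + 1 arithmetic in the helper above works because fast clones are
carved from a single allocation in which the original sk_buff and its
clone sit back to back; while the clone is still owned by a qdisc or
driver, its state reads SKB_FCLONE_CLONE. A simplified user-space model
of that layout (struct and field names are illustrative, not the
kernel's):

#include <stdio.h>

enum fclone_state { FCLONE_UNAVAILABLE, FCLONE_ORIG, FCLONE_CLONE };

struct fake_skb {
	enum fclone_state fclone;
	/* payload bookkeeping elided */
};

/* One allocation holds both buffers, so orig + 1 reaches the clone. */
struct fake_fclones {
	struct fake_skb orig;	/* fclone == FCLONE_ORIG  */
	struct fake_skb clone;	/* fclone == FCLONE_CLONE */
};

static int still_in_host_queue(const struct fake_skb *skb)
{
	const struct fake_skb *fclone = skb + 1;	/* valid for FCLONE_ORIG only */

	return skb->fclone == FCLONE_ORIG && fclone->fclone == FCLONE_CLONE;
}

int main(void)
{
	struct fake_fclones pair = {
		.orig  = { .fclone = FCLONE_ORIG  },
		.clone = { .fclone = FCLONE_CLONE },
	};

	/* Clone still queued below the stack: retransmitting is pointless. */
	printf("in host queue: %d\n", still_in_host_queue(&pair.orig));

	/* Driver freed the clone; its slot no longer reads FCLONE_CLONE. */
	pair.clone.fclone = FCLONE_UNAVAILABLE;
	printf("in host queue: %d\n", still_in_host_queue(&pair.orig));
	return 0;
}
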
 /* When probe timeout (PTO) fires, send a new segment if one exists, else
  * retransmit the last segment.
  */
@@ -2064,6 +2105,9 @@ void tcp_send_loss_probe(struct sock *sk)
        if (WARN_ON(!skb))
                goto rearm_timer;
 
+       if (skb_still_in_host_queue(sk, skb))
+               goto rearm_timer;
+
        pcount = tcp_skb_pcount(skb);
        if (WARN_ON(!pcount))
                goto rearm_timer;
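
With the new guard in place, the probe-timeout behaviour described in
the comment above has three outcomes: send fresh data if any is queued,
skip the probe when the tail skb never left the host, otherwise
retransmit the tail. A condensed user-space model (names are
hypothetical, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum probe_action { PROBE_NEW_DATA, PROBE_RETRANSMIT_TAIL, PROBE_SKIP };

static enum probe_action pto_decision(bool have_new_data,
				      bool tail_in_host_queue)
{
	if (have_new_data)
		return PROBE_NEW_DATA;		/* probe with a fresh segment */
	if (tail_in_host_queue)
		return PROBE_SKIP;		/* just rearm the timer */
	return PROBE_RETRANSMIT_TAIL;		/* classic TLP retransmit */
}

int main(void)
{
	printf("%d %d %d\n",
	       pto_decision(true, false),	/* 0: new data wins */
	       pto_decision(false, true),	/* 2: skb still local, skip */
	       pto_decision(false, false));	/* 1: retransmit last segment */
	return 0;
}
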
@@ -2385,6 +2429,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
            min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
                return -EAGAIN;
 
+       if (skb_still_in_host_queue(sk, skb))
+               return -EBUSY;
+
        if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
                if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
                        BUG();
@@ -2441,8 +2488,14 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
        }
 
-       if (likely(!err))
+       if (likely(!err)) {
                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+               /* Update global TCP statistics. */
+               TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+               tp->total_retrans++;
+       }
        return err;
 }
 
@@ -2452,12 +2505,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        int err = __tcp_retransmit_skb(sk, skb);
 
        if (err == 0) {
-               /* Update global TCP statistics. */
-               TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-               tp->total_retrans++;
-
 #if FASTRETRANS_DEBUG > 0
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                        net_dbg_ratelimited("retrans_out leaked\n");
@@ -2478,7 +2525,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                 * see tcp_input.c tcp_sacktag_write_queue().
                 */
                TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
-       } else {
+       } else if (err != -EBUSY) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
        return err;