tcp: de-bloat a bit by factoring NET_INC_STATS_BH out
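The change replaces per-branch NET_INC_STATS_BH() calls with a single call that takes a locally computed mib_idx: each if/else ladder only picks the counter index, and the statistics macro is emitted once per site instead of once per branch. The standalone sketch below illustrates the same before/after shape; it is not kernel code, and stat_inc(), the mib enum and note_reordering() are hypothetical stand-ins for NET_INC_STATS_BH() and the LINUX_MIB_* indices.

#include <stdio.h>

enum mib {
	MIB_TS_REORDER,
	MIB_RENO_REORDER,
	MIB_SACK_REORDER,
	MIB_MAX
};

static unsigned long stats[MIB_MAX];

/* Stand-in for NET_INC_STATS_BH(): bumps one SNMP-style counter. */
static void stat_inc(enum mib idx)
{
	stats[idx]++;
}

/* Factored form: choose the counter first, emit the increment once. */
static void note_reordering(int ts, int is_reno)
{
	int mib_idx;

	if (ts)
		mib_idx = MIB_TS_REORDER;
	else if (is_reno)
		mib_idx = MIB_RENO_REORDER;
	else
		mib_idx = MIB_SACK_REORDER;

	stat_inc(mib_idx);
}

int main(void)
{
	note_reordering(1, 0);
	note_reordering(0, 1);
	note_reordering(0, 0);
	printf("ts=%lu reno=%lu sack=%lu\n",
	       stats[MIB_TS_REORDER], stats[MIB_RENO_REORDER],
	       stats[MIB_SACK_REORDER]);
	return 0;
}

Because the real macro expands to counter-update code at every call site, keeping a single call per function rather than one per branch is what trims the generated code, which is the "de-bloat" the subject line refers to.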
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index de30e70ff256335b8cbc8ec531e1a8c3433a76e5..d6ea970a15135da3e2ce5041ca7a0569b1effa31 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -947,17 +947,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        if (metric > tp->reordering) {
+               int mib_idx;
+
                tp->reordering = min(TCP_MAX_REORDERING, metric);
 
                /* This exciting event is worth to be remembered. 8) */
                if (ts)
-                       NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+                       mib_idx = LINUX_MIB_TCPTSREORDER;
                else if (tcp_is_reno(tp))
-                       NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+                       mib_idx = LINUX_MIB_TCPRENOREORDER;
                else if (tcp_is_fack(tp))
-                       NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+                       mib_idx = LINUX_MIB_TCPFACKREORDER;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+                       mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+               NET_INC_STATS_BH(mib_idx);
 #if FASTRETRANS_DEBUG > 1
                printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
                       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1456,18 +1460,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                if (!tcp_is_sackblock_valid(tp, dup_sack,
                                            sp[used_sacks].start_seq,
                                            sp[used_sacks].end_seq)) {
+                       int mib_idx;
+
                        if (dup_sack) {
                                if (!tp->undo_marker)
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+                                       mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
                                else
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+                                       mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
                        } else {
                                /* Don't count olds caused by ACK reordering */
                                if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
                                    !after(sp[used_sacks].end_seq, tp->snd_una))
                                        continue;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+                               mib_idx = LINUX_MIB_TCPSACKDISCARD;
                        }
+
+                       NET_INC_STATS_BH(mib_idx);
                        if (i == 0)
                                first_sack_index = -1;
                        continue;
@@ -2380,15 +2388,19 @@ static int tcp_try_undo_recovery(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_may_undo(tp)) {
+               int mib_idx;
+
                /* Happy end! We did not retransmit anything
                 * or our original transmission succeeded.
                 */
                DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
                tcp_undo_cwr(sk, 1);
                if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
-                       NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+                       mib_idx = LINUX_MIB_TCPLOSSUNDO;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+                       mib_idx = LINUX_MIB_TCPFULLUNDO;
+
+               NET_INC_STATS_BH(mib_idx);
                tp->undo_marker = 0;
        }
        if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2560,7 +2572,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
        int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
        int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
-       int fast_rexmit = 0;
+       int fast_rexmit = 0, mib_idx;
 
        if (WARN_ON(!tp->packets_out && tp->sacked_out))
                tp->sacked_out = 0;
@@ -2683,9 +2695,11 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
                /* Otherwise enter Recovery state */
 
                if (tcp_is_reno(tp))
-                       NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+                       mib_idx = LINUX_MIB_TCPRENORECOVERY;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+                       mib_idx = LINUX_MIB_TCPSACKRECOVERY;
+
+               NET_INC_STATS_BH(mib_idx);
 
                tp->high_seq = tp->snd_nxt;
                tp->prior_ssthresh = 0;
@@ -3700,10 +3714,14 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
        if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+               int mib_idx;
+
                if (before(seq, tp->rcv_nxt))
-                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
+                       mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
+                       mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
+
+               NET_INC_STATS_BH(mib_idx);
 
                tp->rx_opt.dsack = 1;
                tp->duplicate_sack[0].start_seq = seq;