tcp: add new SNMP counter for drops when trying to queue in rcv queue
author    Yafang Shao <laoar.shao@gmail.com>
          Thu, 28 Jun 2018 04:22:56 +0000 (00:22 -0400)
committer David S. Miller <davem@davemloft.net>
          Sat, 30 Jun 2018 09:43:53 +0000 (18:43 +0900)
When sk_rmem_alloc is larger than the receive buffer and we can't
schedule more memory for it, the skb will be dropped.

In the above situation, if the skb is put into the out-of-order (ofo)
queue, LINUX_MIB_TCPOFODROP is incremented to track the drop.

If the skb is instead put into the receive queue, however, there is no
record of the drop, so a new SNMP counter is introduced to track this case.

LINUX_MIB_TCPRCVQDROP:  Number of packets meant to be queued in the receive
queue but dropped because the socket rcvbuf limit was hit.
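
As an illustration (not part of this patch), the new counter can be read from
userspace once the kernel exposes it: snmp4_net_list entries are printed on
the "TcpExt:" lines of /proc/net/netstat, so a reader only needs to find the
column named TCPRcvQDrop in the header line and print the value at the same
position in the following values line. A minimal sketch in C, assuming the
usual two-line (names, then values) layout of /proc/net/netstat:

/* Minimal sketch: read the TCPRcvQDrop counter from /proc/net/netstat.
 * Assumes the usual layout: a "TcpExt:" header line listing counter names
 * followed by a "TcpExt:" line carrying the corresponding values.
 */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/netstat", "r");
	char hdr[8192], val[8192];

	if (!fp) {
		perror("fopen /proc/net/netstat");
		return 1;
	}

	/* The file is organized as pairs of lines: names, then values. */
	while (fgets(hdr, sizeof(hdr), fp) && fgets(val, sizeof(val), fp)) {
		char *hs, *vs;
		char *h, *v;

		if (strncmp(hdr, "TcpExt:", 7) != 0)
			continue;

		/* Walk the name line and the value line in lockstep. */
		h = strtok_r(hdr, " \n", &hs);
		v = strtok_r(val, " \n", &vs);
		while (h && v) {
			if (strcmp(h, "TCPRcvQDrop") == 0) {
				printf("TCPRcvQDrop: %s\n", v);
				fclose(fp);
				return 0;
			}
			h = strtok_r(NULL, " \n", &hs);
			v = strtok_r(NULL, " \n", &vs);
		}
	}

	fclose(fp);
	fprintf(stderr, "TCPRcvQDrop not found (kernel without this patch?)\n");
	return 1;
}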

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/uapi/linux/snmp.h
net/ipv4/proc.c
net/ipv4/tcp_input.c

index 97517f36a5f967910b89eae4b2b69a71657f0f9c..e5ebc83827abbcaaf82e1f46011540fc273c65f2 100644 (file)
@@ -280,6 +280,7 @@ enum
        LINUX_MIB_TCPDELIVEREDCE,               /* TCPDeliveredCE */
        LINUX_MIB_TCPACKCOMPRESSED,             /* TCPAckCompressed */
        LINUX_MIB_TCPZEROWINDOWDROP,            /* TCPZeroWindowDrop */
+       LINUX_MIB_TCPRCVQDROP,                  /* TCPRcvQDrop */
        __LINUX_MIB_MAX
 };
 
index 225ef3433fe5c700099fb1eeeb0cdadbcb3700c8..b46e4cf9a55a1aa58e1fa344443e184053e05ffd 100644 (file)
@@ -288,6 +288,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE),
        SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED),
        SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP),
+       SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP),
        SNMP_MIB_SENTINEL
 };
 
index 9c5b3415413f15edd6ebf686e1cb50a7f2801e64..eecd359595fc17d98e331b9560a79e924ded8b25 100644 (file)
@@ -4611,8 +4611,10 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
        skb->data_len = data_len;
        skb->len = size;
 
-       if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+       if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
                goto err_free;
+       }
 
        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
        if (err)
@@ -4677,8 +4679,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 queue_and_out:
                if (skb_queue_len(&sk->sk_receive_queue) == 0)
                        sk_forced_mem_schedule(sk, skb->truesize);
-               else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+               else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
                        goto drop;
+               }
 
                eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
                tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);