         TIPC_NLA_SOCK_STAT_SENDQ,          /* u32 */
         TIPC_NLA_SOCK_STAT_LINK_CONG,      /* flag */
         TIPC_NLA_SOCK_STAT_CONN_CONG,      /* flag */
+        TIPC_NLA_SOCK_STAT_DROP,           /* u32 */
         __TIPC_NLA_SOCK_STAT_MAX,
         TIPC_NLA_SOCK_STAT_MAX = __TIPC_NLA_SOCK_STAT_MAX - 1
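
The new attribute is appended immediately before __TIPC_NLA_SOCK_STAT_MAX, so the values of the existing attributes are unchanged, and older userspace, which skips unknown netlink attribute types, keeps working. A minimal consumer sketch (libmnl-based; the function and the surrounding TIPC_NL_SOCK_GET dump plumbing are assumptions, not part of the patch):

#include <libmnl/libmnl.h>
#include <linux/tipc_netlink.h>
#include <stdio.h>

/* Illustrative only: given the TIPC_NLA_SOCK_STAT nest from a
 * TIPC_NL_SOCK_GET dump, print the queue lengths and the new drop
 * counter. Unknown attribute types fall through the switch, which
 * is why appending DROP at the end is backward compatible. */
static void print_sock_stat(const struct nlattr *stat_nest)
{
        const struct nlattr *attr;

        mnl_attr_for_each_nested(attr, stat_nest) {
                switch (mnl_attr_get_type(attr)) {
                case TIPC_NLA_SOCK_STAT_RCVQ:
                        printf("rcvq:  %u\n", mnl_attr_get_u32(attr));
                        break;
                case TIPC_NLA_SOCK_STAT_SENDQ:
                        printf("sendq: %u\n", mnl_attr_get_u32(attr));
                        break;
                case TIPC_NLA_SOCK_STAT_DROP:
                        printf("drops: %u\n", mnl_attr_get_u32(attr));
                        break;
                }
        }
}

The first place the counter is stepped is the receive filter, where a message that would overrun the socket's receive buffer limit is rejected with TIPC_ERR_OVERLOAD:
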
             (!sk_conn && msg_connected(hdr)) ||
             (!grp && msg_in_group(hdr)))
                 err = TIPC_ERR_NO_PORT;
-        else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
+        else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+                atomic_inc(&sk->sk_drops);
                 err = TIPC_ERR_OVERLOAD;
+        }

         if (unlikely(err)) {
                 tipc_skb_reject(net, err, skb, xmitq);
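
Both rejection causes funnel into the same tipc_skb_reject() call, but only the overload case is counted: a TIPC_ERR_NO_PORT rejection means the message was never deliverable on this socket, while TIPC_ERR_OVERLOAD means a deliverable message had to be discarded. The rule the new braces implement could be factored out as follows (hypothetical helper, not in the patch):

#include <net/sock.h>

/* Hypothetical helper, not in the patch: a message is dropped (and
 * counted) only when queuing it would push the socket's receive
 * allocation past its limit; TIPC_ERR_NO_PORT rejections are not
 * counted as drops. */
static bool tipc_sk_overlimit(struct sock *sk, struct sk_buff *skb, int limit)
{
        if (sk_rmem_alloc_get(sk) + skb->truesize < limit)
                return false;
        atomic_inc(&sk->sk_drops);  /* exported as TIPC_NLA_SOCK_STAT_DROP */
        return true;
}

The second drop site is the dispatcher's enqueue path, where an overloaded socket bounces the message back to its sender:
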
                 /* Overload => reject message back to sender */
                 onode = tipc_own_addr(sock_net(sk));
+                atomic_inc(&sk->sk_drops);
                 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
                         __skb_queue_tail(xmitq, skb);
                 break;
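
Here the counter is stepped before tipc_msg_reverse() attempts to turn the message around, so the drop is recorded whether or not the rejection can actually be sent. Note that both sites step the same field: sk_drops is the generic counter already present in struct sock, so the patch adds no TIPC-specific state and the value can be read with a plain atomic_read(). A trivial accessor sketch (hypothetical; the patch reads the field directly):

#include <net/sock.h>

/* Hypothetical convenience accessor; sk_drops lives in the generic
 * struct sock, so nothing is added to struct tipc_sock. */
static inline u32 tipc_sk_drops(const struct sock *sk)
{
        return (u32)atomic_read(&sk->sk_drops);
}

The value is then exported alongside the existing queue-length statistics:
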
         if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
                         skb_queue_len(&sk->sk_receive_queue)) ||
             nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
-                        skb_queue_len(&sk->sk_write_queue)))
+                        skb_queue_len(&sk->sk_write_queue)) ||
+            nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
+                        atomic_read(&sk->sk_drops)))
                 goto stat_msg_cancel;

         if (tsk->cong_link_cnt &&
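
The two existing puts gain a third, keeping the established error convention: any failed put jumps to stat_msg_cancel, which cancels the partially built nest. For context, the nest handling around these puts typically looks like the following sketch (the function name and return convention are illustrative; only the attribute usage mirrors the patch):

#include <net/netlink.h>
#include <net/sock.h>
#include <linux/tipc_netlink.h>

/* Sketch of the nest handling around the puts above; function name
 * and return convention are illustrative. nla_nest_cancel() plays
 * the role of the stat_msg_cancel label in the patch. */
static int tipc_nl_fill_sock_stat(struct sk_buff *skb, struct sock *sk)
{
        struct nlattr *stats;

        stats = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
        if (!stats)
                return -EMSGSIZE;

        if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
                        skb_queue_len(&sk->sk_receive_queue)) ||
            nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
                        skb_queue_len(&sk->sk_write_queue)) ||
            nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
                        atomic_read(&sk->sk_drops))) {
                nla_nest_cancel(skb, stats);
                return -EMSGSIZE;
        }

        nla_nest_end(skb, stats);
        return 0;
}

Userspace then finds the drop counter in the same TIPC_NLA_SOCK_STAT nest walked in the first sketch above.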