net: do not block BH while processing socket backlog
author Eric Dumazet <edumazet@google.com>
Fri, 29 Apr 2016 21:16:52 +0000 (14:16 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 2 May 2016 21:02:26 +0000 (17:02 -0400)
Socket backlog processing is a major latency source.

With current TCP socket sk_rcvbuf limits, I have sampled __release_sock()
holding the CPU for more than 5 ms, and packets being dropped by the NIC
once the ring buffer is filled.

All users are now ready to be called from process context, so we can
unblock BH and let interrupts be serviced faster.

cond_resched_softirq() could be removed, as it no longer has any users.
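
For illustration only, here is a minimal user-space sketch (not kernel code) of the
pattern __release_sock() follows after this change: take the whole pending list
private while holding the lock, drop the lock so producers are not blocked while
the items are processed, then re-acquire it and repeat. The names (struct item,
backlog_head, drain_backlog, process_item) are hypothetical stand-ins for the
sk_backlog fields and sk_backlog_rcv(); a pthread mutex stands in for
sk_lock.slock. Build with: gcc -pthread sketch.c

#include <pthread.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int payload;
};

static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *backlog_head;

static void process_item(struct item *it)
{
	/* stand-in for sk_backlog_rcv(): handle one queued element */
	free(it);
}

static void drain_backlog(void)
{
	struct item *it, *next;

	pthread_mutex_lock(&backlog_lock);
	while ((it = backlog_head) != NULL) {
		/* take the current batch private, like head = tail = NULL */
		backlog_head = NULL;

		/* drop the lock; producers can keep queueing meanwhile */
		pthread_mutex_unlock(&backlog_lock);

		do {
			next = it->next;
			process_item(it);
			it = next;
		} while (it != NULL);

		pthread_mutex_lock(&backlog_lock);
	}
	pthread_mutex_unlock(&backlog_lock);
}

int main(void)
{
	/* queue three items, then drain them */
	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));

		it->payload = i;
		pthread_mutex_lock(&backlog_lock);
		it->next = backlog_head;
		backlog_head = it;
		pthread_mutex_unlock(&backlog_lock);
	}
	drain_backlog();
	return 0;
}

In the kernel, the unlock step uses spin_unlock_bh(), which not only releases
sk_lock.slock but also re-enables bottom halves, so softirqs can deliver new
packets while the previously queued ones are being processed.
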

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/core/sock.c

index e16a5db853c6f455b0ac826744d0ee5e96a44863..70744dbb6c3f80da11b9f5149ba65c680d317dda 100644 (file)
@@ -2019,33 +2019,27 @@ static void __release_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
 {
-       struct sk_buff *skb = sk->sk_backlog.head;
+       struct sk_buff *skb, *next;
 
-       do {
+       while ((skb = sk->sk_backlog.head) != NULL) {
                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
-               bh_unlock_sock(sk);
 
-               do {
-                       struct sk_buff *next = skb->next;
+               spin_unlock_bh(&sk->sk_lock.slock);
 
+               do {
+                       next = skb->next;
                        prefetch(next);
                        WARN_ON_ONCE(skb_dst_is_noref(skb));
                        skb->next = NULL;
                        sk_backlog_rcv(sk, skb);
 
-                       /*
-                        * We are in process context here with softirqs
-                        * disabled, use cond_resched_softirq() to preempt.
-                        * This is safe to do because we've taken the backlog
-                        * queue private:
-                        */
-                       cond_resched_softirq();
+                       cond_resched();
 
                        skb = next;
                } while (skb != NULL);
 
-               bh_lock_sock(sk);
-       } while ((skb = sk->sk_backlog.head) != NULL);
+               spin_lock_bh(&sk->sk_lock.slock);
+       }
 
        /*
         * Doing the zeroing here guarantee we can not loop forever