net: solve a NAPI race
author     Eric Dumazet <edumazet@google.com>
           Fri, 17 Mar 2017 02:02:33 +0000 (19:02 -0700)
committer  Tim Gardner <tim.gardner@canonical.com>
           Thu, 30 Mar 2017 12:35:45 +0000 (06:35 -0600)
BugLink: http://bugs.launchpad.net/bugs/1677589
commit 39e6c8208d7b6fb9d2047850fb3327db567b564b upstream.

While playing with mlx4 hardware timestamping of RX packets, I found
that some packets were received by TCP stack with a ~200 ms delay...

Since the timestamp was provided by the NIC, and my probe was added
in tcp_v4_rcv() while in BH handler, I was confident it was not
a sender issue, or a drop in the network.

This would happen with a very low probability, but it hurt RPC
workloads.

A NAPI driver normally re-arms the device IRQ after napi_complete_done(),
i.e. after NAPI_STATE_SCHED has been cleared, so that the hard irq handler
can grab it.

The problem is that if another point in the stack grabs the NAPI_STATE_SCHED
bit while IRQs are not disabled, we might later have an IRQ firing and
finding this bit set, right before napi_complete_done() clears it.

This can happen with busy polling users, or if gro_flush_timeout is
used. But some other uses of napi_schedule() in drivers can cause this
as well.

thread 1                                 thread 2 (could be on same cpu, or not)

// busy polling or napi_watchdog()
napi_schedule();
...
napi->poll()

device polling:
read 2 packets from ring buffer

                                          Additional 3rd packet is
                                          available.

                                          device hard irq

                                          // does nothing because
                                          NAPI_STATE_SCHED bit is owned by thread 1
                                          napi_schedule();

napi_complete_done(napi, 2);
rearm_irq();

Note that rearm_irq() will not force the device to send an additional
IRQ for the packet it already signaled (the 3rd packet in my example).
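
To make the window concrete, here is a minimal sketch of a generic NAPI poll
routine as drivers wrote it before this patch; mydev_read_rx(),
mydev_enable_irq() and struct mydev_priv are hypothetical stand-ins for a
real driver's RX/IRQ primitives, not code from this commit.

static int mydev_napi_poll(struct napi_struct *napi, int budget)
{
        struct mydev_priv *priv = container_of(napi, struct mydev_priv, napi);
        int work_done;

        /* Drain up to @budget packets from the RX ring. */
        work_done = mydev_read_rx(priv, budget);

        if (work_done < budget) {
                /* The ring looked empty, so finish polling.  A packet that
                 * arrived after mydev_read_rx() already raised its hard irq;
                 * if that irq found NAPI_STATE_SCHED still set, it returned
                 * without scheduling anything, and the re-arm below will not
                 * make the device signal that packet again.
                 */
                napi_complete_done(napi, work_done);
                mydev_enable_irq(priv);         /* "rearm_irq()" above */
        }

        return work_done;
}

After this patch, napi_complete_done() returns false when NAPI_STATE_MISSED
forced a reschedule, so a driver that checks the return value can skip the
re-arm in that case.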

This patch adds a new NAPI_STATE_MISSED bit, which napi_schedule_prep()
sets if it could not grab NAPI_STATE_SCHED.

Then napi_complete_done() properly reschedules the napi to make sure
we do not miss something.

Since we manipulate multiple bits at once, use cmpxchg() like in
sk_busy_loop() to provide proper transactions.
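
For orientation before the diff, here is the transition napi_schedule_prep()
now performs, written with an explicit branch for readability; the patch
itself uses a division/multiplication form of the same update because gcc
emits better code for it (per Alexander Duyck).

bool napi_schedule_prep(struct napi_struct *n)
{
        unsigned long val, new;

        do {
                /* Snapshot the whole state word. */
                val = READ_ONCE(n->state);
                if (unlikely(val & NAPIF_STATE_DISABLE))
                        return false;

                /* Try to take ownership of the poll ... */
                new = val | NAPIF_STATE_SCHED;

                /* ... and if someone else already owns it, record that an
                 * event was missed so napi_complete_done() reschedules
                 * instead of going idle.
                 */
                if (val & NAPIF_STATE_SCHED)
                        new |= NAPIF_STATE_MISSED;

                /* Retry if another cpu changed n->state in between, so both
                 * bits move in a single transaction.
                 */
        } while (cmpxchg(&n->state, val, new) != val);

        /* We own the poll only if SCHED was clear in the old value. */
        return !(val & NAPIF_STATE_SCHED);
}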

In v2, I changed napi_watchdog() to use a relaxed variant of
napi_schedule_prep(): there is no need to set NAPI_STATE_MISSED from this point.

In v3, I added more details in the changelog and cleared
NAPI_STATE_MISSED in busy_poll_stop().

In v4, I incorporated the ideas given by Alexander Duyck in his v3 review.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
include/linux/netdevice.h
net/core/dev.c

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 27914672602d9d573e6a3da271cec33ccef51b16..bdef8b7d4305f27a5f34509d0f4ef0a865f8e14c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -330,6 +330,7 @@ struct napi_struct {
 
 enum {
        NAPI_STATE_SCHED,       /* Poll is scheduled */
+       NAPI_STATE_MISSED,      /* reschedule a napi */
        NAPI_STATE_DISABLE,     /* Disable pending */
        NAPI_STATE_NPSVC,       /* Netpoll - don't dequeue from poll_list */
        NAPI_STATE_HASHED,      /* In NAPI hash (busy polling possible) */
@@ -338,12 +339,13 @@ enum {
 };
 
 enum {
-       NAPIF_STATE_SCHED        = (1UL << NAPI_STATE_SCHED),
-       NAPIF_STATE_DISABLE      = (1UL << NAPI_STATE_DISABLE),
-       NAPIF_STATE_NPSVC        = (1UL << NAPI_STATE_NPSVC),
-       NAPIF_STATE_HASHED       = (1UL << NAPI_STATE_HASHED),
-       NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
-       NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
+       NAPIF_STATE_SCHED        = BIT(NAPI_STATE_SCHED),
+       NAPIF_STATE_MISSED       = BIT(NAPI_STATE_MISSED),
+       NAPIF_STATE_DISABLE      = BIT(NAPI_STATE_DISABLE),
+       NAPIF_STATE_NPSVC        = BIT(NAPI_STATE_NPSVC),
+       NAPIF_STATE_HASHED       = BIT(NAPI_STATE_HASHED),
+       NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+       NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
 };
 
 enum gro_result {
@@ -413,20 +415,7 @@ static inline bool napi_disable_pending(struct napi_struct *n)
        return test_bit(NAPI_STATE_DISABLE, &n->state);
 }
 
-/**
- *     napi_schedule_prep - check if NAPI can be scheduled
- *     @n: NAPI context
- *
- * Test if NAPI routine is already running, and if not mark
- * it as running.  This is used as a condition variable to
- * insure only one NAPI poll instance runs.  We also make
- * sure there is no pending NAPI disable.
- */
-static inline bool napi_schedule_prep(struct napi_struct *n)
-{
-       return !napi_disable_pending(n) &&
-               !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
-}
+bool napi_schedule_prep(struct napi_struct *n);
 
 /**
  *     napi_schedule - schedule NAPI poll
diff --git a/net/core/dev.c b/net/core/dev.c
index fd6e2dfda45f9554ac746ba92e3238a3e11c15a1..54f8c162ded8a32524d4729cbc41be54b0470f50 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4912,6 +4912,39 @@ void __napi_schedule(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule);
 
+/**
+ *     napi_schedule_prep - check if napi can be scheduled
+ *     @n: napi context
+ *
+ * Test if NAPI routine is already running, and if not mark
+ * it as running.  This is used as a condition variable to
+ * insure only one NAPI poll instance runs.  We also make
+ * sure there is no pending NAPI disable.
+ */
+bool napi_schedule_prep(struct napi_struct *n)
+{
+       unsigned long val, new;
+
+       do {
+               val = READ_ONCE(n->state);
+               if (unlikely(val & NAPIF_STATE_DISABLE))
+                       return false;
+               new = val | NAPIF_STATE_SCHED;
+
+               /* Sets STATE_MISSED bit if STATE_SCHED was already set
+                * This was suggested by Alexander Duyck, as compiler
+                * emits better code than :
+                * if (val & NAPIF_STATE_SCHED)
+                *     new |= NAPIF_STATE_MISSED;
+                */
+               new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
+                                                  NAPIF_STATE_MISSED;
+       } while (cmpxchg(&n->state, val, new) != val);
+
+       return !(val & NAPIF_STATE_SCHED);
+}
+EXPORT_SYMBOL(napi_schedule_prep);
+
 /**
  * __napi_schedule_irqoff - schedule for receive
  * @n: entry to schedule
@@ -4943,7 +4976,7 @@ EXPORT_SYMBOL(__napi_complete);
 
 bool napi_complete_done(struct napi_struct *n, int work_done)
 {
-       unsigned long flags;
+       unsigned long flags, val, new;
 
        /*
         * 1) Don't let napi dequeue from the cpu poll list
@@ -4967,14 +5000,33 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                else
                        napi_gro_flush(n, false);
        }
-       if (likely(list_empty(&n->poll_list))) {
-               WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
-       } else {
+       if (unlikely(!list_empty(&n->poll_list))) {
                /* If n->poll_list is not empty, we need to mask irqs */
                local_irq_save(flags);
-               __napi_complete(n);
+               list_del_init(&n->poll_list);
                local_irq_restore(flags);
        }
+
+       do {
+               val = READ_ONCE(n->state);
+
+               WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
+
+               new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
+
+               /* If STATE_MISSED was set, leave STATE_SCHED set,
+                * because we will call napi->poll() one more time.
+                * This C code was suggested by Alexander Duyck to help gcc.
+                */
+               new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
+                                                   NAPIF_STATE_SCHED;
+       } while (cmpxchg(&n->state, val, new) != val);
+
+       if (unlikely(val & NAPIF_STATE_MISSED)) {
+               __napi_schedule(n);
+               return false;
+       }
+
        return true;
 }
 EXPORT_SYMBOL(napi_complete_done);
@@ -5000,6 +5052,16 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 {
        int rc;
 
+       /* Busy polling means there is a high chance device driver hard irq
+        * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
+        * set in napi_schedule_prep().
+        * Since we are about to call napi->poll() once more, we can safely
+        * clear NAPI_STATE_MISSED.
+        *
+        * Note: x86 could use a single "lock and ..." instruction
+        * to perform these two clear_bit()
+        */
+       clear_bit(NAPI_STATE_MISSED, &napi->state);
        clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
 
        local_bh_disable();
@@ -5146,8 +5208,13 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
        struct napi_struct *napi;
 
        napi = container_of(timer, struct napi_struct, timer);
-       if (napi->gro_list)
-               napi_schedule(napi);
+
+       /* Note : we use a relaxed variant of napi_schedule_prep() not setting
+        * NAPI_STATE_MISSED, since we do not react to a device IRQ.
+        */
+       if (napi->gro_list && !napi_disable_pending(napi) &&
+           !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
+               __napi_schedule_irqoff(napi);
 
        return HRTIMER_NORESTART;
 }