netpoll: more efficient locking
author    Eric Dumazet <edumazet@google.com>
          Wed, 16 Nov 2016 22:54:50 +0000 (14:54 -0800)
committer David S. Miller <davem@davemloft.net>
          Wed, 16 Nov 2016 23:32:02 +0000 (18:32 -0500)
Callers of netpoll_poll_lock() own NAPI_STATE_SCHED.

Callers of netpoll_poll_unlock() have BH blocked between the point
where NAPI_STATE_SCHED is cleared and the point where poll_lock is
released.

We can therefore avoid the spinlock, which has no contention, and use
cmpxchg() on poll_owner, which we need to set anyway.
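
A minimal user-space sketch of the idiom, with C11 atomics standing in
for the kernel's cmpxchg() and smp_store_release() (the names below are
illustrative, not kernel API):

#include <stdatomic.h>

/* poll_owner is -1 when the lock is free, else the owning CPU id. */
static atomic_int poll_owner = -1;

static void sketch_poll_lock(int cpu)
{
	int expected = -1;

	/* Spin until we swap -1 -> cpu, as netpoll_poll_lock() does;
	 * a failed CAS writes the current owner back into 'expected'. */
	while (!atomic_compare_exchange_weak_explicit(&poll_owner,
			&expected, cpu,
			memory_order_acquire, memory_order_relaxed))
		expected = -1;
}

static void sketch_poll_unlock(void)
{
	/* Publish -1 with release semantics, matching the
	 * smp_store_release() in netpoll_poll_unlock(). */
	atomic_store_explicit(&poll_owner, -1, memory_order_release);
}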

This removes a possible lockdep violation after the cited commit, since
sk_busy_loop() re-enables BH before calling busy_poll_stop().

Fixes: 217f69743681 ("net: busy-poll: allow preemption in sk_busy_loop()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/netdevice.h
include/linux/netpoll.h
net/core/dev.c
net/core/netpoll.c

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bcddf951ccee9406e7cda0598cbb920fd0ae749e..e84800edd2491885df54e78a4795256c8a2eae56 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -316,7 +316,6 @@ struct napi_struct {
        unsigned int            gro_count;
        int                     (*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
-       spinlock_t              poll_lock;
        int                     poll_owner;
 #endif
        struct net_device       *dev;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index b25ee9ffdbe67e06a5c70305360cceea3de43aef..1828900c94118ac959168873a91a6dcd4cb8d4cf 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
        struct net_device *dev = napi->dev;
 
        if (dev && dev->npinfo) {
-               spin_lock(&napi->poll_lock);
-               napi->poll_owner = smp_processor_id();
+               int owner = smp_processor_id();
+
+               while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
+                       cpu_relax();
+
                return napi;
        }
        return NULL;
@@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have)
 {
        struct napi_struct *napi = have;
 
-       if (napi) {
-               napi->poll_owner = -1;
-               spin_unlock(&napi->poll_lock);
-       }
+       if (napi)
+               smp_store_release(&napi->poll_owner, -1);
 }
 
 static inline bool netpoll_tx_running(struct net_device *dev)
diff --git a/net/core/dev.c b/net/core/dev.c
index edba9efeb2e93d69a917c059da4383a126bc5389..f71b34ab57a5132647729d20e21376d362d4e630 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5143,7 +5143,6 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
        list_add(&napi->dev_list, &dev->napi_list);
        napi->dev = dev;
 #ifdef CONFIG_NETPOLL
-       spin_lock_init(&napi->poll_lock);
        napi->poll_owner = -1;
 #endif
        set_bit(NAPI_STATE_SCHED, &napi->state);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 53599bd0c82df605e6c4b8a6e4f0ef6aa2ee9fee..9424673009c14e0fb288b8e4041dba596b37ee8d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -171,12 +171,12 @@ static void poll_one_napi(struct napi_struct *napi)
 static void poll_napi(struct net_device *dev)
 {
        struct napi_struct *napi;
+       int cpu = smp_processor_id();
 
        list_for_each_entry(napi, &dev->napi_list, dev_list) {
-               if (napi->poll_owner != smp_processor_id() &&
-                   spin_trylock(&napi->poll_lock)) {
+               if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
                        poll_one_napi(napi);
-                       spin_unlock(&napi->poll_lock);
+                       smp_store_release(&napi->poll_owner, -1);
                }
        }
 }
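
Note that poll_napi() does not spin: a single cmpxchg() acts as a
trylock, and a NAPI instance whose poll_owner is not -1 is simply
skipped. This also subsumes the old smp_processor_id() comparison,
since a cmpxchg() from -1 fails when the local CPU already owns the
instance. In the same illustrative user-space style as above:

#include <stdatomic.h>
#include <stdbool.h>

/* One CAS attempt, no spinning: the role spin_trylock() used to play
 * in poll_napi(). Returns true when ownership was acquired. */
static bool sketch_poll_trylock(atomic_int *owner, int cpu)
{
	int expected = -1;

	return atomic_compare_exchange_strong_explicit(owner,
			&expected, cpu,
			memory_order_acquire, memory_order_relaxed);
}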