net: do not provide hard irq safety for sd->defer_lock
author    Eric Dumazet <edumazet@google.com>
          Fri, 21 Apr 2023 09:43:54 +0000 (09:43 +0000)
committer David S. Miller <davem@davemloft.net>
          Sun, 23 Apr 2023 12:35:07 +0000 (13:35 +0100)
kfree_skb() can be called from hard irq handlers,
but skb_attempt_defer_free() is meant to be used
from process or BH contexts, and skb_defer_free_flush()
is meant to be called from BH contexts.

Not having to mask hard irqs can save some cycles
(see the illustrative sketch below).

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
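
For illustration, a minimal sketch of the locking pattern after this
change (not the kernel source; the queueing logic around the locks is
elided, only the context rules matter):

    /* Consumer: skb_defer_free_flush() runs only in BH context
     * (NET_RX_SOFTIRQ), and BH cannot preempt itself on the same
     * CPU, so a plain spin_lock() is enough.
     */
    spin_lock(&sd->defer_lock);        /* was: spin_lock_irq() */
    /* splice sd->defer_list onto a local list, reset defer_count */
    spin_unlock(&sd->defer_lock);

    /* Producer: skb_attempt_defer_free() runs in process or BH
     * context.  spin_lock_bh() keeps softirqs off while the lock
     * is held, so the BH consumer above cannot interrupt a
     * process-context producer on its own CPU and deadlock.  Hard
     * irqs stay enabled: no hard-irq path ever takes defer_lock.
     */
    spin_lock_bh(&sd->defer_lock);     /* was: spin_lock_irqsave() */
    /* queue skb on sd->defer_list, bump defer_count */
    spin_unlock_bh(&sd->defer_lock);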
net/core/dev.c
net/core/skbuff.c

diff --git a/net/core/dev.c b/net/core/dev.c
index 1551aabac3437938566813363d748ac639fb0075..d15568f5a44f1a397941bd5fca3873ee4d7d0e48 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6632,11 +6632,11 @@ static void skb_defer_free_flush(struct softnet_data *sd)
        if (!READ_ONCE(sd->defer_list))
                return;
 
-       spin_lock_irq(&sd->defer_lock);
+       spin_lock(&sd->defer_lock);
        skb = sd->defer_list;
        sd->defer_list = NULL;
        sd->defer_count = 0;
-       spin_unlock_irq(&sd->defer_lock);
+       spin_unlock(&sd->defer_lock);
 
        while (skb != NULL) {
                next = skb->next;
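
The flush side now relies on never being entered outside BH context.
This patch does not enforce that, but a lockdep annotation could
document it; lockdep_assert_in_softirq() is an existing kernel helper,
though its placement here is only a hypothetical illustration, not
part of this change:

    static void skb_defer_free_flush(struct softnet_data *sd)
    {
            /* Hypothetical, not in this patch: warn if a future
             * caller runs outside BH context, where the plain
             * spin_lock() below would no longer be safe.
             */
            lockdep_assert_in_softirq();
            ...
    }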
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bd815a00d2affae9be4ea6cdba188423e1122164..304a966164d82600cf196e512f24e3deee0c9bc5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6870,7 +6870,6 @@ void skb_attempt_defer_free(struct sk_buff *skb)
 {
        int cpu = skb->alloc_cpu;
        struct softnet_data *sd;
-       unsigned long flags;
        unsigned int defer_max;
        bool kick;
 
@@ -6889,7 +6888,7 @@ nodefer:  __kfree_skb(skb);
        if (READ_ONCE(sd->defer_count) >= defer_max)
                goto nodefer;
 
-       spin_lock_irqsave(&sd->defer_lock, flags);
+       spin_lock_bh(&sd->defer_lock);
        /* Send an IPI every time queue reaches half capacity. */
        kick = sd->defer_count == (defer_max >> 1);
        /* Paired with the READ_ONCE() few lines above */
@@ -6898,7 +6897,7 @@ nodefer:  __kfree_skb(skb);
        skb->next = sd->defer_list;
        /* Paired with READ_ONCE() in skb_defer_free_flush() */
        WRITE_ONCE(sd->defer_list, skb);
-       spin_unlock_irqrestore(&sd->defer_lock, flags);
+       spin_unlock_bh(&sd->defer_lock);
 
        /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
         * if we are unlucky enough (this seems very unlikely).
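
Why spin_lock_bh() on the producer side rather than a plain
spin_lock(): skb_attempt_defer_free() can run in process context, and
if NET_RX_SOFTIRQ fired on the same CPU while defer_lock was held
there, skb_defer_free_flush() would spin on a lock its own CPU
already holds:

    /* The deadlock a plain spin_lock() would allow:
     *
     *   process context                softirq on the same CPU
     *   ---------------                -----------------------
     *   spin_lock(&sd->defer_lock);
     *                                  net_rx_action()
     *                                    skb_defer_free_flush()
     *                                      spin_lock(&sd->defer_lock);
     *                                      ... spins forever ...
     */

Blocking BH for the critical section rules this out, and it is still
cheaper than the old spin_lock_irqsave()/spin_unlock_irqrestore()
pair, which saved, disabled and restored the hard-irq flags on every
deferred free.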