net: Prevent invalid access to skb->prev in __qdisc_drop_all
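The subject commit's actual fix lives in include/net/sch_generic.h; this blobdiff shows only the net/sched/sch_generic.c side of the tree delta. As a hedged sketch of that fix (names per the upstream change, surrounding context abbreviated): netem chains GSO segments through skb->prev/skb->next, but an skb that was never segmented can carry stale data in skb->prev, so the helper must check it before dereferencing.

        /* Sketch of the guarded helper from the subject commit (it lives
         * in include/net/sch_generic.h, not in this file).  skb->prev is
         * only dereferenced when it is actually set.
         */
        static inline void __qdisc_drop_all(struct sk_buff *skb,
                                            struct sk_buff **to_free)
        {
                if (skb->prev)
                        skb->prev->next = *to_free;
                else
                        skb->next = *to_free;
                *to_free = skb;
        }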
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 661c7144b53af048b3a65484777910e2d60f25aa..dec48b60410b0345440a94cd45a3072870c2709b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -369,7 +369,7 @@ void netif_carrier_on(struct net_device *dev)
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
-               atomic_inc(&dev->carrier_changes);
+               atomic_inc(&dev->carrier_up_count);
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
@@ -388,7 +388,7 @@ void netif_carrier_off(struct net_device *dev)
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
-               atomic_inc(&dev->carrier_changes);
+               atomic_inc(&dev->carrier_down_count);
                linkwatch_fire_event(dev);
        }
 }
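These two hunks split the single carrier_changes counter into separate up and down counters. The old aggregate value remains recoverable as their sum, which is how upstream rtnetlink keeps reporting a total for IFLA_CARRIER_CHANGES; dev_carrier_changes below is a hypothetical helper illustrating that relation.

        /* Hypothetical helper: the old aggregate "carrier_changes" value
         * is simply the sum of the two new per-direction counters. */
        static u32 dev_carrier_changes(const struct net_device *dev)
        {
                return atomic_read(&dev->carrier_up_count) +
                       atomic_read(&dev->carrier_down_count);
        }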
@@ -633,6 +633,19 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        qdisc_skb_head_init(&sch->q);
        spin_lock_init(&sch->q.lock);
 
+       if (ops->static_flags & TCQ_F_CPUSTATS) {
+               sch->cpu_bstats =
+                       netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+               if (!sch->cpu_bstats)
+                       goto errout1;
+
+               sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+               if (!sch->cpu_qstats) {
+                       free_percpu(sch->cpu_bstats);
+                       goto errout1;
+               }
+       }
+
        spin_lock_init(&sch->busylock);
        lockdep_set_class(&sch->busylock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
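With the hunk above, a qdisc can request per-CPU byte/packet and queue statistics at allocation time by setting TCQ_F_CPUSTATS in its Qdisc_ops::static_flags; qdisc_alloc() then sets up cpu_bstats and cpu_qstats before the qdisc's own init runs. A hypothetical, sketch-only ops declaration:

        /* Hypothetical qdisc opting in to per-CPU stats via static_flags.
         * qdisc_alloc() copies static_flags into sch->flags (next hunk)
         * and allocates sch->cpu_bstats / sch->cpu_qstats up front. */
        static struct Qdisc_ops example_qdisc_ops __read_mostly = {
                .id             = "example",
                .static_flags   = TCQ_F_CPUSTATS,
                /* .enqueue / .dequeue / .init etc. elided from this sketch */
                .owner          = THIS_MODULE,
        };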
@@ -642,6 +655,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          dev->qdisc_running_key ?: &qdisc_running_key);
 
        sch->ops = ops;
+       sch->flags = ops->static_flags;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
@@ -649,6 +663,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        refcount_set(&sch->refcnt, 1);
 
        return sch;
+errout1:
+       kfree(p);
 errout:
        return ERR_PTR(err);
 }
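The new errout1 label frees the raw allocation p rather than the aligned sch pointer. For orientation, the start of qdisc_alloc() (elided from these hunks) allocates a raw buffer and aligns sch inside it; the helper below is a hypothetical condensation of that step, not the verbatim function.

        /* Hedged sketch of the allocation that errout1 unwinds: sch is a
         * QDISC_ALIGN()-aligned pointer inside the raw buffer p, so p
         * (not sch) is what must be freed on the error path. */
        static void *qdisc_alloc_raw_sketch(struct netdev_queue *dev_queue,
                                            unsigned int size,
                                            struct Qdisc **schp)
        {
                void *p = kzalloc_node(size, GFP_KERNEL,
                                       netdev_queue_numa_node_read(dev_queue));

                if (p)
                        *schp = (struct Qdisc *)QDISC_ALIGN((unsigned long)p);
                return p;       /* caller kfree()s p on any later failure */
        }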
@@ -698,7 +714,7 @@ void qdisc_reset(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_reset);
 
-static void qdisc_free(struct Qdisc *qdisc)
+void qdisc_free(struct Qdisc *qdisc)
 {
        if (qdisc_is_percpu_stats(qdisc)) {
                free_percpu(qdisc->cpu_bstats);
@@ -746,10 +762,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
        root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);
 
-       /* Prune old scheduler */
-       if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
-               qdisc_reset(oqdisc);
-
        /* ... and graft new one */
        if (qdisc == NULL)
                qdisc = &noop_qdisc;
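This hunk drops the eager reset of the outgoing qdisc from dev_graft_qdisc(), where it could race with a CPU still transmitting through that qdisc. The reset now happens in dev_deactivate_many() (further down in this diff) once the qdisc is provably idle. A hypothetical caller sequence, sketching where the quiescing now sits:

        /* Hypothetical root-qdisc replacement illustrating the ordering
         * that makes the removed reset unnecessary: dev_deactivate()
         * quiesces and resets the old qdisc before it is grafted out. */
        static void example_replace_root_qdisc(struct net_device *dev,
                                               struct Qdisc *new)
        {
                struct Qdisc *old;

                dev_deactivate(dev);    /* waits + resets (see below) */
                old = dev_graft_qdisc(netdev_get_tx_queue(dev, 0), new);
                dev_activate(dev);
                qdisc_destroy(old);
        }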
@@ -900,6 +912,16 @@ static bool some_qdisc_is_busy(struct net_device *dev)
        return false;
 }
 
+static void dev_qdisc_reset(struct net_device *dev,
+                           struct netdev_queue *dev_queue,
+                           void *none)
+{
+       struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+       if (qdisc)
+               qdisc_reset(qdisc);
+}
+
 /**
  *     dev_deactivate_many - deactivate transmissions on several devices
  *     @head: list of devices to deactivate
@@ -910,7 +932,6 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate_many(struct list_head *head)
 {
        struct net_device *dev;
-       bool sync_needed = false;
 
        list_for_each_entry(dev, head, close_list) {
                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -920,20 +941,25 @@ void dev_deactivate_many(struct list_head *head)
                                             &noop_qdisc);
 
                dev_watchdog_down(dev);
-               sync_needed |= !dev->dismantle;
        }
 
        /* Wait for outstanding qdisc-less dev_queue_xmit calls.
         * This is avoided if all devices are in dismantle phase :
         * Caller will call synchronize_net() for us
         */
-       if (sync_needed)
-               synchronize_net();
+       synchronize_net();
 
        /* Wait for outstanding qdisc_run calls. */
-       list_for_each_entry(dev, head, close_list)
+       list_for_each_entry(dev, head, close_list) {
                while (some_qdisc_is_busy(dev))
                        yield();
+               /* The new qdisc is assigned at this point so we can safely
+                * unwind stale skb lists and qdisc statistics
+                */
+               netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
+               if (dev_ingress_queue(dev))
+                       dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
+       }
 }
 
 void dev_deactivate(struct net_device *dev)
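The final context line cuts off at dev_deactivate(); for completeness, the single-device wrapper that follows in this file is, modulo minor version drift, essentially:

        /* Single-device wrapper around dev_deactivate_many(): put the
         * device on a one-entry list and reuse the batched path above. */
        void dev_deactivate(struct net_device *dev)
        {
                LIST_HEAD(single);

                list_add(&dev->close_list, &single);
                dev_deactivate_many(&single);
                list_del(&single);
        }
        EXPORT_SYMBOL(dev_deactivate);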