spinlock_t _xmit_lock;
int xmit_lock_owner;
struct Qdisc *qdisc_sleeping;
- struct list_head qdisc_list;
} ____cacheline_aligned_in_smp;
/* Number of TX queues currently active in device */
unsigned int real_num_tx_queues;
unsigned long tx_queue_len; /* Max frames per queue allowed */
+ spinlock_t qdisc_list_lock;
+ struct list_head qdisc_list;
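These two per-device fields replace the per-queue list removed above: every qdisc attached to any of the device's TX queues now lives on a single list, guarded by its own spinlock. They have to be initialised once per device; that hunk is not in this excerpt, but it must amount to the sketch below, most naturally on the same setup path as the per-queue INIT_LIST_HEAD() deleted at the end of this patch:

	/* Assumed initialisation (not shown in this excerpt): */
	spin_lock_init(&dev->qdisc_list_lock);
	INIT_LIST_HEAD(&dev->qdisc_list);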
/*
We know handle. Find qdisc among all qdisc's attached to device
(root qdisc, all its children, children of children etc.)
*/
-static struct Qdisc *__qdisc_lookup(struct netdev_queue *dev_queue, u32 handle)
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
struct Qdisc *q;
- list_for_each_entry(q, &dev_queue->qdisc_list, list) {
+ list_for_each_entry(q, &dev->qdisc_list, list) {
if (q->handle == handle)
return q;
}
return NULL;
}
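With one flat list, lookup cost no longer scales with the number of TX queues. A caller sketch (hypothetical, not from the patch; RTNL held as usual for qdisc_lookup() callers, with TC_H_MAKE() building the 32-bit major:minor handle):

	/* Find the qdisc installed as "1:" (major 1, minor 0). */
	struct Qdisc *q = qdisc_lookup(dev, TC_H_MAKE(1 << 16, 0));

	if (q)
		printk(KERN_INFO "%s: handle 1: is a %s qdisc\n",
		       dev->name, q->ops->id);

Note that the lookup itself takes no lock: readers on this path are serialised by RTNL, while qdisc_list_lock guards the list against add/delete that may happen where RTNL alone is not enough, which is presumably why a dedicated spinlock is introduced at all.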
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- struct Qdisc *q = __qdisc_lookup(txq, handle);
- if (q)
- return q;
- }
- return NULL;
-}
-
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
unsigned long cl;
goto err_out3;
}
}
- qdisc_lock_tree(dev);
- list_add_tail(&sch->list, &dev_queue->qdisc_list);
- qdisc_unlock_tree(dev);
+ spin_lock_bh(&dev->qdisc_list_lock);
+ list_add_tail(&sch->list, &dev->qdisc_list);
+ spin_unlock_bh(&dev->qdisc_list_lock);
return sch;
}
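qdisc_lock_tree() used to take the device's tree lock(s) just to guard this list; the dedicated lock turns the update into a self-contained three-line pattern, repeated here in qdisc_create(), in qdisc_destroy() and on the activation path below. A natural follow-up cleanup would factor it into helpers, e.g. (hypothetical names, not part of this patch):

static void qdisc_list_add(struct Qdisc *q)
{
	struct net_device *dev = qdisc_dev(q);

	spin_lock_bh(&dev->qdisc_list_lock);
	list_add_tail(&q->list, &dev->qdisc_list);
	spin_unlock_bh(&dev->qdisc_list_lock);
}

static void qdisc_list_del(struct Qdisc *q)
{
	struct net_device *dev = qdisc_dev(q);

	spin_lock_bh(&dev->qdisc_list_lock);
	list_del(&q->list);
	spin_unlock_bh(&dev->qdisc_list_lock);
}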
read_lock(&dev_base_lock);
idx = 0;
for_each_netdev(&init_net, dev) {
- struct netdev_queue *dev_queue;
if (idx < s_idx)
goto cont;
if (idx > s_idx)
s_q_idx = 0;
q_idx = 0;
- dev_queue = netdev_get_tx_queue(dev, 0);
- list_for_each_entry(q, &dev_queue->qdisc_list, list) {
+ list_for_each_entry(q, &dev->qdisc_list, list) {
if (q_idx < s_q_idx) {
q_idx++;
continue;
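Worth noting: the old dump walked only TX queue 0's list (the removed netdev_get_tx_queue(dev, 0) above), so a qdisc linked on another queue's list would have been invisible to it; with a single per-device list, the dump sees every qdisc regardless of which queue it is attached to.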
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct netdev_queue *dev_queue;
int t;
int s_t;
struct net_device *dev;
s_t = cb->args[0];
t = 0;
- dev_queue = netdev_get_tx_queue(dev, 0);
- list_for_each_entry(q, &dev_queue->qdisc_list, list) {
+ list_for_each_entry(q, &dev->qdisc_list, list) {
if (t < s_t || !q->ops->cl_ops ||
(tcm->tcm_parent &&
TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
void qdisc_destroy(struct Qdisc *qdisc)
{
+ struct net_device *dev = qdisc_dev(qdisc);
+
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
return;
+ spin_lock_bh(&dev->qdisc_list_lock);
list_del(&qdisc->list);
+ spin_unlock_bh(&dev->qdisc_list_lock);
call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
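Ordering matters here: the qdisc is unlinked under the list lock first, and only then handed to call_rcu(), so no new traversal can find an entry whose memory is about to go away. A sketch of a reader this protects (hypothetical debug helper using only the post-patch fields; in the real kernel, qdisc add/delete is additionally serialised by RTNL):

static void qdisc_list_dump(struct net_device *dev)
{
	struct Qdisc *q;

	/* Holding the _bh lock both excludes list_del() and, by keeping
	 * this CPU out of a quiescent state, holds off the call_rcu()
	 * callback, so every entry we see stays valid while we print it. */
	spin_lock_bh(&dev->qdisc_list_lock);
	list_for_each_entry(q, &dev->qdisc_list, list)
		printk(KERN_DEBUG "%s: qdisc %s handle %x: refcnt %d\n",
		       dev->name, q->ops->id,
		       TC_H_MAJ(q->handle) >> 16,
		       atomic_read(&q->refcnt));
	spin_unlock_bh(&dev->qdisc_list_lock);
}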
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
+ spin_lock_bh(&dev->qdisc_list_lock);
+ list_add_tail(&qdisc->list, &dev->qdisc_list);
+ spin_unlock_bh(&dev->qdisc_list_lock);
} else {
qdisc = &noqueue_qdisc;
}
dev_queue->qdisc = qdisc;
dev_queue->qdisc_sleeping = qdisc;
- INIT_LIST_HEAD(&dev_queue->qdisc_list);
}
void dev_init_scheduler(struct net_device *dev)