/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mqprio_sched {
        struct Qdisc **qdiscs;
        int hw_offload;
};

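/* Tear down any per-queue child qdiscs that were allocated at init time.
 * If the queue mapping was offloaded, ask the driver (via ndo_setup_tc)
 * to clear its hardware state; otherwise reset the device's traffic
 * class configuration in software.
 */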
static void mqprio_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        unsigned int ntx;

        if (priv->qdiscs) {
                for (ntx = 0;
                     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
                     ntx++)
                        qdisc_destroy(priv->qdiscs[ntx]);
                kfree(priv->qdiscs);
        }

        if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
                struct tc_mqprio_qopt offload = { 0 };
                struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
                                           { .mqprio = &offload } };

                dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, 0, &tc);
        } else {
                netdev_set_num_tc(dev, 0);
        }
}

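/* Validate the user-supplied options: num_tc must not exceed
 * TC_MAX_QUEUE, every priority must map to a valid traffic class, and,
 * unless the mapping is being offloaded to hardware, each per-class
 * queue range must fall within real_num_tx_queues without overlapping
 * any other class.
 */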
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
        int i, j;

        /* Verify num_tc is not out of max range */
        if (qopt->num_tc > TC_MAX_QUEUE)
                return -EINVAL;

        /* Verify priority mapping uses valid tcs */
        for (i = 0; i < TC_BITMASK + 1; i++) {
                if (qopt->prio_tc_map[i] >= qopt->num_tc)
                        return -EINVAL;
        }

        /* Limit qopt->hw to maximum supported offload value.  Drivers have
         * the option of overriding this later if they don't support a
         * given offload type.
         */
        if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
                qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

        /* If hardware offload is requested we will leave it to the device
         * to either populate the queue counts itself or to validate the
         * provided queue counts.  If ndo_setup_tc is not present then
         * hardware doesn't support offload and we should return an error.
         */
        if (qopt->hw)
                return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

        for (i = 0; i < qopt->num_tc; i++) {
                unsigned int last = qopt->offset[i] + qopt->count[i];

                /* Verify the queue count is in tx range; being equal to
                 * real_num_tx_queues indicates the last queue is in use.
                 */
                if (qopt->offset[i] >= dev->real_num_tx_queues ||
                    !qopt->count[i] ||
                    last > dev->real_num_tx_queues)
                        return -EINVAL;

                /* Verify that the offset and counts do not overlap */
                for (j = i + 1; j < qopt->num_tc; j++) {
                        if (last > qopt->offset[j])
                                return -EINVAL;
                }
        }

        return 0;
}

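/* Set up mqprio as the root qdisc: validate the options, allocate one
 * default child qdisc per hardware tx queue, and program the queue
 * mapping either into the driver (hw offload) or into the netdev's
 * software tc state.  The priority-to-tc map is always taken from the
 * supplied options.
 */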
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct netdev_queue *dev_queue;
        struct Qdisc *qdisc;
        int i, err = -EOPNOTSUPP;
        struct tc_mqprio_qopt *qopt = NULL;

        BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
        BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;

        qopt = nla_data(opt);
        if (mqprio_parse_opt(dev, qopt))
                return -EINVAL;

        /* pre-allocate qdiscs; attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (!priv->qdiscs)
                return -ENOMEM;

        for (i = 0; i < dev->num_tx_queues; i++) {
                dev_queue = netdev_get_tx_queue(dev, i);
                qdisc = qdisc_create_dflt(dev_queue,
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)));
                if (!qdisc)
                        return -ENOMEM;

                priv->qdiscs[i] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

        /* If the mqprio options indicate that hardware should own
         * the queue mapping, then run ndo_setup_tc; otherwise use the
         * supplied and verified mapping.
         */
        if (qopt->hw) {
                struct tc_mqprio_qopt offload = *qopt;
                struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO,
                                           { .mqprio = &offload } };

                err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle,
                                                    0, 0, &tc);
                if (err)
                        return err;

                priv->hw_offload = offload.hw;
        } else {
                netdev_set_num_tc(dev, qopt->num_tc);
                for (i = 0; i < qopt->num_tc; i++)
                        netdev_set_tc_queue(dev, i,
                                            qopt->count[i], qopt->offset[i]);
        }

        /* Always use supplied priority mappings */
        for (i = 0; i < TC_BITMASK + 1; i++)
                netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

        sch->flags |= TCQ_F_MQROOT;
        return 0;
}

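/* Graft the pre-allocated child qdiscs onto their tx queues and drop
 * the temporary array; from this point on the children are reachable
 * through the queues themselves.
 */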
static void mqprio_attach(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct Qdisc *qdisc, *old;
        unsigned int ntx;

        /* Attach underlying qdisc */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
                        qdisc_destroy(old);
                if (ntx < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);
        }
        kfree(priv->qdiscs);
        priv->qdiscs = NULL;
}

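/* Map a class id to its tx queue.  Class ids 1..num_tc are the virtual
 * traffic-class nodes; queue classes start right after them, so the
 * offset of 1 + netdev_get_num_tc(dev) is subtracted first.
 */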
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
                                             unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

        if (ntx >= dev->num_tx_queues)
                return NULL;
        return netdev_get_tx_queue(dev, ntx);
}

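/* Replace the qdisc attached to one tx queue.  The device is briefly
 * deactivated around the graft so the queue is quiescent while its
 * qdisc pointer changes.
 */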
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
                        struct Qdisc **old)
{
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

        if (!dev_queue)
                return -EINVAL;

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        *old = dev_graft_qdisc(dev_queue, new);

        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

        if (dev->flags & IFF_UP)
                dev_activate(dev);

        return 0;
}

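/* Report the current configuration and aggregate statistics.  The
 * counters of every child qdisc are summed under the per-queue locks
 * before the tc_mqprio_qopt is emitted.
 */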
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_mqprio_qopt opt = { 0 };
        struct Qdisc *qdisc;
        unsigned int i;

        sch->q.qlen = 0;
        memset(&sch->bstats, 0, sizeof(sch->bstats));
        memset(&sch->qstats, 0, sizeof(sch->qstats));

        for (i = 0; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                spin_lock_bh(qdisc_lock(qdisc));
                sch->q.qlen += qdisc->q.qlen;
                sch->bstats.bytes += qdisc->bstats.bytes;
                sch->bstats.packets += qdisc->bstats.packets;
                sch->qstats.backlog += qdisc->qstats.backlog;
                sch->qstats.drops += qdisc->qstats.drops;
                sch->qstats.requeues += qdisc->qstats.requeues;
                sch->qstats.overlimits += qdisc->qstats.overlimits;
                spin_unlock_bh(qdisc_lock(qdisc));
        }

        opt.num_tc = netdev_get_num_tc(dev);
        memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
        opt.hw = priv->hw_offload;

        for (i = 0; i < netdev_get_num_tc(dev); i++) {
                opt.count[i] = dev->tc_to_txq[i].count;
                opt.offset[i] = dev->tc_to_txq[i].offset;
        }

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;

        return skb->len;
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

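/* Return the leaf qdisc for a queue class.  For the virtual
 * traffic-class nodes mqprio_queue_get() yields NULL, so they report
 * no leaf of their own.
 */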
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

        if (!dev_queue)
                return NULL;

        return dev_queue->qdisc_sleeping;
}

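/* Classes are addressed by minor number: 1..num_tc for the traffic
 * classes, then one id per tx queue.  No reference counting is needed,
 * so get simply validates the range and put is a no-op.
 */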
static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx = TC_H_MIN(classid);

        if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
                return 0;
        return ntx;
}

static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}

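/* Dump a class: traffic classes hang off the root, while each queue
 * class is parented to the traffic class whose queue range contains it.
 */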
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct net_device *dev = qdisc_dev(sch);

        if (cl <= netdev_get_num_tc(dev)) {
                tcm->tcm_parent = TC_H_ROOT;
                tcm->tcm_info = 0;
        } else {
                int i;
                struct netdev_queue *dev_queue;

                dev_queue = mqprio_queue_get(sch, cl);
                tcm->tcm_parent = 0;
                for (i = 0; i < netdev_get_num_tc(dev); i++) {
                        struct netdev_tc_txq tc = dev->tc_to_txq[i];
                        int q_idx = cl - netdev_get_num_tc(dev);

                        if (q_idx > tc.offset &&
                            q_idx <= tc.offset + tc.count) {
                                tcm->tcm_parent =
                                        TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                  TC_H_MIN(i + 1));
                                break;
                        }
                }
                tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
        }
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

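/* Dump per-class statistics.  For a traffic class this walks every tx
 * queue in the class's range and sums the child qdisc counters; for a
 * queue class it reports the attached qdisc's counters directly.
 */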
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                   struct gnet_dump *d)
        __releases(d->lock)
        __acquires(d->lock)
{
        struct net_device *dev = qdisc_dev(sch);

        if (cl <= netdev_get_num_tc(dev)) {
                int i;
                __u32 qlen = 0;
                struct Qdisc *qdisc;
                struct gnet_stats_queue qstats = {0};
                struct gnet_stats_basic_packed bstats = {0};
                struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

                /* Drop the lock here; it will be reclaimed before touching
                 * statistics.  This is required because the d->lock we hold
                 * here is the lock on dev_queue->qdisc_sleeping, which is
                 * also acquired below.
                 */
                if (d->lock)
                        spin_unlock_bh(d->lock);

                for (i = tc.offset; i < tc.offset + tc.count; i++) {
                        struct netdev_queue *q = netdev_get_tx_queue(dev, i);

                        qdisc = rtnl_dereference(q->qdisc);
                        spin_lock_bh(qdisc_lock(qdisc));
                        qlen += qdisc->q.qlen;
                        bstats.bytes += qdisc->bstats.bytes;
                        bstats.packets += qdisc->bstats.packets;
                        qstats.backlog += qdisc->qstats.backlog;
                        qstats.drops += qdisc->qstats.drops;
                        qstats.requeues += qdisc->qstats.requeues;
                        qstats.overlimits += qdisc->qstats.overlimits;
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
                /* Reclaim root sleeping lock before completing stats */
                if (d->lock)
                        spin_lock_bh(d->lock);
                if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
                    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
                        return -1;
        } else {
                struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

                sch = dev_queue->qdisc_sleeping;
                if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                          d, NULL, &sch->bstats) < 0 ||
                    gnet_stats_copy_queue(d, NULL,
                                          &sch->qstats, sch->q.qlen) < 0)
                        return -1;
        }
        return 0;
}

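/* Walk all classes: first the virtual traffic-class nodes, then one
 * class per tx queue.
 */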
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx;

        if (arg->stop)
                return;

        /* Walk hierarchy with a virtual class per tc */
        arg->count = arg->skip;
        for (ntx = arg->skip;
             ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
             ntx++) {
                if (arg->fn(sch, ntx + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops mqprio_class_ops = {
        .graft          = mqprio_graft,
        .leaf           = mqprio_leaf,
        .get            = mqprio_get,
        .put            = mqprio_put,
        .walk           = mqprio_walk,
        .dump           = mqprio_dump_class,
        .dump_stats     = mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
        .cl_ops         = &mqprio_class_ops,
        .id             = "mqprio",
        .priv_size      = sizeof(struct mqprio_sched),
        .init           = mqprio_init,
        .destroy        = mqprio_destroy,
        .attach         = mqprio_attach,
        .dump           = mqprio_dump,
        .owner          = THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
        return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
        unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");
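
/* Illustrative usage from iproute2 (device name and queue layout are
 * examples only): create three traffic classes over eight tx queues,
 * map the 16 priorities onto the classes, and leave hardware offload
 * disabled:
 *
 *   tc qdisc add dev eth0 root handle 1: mqprio num_tc 3 \
 *       map 2 2 1 0 1 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 4@0 2@4 2@6 hw 0
 */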