]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/mac80211/wme.c
mac80211: use multi-queue master netdevice
[mirror_ubuntu-bionic-kernel.git] / net / mac80211 / wme.c
1 /*
2 * Copyright 2004, Instant802 Networks, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9 #include <linux/netdevice.h>
10 #include <linux/skbuff.h>
11 #include <linux/module.h>
12 #include <linux/if_arp.h>
13 #include <linux/types.h>
14 #include <net/ip.h>
15 #include <net/pkt_sched.h>
16
17 #include <net/mac80211.h>
18 #include "ieee80211_i.h"
19 #include "wme.h"
20
21 /* maximum number of hardware queues we support. */
22 #define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
23 /* current number of hardware queues we support. */
24 #define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)
25
26 /*
27 * Default mapping in classifier to work with default
28 * queue setup.
29 */
30 const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
31
/*
 * Per-qdisc private state for the mac80211 master-device scheduler.
 * One instance lives in the root qdisc attached to the master netdev.
 */
struct ieee80211_sched_data
{
	/* bitmap of hardware queues currently claimed; aggregation queues
	 * are taken/returned with test_and_set_bit()/clear_bit() */
	unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
	/* user-installed tc classifier chain, consulted in classify_1d() */
	struct tcf_proto *filter_list;
	/* one child qdisc per hardware queue */
	struct Qdisc *queues[QD_MAX_QUEUES];
	/* per-queue holding list for frames the driver asked to requeue
	 * (IEEE80211_TX_CTL_REQUEUE); drained first in dequeue */
	struct sk_buff_head requeued[QD_MAX_QUEUES];
};
39
40 static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
41
42 /* given a data frame determine the 802.1p/1d tag to use */
/* given a data frame determine the 802.1p/1d tag to use
 *
 * Order of precedence: user tc filters, magic skb->priority values,
 * then the IPv4 DSCP field; anything else falls back to tag 0.
 */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present: the payload must start
	 * with the SNAP/LLC header for IPv4 and be long enough for an iphdr */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

	/* map the DSCP bits of the TOS byte to a 1d tag; values with any
	 * of the 0x1c bits set are treated as best effort (tag 0) */
	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}
79
80
81 static inline int wme_downgrade_ac(struct sk_buff *skb)
82 {
83 switch (skb->priority) {
84 case 6:
85 case 7:
86 skb->priority = 5; /* VO -> VI */
87 return 0;
88 case 4:
89 case 5:
90 skb->priority = 3; /* VI -> BE */
91 return 0;
92 case 0:
93 case 3:
94 skb->priority = 2; /* BE -> BK */
95 return 0;
96 default:
97 return -1;
98 }
99 }
100
101
102 /* positive return value indicates which queue to use
103 * negative return value indicates to drop the frame */
/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	int qos;

	/* see if frame is data or non data frame */
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return 0;
	}

	/* NOTE(review): dead placeholder, compiled out by the constant 0 —
	 * radiotap-injected frames are not handled here yet */
	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	/* is this a QoS frame? */
	qos = fc & IEEE80211_STYPE_QOS_DATA;

	if (!qos) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client verify acm is not set for this ac,
	 * downgrading until an AC without admission control is found */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}
145
146
/*
 * Root enqueue for the mac80211 qdisc.
 *
 * Driver-requeued frames (IEEE80211_TX_CTL_REQUEUE) bypass classification
 * and go straight onto the per-queue requeued list.  Everything else is
 * classified, has its QoS header filled in, may be redirected to an
 * aggregation queue for its TID, and is handed to the chosen child qdisc.
 * Returns NET_XMIT_SUCCESS on success or NET_XMIT_DROP (after freeing
 * the skb) when classification rejected the frame.
 */
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	struct Qdisc *qdisc;
	struct sta_info *sta;
	int err, queue;
	u8 tid;

	if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
		/* keep the queue chosen on the first pass, unless the TID
		 * now has an active aggregation queue in the pool */
		queue = skb_get_queue_mapping(skb);
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	/* clamp to the last hardware queue; negative (drop) passes through */
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/* now we know the 1d priority, fill in the QoS header if there is one
	 */
	if (WLAN_FC_IS_QOS_DATA(fc)) {
		/* QoS control field is the last 2 bytes of the 802.11 header */
		u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
		u8 ack_policy = 0;
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p = ack_policy | tid;
		p++;
		*p = 0;

		rcu_read_lock();

		/* redirect to this TID's aggregation queue if one is active */
		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}

		rcu_read_unlock();
	}

	if (unlikely(queue < 0)) {
		/* classify80211() asked us to drop the frame */
		kfree_skb(skb);
		err = NET_XMIT_DROP;
	} else {
		tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
		skb_set_queue_mapping(skb, queue);
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}
235
236
237 /* TODO: clean up the cases where master_hard_start_xmit
238 * returns non 0 - it shouldn't ever do that. Once done we
239 * can remove this function */
240 static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
241 {
242 struct ieee80211_sched_data *q = qdisc_priv(qd);
243 struct Qdisc *qdisc;
244 int err;
245
246 /* we recorded which queue to use earlier! */
247 qdisc = q->queues[skb_get_queue_mapping(skb)];
248
249 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
250 qd->q.qlen++;
251 return 0;
252 }
253 qd->qstats.drops++;
254 return err;
255 }
256
257
/*
 * Root dequeue: scan hardware queues in numeric (priority) order and
 * return the first available frame, preferring driver-requeued frames
 * over the child qdisc for each queue.  Queues whose hardware side is
 * stopped, or which are not marked in qdisc_pool, are skipped.
 */
static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < QD_NUM(hw); queue++) {
		/* see if there is room in this hardware queue */
		if (__netif_subqueue_stopped(local->mdev, queue) ||
		    !test_bit(queue, q->qdisc_pool))
			continue;

		/* there is space - try and get a frame; requeued frames
		 * take precedence to preserve ordering */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning a NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}
293
294
295 static void wme_qdiscop_reset(struct Qdisc* qd)
296 {
297 struct ieee80211_sched_data *q = qdisc_priv(qd);
298 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
299 struct ieee80211_hw *hw = &local->hw;
300 int queue;
301
302 /* QUESTION: should we have some hardware flush functionality here? */
303
304 for (queue = 0; queue < QD_NUM(hw); queue++) {
305 skb_queue_purge(&q->requeued[queue]);
306 qdisc_reset(q->queues[queue]);
307 }
308 qd->q.qlen = 0;
309 }
310
311
312 static void wme_qdiscop_destroy(struct Qdisc* qd)
313 {
314 struct ieee80211_sched_data *q = qdisc_priv(qd);
315 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
316 struct ieee80211_hw *hw = &local->hw;
317 int queue;
318
319 tcf_destroy_chain(q->filter_list);
320 q->filter_list = NULL;
321
322 for (queue = 0; queue < QD_NUM(hw); queue++) {
323 skb_queue_purge(&q->requeued[queue]);
324 qdisc_destroy(q->queues[queue]);
325 q->queues[queue] = &noop_qdisc;
326 }
327 }
328
329
/* called whenever parameters are updated on existing qdisc
 * (stub: no tunable parameters exist yet, so always succeeds) */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	return 0;
}
335
336
337 /* called during initial creation of qdisc on device */
/* called during initial creation of qdisc on device
 *
 * Validates that the device is the mac80211 master netdev and that this
 * qdisc is being installed as the root egress qdisc, then creates one
 * pfifo child per hardware queue and marks the non-aggregation queues
 * as in-use in the qdisc pool.  Returns 0 or a negative errno.
 */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	int err = 0, i;

	/* check that device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	hw = &local->hw;

	/* only allow on master dev */
	if (dev != local->mdev)
		return -EINVAL;

	/* ensure that we are root qdisc */
	if (qd->parent != TC_H_ROOT)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	/* if options were passed in, set them */
	if (opt)
		err = wme_qdiscop_tune(qd, opt);

	/* create child queues; a failed child falls back to noop_qdisc
	 * so the slot is still safe to enqueue to (frames are dropped) */
	for (i = 0; i < QD_NUM(hw); i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	/* non-aggregation queues: reserve/mark as used */
	for (i = 0; i < local->hw.queues; i++)
		set_bit(i, q->qdisc_pool);

	return err;
}
387
/* dump qdisc configuration to netlink: not supported, always fails */
static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	return -1;
}
392
393
/*
 * Replace the child qdisc of class @arg (1-based) with @new, handing the
 * previous child back through @old.  A NULL @new parks the slot on
 * noop_qdisc.  The swap is done under the qdisc tree lock and the old
 * child is reset before release.
 */
static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
			     struct Qdisc *new, struct Qdisc **old)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	/* arg == 0 underflows to ULONG_MAX and is rejected here too */
	if (queue >= QD_NUM(hw))
		return -EINVAL;

	if (!new)
		new = &noop_qdisc;

	sch_tree_lock(qd);
	*old = q->queues[queue];
	q->queues[queue] = new;
	qdisc_reset(*old);
	sch_tree_unlock(qd);

	return 0;
}
416
417
418 static struct Qdisc *
419 wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
420 {
421 struct ieee80211_sched_data *q = qdisc_priv(qd);
422 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
423 struct ieee80211_hw *hw = &local->hw;
424 unsigned long queue = arg - 1;
425
426 if (queue >= QD_NUM(hw))
427 return NULL;
428
429 return q->queues[queue];
430 }
431
432
433 static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
434 {
435 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
436 struct ieee80211_hw *hw = &local->hw;
437 unsigned long queue = TC_H_MIN(classid);
438
439 if (queue - 1 >= QD_NUM(hw))
440 return 0;
441
442 return queue;
443 }
444
445
/* bind a tc filter result to a class: same validation as classop_get */
static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}
451
452
/* release a class reference: classes are static, so nothing to do */
static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}
456
457
458 static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
459 struct nlattr **tca, unsigned long *arg)
460 {
461 unsigned long cl = *arg;
462 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
463 struct ieee80211_hw *hw = &local->hw;
464
465 if (cl - 1 > QD_NUM(hw))
466 return -ENOENT;
467
468 /* TODO: put code to program hardware queue parameters here,
469 * to allow programming from tc command line */
470
471 return 0;
472 }
473
474
475 /* we don't support deleting hardware queues
476 * when we add WMM-SA support - TSPECs may be deleted here */
477 static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
478 {
479 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
480 struct ieee80211_hw *hw = &local->hw;
481
482 if (cl - 1 > QD_NUM(hw))
483 return -ENOENT;
484 return 0;
485 }
486
487
488 static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
489 struct sk_buff *skb, struct tcmsg *tcm)
490 {
491 struct ieee80211_sched_data *q = qdisc_priv(qd);
492 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
493 struct ieee80211_hw *hw = &local->hw;
494
495 if (cl - 1 > QD_NUM(hw))
496 return -ENOENT;
497 tcm->tcm_handle = TC_H_MIN(cl);
498 tcm->tcm_parent = qd->handle;
499 tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
500 return 0;
501 }
502
503
/*
 * Iterate over all classes for `tc class show`: invokes arg->fn with the
 * 1-based class id of every hardware queue, honouring the walker's
 * skip/count protocol and stopping when the callback returns < 0.
 */
static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	if (arg->stop)
		return;

	for (queue = 0; queue < QD_NUM(hw); queue++) {
		/* walker protocol: count but do not visit the first
		 * arg->skip classes */
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* we should return classids for our internal queues here
		 * as well as the external ones */
		if (arg->fn(qd, queue+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
527
528
529 static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
530 unsigned long cl)
531 {
532 struct ieee80211_sched_data *q = qdisc_priv(qd);
533
534 if (cl)
535 return NULL;
536
537 return &q->filter_list;
538 }
539
540
541 /* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
542 * - these are the operations on the classes */
/* class operations: one static class per hardware queue, each with a
 * graftable leaf qdisc; classes cannot be created or (really) deleted */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,	/* unbind is a no-op, same as put */

	.dump = wme_classop_dump_class,
};
560
561
562 /* queueing discipline operations */
/* queueing discipline operations: the root "ieee80211" qdisc installed
 * on the mac80211 master netdevice */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};
582
583
/*
 * Install the wme qdisc as the device's sleeping root qdisc, bypassing
 * the normal tc setup path.  On allocation failure the device is left
 * with whatever qdisc it had; the error is only logged.
 */
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	/* publish under the tree lock, as qdisc_create/attach would */
	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}
602
603
/* return non-zero when the device's sleeping qdisc is our wme qdisc */
int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}
608
609
/* register the "ieee80211" qdisc with the net_sched core; returns 0 or
 * a negative errno from register_qdisc() */
int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}
614
615
/* unregister the "ieee80211" qdisc from the net_sched core */
void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}
620
621 int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
622 struct sta_info *sta, u16 tid)
623 {
624 int i;
625 struct ieee80211_sched_data *q =
626 qdisc_priv(local->mdev->qdisc_sleeping);
627 DECLARE_MAC_BUF(mac);
628
629 /* prepare the filter and save it for the SW queue
630 * matching the received HW queue */
631
632 if (!local->hw.ampdu_queues)
633 return -EPERM;
634
635 /* try to get a Qdisc from the pool */
636 for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
637 if (!test_and_set_bit(i, q->qdisc_pool)) {
638 ieee80211_stop_queue(local_to_hw(local), i);
639 sta->tid_to_tx_q[tid] = i;
640
641 /* IF there are already pending packets
642 * on this tid first we need to drain them
643 * on the previous queue
644 * since HT is strict in order */
645 #ifdef CONFIG_MAC80211_HT_DEBUG
646 if (net_ratelimit())
647 printk(KERN_DEBUG "allocated aggregation queue"
648 " %d tid %d addr %s pool=0x%lX",
649 i, tid, print_mac(mac, sta->addr),
650 q->qdisc_pool[0]);
651 #endif /* CONFIG_MAC80211_HT_DEBUG */
652 return 0;
653 }
654
655 return -EAGAIN;
656 }
657
658 /**
659 * the caller needs to hold local->mdev->queue_lock
660 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool; QD_NUM(hw) marks "no aggregation
	 * queue" since it is one past the last valid index */
	clear_bit(agg_queue, q->qdisc_pool);
	sta->tid_to_tx_q[tid] = QD_NUM(hw);

	/* either push pending frames back through the root qdisc for
	 * reclassification, or throw them away */
	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}
679
680 void ieee80211_requeue(struct ieee80211_local *local, int queue)
681 {
682 struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
683 struct ieee80211_sched_data *q = qdisc_priv(root_qd);
684 struct Qdisc *qdisc = q->queues[queue];
685 struct sk_buff *skb = NULL;
686 u32 len;
687
688 if (!qdisc || !qdisc->dequeue)
689 return;
690
691 printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
692 for (len = qdisc->q.qlen; len > 0; len--) {
693 skb = qdisc->dequeue(qdisc);
694 root_qd->q.qlen--;
695 /* packet will be classified again and */
696 /* skb->packet_data->queue will be overridden if needed */
697 if (skb)
698 wme_qdiscop_enqueue(skb, root_qd);
699 }
700 }