net/mac80211/main.c (mirror_ubuntu-zesty-kernel.git, commit "mac80211: revamp virtual interface handling")
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <net/mac80211.h>
12 #include <net/ieee80211_radiotap.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/netdevice.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/skbuff.h>
19 #include <linux/etherdevice.h>
20 #include <linux/if_arp.h>
21 #include <linux/wireless.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/bitmap.h>
24 #include <net/net_namespace.h>
25 #include <net/cfg80211.h>
26
27 #include "ieee80211_i.h"
28 #include "rate.h"
29 #include "mesh.h"
30 #include "wep.h"
31 #include "wme.h"
32 #include "aes_ccm.h"
33 #include "led.h"
34 #include "cfg.h"
35 #include "debugfs.h"
36 #include "debugfs_netdev.h"
37
38 /*
39 * For seeing transmitted packets on monitor interfaces
40 * we have a radiotap header too.
41 */
42 struct ieee80211_tx_status_rtap_hdr {
43 struct ieee80211_radiotap_header hdr;
44 __le16 tx_flags;
45 u8 data_retries;
46 } __attribute__ ((packed));
47
48 /* common interface routines */
49
50 static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
51 {
52 memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
53 return ETH_ALEN;
54 }
55
56 /* must be called under mdev tx lock */
57 static void ieee80211_configure_filter(struct ieee80211_local *local)
58 {
59 unsigned int changed_flags;
60 unsigned int new_flags = 0;
61
62 if (atomic_read(&local->iff_promiscs))
63 new_flags |= FIF_PROMISC_IN_BSS;
64
65 if (atomic_read(&local->iff_allmultis))
66 new_flags |= FIF_ALLMULTI;
67
68 if (local->monitors)
69 new_flags |= FIF_BCN_PRBRESP_PROMISC;
70
71 if (local->fif_fcsfail)
72 new_flags |= FIF_FCSFAIL;
73
74 if (local->fif_plcpfail)
75 new_flags |= FIF_PLCPFAIL;
76
77 if (local->fif_control)
78 new_flags |= FIF_CONTROL;
79
80 if (local->fif_other_bss)
81 new_flags |= FIF_OTHER_BSS;
82
83 changed_flags = local->filter_flags ^ new_flags;
84
85 /* be a bit nasty */
86 new_flags |= (1<<31);
87
88 local->ops->configure_filter(local_to_hw(local),
89 changed_flags, &new_flags,
90 local->mdev->mc_count,
91 local->mdev->mc_list);
92
93 WARN_ON(new_flags & (1<<31));
94
95 local->filter_flags = new_flags & ~(1<<31);
96 }
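/*
 * A minimal sketch (not part of this file) of the driver side of the
 * contract above: mac80211 ORs an invalid bit into the flags ("be a bit
 * nasty") and expects the driver's configure_filter callback to mask
 * *total_flags down to what its hardware can honour, which is why the
 * WARN_ON() above fires if bit 31 survives.  The myhw_* name and the
 * supported-flag set are hypothetical; the prototype follows the call
 * above, see the ieee80211_ops definition for the exact types.
 */
static void myhw_configure_filter(struct ieee80211_hw *hw,
				  unsigned int changed_flags,
				  unsigned int *total_flags,
				  int mc_count, struct dev_addr_list *mc_list)
{
	/* filter flags this hypothetical hardware can honour */
	unsigned int supported = FIF_ALLMULTI | FIF_FCSFAIL |
				 FIF_BCN_PRBRESP_PROMISC | FIF_OTHER_BSS;

	/* clears the "nasty" bit and anything else we cannot do */
	*total_flags &= supported;

	if (changed_flags & supported) {
		/* a real driver would reprogram its RX filter registers
		 * here from *total_flags, mc_count and mc_list */
	}
}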
97
98 /* master interface */
99
100 static int ieee80211_master_open(struct net_device *dev)
101 {
102 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
103 struct ieee80211_sub_if_data *sdata;
104 int res = -EOPNOTSUPP;
105
106 /* we hold the RTNL here so can safely walk the list */
107 list_for_each_entry(sdata, &local->interfaces, list) {
108 if (netif_running(sdata->dev)) {
109 res = 0;
110 break;
111 }
112 }
113
114 if (res)
115 return res;
116
117 netif_start_queue(local->mdev);
118
119 return 0;
120 }
121
122 static int ieee80211_master_stop(struct net_device *dev)
123 {
124 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
125 struct ieee80211_sub_if_data *sdata;
126
127 /* we hold the RTNL here so can safely walk the list */
128 list_for_each_entry(sdata, &local->interfaces, list)
129 if (netif_running(sdata->dev))
130 dev_close(sdata->dev);
131
132 return 0;
133 }
134
135 static void ieee80211_master_set_multicast_list(struct net_device *dev)
136 {
137 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
138
139 ieee80211_configure_filter(local);
140 }
141
142 /* regular interfaces */
143
144 static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
145 {
146 int meshhdrlen;
147 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
148
149 meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;
150
151 /* FIX: what would be proper limits for MTU?
152 * This interface uses 802.3 frames. */
153 if (new_mtu < 256 ||
154 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
155 return -EINVAL;
156 }
157
158 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
159 printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
160 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
161 dev->mtu = new_mtu;
162 return 0;
163 }
164
165 static inline int identical_mac_addr_allowed(int type1, int type2)
166 {
167 return (type1 == IEEE80211_IF_TYPE_MNTR ||
168 type2 == IEEE80211_IF_TYPE_MNTR ||
169 (type1 == IEEE80211_IF_TYPE_AP &&
170 type2 == IEEE80211_IF_TYPE_WDS) ||
171 (type1 == IEEE80211_IF_TYPE_WDS &&
172 (type2 == IEEE80211_IF_TYPE_WDS ||
173 type2 == IEEE80211_IF_TYPE_AP)) ||
174 (type1 == IEEE80211_IF_TYPE_AP &&
175 type2 == IEEE80211_IF_TYPE_VLAN) ||
176 (type1 == IEEE80211_IF_TYPE_VLAN &&
177 (type2 == IEEE80211_IF_TYPE_AP ||
178 type2 == IEEE80211_IF_TYPE_VLAN)));
179 }
180
181 static int ieee80211_open(struct net_device *dev)
182 {
183 struct ieee80211_sub_if_data *sdata, *nsdata;
184 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
185 struct sta_info *sta;
186 struct ieee80211_if_init_conf conf;
187 u32 changed = 0;
188 int res;
189 bool need_hw_reconfig = 0;
190
191 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
192
193 /* we hold the RTNL here so can safely walk the list */
194 list_for_each_entry(nsdata, &local->interfaces, list) {
195 struct net_device *ndev = nsdata->dev;
196
197 if (ndev != dev && netif_running(ndev)) {
198 /*
199 * Allow only a single IBSS interface to be up at any
200 * time. This is restricted because beacon distribution
201 * cannot work properly if both are in the same IBSS.
202 *
203 * To remove this restriction we'd have to disallow them
204 * from setting the same SSID on different IBSS interfaces
205 * belonging to the same hardware. Then, however, we're
206 * faced with having to adopt two different TSF timers...
207 */
208 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
209 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
210 return -EBUSY;
211
212 /*
213 * The remaining checks are only performed for interfaces
214 * with the same MAC address.
215 */
216 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
217 continue;
218
219 /*
220 * check whether it may have the same address
221 */
222 if (!identical_mac_addr_allowed(sdata->vif.type,
223 nsdata->vif.type))
224 return -ENOTUNIQ;
225
226 /*
227 * can only add VLANs to enabled APs
228 */
229 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
230 nsdata->vif.type == IEEE80211_IF_TYPE_AP)
231 sdata->bss = &nsdata->u.ap;
232 }
233 }
234
235 switch (sdata->vif.type) {
236 case IEEE80211_IF_TYPE_WDS:
237 if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
238 return -ENOLINK;
239 break;
240 case IEEE80211_IF_TYPE_VLAN:
241 if (!sdata->bss)
242 return -ENOLINK;
243 list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
244 break;
245 case IEEE80211_IF_TYPE_AP:
246 sdata->bss = &sdata->u.ap;
247 break;
248 case IEEE80211_IF_TYPE_STA:
249 case IEEE80211_IF_TYPE_MNTR:
250 case IEEE80211_IF_TYPE_IBSS:
251 case IEEE80211_IF_TYPE_MESH_POINT:
252 /* no special treatment */
253 break;
254 case IEEE80211_IF_TYPE_INVALID:
255 /* cannot happen */
256 WARN_ON(1);
257 break;
258 }
259
260 if (local->open_count == 0) {
261 res = 0;
262 if (local->ops->start)
263 res = local->ops->start(local_to_hw(local));
264 if (res)
265 goto err_del_bss;
266 need_hw_reconfig = 1;
267 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
268 }
269
270 switch (sdata->vif.type) {
271 case IEEE80211_IF_TYPE_VLAN:
272 /* no need to tell driver */
273 break;
274 case IEEE80211_IF_TYPE_MNTR:
275 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
276 local->cooked_mntrs++;
277 break;
278 }
279
280 /* must be before the call to ieee80211_configure_filter */
281 local->monitors++;
282 if (local->monitors == 1)
283 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
284
285 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
286 local->fif_fcsfail++;
287 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
288 local->fif_plcpfail++;
289 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
290 local->fif_control++;
291 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
292 local->fif_other_bss++;
293
294 netif_tx_lock_bh(local->mdev);
295 ieee80211_configure_filter(local);
296 netif_tx_unlock_bh(local->mdev);
297 break;
298 case IEEE80211_IF_TYPE_STA:
299 case IEEE80211_IF_TYPE_IBSS:
300 sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
301 /* fall through */
302 default:
303 conf.vif = &sdata->vif;
304 conf.type = sdata->vif.type;
305 conf.mac_addr = dev->dev_addr;
306 res = local->ops->add_interface(local_to_hw(local), &conf);
307 if (res)
308 goto err_stop;
309
310 ieee80211_if_config(dev);
311 changed |= ieee80211_reset_erp_info(dev);
312 ieee80211_bss_info_change_notify(sdata, changed);
313 ieee80211_enable_keys(sdata);
314
315 if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
316 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
317 netif_carrier_off(dev);
318 else
319 netif_carrier_on(dev);
320 }
321
322 if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
323 /* Create STA entry for the WDS peer */
324 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
325 GFP_KERNEL);
326 if (!sta) {
327 res = -ENOMEM;
328 goto err_del_interface;
329 }
330
331 /* no locking required since STA is not live yet */
332 sta->flags |= WLAN_STA_AUTHORIZED;
333
334 res = sta_info_insert(sta);
335 if (res) {
336 /* STA has been freed */
337 goto err_del_interface;
338 }
339 }
340
341 if (local->open_count == 0) {
342 res = dev_open(local->mdev);
343 WARN_ON(res);
344 if (res)
345 goto err_del_interface;
346 tasklet_enable(&local->tx_pending_tasklet);
347 tasklet_enable(&local->tasklet);
348 }
349
350 /*
351 * set_multicast_list will be invoked by the networking core
352 * which will check whether any increments here were done in
353 * error and sync them down to the hardware as filter flags.
354 */
355 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
356 atomic_inc(&local->iff_allmultis);
357
358 if (sdata->flags & IEEE80211_SDATA_PROMISC)
359 atomic_inc(&local->iff_promiscs);
360
361 local->open_count++;
362 if (need_hw_reconfig)
363 ieee80211_hw_config(local);
364
365 /*
366 * ieee80211_sta_work is disabled while network interface
367 * is down. Therefore, some configuration changes may not
368 * yet be effective. Trigger execution of ieee80211_sta_work
369 * to fix this.
370 */
371 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
372 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
373 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
374 queue_work(local->hw.workqueue, &ifsta->work);
375 }
376
377 netif_start_queue(dev);
378
379 return 0;
380 err_del_interface:
381 local->ops->remove_interface(local_to_hw(local), &conf);
382 err_stop:
383 if (!local->open_count && local->ops->stop)
384 local->ops->stop(local_to_hw(local));
385 err_del_bss:
386 sdata->bss = NULL;
387 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
388 list_del(&sdata->u.vlan.list);
389 return res;
390 }
391
392 static int ieee80211_stop(struct net_device *dev)
393 {
394 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
395 struct ieee80211_local *local = sdata->local;
396 struct ieee80211_if_init_conf conf;
397 struct sta_info *sta;
398
399 /*
400 * Stop TX on this interface first.
401 */
402 netif_stop_queue(dev);
403
404 /*
405 * Now delete all active aggregation sessions.
406 */
407 rcu_read_lock();
408
409 list_for_each_entry_rcu(sta, &local->sta_list, list) {
410 if (sta->sdata == sdata)
411 ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
412 }
413
414 rcu_read_unlock();
415
416 /*
417 * Remove all stations associated with this interface.
418 *
419 * This must be done before calling ops->remove_interface()
420 * because otherwise we can later invoke ops->sta_notify()
421 * whenever the STAs are removed, and that invalidates driver
422 * assumptions about always getting a vif pointer that is valid
423 * (because if we remove a STA after ops->remove_interface()
424 * the driver will have removed the vif info already!)
425 *
426 * We could relax this and only unlink the stations from the
427 * hash table and list but keep them on a per-sdata list that
428 * will be inserted back again when the interface is brought
429 * up again, but I don't currently see a use case for that,
430 * except with WDS which gets a STA entry created when it is
431 * brought up.
432 */
433 sta_info_flush(local, sdata);
434
435 /*
436 * Don't count this interface for promisc/allmulti while it
437 * is down. dev_mc_unsync() will invoke set_multicast_list
438 * on the master interface which will sync these down to the
439 * hardware as filter flags.
440 */
441 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
442 atomic_dec(&local->iff_allmultis);
443
444 if (sdata->flags & IEEE80211_SDATA_PROMISC)
445 atomic_dec(&local->iff_promiscs);
446
447 dev_mc_unsync(local->mdev, dev);
448
449 /* APs need special treatment */
450 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
451 struct ieee80211_sub_if_data *vlan, *tmp;
452 struct beacon_data *old_beacon = sdata->u.ap.beacon;
453
454 /* remove beacon */
455 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
456 synchronize_rcu();
457 kfree(old_beacon);
458
459 /* down all dependent devices, that is VLANs */
460 list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
461 u.vlan.list)
462 dev_close(vlan->dev);
463 WARN_ON(!list_empty(&sdata->u.ap.vlans));
464 }
465
466 local->open_count--;
467
468 switch (sdata->vif.type) {
469 case IEEE80211_IF_TYPE_VLAN:
470 list_del(&sdata->u.vlan.list);
471 /* no need to tell driver */
472 break;
473 case IEEE80211_IF_TYPE_MNTR:
474 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
475 local->cooked_mntrs--;
476 break;
477 }
478
479 local->monitors--;
480 if (local->monitors == 0)
481 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
482
483 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
484 local->fif_fcsfail--;
485 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
486 local->fif_plcpfail--;
487 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
488 local->fif_control--;
489 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
490 local->fif_other_bss--;
491
492 netif_tx_lock_bh(local->mdev);
493 ieee80211_configure_filter(local);
494 netif_tx_unlock_bh(local->mdev);
495 break;
496 case IEEE80211_IF_TYPE_MESH_POINT:
497 case IEEE80211_IF_TYPE_STA:
498 case IEEE80211_IF_TYPE_IBSS:
499 sdata->u.sta.state = IEEE80211_DISABLED;
500 memset(sdata->u.sta.bssid, 0, ETH_ALEN);
501 del_timer_sync(&sdata->u.sta.timer);
502 /*
503 * When we get here, the interface is marked down.
504 * Call synchronize_rcu() to wait for the RX path
505 * should it be using the interface and enqueuing
506 * frames at this very time on another CPU.
507 */
508 synchronize_rcu();
509 skb_queue_purge(&sdata->u.sta.skb_queue);
510
511 if (local->scan_dev == sdata->dev) {
512 if (!local->ops->hw_scan) {
513 local->sta_sw_scanning = 0;
514 cancel_delayed_work(&local->scan_work);
515 } else
516 local->sta_hw_scanning = 0;
517 }
518
519 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
520 kfree(sdata->u.sta.extra_ie);
521 sdata->u.sta.extra_ie = NULL;
522 sdata->u.sta.extra_ie_len = 0;
523 /* fall through */
524 default:
525 conf.vif = &sdata->vif;
526 conf.type = sdata->vif.type;
527 conf.mac_addr = dev->dev_addr;
528 /* disable all keys for as long as this netdev is down */
529 ieee80211_disable_keys(sdata);
530 local->ops->remove_interface(local_to_hw(local), &conf);
531 }
532
533 sdata->bss = NULL;
534
535 if (local->open_count == 0) {
536 if (netif_running(local->mdev))
537 dev_close(local->mdev);
538
539 if (local->ops->stop)
540 local->ops->stop(local_to_hw(local));
541
542 ieee80211_led_radio(local, 0);
543
544 flush_workqueue(local->hw.workqueue);
545
546 tasklet_disable(&local->tx_pending_tasklet);
547 tasklet_disable(&local->tasklet);
548 }
549
550 return 0;
551 }
552
553 int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
554 {
555 struct ieee80211_local *local = hw_to_local(hw);
556 struct netdev_queue *txq;
557 struct sta_info *sta;
558 struct ieee80211_sub_if_data *sdata;
559 u16 start_seq_num = 0;
560 u8 *state;
561 int ret;
562 DECLARE_MAC_BUF(mac);
563
564 if (tid >= STA_TID_NUM)
565 return -EINVAL;
566
567 #ifdef CONFIG_MAC80211_HT_DEBUG
568 printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
569 print_mac(mac, ra), tid);
570 #endif /* CONFIG_MAC80211_HT_DEBUG */
571
572 rcu_read_lock();
573
574 sta = sta_info_get(local, ra);
575 if (!sta) {
576 #ifdef CONFIG_MAC80211_HT_DEBUG
577 printk(KERN_DEBUG "Could not find the station\n");
578 #endif
579 ret = -ENOENT;
580 goto exit;
581 }
582
583 spin_lock_bh(&sta->lock);
584
585 /* we have tried too many times, receiver does not want A-MPDU */
586 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
587 ret = -EBUSY;
588 goto err_unlock_sta;
589 }
590
591 state = &sta->ampdu_mlme.tid_state_tx[tid];
592 /* check if the TID is not in aggregation flow already */
593 if (*state != HT_AGG_STATE_IDLE) {
594 #ifdef CONFIG_MAC80211_HT_DEBUG
595 printk(KERN_DEBUG "BA request denied - session is not "
596 "idle on tid %u\n", tid);
597 #endif /* CONFIG_MAC80211_HT_DEBUG */
598 ret = -EAGAIN;
599 goto err_unlock_sta;
600 }
601
602 /* prepare A-MPDU MLME for Tx aggregation */
603 sta->ampdu_mlme.tid_tx[tid] =
604 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
605 if (!sta->ampdu_mlme.tid_tx[tid]) {
606 #ifdef CONFIG_MAC80211_HT_DEBUG
607 if (net_ratelimit())
608 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
609 tid);
610 #endif
611 ret = -ENOMEM;
612 goto err_unlock_sta;
613 }
614 /* Tx timer */
615 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
616 sta_addba_resp_timer_expired;
617 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
618 (unsigned long)&sta->timer_to_tid[tid];
619 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
620
621 /* ensure that the TX flow won't interrupt us
622 * until the end of the call to the requeue function */
623 txq = &local->mdev->tx_queue;
624 spin_lock_bh(&txq->lock);
625
626 /* create a new queue for this aggregation */
627 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
628
629 /* if no queue is available for aggregation,
630 * don't switch to aggregation */
631 if (ret) {
632 #ifdef CONFIG_MAC80211_HT_DEBUG
633 printk(KERN_DEBUG "BA request denied - queue unavailable for"
634 " tid %d\n", tid);
635 #endif /* CONFIG_MAC80211_HT_DEBUG */
636 goto err_unlock_queue;
637 }
638 sdata = sta->sdata;
639
640 /* OK, the ADDBA frame hasn't been sent yet, but if the driver calls the
641 * callback right away, it must see that the flow has begun */
642 *state |= HT_ADDBA_REQUESTED_MSK;
643
644 if (local->ops->ampdu_action)
645 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
646 ra, tid, &start_seq_num);
647
648 if (ret) {
649 /* No need to requeue the packets in the agg queue, since we
650 * held the tx lock: no packet could be enqueued to the newly
651 * allocated queue */
652 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
653 #ifdef CONFIG_MAC80211_HT_DEBUG
654 printk(KERN_DEBUG "BA request denied - HW unavailable for"
655 " tid %d\n", tid);
656 #endif /* CONFIG_MAC80211_HT_DEBUG */
657 *state = HT_AGG_STATE_IDLE;
658 goto err_unlock_queue;
659 }
660
661 /* Will put all the packets in the new SW queue */
662 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
663 spin_unlock_bh(&txq->lock);
664 spin_unlock_bh(&sta->lock);
665
666 /* send an addBA request */
667 sta->ampdu_mlme.dialog_token_allocator++;
668 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
669 sta->ampdu_mlme.dialog_token_allocator;
670 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
671
672
673 ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
674 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
675 sta->ampdu_mlme.tid_tx[tid]->ssn,
676 0x40, 5000);
677 /* activate the timer for the recipient's addBA response */
678 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
679 jiffies + ADDBA_RESP_INTERVAL;
680 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
681 #ifdef CONFIG_MAC80211_HT_DEBUG
682 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
683 #endif
684 goto exit;
685
686 err_unlock_queue:
687 kfree(sta->ampdu_mlme.tid_tx[tid]);
688 sta->ampdu_mlme.tid_tx[tid] = NULL;
689 spin_unlock_bh(&txq->lock);
690 ret = -EBUSY;
691 err_unlock_sta:
692 spin_unlock_bh(&sta->lock);
693 exit:
694 rcu_read_unlock();
695 return ret;
696 }
697 EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
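/*
 * Sketch of how the exported entry point above is typically used: a driver
 * (or its rate-scaling code) that sees sustained QoS traffic to a peer can
 * ask mac80211 to negotiate a Tx BA session for that TID.  Everything except
 * ieee80211_start_tx_ba_session() itself is a hypothetical example.
 */
static void myhw_maybe_aggregate(struct ieee80211_hw *hw, u8 *peer_addr, u16 tid)
{
	int ret;

	/* kicks off the ADDBA handshake; completion is reported later by
	 * the driver through ieee80211_start_tx_ba_cb_irqsafe() */
	ret = ieee80211_start_tx_ba_session(hw, peer_addr, tid);
	if (ret)
		printk(KERN_DEBUG "BA session request for tid %u failed: %d\n",
		       tid, ret);
}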
698
699 int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
700 u8 *ra, u16 tid,
701 enum ieee80211_back_parties initiator)
702 {
703 struct ieee80211_local *local = hw_to_local(hw);
704 struct sta_info *sta;
705 u8 *state;
706 int ret = 0;
707 DECLARE_MAC_BUF(mac);
708
709 if (tid >= STA_TID_NUM)
710 return -EINVAL;
711
712 rcu_read_lock();
713 sta = sta_info_get(local, ra);
714 if (!sta) {
715 rcu_read_unlock();
716 return -ENOENT;
717 }
718
719 /* check if the TID is in aggregation */
720 state = &sta->ampdu_mlme.tid_state_tx[tid];
721 spin_lock_bh(&sta->lock);
722
723 if (*state != HT_AGG_STATE_OPERATIONAL) {
724 ret = -ENOENT;
725 goto stop_BA_exit;
726 }
727
728 #ifdef CONFIG_MAC80211_HT_DEBUG
729 printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
730 print_mac(mac, ra), tid);
731 #endif /* CONFIG_MAC80211_HT_DEBUG */
732
733 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
734
735 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
736 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
737
738 if (local->ops->ampdu_action)
739 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
740 ra, tid, NULL);
741
742 /* the HW denied the request to go back to legacy */
743 if (ret) {
744 WARN_ON(ret != -EBUSY);
745 *state = HT_AGG_STATE_OPERATIONAL;
746 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
747 goto stop_BA_exit;
748 }
749
750 stop_BA_exit:
751 spin_unlock_bh(&sta->lock);
752 rcu_read_unlock();
753 return ret;
754 }
755 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
756
757 void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
758 {
759 struct ieee80211_local *local = hw_to_local(hw);
760 struct sta_info *sta;
761 u8 *state;
762 DECLARE_MAC_BUF(mac);
763
764 if (tid >= STA_TID_NUM) {
765 #ifdef CONFIG_MAC80211_HT_DEBUG
766 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
767 tid, STA_TID_NUM);
768 #endif
769 return;
770 }
771
772 rcu_read_lock();
773 sta = sta_info_get(local, ra);
774 if (!sta) {
775 rcu_read_unlock();
776 #ifdef CONFIG_MAC80211_HT_DEBUG
777 printk(KERN_DEBUG "Could not find station: %s\n",
778 print_mac(mac, ra));
779 #endif
780 return;
781 }
782
783 state = &sta->ampdu_mlme.tid_state_tx[tid];
784 spin_lock_bh(&sta->lock);
785
786 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
787 #ifdef CONFIG_MAC80211_HT_DEBUG
788 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
789 *state);
790 #endif
791 spin_unlock_bh(&sta->lock);
792 rcu_read_unlock();
793 return;
794 }
795
796 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
797
798 *state |= HT_ADDBA_DRV_READY_MSK;
799
800 if (*state == HT_AGG_STATE_OPERATIONAL) {
801 #ifdef CONFIG_MAC80211_HT_DEBUG
802 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
803 #endif
804 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
805 }
806 spin_unlock_bh(&sta->lock);
807 rcu_read_unlock();
808 }
809 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
810
811 void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
812 {
813 struct ieee80211_local *local = hw_to_local(hw);
814 struct netdev_queue *txq;
815 struct sta_info *sta;
816 u8 *state;
817 int agg_queue;
818 DECLARE_MAC_BUF(mac);
819
820 if (tid >= STA_TID_NUM) {
821 #ifdef CONFIG_MAC80211_HT_DEBUG
822 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
823 tid, STA_TID_NUM);
824 #endif
825 return;
826 }
827
828 #ifdef CONFIG_MAC80211_HT_DEBUG
829 printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
830 print_mac(mac, ra), tid);
831 #endif /* CONFIG_MAC80211_HT_DEBUG */
832
833 rcu_read_lock();
834 sta = sta_info_get(local, ra);
835 if (!sta) {
836 #ifdef CONFIG_MAC80211_HT_DEBUG
837 printk(KERN_DEBUG "Could not find station: %s\n",
838 print_mac(mac, ra));
839 #endif
840 rcu_read_unlock();
841 return;
842 }
843 state = &sta->ampdu_mlme.tid_state_tx[tid];
844
845 /* NOTE: no need to take sta->lock for this state check, as
846 * ieee80211_stop_tx_ba_session lets only one
847 * stop call pass through per sta/tid */
848 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
849 #ifdef CONFIG_MAC80211_HT_DEBUG
850 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
851 #endif
852 rcu_read_unlock();
853 return;
854 }
855
856 if (*state & HT_AGG_STATE_INITIATOR_MSK)
857 ieee80211_send_delba(sta->sdata->dev, ra, tid,
858 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
859
860 agg_queue = sta->tid_to_tx_q[tid];
861
862 /* avoid ordering issues: we are the only one that can modify
863 * the content of the qdiscs */
864 txq = &local->mdev->tx_queue;
865 spin_lock_bh(&txq->lock);
866 /* remove the queue for this aggregation */
867 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
868 spin_unlock_bh(&txq->lock);
869
870 /* we just requeued all the frames that were in the removed
871 * queue, and since we might miss a softirq we do netif_schedule_queue.
872 * ieee80211_wake_queue is not used here as this queue is not
873 * necessarily stopped */
874 netif_schedule_queue(txq);
875 spin_lock_bh(&sta->lock);
876 *state = HT_AGG_STATE_IDLE;
877 sta->ampdu_mlme.addba_req_num[tid] = 0;
878 kfree(sta->ampdu_mlme.tid_tx[tid]);
879 sta->ampdu_mlme.tid_tx[tid] = NULL;
880 spin_unlock_bh(&sta->lock);
881
882 rcu_read_unlock();
883 }
884 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
885
886 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
887 const u8 *ra, u16 tid)
888 {
889 struct ieee80211_local *local = hw_to_local(hw);
890 struct ieee80211_ra_tid *ra_tid;
891 struct sk_buff *skb = dev_alloc_skb(0);
892
893 if (unlikely(!skb)) {
894 #ifdef CONFIG_MAC80211_HT_DEBUG
895 if (net_ratelimit())
896 printk(KERN_WARNING "%s: Not enough memory, "
897 "dropping start BA session\n", wiphy_name(local->hw.wiphy));
898 #endif
899 return;
900 }
901 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
902 memcpy(&ra_tid->ra, ra, ETH_ALEN);
903 ra_tid->tid = tid;
904
905 skb->pkt_type = IEEE80211_ADDBA_MSG;
906 skb_queue_tail(&local->skb_queue, skb);
907 tasklet_schedule(&local->tasklet);
908 }
909 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
910
911 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
912 const u8 *ra, u16 tid)
913 {
914 struct ieee80211_local *local = hw_to_local(hw);
915 struct ieee80211_ra_tid *ra_tid;
916 struct sk_buff *skb = dev_alloc_skb(0);
917
918 if (unlikely(!skb)) {
919 #ifdef CONFIG_MAC80211_HT_DEBUG
920 if (net_ratelimit())
921 printk(KERN_WARNING "%s: Not enough memory, "
922 "dropping stop BA session\n", wiphy_name(local->hw.wiphy));
923 #endif
924 return;
925 }
926 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
927 memcpy(&ra_tid->ra, ra, ETH_ALEN);
928 ra_tid->tid = tid;
929
930 skb->pkt_type = IEEE80211_DELBA_MSG;
931 skb_queue_tail(&local->skb_queue, skb);
932 tasklet_schedule(&local->tasklet);
933 }
934 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
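/*
 * Sketch of the driver half of the handshake driven above: mac80211 invokes
 * ops->ampdu_action() with IEEE80211_AMPDU_TX_START/_TX_STOP, and once the
 * hardware has actually (de)activated aggregation the driver reports back
 * through the two _irqsafe callbacks exported here.  The myhw_* name, the
 * immediate completion and the exact ampdu_action prototype are assumptions
 * for illustration only.
 */
static int myhw_ampdu_action(struct ieee80211_hw *hw,
			     enum ieee80211_ampdu_mlme_action action,
			     const u8 *addr, u16 tid, u16 *ssn)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		/* a real driver would set up its aggregation queue here and
		 * report the starting sequence number through *ssn */
		*ssn = 0;
		ieee80211_start_tx_ba_cb_irqsafe(hw, addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP:
		ieee80211_stop_tx_ba_cb_irqsafe(hw, addr, tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}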
935
936 static void ieee80211_set_multicast_list(struct net_device *dev)
937 {
938 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
939 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
940 int allmulti, promisc, sdata_allmulti, sdata_promisc;
941
942 allmulti = !!(dev->flags & IFF_ALLMULTI);
943 promisc = !!(dev->flags & IFF_PROMISC);
944 sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
945 sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
946
947 if (allmulti != sdata_allmulti) {
948 if (dev->flags & IFF_ALLMULTI)
949 atomic_inc(&local->iff_allmultis);
950 else
951 atomic_dec(&local->iff_allmultis);
952 sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
953 }
954
955 if (promisc != sdata_promisc) {
956 if (dev->flags & IFF_PROMISC)
957 atomic_inc(&local->iff_promiscs);
958 else
959 atomic_dec(&local->iff_promiscs);
960 sdata->flags ^= IEEE80211_SDATA_PROMISC;
961 }
962
963 dev_mc_sync(local->mdev, dev);
964 }
965
966 static const struct header_ops ieee80211_header_ops = {
967 .create = eth_header,
968 .parse = header_parse_80211,
969 .rebuild = eth_rebuild_header,
970 .cache = eth_header_cache,
971 .cache_update = eth_header_cache_update,
972 };
973
974 void ieee80211_if_setup(struct net_device *dev)
975 {
976 ether_setup(dev);
977 dev->hard_start_xmit = ieee80211_subif_start_xmit;
978 dev->wireless_handlers = &ieee80211_iw_handler_def;
979 dev->set_multicast_list = ieee80211_set_multicast_list;
980 dev->change_mtu = ieee80211_change_mtu;
981 dev->open = ieee80211_open;
982 dev->stop = ieee80211_stop;
983 dev->destructor = free_netdev;
984 }
985
986 /* everything else */
987
988 static int __ieee80211_if_config(struct net_device *dev,
989 struct sk_buff *beacon)
990 {
991 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
992 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
993 struct ieee80211_if_conf conf;
994
995 if (!local->ops->config_interface || !netif_running(dev))
996 return 0;
997
998 memset(&conf, 0, sizeof(conf));
999 conf.type = sdata->vif.type;
1000 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
1001 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
1002 conf.bssid = sdata->u.sta.bssid;
1003 conf.ssid = sdata->u.sta.ssid;
1004 conf.ssid_len = sdata->u.sta.ssid_len;
1005 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
1006 conf.beacon = beacon;
1007 ieee80211_start_mesh(dev);
1008 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
1009 conf.ssid = sdata->u.ap.ssid;
1010 conf.ssid_len = sdata->u.ap.ssid_len;
1011 conf.beacon = beacon;
1012 }
1013 return local->ops->config_interface(local_to_hw(local),
1014 &sdata->vif, &conf);
1015 }
1016
1017 int ieee80211_if_config(struct net_device *dev)
1018 {
1019 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1020 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1021 if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
1022 (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
1023 return ieee80211_if_config_beacon(dev);
1024 return __ieee80211_if_config(dev, NULL);
1025 }
1026
1027 int ieee80211_if_config_beacon(struct net_device *dev)
1028 {
1029 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1030 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1031 struct sk_buff *skb;
1032
1033 if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
1034 return 0;
1035 skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif);
1036 if (!skb)
1037 return -ENOMEM;
1038 return __ieee80211_if_config(dev, skb);
1039 }
1040
1041 int ieee80211_hw_config(struct ieee80211_local *local)
1042 {
1043 struct ieee80211_channel *chan;
1044 int ret = 0;
1045
1046 if (local->sta_sw_scanning)
1047 chan = local->scan_channel;
1048 else
1049 chan = local->oper_channel;
1050
1051 local->hw.conf.channel = chan;
1052
1053 if (!local->hw.conf.power_level)
1054 local->hw.conf.power_level = chan->max_power;
1055 else
1056 local->hw.conf.power_level = min(chan->max_power,
1057 local->hw.conf.power_level);
1058
1059 local->hw.conf.max_antenna_gain = chan->max_antenna_gain;
1060
1061 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1062 printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
1063 wiphy_name(local->hw.wiphy), chan->center_freq);
1064 #endif
1065
1066 if (local->open_count)
1067 ret = local->ops->config(local_to_hw(local), &local->hw.conf);
1068
1069 return ret;
1070 }
1071
1072 /**
1073 * ieee80211_handle_ht should be used only after the legacy configuration
1074 * has been determined, namely the band, as the HT configuration depends
1075 * upon the hardware's HT abilities for a _specific_ band.
1076 */
1077 u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
1078 struct ieee80211_ht_info *req_ht_cap,
1079 struct ieee80211_ht_bss_info *req_bss_cap)
1080 {
1081 struct ieee80211_conf *conf = &local->hw.conf;
1082 struct ieee80211_supported_band *sband;
1083 struct ieee80211_ht_info ht_conf;
1084 struct ieee80211_ht_bss_info ht_bss_conf;
1085 u32 changed = 0;
1086 int i;
1087 u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
1088 u8 tx_mcs_set_cap;
1089
1090 sband = local->hw.wiphy->bands[conf->channel->band];
1091
1092 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
1093 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));
1094
1095 /* HT is not supported */
1096 if (!sband->ht_info.ht_supported) {
1097 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1098 goto out;
1099 }
1100
1101 /* disable HT */
1102 if (!enable_ht) {
1103 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
1104 changed |= BSS_CHANGED_HT;
1105 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1106 conf->ht_conf.ht_supported = 0;
1107 goto out;
1108 }
1109
1110
1111 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1112 changed |= BSS_CHANGED_HT;
1113
1114 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
1115 ht_conf.ht_supported = 1;
1116
1117 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
1118 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
1119 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
1120 ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
1121 ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
1122 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
1123
1124 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
1125 ht_conf.ampdu_density = req_ht_cap->ampdu_density;
1126
1127 /* Bits 96-100 */
1128 tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];
1129
1130 /* configure supported Tx MCS according to the requested MCS
1131 * (based in most cases on the Rx capabilities of the peer) and our own
1132 * Tx MCS capabilities (as defined by the low-level driver HW
1133 * Tx capabilities) */
1134 if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
1135 goto check_changed;
1136
1137 /* Counting from 0, therefore + 1 */
1138 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
1139 max_tx_streams = ((tx_mcs_set_cap &
1140 IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;
1141
1142 for (i = 0; i < max_tx_streams; i++)
1143 ht_conf.supp_mcs_set[i] =
1144 sband->ht_info.supp_mcs_set[i] &
1145 req_ht_cap->supp_mcs_set[i];
1146
1147 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
1148 for (i = IEEE80211_SUPP_MCS_SET_UEQM;
1149 i < IEEE80211_SUPP_MCS_SET_LEN; i++)
1150 ht_conf.supp_mcs_set[i] =
1151 sband->ht_info.supp_mcs_set[i] &
1152 req_ht_cap->supp_mcs_set[i];
1153
1154 check_changed:
1155 /* if the BSS configuration changed, store the new one */
1156 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
1157 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
1158 changed |= BSS_CHANGED_HT;
1159 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
1160 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
1161 }
1162 out:
1163 return changed;
1164 }
1165
1166 void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
1167 u32 changed)
1168 {
1169 struct ieee80211_local *local = sdata->local;
1170
1171 if (!changed)
1172 return;
1173
1174 if (local->ops->bss_info_changed)
1175 local->ops->bss_info_changed(local_to_hw(local),
1176 &sdata->vif,
1177 &sdata->bss_conf,
1178 changed);
1179 }
1180
1181 u32 ieee80211_reset_erp_info(struct net_device *dev)
1182 {
1183 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1184
1185 sdata->bss_conf.use_cts_prot = 0;
1186 sdata->bss_conf.use_short_preamble = 0;
1187 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE;
1188 }
1189
1190 void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
1191 struct sk_buff *skb)
1192 {
1193 struct ieee80211_local *local = hw_to_local(hw);
1194 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1195 int tmp;
1196
1197 skb->dev = local->mdev;
1198 skb->pkt_type = IEEE80211_TX_STATUS_MSG;
1199 skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
1200 &local->skb_queue : &local->skb_queue_unreliable, skb);
1201 tmp = skb_queue_len(&local->skb_queue) +
1202 skb_queue_len(&local->skb_queue_unreliable);
1203 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
1204 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
1205 dev_kfree_skb_irq(skb);
1206 tmp--;
1207 I802_DEBUG_INC(local->tx_status_drop);
1208 }
1209 tasklet_schedule(&local->tasklet);
1210 }
1211 EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
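/*
 * Sketch of a driver Tx-completion interrupt handler feeding the entry
 * point above: the driver fills in the per-skb ieee80211_tx_info status
 * and hands the frame back from hard-IRQ context; the tasklet then runs
 * the full ieee80211_tx_status() path in softirq context.  The myhw_*
 * name and the "acked"/"retries" values coming from hardware are
 * illustrative assumptions.
 */
static void myhw_tx_done_irq(struct ieee80211_hw *hw, struct sk_buff *skb,
			     bool acked, u8 retries)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;
	info->status.retry_count = retries;

	/* frames without IEEE80211_TX_CTL_REQ_TX_STATUS may be dropped
	 * once IEEE80211_IRQSAFE_QUEUE_LIMIT is exceeded */
	ieee80211_tx_status_irqsafe(hw, skb);
}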
1212
1213 static void ieee80211_tasklet_handler(unsigned long data)
1214 {
1215 struct ieee80211_local *local = (struct ieee80211_local *) data;
1216 struct sk_buff *skb;
1217 struct ieee80211_rx_status rx_status;
1218 struct ieee80211_ra_tid *ra_tid;
1219
1220 while ((skb = skb_dequeue(&local->skb_queue)) ||
1221 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
1222 switch (skb->pkt_type) {
1223 case IEEE80211_RX_MSG:
1224 /* status is in skb->cb */
1225 memcpy(&rx_status, skb->cb, sizeof(rx_status));
1226 /* Clear skb->pkt_type in order to not confuse kernel
1227 * netstack. */
1228 skb->pkt_type = 0;
1229 __ieee80211_rx(local_to_hw(local), skb, &rx_status);
1230 break;
1231 case IEEE80211_TX_STATUS_MSG:
1232 skb->pkt_type = 0;
1233 ieee80211_tx_status(local_to_hw(local), skb);
1234 break;
1235 case IEEE80211_DELBA_MSG:
1236 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
1237 ieee80211_stop_tx_ba_cb(local_to_hw(local),
1238 ra_tid->ra, ra_tid->tid);
1239 dev_kfree_skb(skb);
1240 break;
1241 case IEEE80211_ADDBA_MSG:
1242 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
1243 ieee80211_start_tx_ba_cb(local_to_hw(local),
1244 ra_tid->ra, ra_tid->tid);
1245 dev_kfree_skb(skb);
1246 break;
1247 default:
1248 WARN_ON(1);
1249 dev_kfree_skb(skb);
1250 break;
1251 }
1252 }
1253 }
1254
1255 /* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to
1256 * make a prepared TX frame (one that has been given to the hw) look like a
1257 * brand new IEEE 802.11 frame that is ready to go through TX processing again.
1258 * Also, tx_packet_data in cb is restored from tx_control. */
1259 static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1260 struct ieee80211_key *key,
1261 struct sk_buff *skb)
1262 {
1263 int hdrlen, iv_len, mic_len;
1264 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1265
1266 info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS |
1267 IEEE80211_TX_CTL_DO_NOT_ENCRYPT |
1268 IEEE80211_TX_CTL_REQUEUE |
1269 IEEE80211_TX_CTL_EAPOL_FRAME;
1270
1271 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1272
1273 if (!key)
1274 goto no_key;
1275
1276 switch (key->conf.alg) {
1277 case ALG_WEP:
1278 iv_len = WEP_IV_LEN;
1279 mic_len = WEP_ICV_LEN;
1280 break;
1281 case ALG_TKIP:
1282 iv_len = TKIP_IV_LEN;
1283 mic_len = TKIP_ICV_LEN;
1284 break;
1285 case ALG_CCMP:
1286 iv_len = CCMP_HDR_LEN;
1287 mic_len = CCMP_MIC_LEN;
1288 break;
1289 default:
1290 goto no_key;
1291 }
1292
1293 if (skb->len >= mic_len &&
1294 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
1295 skb_trim(skb, skb->len - mic_len);
1296 if (skb->len >= iv_len && skb->len > hdrlen) {
1297 memmove(skb->data + iv_len, skb->data, hdrlen);
1298 skb_pull(skb, iv_len);
1299 }
1300
1301 no_key:
1302 {
1303 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1304 u16 fc = le16_to_cpu(hdr->frame_control);
1305 if ((fc & 0x8C) == 0x88) /* QoS Control Field */ {
1306 fc &= ~IEEE80211_STYPE_QOS_DATA;
1307 hdr->frame_control = cpu_to_le16(fc);
1308 memmove(skb->data + 2, skb->data, hdrlen - 2);
1309 skb_pull(skb, 2);
1310 }
1311 }
1312 }
1313
1314 static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1315 struct sta_info *sta,
1316 struct sk_buff *skb)
1317 {
1318 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1319
1320 sta->tx_filtered_count++;
1321
1322 /*
1323 * Clear the TX filter mask for this STA when sending the next
1324 * packet. If the STA went to power save mode, this will happen
1325 * when it wakes up for the next time.
1326 */
1327 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
1328
1329 /*
1330 * This code races in the following way:
1331 *
1332 * (1) STA sends frame indicating it will go to sleep and does so
1333 * (2) hardware/firmware adds STA to filter list, passes frame up
1334 * (3) hardware/firmware processes TX fifo and suppresses a frame
1335 * (4) we get TX status before having processed the frame and
1336 * knowing that the STA has gone to sleep.
1337 *
1338 * This is actually quite unlikely even when both those events are
1339 * processed from interrupts coming in quickly after one another or
1340 * even at the same time because we queue both TX status events and
1341 * RX frames to be processed by a tasklet and process them in the
1342 * same order that they were received or TX status last. Hence, there
1343 * is no race as long as the frame RX is processed before the next TX
1344 * status, which drivers can ensure, see below.
1345 *
1346 * Note that this can only happen if the hardware or firmware can
1347 * actually add STAs to the filter list itself; if this is done by the
1348 * driver in response to set_tim() (which will only reduce the race
1349 * this whole filtering tries to solve, not completely solve it),
1350 * this situation cannot happen.
1351 *
1352 * To completely solve this race drivers need to make sure that they
1353 * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
1354 * functions and
1355 * (b) always process RX events before TX status events if ordering
1356 * can be unknown, for example with different interrupt status
1357 * bits.
1358 */
1359 if (test_sta_flags(sta, WLAN_STA_PS) &&
1360 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
1361 ieee80211_remove_tx_extra(local, sta->key, skb);
1362 skb_queue_tail(&sta->tx_filtered, skb);
1363 return;
1364 }
1365
1366 if (!test_sta_flags(sta, WLAN_STA_PS) &&
1367 !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
1368 /* Software retry the packet once */
1369 info->flags |= IEEE80211_TX_CTL_REQUEUE;
1370 ieee80211_remove_tx_extra(local, sta->key, skb);
1371 dev_queue_xmit(skb);
1372 return;
1373 }
1374
1375 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1376 if (net_ratelimit())
1377 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
1378 "queue_len=%d PS=%d @%lu\n",
1379 wiphy_name(local->hw.wiphy),
1380 skb_queue_len(&sta->tx_filtered),
1381 !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
1382 #endif
1383 dev_kfree_skb(skb);
1384 }
1385
1386 void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1387 {
1388 struct sk_buff *skb2;
1389 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1390 struct ieee80211_local *local = hw_to_local(hw);
1391 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1392 u16 frag, type;
1393 __le16 fc;
1394 struct ieee80211_tx_status_rtap_hdr *rthdr;
1395 struct ieee80211_sub_if_data *sdata;
1396 struct net_device *prev_dev = NULL;
1397 struct sta_info *sta;
1398
1399 rcu_read_lock();
1400
1401 if (info->status.excessive_retries) {
1402 sta = sta_info_get(local, hdr->addr1);
1403 if (sta) {
1404 if (test_sta_flags(sta, WLAN_STA_PS)) {
1405 /*
1406 * The STA is in power save mode, so assume
1407 * that this TX packet failed because of that.
1408 */
1409 ieee80211_handle_filtered_frame(local, sta, skb);
1410 rcu_read_unlock();
1411 return;
1412 }
1413 }
1414 }
1415
1416 fc = hdr->frame_control;
1417
1418 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
1419 (ieee80211_is_data_qos(fc))) {
1420 u16 tid, ssn;
1421 u8 *qc;
1422 sta = sta_info_get(local, hdr->addr1);
1423 if (sta) {
1424 qc = ieee80211_get_qos_ctl(hdr);
1425 tid = qc[0] & 0xf;
1426 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
1427 & IEEE80211_SCTL_SEQ);
1428 ieee80211_send_bar(sta->sdata->dev, hdr->addr1,
1429 tid, ssn);
1430 }
1431 }
1432
1433 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1434 sta = sta_info_get(local, hdr->addr1);
1435 if (sta) {
1436 ieee80211_handle_filtered_frame(local, sta, skb);
1437 rcu_read_unlock();
1438 return;
1439 }
1440 } else
1441 rate_control_tx_status(local->mdev, skb);
1442
1443 rcu_read_unlock();
1444
1445 ieee80211_led_tx(local, 0);
1446
1447 /* SNMP counters
1448 * Fragments are passed to low-level drivers as separate skbs, so these
1449 * are actually fragments, not frames. Update frame counters only for
1450 * the first fragment of the frame. */
1451
1452 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1453 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
1454
1455 if (info->flags & IEEE80211_TX_STAT_ACK) {
1456 if (frag == 0) {
1457 local->dot11TransmittedFrameCount++;
1458 if (is_multicast_ether_addr(hdr->addr1))
1459 local->dot11MulticastTransmittedFrameCount++;
1460 if (info->status.retry_count > 0)
1461 local->dot11RetryCount++;
1462 if (info->status.retry_count > 1)
1463 local->dot11MultipleRetryCount++;
1464 }
1465
1466 /* This counter shall be incremented for an acknowledged MPDU
1467 * with an individual address in the address 1 field or an MPDU
1468 * with a multicast address in the address 1 field of type Data
1469 * or Management. */
1470 if (!is_multicast_ether_addr(hdr->addr1) ||
1471 type == IEEE80211_FTYPE_DATA ||
1472 type == IEEE80211_FTYPE_MGMT)
1473 local->dot11TransmittedFragmentCount++;
1474 } else {
1475 if (frag == 0)
1476 local->dot11FailedCount++;
1477 }
1478
1479 /* this was a transmitted frame, but now we want to reuse it */
1480 skb_orphan(skb);
1481
1482 /*
1483 * This is a bit racy but we can avoid a lot of work
1484 * with this test...
1485 */
1486 if (!local->monitors && !local->cooked_mntrs) {
1487 dev_kfree_skb(skb);
1488 return;
1489 }
1490
1491 /* send frame to monitor interfaces now */
1492
1493 if (skb_headroom(skb) < sizeof(*rthdr)) {
1494 printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
1495 dev_kfree_skb(skb);
1496 return;
1497 }
1498
1499 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
1500 skb_push(skb, sizeof(*rthdr));
1501
1502 memset(rthdr, 0, sizeof(*rthdr));
1503 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1504 rthdr->hdr.it_present =
1505 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
1506 (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
1507
1508 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
1509 !is_multicast_ether_addr(hdr->addr1))
1510 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
1511
1512 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
1513 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
1514 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
1515 else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
1516 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
1517
1518 rthdr->data_retries = info->status.retry_count;
1519
1520 /* XXX: is this sufficient for BPF? */
1521 skb_set_mac_header(skb, 0);
1522 skb->ip_summed = CHECKSUM_UNNECESSARY;
1523 skb->pkt_type = PACKET_OTHERHOST;
1524 skb->protocol = htons(ETH_P_802_2);
1525 memset(skb->cb, 0, sizeof(skb->cb));
1526
1527 rcu_read_lock();
1528 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1529 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
1530 if (!netif_running(sdata->dev))
1531 continue;
1532
1533 if (prev_dev) {
1534 skb2 = skb_clone(skb, GFP_ATOMIC);
1535 if (skb2) {
1536 skb2->dev = prev_dev;
1537 netif_rx(skb2);
1538 }
1539 }
1540
1541 prev_dev = sdata->dev;
1542 }
1543 }
1544 if (prev_dev) {
1545 skb->dev = prev_dev;
1546 netif_rx(skb);
1547 skb = NULL;
1548 }
1549 rcu_read_unlock();
1550 dev_kfree_skb(skb);
1551 }
1552 EXPORT_SYMBOL(ieee80211_tx_status);
1553
1554 struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1555 const struct ieee80211_ops *ops)
1556 {
1557 struct ieee80211_local *local;
1558 int priv_size;
1559 struct wiphy *wiphy;
1560
1561 /* Ensure 32-byte alignment of our private data and hw private data.
1562 * We use the wiphy priv data for both our ieee80211_local and for
1563 * the driver's private data
1564 *
1565 * In memory it'll be like this:
1566 *
1567 * +-------------------------+
1568 * | struct wiphy |
1569 * +-------------------------+
1570 * | struct ieee80211_local |
1571 * +-------------------------+
1572 * | driver's private data |
1573 * +-------------------------+
1574 *
1575 */
1576 priv_size = ((sizeof(struct ieee80211_local) +
1577 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
1578 priv_data_len;
1579
1580 wiphy = wiphy_new(&mac80211_config_ops, priv_size);
1581
1582 if (!wiphy)
1583 return NULL;
1584
1585 wiphy->privid = mac80211_wiphy_privid;
1586
1587 local = wiphy_priv(wiphy);
1588 local->hw.wiphy = wiphy;
1589
1590 local->hw.priv = (char *)local +
1591 ((sizeof(struct ieee80211_local) +
1592 NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
1593
1594 BUG_ON(!ops->tx);
1595 BUG_ON(!ops->start);
1596 BUG_ON(!ops->stop);
1597 BUG_ON(!ops->config);
1598 BUG_ON(!ops->add_interface);
1599 BUG_ON(!ops->remove_interface);
1600 BUG_ON(!ops->configure_filter);
1601 local->ops = ops;
1602
1603 local->hw.queues = 1; /* default */
1604
1605 local->bridge_packets = 1;
1606
1607 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
1608 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
1609 local->short_retry_limit = 7;
1610 local->long_retry_limit = 4;
1611 local->hw.conf.radio_enabled = 1;
1612
1613 INIT_LIST_HEAD(&local->interfaces);
1614
1615 spin_lock_init(&local->key_lock);
1616
1617 INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);
1618
1619 sta_info_init(local);
1620
1621 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
1622 (unsigned long)local);
1623 tasklet_disable(&local->tx_pending_tasklet);
1624
1625 tasklet_init(&local->tasklet,
1626 ieee80211_tasklet_handler,
1627 (unsigned long) local);
1628 tasklet_disable(&local->tasklet);
1629
1630 skb_queue_head_init(&local->skb_queue);
1631 skb_queue_head_init(&local->skb_queue_unreliable);
1632
1633 return local_to_hw(local);
1634 }
1635 EXPORT_SYMBOL(ieee80211_alloc_hw);
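/*
 * Sketch of the allocation step in a driver's probe path, matching the
 * memory layout documented above: the wiphy private area holds the
 * ieee80211_local followed by the driver's own private struct, reachable
 * through hw->priv.  The myhw_* names are hypothetical and the callbacks
 * would be implemented elsewhere in the driver; the BUG_ON()s above list
 * the ones that must at least be provided.
 */
struct myhw_priv {
	void __iomem *regs;			/* hypothetical device state */
};

static const struct ieee80211_ops myhw_ops = {
	.tx			= myhw_tx,
	.start			= myhw_start,
	.stop			= myhw_stop,
	.config			= myhw_config,
	.add_interface		= myhw_add_interface,
	.remove_interface	= myhw_remove_interface,
	.configure_filter	= myhw_configure_filter,
};

static struct ieee80211_hw *myhw_alloc(void)
{
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct myhw_priv), &myhw_ops);
	if (!hw)
		return NULL;

	/* driver private data lives right behind struct ieee80211_local */
	memset(hw->priv, 0, sizeof(struct myhw_priv));
	return hw;
}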
1636
1637 int ieee80211_register_hw(struct ieee80211_hw *hw)
1638 {
1639 struct ieee80211_local *local = hw_to_local(hw);
1640 const char *name;
1641 int result;
1642 enum ieee80211_band band;
1643 struct net_device *mdev;
1644 struct wireless_dev *mwdev;
1645
1646 /*
1647 * generic code guarantees at least one band,
1648 * set this very early because much code assumes
1649 * that hw.conf.channel is assigned
1650 */
1651 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1652 struct ieee80211_supported_band *sband;
1653
1654 sband = local->hw.wiphy->bands[band];
1655 if (sband) {
1656 /* init channel we're on */
1657 local->hw.conf.channel =
1658 local->oper_channel =
1659 local->scan_channel = &sband->channels[0];
1660 break;
1661 }
1662 }
1663
1664 result = wiphy_register(local->hw.wiphy);
1665 if (result < 0)
1666 return result;
1667
1668 /*
1669 * We use the number of queues for feature tests (QoS, HT) internally
1670 * so restrict them appropriately.
1671 */
1672 #ifdef CONFIG_MAC80211_QOS
1673 if (hw->queues > IEEE80211_MAX_QUEUES)
1674 hw->queues = IEEE80211_MAX_QUEUES;
1675 if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
1676 hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
1677 if (hw->queues < 4)
1678 hw->ampdu_queues = 0;
1679 #else
1680 hw->queues = 1;
1681 hw->ampdu_queues = 0;
1682 #endif
1683
1684 mdev = alloc_netdev_mq(sizeof(struct wireless_dev),
1685 "wmaster%d", ether_setup,
1686 ieee80211_num_queues(hw));
1687 if (!mdev)
1688 goto fail_mdev_alloc;
1689
1690 if (ieee80211_num_queues(hw) > 1)
1691 mdev->features |= NETIF_F_MULTI_QUEUE;
1692
1693 mwdev = netdev_priv(mdev);
1694 mdev->ieee80211_ptr = mwdev;
1695 mwdev->wiphy = local->hw.wiphy;
1696
1697 local->mdev = mdev;
1698
1699 ieee80211_rx_bss_list_init(local);
1700
1701 mdev->hard_start_xmit = ieee80211_master_start_xmit;
1702 mdev->open = ieee80211_master_open;
1703 mdev->stop = ieee80211_master_stop;
1704 mdev->type = ARPHRD_IEEE80211;
1705 mdev->header_ops = &ieee80211_header_ops;
1706 mdev->set_multicast_list = ieee80211_master_set_multicast_list;
1707
1708 name = wiphy_dev(local->hw.wiphy)->driver->name;
1709 local->hw.workqueue = create_freezeable_workqueue(name);
1710 if (!local->hw.workqueue) {
1711 result = -ENOMEM;
1712 goto fail_workqueue;
1713 }
1714
1715 /*
1716 * The hardware needs headroom for sending the frame,
1717 * and we need some headroom for passing the frame to monitor
1718 * interfaces, but never both at the same time.
1719 */
1720 local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
1721 sizeof(struct ieee80211_tx_status_rtap_hdr));
1722
1723 debugfs_hw_add(local);
1724
1725 if (local->hw.conf.beacon_int < 10)
1726 local->hw.conf.beacon_int = 100;
1727
1728 local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
1729 IEEE80211_HW_SIGNAL_DB |
1730 IEEE80211_HW_SIGNAL_DBM) ?
1731 IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
1732 local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
1733 IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
1734 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
1735 local->wstats_flags |= IW_QUAL_DBM;
1736
1737 result = sta_info_start(local);
1738 if (result < 0)
1739 goto fail_sta_info;
1740
1741 rtnl_lock();
1742 result = dev_alloc_name(local->mdev, local->mdev->name);
1743 if (result < 0)
1744 goto fail_dev;
1745
1746 memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
1747 SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));
1748
1749 result = register_netdevice(local->mdev);
1750 if (result < 0)
1751 goto fail_dev;
1752
1753 result = ieee80211_init_rate_ctrl_alg(local,
1754 hw->rate_control_algorithm);
1755 if (result < 0) {
1756 printk(KERN_DEBUG "%s: Failed to initialize rate control "
1757 "algorithm\n", wiphy_name(local->hw.wiphy));
1758 goto fail_rate;
1759 }
1760
1761 result = ieee80211_wep_init(local);
1762
1763 if (result < 0) {
1764 printk(KERN_DEBUG "%s: Failed to initialize wep\n",
1765 wiphy_name(local->hw.wiphy));
1766 goto fail_wep;
1767 }
1768
1769 ieee80211_install_qdisc(local->mdev);
1770
1771 /* add one default STA interface */
1772 result = ieee80211_if_add(local, "wlan%d", NULL,
1773 IEEE80211_IF_TYPE_STA, NULL);
1774 if (result)
1775 printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
1776 wiphy_name(local->hw.wiphy));
1777
1778 rtnl_unlock();
1779
1780 ieee80211_led_init(local);
1781
1782 return 0;
1783
1784 fail_wep:
1785 rate_control_deinitialize(local);
1786 fail_rate:
1787 unregister_netdevice(local->mdev);
1788 local->mdev = NULL;
1789 fail_dev:
1790 rtnl_unlock();
1791 sta_info_stop(local);
1792 fail_sta_info:
1793 debugfs_hw_del(local);
1794 destroy_workqueue(local->hw.workqueue);
1795 fail_workqueue:
1796 if (local->mdev)
1797 free_netdev(local->mdev);
1798 fail_mdev_alloc:
1799 wiphy_unregister(local->hw.wiphy);
1800 return result;
1801 }
1802 EXPORT_SYMBOL(ieee80211_register_hw);
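/*
 * Sketch of how a driver would finish bringing the device up with the
 * function above: after ieee80211_alloc_hw() it ties the hardware to its
 * struct device, publishes at least one band and registers.  The myhw_*
 * names and the band argument are assumptions; only the mac80211 calls
 * are taken from this file and its headers.
 */
static int myhw_setup(struct ieee80211_hw *hw, struct device *dev,
		      struct ieee80211_supported_band *band_2ghz)
{
	int err;

	SET_IEEE80211_DEV(hw, dev);		/* parent device for sysfs */
	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band_2ghz;
	hw->queues = 1;				/* hardware Tx queues */

	err = ieee80211_register_hw(hw);
	if (err) {
		ieee80211_free_hw(hw);
		return err;
	}
	return 0;
}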
1803
1804 void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1805 {
1806 struct ieee80211_local *local = hw_to_local(hw);
1807
1808 tasklet_kill(&local->tx_pending_tasklet);
1809 tasklet_kill(&local->tasklet);
1810
1811 rtnl_lock();
1812
1813 /*
1814 * At this point, interface list manipulations are fine
1815 * because the driver cannot be handing us frames any
1816 * more and the tasklet is killed.
1817 */
1818
1819 /* First, we remove all virtual interfaces. */
1820 ieee80211_remove_interfaces(local);
1821
1822 /* then, finally, remove the master interface */
1823 unregister_netdevice(local->mdev);
1824
1825 rtnl_unlock();
1826
1827 ieee80211_rx_bss_list_deinit(local);
1828 ieee80211_clear_tx_pending(local);
1829 sta_info_stop(local);
1830 rate_control_deinitialize(local);
1831 debugfs_hw_del(local);
1832
1833 if (skb_queue_len(&local->skb_queue)
1834 || skb_queue_len(&local->skb_queue_unreliable))
1835 printk(KERN_WARNING "%s: skb_queue not empty\n",
1836 wiphy_name(local->hw.wiphy));
1837 skb_queue_purge(&local->skb_queue);
1838 skb_queue_purge(&local->skb_queue_unreliable);
1839
1840 destroy_workqueue(local->hw.workqueue);
1841 wiphy_unregister(local->hw.wiphy);
1842 ieee80211_wep_free(local);
1843 ieee80211_led_exit(local);
1844 free_netdev(local->mdev);
1845 }
1846 EXPORT_SYMBOL(ieee80211_unregister_hw);
1847
1848 void ieee80211_free_hw(struct ieee80211_hw *hw)
1849 {
1850 struct ieee80211_local *local = hw_to_local(hw);
1851
1852 wiphy_free(local->hw.wiphy);
1853 }
1854 EXPORT_SYMBOL(ieee80211_free_hw);
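/*
 * Sketch of the matching teardown order in a driver's remove path: the
 * virtual interfaces and the master device are removed by
 * ieee80211_unregister_hw(), and only afterwards may the wiphy (and with
 * it hw->priv) be released.  myhw_remove() is a hypothetical name.
 */
static void myhw_remove(struct ieee80211_hw *hw)
{
	ieee80211_unregister_hw(hw);	/* stops RX/TX, removes interfaces */
	ieee80211_free_hw(hw);		/* frees the wiphy and hw->priv */
}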
1855
1856 static int __init ieee80211_init(void)
1857 {
1858 struct sk_buff *skb;
1859 int ret;
1860
1861 BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
1862 BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
1863 IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));
1864
1865 ret = rc80211_pid_init();
1866 if (ret)
1867 goto out;
1868
1869 ret = ieee80211_wme_register();
1870 if (ret) {
1871 printk(KERN_DEBUG "ieee80211_init: failed to "
1872 "initialize WME (err=%d)\n", ret);
1873 goto out_cleanup_pid;
1874 }
1875
1876 ieee80211_debugfs_netdev_init();
1877
1878 return 0;
1879
1880 out_cleanup_pid:
1881 rc80211_pid_exit();
1882 out:
1883 return ret;
1884 }
1885
1886 static void __exit ieee80211_exit(void)
1887 {
1888 rc80211_pid_exit();
1889
1890 /*
1891 * The key todo list will be empty by now, but the
1892 * work might still be scheduled.
1893 */
1894 flush_scheduled_work();
1895
1896 if (mesh_allocated)
1897 ieee80211s_stop();
1898
1899 ieee80211_wme_unregister();
1900 ieee80211_debugfs_netdev_exit();
1901 }
1902
1903
1904 subsys_initcall(ieee80211_init);
1905 module_exit(ieee80211_exit);
1906
1907 MODULE_DESCRIPTION("IEEE 802.11 subsystem");
1908 MODULE_LICENSE("GPL");