mac80211: 802.11w - Drop unprotected robust management frames if MFP is used
net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "led.h"
23 #include "mesh.h"
24 #include "wep.h"
25 #include "wpa.h"
26 #include "tkip.h"
27 #include "wme.h"
28
29 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
30 struct tid_ampdu_rx *tid_agg_rx,
31 struct sk_buff *skb,
32 u16 mpdu_seq_num,
33 int bar_req);
34 /*
35 * monitor mode reception
36 *
37 * This function cleans up the SKB, i.e. it removes all the stuff
38 * only useful for monitoring.
39 */
40 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
41 struct sk_buff *skb,
42 int rtap_len)
43 {
44 skb_pull(skb, rtap_len);
45
46 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
47 if (likely(skb->len > FCS_LEN))
48 skb_trim(skb, skb->len - FCS_LEN);
49 else {
50 /* driver bug */
51 WARN_ON(1);
52 dev_kfree_skb(skb);
53 skb = NULL;
54 }
55 }
56
57 return skb;
58 }
59
60 static inline int should_drop_frame(struct ieee80211_rx_status *status,
61 struct sk_buff *skb,
62 int present_fcs_len,
63 int radiotap_len)
64 {
65 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
66
67 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
68 return 1;
69 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
70 return 1;
71 if (ieee80211_is_ctl(hdr->frame_control) &&
72 !ieee80211_is_pspoll(hdr->frame_control) &&
73 !ieee80211_is_back_req(hdr->frame_control))
74 return 1;
75 return 0;
76 }
77
78 static int
79 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
80 struct ieee80211_rx_status *status)
81 {
82 int len;
83
84 /* always present fields */
85 len = sizeof(struct ieee80211_radiotap_header) + 9;
86
87 if (status->flag & RX_FLAG_TSFT)
88 len += 8;
89 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB ||
90 local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
91 len += 1;
92 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
93 len += 1;
94
95 if (len & 1) /* padding for RX_FLAGS if necessary */
96 len++;
97
98 /* make sure radiotap starts at a naturally aligned address */
99 if (len % 8)
100 len = roundup(len, 8);
101
102 return len;
103 }
104
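/*
 * Worked example (added for illustration): on hardware that reports the
 * TSFT and a dBm signal, the length computed above is
 *	sizeof(struct ieee80211_radiotap_header) (8) + 9 + 8 + 1 = 26,
 * which is already even, and the final roundup to a multiple of 8
 * yields 32 bytes of headroom for the radiotap header.
 */
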
105 /*
106 * ieee80211_add_rx_radiotap_header - add radiotap header
107 *
108 * add a radiotap header containing all the fields which the hardware provided.
109 */
110 static void
111 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
112 struct sk_buff *skb,
113 struct ieee80211_rx_status *status,
114 struct ieee80211_rate *rate,
115 int rtap_len)
116 {
117 struct ieee80211_radiotap_header *rthdr;
118 unsigned char *pos;
119
120 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
121 memset(rthdr, 0, rtap_len);
122
123 /* radiotap header, set always present flags */
124 rthdr->it_present =
125 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
126 (1 << IEEE80211_RADIOTAP_CHANNEL) |
127 (1 << IEEE80211_RADIOTAP_ANTENNA) |
128 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
129 rthdr->it_len = cpu_to_le16(rtap_len);
130
131 pos = (unsigned char *)(rthdr+1);
132
133 /* the order of the following fields is important */
134
135 /* IEEE80211_RADIOTAP_TSFT */
136 if (status->flag & RX_FLAG_TSFT) {
137 *(__le64 *)pos = cpu_to_le64(status->mactime);
138 rthdr->it_present |=
139 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
140 pos += 8;
141 }
142
143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 if (status->flag & RX_FLAG_SHORTPRE)
147 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
148 pos++;
149
150 /* IEEE80211_RADIOTAP_RATE */
151 if (status->flag & RX_FLAG_HT) {
152 /*
153 * TODO: add following information into radiotap header once
154 * suitable fields are defined for it:
155 * - MCS index (status->rate_idx)
156 * - HT40 (status->flag & RX_FLAG_40MHZ)
157 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
158 */
159 *pos = 0;
160 } else {
161 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
162 *pos = rate->bitrate / 5;
163 }
164 pos++;
165
166 /* IEEE80211_RADIOTAP_CHANNEL */
167 *(__le16 *)pos = cpu_to_le16(status->freq);
168 pos += 2;
169 if (status->band == IEEE80211_BAND_5GHZ)
170 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
171 IEEE80211_CHAN_5GHZ);
172 else if (rate->flags & IEEE80211_RATE_ERP_G)
173 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
174 IEEE80211_CHAN_2GHZ);
175 else
176 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
177 IEEE80211_CHAN_2GHZ);
178 pos += 2;
179
180 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
181 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
182 *pos = status->signal;
183 rthdr->it_present |=
184 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
185 pos++;
186 }
187
188 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
189 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
190 *pos = status->noise;
191 rthdr->it_present |=
192 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
193 pos++;
194 }
195
196 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
197
198 /* IEEE80211_RADIOTAP_ANTENNA */
199 *pos = status->antenna;
200 pos++;
201
202 /* IEEE80211_RADIOTAP_DB_ANTSIGNAL */
203 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB) {
204 *pos = status->signal;
205 rthdr->it_present |=
206 cpu_to_le32(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL);
207 pos++;
208 }
209
210 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
211
212 /* IEEE80211_RADIOTAP_RX_FLAGS */
213 /* ensure 2 byte alignment for the 2 byte field as required */
214 if ((pos - (unsigned char *)rthdr) & 1)
215 pos++;
216 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
217 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
218 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
219 pos += 2;
220 }
221
222 /*
223 * This function copies a received frame to all monitor interfaces and
224 * returns a cleaned-up SKB that no longer includes the FCS nor the
225 * radiotap header the driver might have added.
226 */
227 static struct sk_buff *
228 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
229 struct ieee80211_rx_status *status,
230 struct ieee80211_rate *rate)
231 {
232 struct ieee80211_sub_if_data *sdata;
233 int needed_headroom = 0;
234 struct sk_buff *skb, *skb2;
235 struct net_device *prev_dev = NULL;
236 int present_fcs_len = 0;
237 int rtap_len = 0;
238
239 /*
240 * First, we may need to make a copy of the skb because
241 * (1) we need to modify it for radiotap (if not present), and
242 * (2) the other RX handlers will modify the skb we got.
243 *
244 * We don't need to, of course, if we aren't going to return
245 * the SKB because it has a bad FCS/PLCP checksum.
246 */
247 if (status->flag & RX_FLAG_RADIOTAP)
248 rtap_len = ieee80211_get_radiotap_len(origskb->data);
249 else
250 /* room for the radiotap header based on driver features */
251 needed_headroom = ieee80211_rx_radiotap_len(local, status);
252
253 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
254 present_fcs_len = FCS_LEN;
255
256 if (!local->monitors) {
257 if (should_drop_frame(status, origskb, present_fcs_len,
258 rtap_len)) {
259 dev_kfree_skb(origskb);
260 return NULL;
261 }
262
263 return remove_monitor_info(local, origskb, rtap_len);
264 }
265
266 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
267 /* only need to expand headroom if necessary */
268 skb = origskb;
269 origskb = NULL;
270
271 /*
272 * This shouldn't trigger often because most devices have an
273 * RX header they pull before we get here, and that should
274 * be big enough for our radiotap information. We should
275 * probably export the length to drivers so that we can have
276 * them allocate enough headroom to start with.
277 */
278 if (skb_headroom(skb) < needed_headroom &&
279 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
280 dev_kfree_skb(skb);
281 return NULL;
282 }
283 } else {
284 /*
285 * Need to make a copy and possibly remove radiotap header
286 * and FCS from the original.
287 */
288 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
289
290 origskb = remove_monitor_info(local, origskb, rtap_len);
291
292 if (!skb)
293 return origskb;
294 }
295
296 /* if necessary, prepend radiotap information */
297 if (!(status->flag & RX_FLAG_RADIOTAP))
298 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
299 needed_headroom);
300
301 skb_reset_mac_header(skb);
302 skb->ip_summed = CHECKSUM_UNNECESSARY;
303 skb->pkt_type = PACKET_OTHERHOST;
304 skb->protocol = htons(ETH_P_802_2);
305
306 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
307 if (!netif_running(sdata->dev))
308 continue;
309
310 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
311 continue;
312
313 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
314 continue;
315
316 if (prev_dev) {
317 skb2 = skb_clone(skb, GFP_ATOMIC);
318 if (skb2) {
319 skb2->dev = prev_dev;
320 netif_rx(skb2);
321 }
322 }
323
324 prev_dev = sdata->dev;
325 sdata->dev->stats.rx_packets++;
326 sdata->dev->stats.rx_bytes += skb->len;
327 }
328
329 if (prev_dev) {
330 skb->dev = prev_dev;
331 netif_rx(skb);
332 } else
333 dev_kfree_skb(skb);
334
335 return origskb;
336 }
337
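/*
 * Illustrative sketch (not part of the original file): a driver can avoid
 * the pskb_expand_head() fallback above by reserving spare headroom when
 * it allocates its RX buffers. The helper and the 64-byte constant are
 * made-up examples; the exact requirement depends on the flags evaluated
 * in ieee80211_rx_radiotap_len().
 */
#define EXAMPLE_RX_HEADROOM 64

static inline struct sk_buff *example_driver_alloc_rx_skb(unsigned int buf_len)
{
	struct sk_buff *skb = dev_alloc_skb(buf_len + EXAMPLE_RX_HEADROOM);

	if (skb)
		/* leave room in front for the radiotap header added later */
		skb_reserve(skb, EXAMPLE_RX_HEADROOM);
	return skb;
}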
338
339 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
340 {
341 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
342 int tid;
343
344 /* does the frame have a qos control field? */
345 if (ieee80211_is_data_qos(hdr->frame_control)) {
346 u8 *qc = ieee80211_get_qos_ctl(hdr);
347 /* frame has qos control */
348 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
349 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
350 rx->flags |= IEEE80211_RX_AMSDU;
351 else
352 rx->flags &= ~IEEE80211_RX_AMSDU;
353 } else {
354 /*
355 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
356 *
357 * Sequence numbers for management frames, QoS data
358 * frames with a broadcast/multicast address in the
359 * Address 1 field, and all non-QoS data frames sent
360 * by QoS STAs are assigned using an additional single
361 * modulo-4096 counter, [...]
362 *
363 * We also use that counter for non-QoS STAs.
364 */
365 tid = NUM_RX_DATA_QUEUES - 1;
366 }
367
368 rx->queue = tid;
369 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
370 * For now, set skb->priority to 0 for other cases. */
371 rx->skb->priority = (tid > 7) ? 0 : tid;
372 }
373
374 /**
375 * DOC: Packet alignment
376 *
377 * Drivers always need to pass packets that are aligned to two-byte boundaries
378 * to the stack.
379 *
380 * Additionally, drivers should, if possible, align the payload data in a way that
381 * guarantees that the contained IP header is aligned to a four-byte
382 * boundary. In the case of regular frames, this simply means aligning the
383 * payload to a four-byte boundary (because either the IP header is directly
384 * contained, or IV/RFC1042 headers that have a length divisible by four are
385 * in front of it).
386 *
387 * With A-MSDU frames, however, the payload data address modulo four must be
388 * two, because there are 14-byte 802.3 headers within the A-MSDU frames that
389 * push the IP header further back to a multiple of four again. Thankfully, the
390 * specs were sane enough this time around to require padding each A-MSDU
391 * subframe to a length that is a multiple of four.
392 *
393 * Padding such as that added by Atheros hardware between the 802.11 header and
394 * the payload is not supported; the driver is required to move the 802.11
395 * header so that it sits directly in front of the payload in that case.
396 */
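/*
 * Illustrative sketch (not part of the original file): a driver whose
 * hardware pads the 802.11 header out to a 4-byte boundary, as described
 * above, could strip that padding before handing the frame to mac80211.
 * The helper name is a made-up example.
 */
static inline void example_driver_strip_rx_pad(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	unsigned int padsize = hdrlen & 3;

	/* 802.11 header lengths are even, so padsize is 0 or 2 */
	if (!padsize || skb->len < hdrlen + padsize)
		return;

	/* move the header so it sits directly in front of the payload */
	memmove(skb->data + padsize, skb->data, hdrlen);
	skb_pull(skb, padsize);
}
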
397 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
398 {
399 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
400 int hdrlen;
401
402 #ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
403 return;
404 #endif
405
406 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
407 "unaligned packet at 0x%p\n", rx->skb->data))
408 return;
409
410 if (!ieee80211_is_data_present(hdr->frame_control))
411 return;
412
413 hdrlen = ieee80211_hdrlen(hdr->frame_control);
414 if (rx->flags & IEEE80211_RX_AMSDU)
415 hdrlen += ETH_HLEN;
416 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
417 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
418 }
419
420
421 /* rx handlers */
422
423 static ieee80211_rx_result debug_noinline
424 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
425 {
426 struct ieee80211_local *local = rx->local;
427 struct sk_buff *skb = rx->skb;
428
429 if (unlikely(local->hw_scanning))
430 return ieee80211_scan_rx(rx->sdata, skb, rx->status);
431
432 if (unlikely(local->sw_scanning)) {
433 /* drop all the other packets during a software scan anyway */
434 if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
435 != RX_QUEUED)
436 dev_kfree_skb(skb);
437 return RX_QUEUED;
438 }
439
440 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
441 /* scanning finished during invoking of handlers */
442 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
443 return RX_DROP_UNUSABLE;
444 }
445
446 return RX_CONTINUE;
447 }
448
449
450 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
451 {
452 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
453
454 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
455 return 0;
456
457 return ieee80211_is_robust_mgmt_frame(hdr);
458 }
459
460
461 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
462 {
463 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
464
465 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
466 return 0;
467
468 return ieee80211_is_robust_mgmt_frame(hdr);
469 }
470
471
472 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
473 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
474 {
475 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
476 struct ieee80211_mmie *mmie;
477
478 if (skb->len < 24 + sizeof(*mmie) ||
479 !is_multicast_ether_addr(hdr->da))
480 return -1;
481
482 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
483 return -1; /* not a robust management frame */
484
485 mmie = (struct ieee80211_mmie *)
486 (skb->data + skb->len - sizeof(*mmie));
487 if (mmie->element_id != WLAN_EID_MMIE ||
488 mmie->length != sizeof(*mmie) - 2)
489 return -1;
490
491 return le16_to_cpu(mmie->key_id);
492 }
493
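/*
 * For reference (a sketch of the layout assumed by
 * ieee80211_get_mmie_keyidx() above; the authoritative definition lives
 * in the ieee80211 headers): the MMIE is the last element of a
 * BIP-protected broadcast/multicast management frame,
 *
 *	element_id (1, WLAN_EID_MMIE)
 *	length     (1, sizeof(mmie) - 2)
 *	key_id     (2, little endian, 4 or 5)
 *	IPN        (6, replay counter)
 *	MIC        (8)
 *
 * which is why the key index is read from the tail of the skb.
 */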
494
495 static ieee80211_rx_result
496 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
497 {
498 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
499 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
500
501 if (ieee80211_is_data(hdr->frame_control)) {
502 if (!ieee80211_has_a4(hdr->frame_control))
503 return RX_DROP_MONITOR;
504 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
505 return RX_DROP_MONITOR;
506 }
507
508 /* If there is not an established peer link and this is not a peer link
509 * establishment frame, beacon or probe, drop the frame.
510 */
511
512 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
513 struct ieee80211_mgmt *mgmt;
514
515 if (!ieee80211_is_mgmt(hdr->frame_control))
516 return RX_DROP_MONITOR;
517
518 if (ieee80211_is_action(hdr->frame_control)) {
519 mgmt = (struct ieee80211_mgmt *)hdr;
520 if (mgmt->u.action.category != PLINK_CATEGORY)
521 return RX_DROP_MONITOR;
522 return RX_CONTINUE;
523 }
524
525 if (ieee80211_is_probe_req(hdr->frame_control) ||
526 ieee80211_is_probe_resp(hdr->frame_control) ||
527 ieee80211_is_beacon(hdr->frame_control))
528 return RX_CONTINUE;
529
530 return RX_DROP_MONITOR;
531
532 }
533
534 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
535
536 if (ieee80211_is_data(hdr->frame_control) &&
537 is_multicast_ether_addr(hdr->addr1) &&
538 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
539 return RX_DROP_MONITOR;
540 #undef msh_h_get
541
542 return RX_CONTINUE;
543 }
544
545
546 static ieee80211_rx_result debug_noinline
547 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
548 {
549 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
550
551 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
552 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
553 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
554 rx->sta->last_seq_ctrl[rx->queue] ==
555 hdr->seq_ctrl)) {
556 if (rx->flags & IEEE80211_RX_RA_MATCH) {
557 rx->local->dot11FrameDuplicateCount++;
558 rx->sta->num_duplicates++;
559 }
560 return RX_DROP_MONITOR;
561 } else
562 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
563 }
564
565 if (unlikely(rx->skb->len < 16)) {
566 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
567 return RX_DROP_MONITOR;
568 }
569
570 /* Drop disallowed frame classes based on STA auth/assoc state;
571 * IEEE 802.11, Chap 5.5.
572 *
573 * mac80211 filters only based on association state, i.e. it drops
574 * Class 3 frames from not associated stations. hostapd sends
575 * deauth/disassoc frames when needed. In addition, hostapd is
576 * responsible for filtering on both auth and assoc states.
577 */
578
579 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
580 return ieee80211_rx_mesh_check(rx);
581
582 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
583 ieee80211_is_pspoll(hdr->frame_control)) &&
584 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
585 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
586 if ((!ieee80211_has_fromds(hdr->frame_control) &&
587 !ieee80211_has_tods(hdr->frame_control) &&
588 ieee80211_is_data(hdr->frame_control)) ||
589 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
590 /* Drop IBSS frames and frames for other hosts
591 * silently. */
592 return RX_DROP_MONITOR;
593 }
594
595 return RX_DROP_MONITOR;
596 }
597
598 return RX_CONTINUE;
599 }
600
601
602 static ieee80211_rx_result debug_noinline
603 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
604 {
605 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
606 int keyidx;
607 int hdrlen;
608 ieee80211_rx_result result = RX_DROP_UNUSABLE;
609 struct ieee80211_key *stakey = NULL;
610 int mmie_keyidx = -1;
611
612 /*
613 * Key selection 101
614 *
615 * There are four types of keys:
616 * - GTK (group keys)
617 * - IGTK (group keys for management frames)
618 * - PTK (pairwise keys)
619 * - STK (station-to-station pairwise keys)
620 *
621 * When selecting a key, we have to distinguish between multicast
622 * (including broadcast) and unicast frames, the latter can only
623 * use PTKs and STKs while the former always use GTKs and IGTKs.
624 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
625 * unicast frames can also use key indices like GTKs. Hence, if we
626 * don't have a PTK/STK we check the key index for a WEP key.
627 *
628 * Note that in a regular BSS, multicast frames are sent by the
629 * AP only, associated stations unicast the frame to the AP first
630 * which then multicasts it on their behalf.
631 *
632 * There is also a slight problem in IBSS mode: GTKs are negotiated
633 * with each station, that is something we don't currently handle.
634 * The spec seems to expect that one negotiates the same key with
635 * every station but there's no such requirement; VLANs could be
636 * possible.
637 */
638
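/*
 * Worked example (added for illustration): a unicast data frame from a
 * peer with a PTK installed takes the stakey branch below; a BIP-protected
 * broadcast deauth has no Protected bit but carries an MMIE, so
 * mmie_keyidx (4 or 5) selects the IGTK; WEP and GTK protected frames fall
 * through to the key index read from the IV byte at hdrlen + 3.
 */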
639 if (!ieee80211_has_protected(hdr->frame_control)) {
640 if (!ieee80211_is_mgmt(hdr->frame_control) ||
641 rx->sta == NULL || !test_sta_flags(rx->sta, WLAN_STA_MFP))
642 return RX_CONTINUE;
643 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
644 if (mmie_keyidx < 0)
645 return RX_CONTINUE;
646 }
647
648 /*
649 * No point in finding a key and decrypting if the frame is neither
650 * addressed to us nor a multicast frame.
651 */
652 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
653 return RX_CONTINUE;
654
655 if (rx->sta)
656 stakey = rcu_dereference(rx->sta->key);
657
658 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
659 rx->key = stakey;
660 } else if (mmie_keyidx >= 0) {
661 /* Broadcast/multicast robust management frame / BIP */
662 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
663 (rx->status->flag & RX_FLAG_IV_STRIPPED))
664 return RX_CONTINUE;
665
666 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
667 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
668 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
669 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
670 } else {
671 /*
672 * The device doesn't give us the IV so we won't be
673 * able to look up the key. That's ok though, we
674 * don't need to decrypt the frame, we just won't
675 * be able to keep statistics accurate.
676 * Except for key threshold notifications, should
677 * we somehow allow the driver to tell us which key
678 * the hardware used if this flag is set?
679 */
680 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
681 (rx->status->flag & RX_FLAG_IV_STRIPPED))
682 return RX_CONTINUE;
683
684 hdrlen = ieee80211_hdrlen(hdr->frame_control);
685
686 if (rx->skb->len < 8 + hdrlen)
687 return RX_DROP_UNUSABLE; /* TODO: count this? */
688
689 /*
690 * no need to call ieee80211_wep_get_keyidx,
691 * it verifies a bunch of things we've done already
692 */
693 keyidx = rx->skb->data[hdrlen + 3] >> 6;
694
695 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
696
697 /*
698 * RSNA-protected unicast frames should always be sent with
699 * pairwise or station-to-station keys, but for WEP we allow
700 * using a key index as well.
701 */
702 if (rx->key && rx->key->conf.alg != ALG_WEP &&
703 !is_multicast_ether_addr(hdr->addr1))
704 rx->key = NULL;
705 }
706
707 if (rx->key) {
708 rx->key->tx_rx_count++;
709 /* TODO: add threshold stuff again */
710 } else {
711 return RX_DROP_MONITOR;
712 }
713
714 /* Check for weak IVs if possible */
715 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
716 ieee80211_is_data(hdr->frame_control) &&
717 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
718 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
719 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
720 rx->sta->wep_weak_iv_count++;
721
722 switch (rx->key->conf.alg) {
723 case ALG_WEP:
724 result = ieee80211_crypto_wep_decrypt(rx);
725 break;
726 case ALG_TKIP:
727 result = ieee80211_crypto_tkip_decrypt(rx);
728 break;
729 case ALG_CCMP:
730 result = ieee80211_crypto_ccmp_decrypt(rx);
731 break;
732 case ALG_AES_CMAC:
733 result = ieee80211_crypto_aes_cmac_decrypt(rx);
734 break;
735 }
736
737 /* either the frame has been decrypted or will be dropped */
738 rx->status->flag |= RX_FLAG_DECRYPTED;
739
740 return result;
741 }
742
743 static void ap_sta_ps_start(struct sta_info *sta)
744 {
745 struct ieee80211_sub_if_data *sdata = sta->sdata;
746 struct ieee80211_local *local = sdata->local;
747
748 atomic_inc(&sdata->bss->num_sta_ps);
749 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
750 if (local->ops->sta_notify)
751 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
752 STA_NOTIFY_SLEEP, &sta->sta);
753 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
754 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
755 sdata->dev->name, sta->sta.addr, sta->sta.aid);
756 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
757 }
758
759 static int ap_sta_ps_end(struct sta_info *sta)
760 {
761 struct ieee80211_sub_if_data *sdata = sta->sdata;
762 struct ieee80211_local *local = sdata->local;
763 struct sk_buff *skb;
764 int sent = 0;
765
766 atomic_dec(&sdata->bss->num_sta_ps);
767
768 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
769 if (local->ops->sta_notify)
770 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
771 STA_NOTIFY_AWAKE, &sta->sta);
772
773 if (!skb_queue_empty(&sta->ps_tx_buf))
774 sta_info_clear_tim_bit(sta);
775
776 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
777 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
778 sdata->dev->name, sta->sta.addr, sta->sta.aid);
779 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
780
781 /* Send all buffered frames to the station */
782 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
783 sent++;
784 skb->requeue = 1;
785 dev_queue_xmit(skb);
786 }
787 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
788 local->total_ps_buffered--;
789 sent++;
790 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
791 printk(KERN_DEBUG "%s: STA %pM aid %d send PS frame "
792 "since STA not sleeping anymore\n", sdata->dev->name,
793 sta->sta.addr, sta->sta.aid);
794 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
795 skb->requeue = 1;
796 dev_queue_xmit(skb);
797 }
798
799 return sent;
800 }
801
802 static ieee80211_rx_result debug_noinline
803 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
804 {
805 struct sta_info *sta = rx->sta;
806 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
807
808 if (!sta)
809 return RX_CONTINUE;
810
811 /* Update last_rx only for IBSS packets which are for the current
812 * BSSID to avoid keeping the current IBSS network alive in cases where
813 * other STAs are using a different BSSID. */
814 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
815 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
816 NL80211_IFTYPE_ADHOC);
817 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0)
818 sta->last_rx = jiffies;
819 } else
820 if (!is_multicast_ether_addr(hdr->addr1) ||
821 rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
822 /* Update last_rx only for unicast frames in order to prevent
823 * the Probe Request frames (the only broadcast frames from a
824 * STA in infrastructure mode) from keeping a connection alive.
825 * Mesh beacons will update last_rx when they are found to
826 * match the current local configuration when processed.
827 */
828 sta->last_rx = jiffies;
829 }
830
831 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
832 return RX_CONTINUE;
833
834 sta->rx_fragments++;
835 sta->rx_bytes += rx->skb->len;
836 sta->last_signal = rx->status->signal;
837 sta->last_qual = rx->status->qual;
838 sta->last_noise = rx->status->noise;
839
840 /*
841 * Change STA power saving mode only at the end of a frame
842 * exchange sequence.
843 */
844 if (!ieee80211_has_morefrags(hdr->frame_control) &&
845 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
846 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
847 if (test_sta_flags(sta, WLAN_STA_PS)) {
848 /*
849 * Ignore doze->wake transitions that are
850 * indicated by non-data frames, the standard
851 * is unclear here, but for example going to
852 * PS mode and then scanning would cause a
853 * doze->wake transition for the probe request,
854 * and that is clearly undesirable.
855 */
856 if (ieee80211_is_data(hdr->frame_control) &&
857 !ieee80211_has_pm(hdr->frame_control))
858 rx->sent_ps_buffered += ap_sta_ps_end(sta);
859 } else {
860 if (ieee80211_has_pm(hdr->frame_control))
861 ap_sta_ps_start(sta);
862 }
863 }
864
865 /* Drop data::nullfunc frames silently, since they are used only to
866 * control station power saving mode. */
867 if (ieee80211_is_nullfunc(hdr->frame_control)) {
868 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
869 /* Update counter and free packet here to avoid counting this
870 * as a dropped packet. */
871 sta->rx_packets++;
872 dev_kfree_skb(rx->skb);
873 return RX_QUEUED;
874 }
875
876 return RX_CONTINUE;
877 } /* ieee80211_rx_h_sta_process */
878
879 static inline struct ieee80211_fragment_entry *
880 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
881 unsigned int frag, unsigned int seq, int rx_queue,
882 struct sk_buff **skb)
883 {
884 struct ieee80211_fragment_entry *entry;
885 int idx;
886
887 idx = sdata->fragment_next;
888 entry = &sdata->fragments[sdata->fragment_next++];
889 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
890 sdata->fragment_next = 0;
891
892 if (!skb_queue_empty(&entry->skb_list)) {
893 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
894 struct ieee80211_hdr *hdr =
895 (struct ieee80211_hdr *) entry->skb_list.next->data;
896 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
897 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
898 "addr1=%pM addr2=%pM\n",
899 sdata->dev->name, idx,
900 jiffies - entry->first_frag_time, entry->seq,
901 entry->last_frag, hdr->addr1, hdr->addr2);
902 #endif
903 __skb_queue_purge(&entry->skb_list);
904 }
905
906 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
907 *skb = NULL;
908 entry->first_frag_time = jiffies;
909 entry->seq = seq;
910 entry->rx_queue = rx_queue;
911 entry->last_frag = frag;
912 entry->ccmp = 0;
913 entry->extra_len = 0;
914
915 return entry;
916 }
917
918 static inline struct ieee80211_fragment_entry *
919 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
920 unsigned int frag, unsigned int seq,
921 int rx_queue, struct ieee80211_hdr *hdr)
922 {
923 struct ieee80211_fragment_entry *entry;
924 int i, idx;
925
926 idx = sdata->fragment_next;
927 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
928 struct ieee80211_hdr *f_hdr;
929
930 idx--;
931 if (idx < 0)
932 idx = IEEE80211_FRAGMENT_MAX - 1;
933
934 entry = &sdata->fragments[idx];
935 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
936 entry->rx_queue != rx_queue ||
937 entry->last_frag + 1 != frag)
938 continue;
939
940 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
941
942 /*
943 * Check ftype and addresses are equal, else check next fragment
944 */
945 if (((hdr->frame_control ^ f_hdr->frame_control) &
946 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
947 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
948 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
949 continue;
950
951 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
952 __skb_queue_purge(&entry->skb_list);
953 continue;
954 }
955 return entry;
956 }
957
958 return NULL;
959 }
960
961 static ieee80211_rx_result debug_noinline
962 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
963 {
964 struct ieee80211_hdr *hdr;
965 u16 sc;
966 __le16 fc;
967 unsigned int frag, seq;
968 struct ieee80211_fragment_entry *entry;
969 struct sk_buff *skb;
970
971 hdr = (struct ieee80211_hdr *)rx->skb->data;
972 fc = hdr->frame_control;
973 sc = le16_to_cpu(hdr->seq_ctrl);
974 frag = sc & IEEE80211_SCTL_FRAG;
975
976 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
977 (rx->skb)->len < 24 ||
978 is_multicast_ether_addr(hdr->addr1))) {
979 /* not fragmented */
980 goto out;
981 }
982 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
983
984 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
985
986 if (frag == 0) {
987 /* This is the first fragment of a new frame. */
988 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
989 rx->queue, &(rx->skb));
990 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
991 ieee80211_has_protected(fc)) {
992 /* Store CCMP PN so that we can verify that the next
993 * fragment has a sequential PN value. */
994 entry->ccmp = 1;
995 memcpy(entry->last_pn,
996 rx->key->u.ccmp.rx_pn[rx->queue],
997 CCMP_PN_LEN);
998 }
999 return RX_QUEUED;
1000 }
1001
1002 /* This is a fragment for a frame that should already be pending in
1003 * the fragment cache. Add this fragment to the end of the pending entry.
1004 */
1005 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1006 if (!entry) {
1007 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1008 return RX_DROP_MONITOR;
1009 }
1010
1011 /* Verify that MPDUs within one MSDU have sequential PN values.
1012 * (IEEE 802.11i, 8.3.3.4.5) */
1013 if (entry->ccmp) {
1014 int i;
1015 u8 pn[CCMP_PN_LEN], *rpn;
1016 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1017 return RX_DROP_UNUSABLE;
1018 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1019 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1020 pn[i]++;
1021 if (pn[i])
1022 break;
1023 }
1024 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
1025 if (memcmp(pn, rpn, CCMP_PN_LEN))
1026 return RX_DROP_UNUSABLE;
1027 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1028 }
1029
1030 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1031 __skb_queue_tail(&entry->skb_list, rx->skb);
1032 entry->last_frag = frag;
1033 entry->extra_len += rx->skb->len;
1034 if (ieee80211_has_morefrags(fc)) {
1035 rx->skb = NULL;
1036 return RX_QUEUED;
1037 }
1038
1039 rx->skb = __skb_dequeue(&entry->skb_list);
1040 if (skb_tailroom(rx->skb) < entry->extra_len) {
1041 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1042 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1043 GFP_ATOMIC))) {
1044 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1045 __skb_queue_purge(&entry->skb_list);
1046 return RX_DROP_UNUSABLE;
1047 }
1048 }
1049 while ((skb = __skb_dequeue(&entry->skb_list))) {
1050 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1051 dev_kfree_skb(skb);
1052 }
1053
1054 /* Complete frame has been reassembled - process it now */
1055 rx->flags |= IEEE80211_RX_FRAGMENTED;
1056
1057 out:
1058 if (rx->sta)
1059 rx->sta->rx_packets++;
1060 if (is_multicast_ether_addr(hdr->addr1))
1061 rx->local->dot11MulticastReceivedFrameCount++;
1062 else
1063 ieee80211_led_rx(rx->local);
1064 return RX_CONTINUE;
1065 }
1066
1067 static ieee80211_rx_result debug_noinline
1068 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1069 {
1070 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1071 struct sk_buff *skb;
1072 int no_pending_pkts;
1073 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1074
1075 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1076 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1077 return RX_CONTINUE;
1078
1079 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1080 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1081 return RX_DROP_UNUSABLE;
1082
1083 skb = skb_dequeue(&rx->sta->tx_filtered);
1084 if (!skb) {
1085 skb = skb_dequeue(&rx->sta->ps_tx_buf);
1086 if (skb)
1087 rx->local->total_ps_buffered--;
1088 }
1089 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
1090 skb_queue_empty(&rx->sta->ps_tx_buf);
1091
1092 if (skb) {
1093 struct ieee80211_hdr *hdr =
1094 (struct ieee80211_hdr *) skb->data;
1095
1096 /*
1097 * Tell TX path to send one frame even though the STA may
1098 * still remain in PS mode after this frame exchange.
1099 */
1100 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1101
1102 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1103 printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
1104 rx->sta->sta.addr, rx->sta->sta.aid,
1105 skb_queue_len(&rx->sta->ps_tx_buf));
1106 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1107
1108 /* Use MoreData flag to indicate whether there are more
1109 * buffered frames for this STA */
1110 if (no_pending_pkts)
1111 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1112 else
1113 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1114
1115 dev_queue_xmit(skb);
1116
1117 if (no_pending_pkts)
1118 sta_info_clear_tim_bit(rx->sta);
1119 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1120 } else if (!rx->sent_ps_buffered) {
1121 /*
1122 * FIXME: This can be the result of a race condition between
1123 * us expiring a frame and the station polling for it.
1124 * Should we send it a null-func frame indicating we
1125 * have nothing buffered for it?
1126 */
1127 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
1128 "though there are no buffered frames for it\n",
1129 rx->dev->name, rx->sta->sta.addr);
1130 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1131 }
1132
1133 /* Free PS Poll skb here instead of returning RX_DROP that would
1134 * count as a dropped frame. */
1135 dev_kfree_skb(rx->skb);
1136
1137 return RX_QUEUED;
1138 }
1139
1140 static ieee80211_rx_result debug_noinline
1141 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1142 {
1143 u8 *data = rx->skb->data;
1144 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1145
1146 if (!ieee80211_is_data_qos(hdr->frame_control))
1147 return RX_CONTINUE;
1148
1149 /* remove the qos control field, update frame type and meta-data */
1150 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1151 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1152 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1153 /* change frame type to non QOS */
1154 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1155
1156 return RX_CONTINUE;
1157 }
1158
1159 static int
1160 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1161 {
1162 if (unlikely(!rx->sta ||
1163 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1164 return -EACCES;
1165
1166 return 0;
1167 }
1168
1169 static int
1170 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1171 {
1172 /*
1173 * Pass through unencrypted frames if the hardware has
1174 * decrypted them already.
1175 */
1176 if (rx->status->flag & RX_FLAG_DECRYPTED)
1177 return 0;
1178
1179 /* Drop unencrypted frames if key is set. */
1180 if (unlikely(!ieee80211_has_protected(fc) &&
1181 !ieee80211_is_nullfunc(fc) &&
1182 (!ieee80211_is_mgmt(fc) ||
1183 (ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1184 rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP))) &&
1185 (rx->key || rx->sdata->drop_unencrypted)))
1186 return -EACCES;
1187 /* BIP does not use Protected field, so need to check MMIE */
1188 if (unlikely(rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP) &&
1189 ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1190 ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1191 (rx->key || rx->sdata->drop_unencrypted)))
1192 return -EACCES;
1193
1194 return 0;
1195 }
1196
1197 static int
1198 ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1199 {
1200 struct net_device *dev = rx->dev;
1201 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1202 u16 hdrlen, ethertype;
1203 u8 *payload;
1204 u8 dst[ETH_ALEN];
1205 u8 src[ETH_ALEN] __aligned(2);
1206 struct sk_buff *skb = rx->skb;
1207 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1208
1209 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1210 return -1;
1211
1212 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1213
1214 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1215 * header
1216 * IEEE 802.11 address fields:
1217 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1218 * 0 0 DA SA BSSID n/a
1219 * 0 1 DA BSSID SA n/a
1220 * 1 0 BSSID SA DA n/a
1221 * 1 1 RA TA DA SA
1222 */
1223 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1224 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1225
1226 switch (hdr->frame_control &
1227 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1228 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS):
1229 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1230 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1231 return -1;
1232 break;
1233 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1234 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1235 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1236 return -1;
1237 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1238 struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
1239 (skb->data + hdrlen);
1240 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
1241 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
1242 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
1243 memcpy(src, meshdr->eaddr2, ETH_ALEN);
1244 }
1245 }
1246 break;
1247 case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS):
1248 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1249 (is_multicast_ether_addr(dst) &&
1250 !compare_ether_addr(src, dev->dev_addr)))
1251 return -1;
1252 break;
1253 case __constant_cpu_to_le16(0):
1254 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1255 return -1;
1256 break;
1257 }
1258
1259 if (unlikely(skb->len - hdrlen < 8))
1260 return -1;
1261
1262 payload = skb->data + hdrlen;
1263 ethertype = (payload[6] << 8) | payload[7];
1264
1265 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1266 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1267 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1268 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1269 * replace EtherType */
1270 skb_pull(skb, hdrlen + 6);
1271 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1272 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1273 } else {
1274 struct ethhdr *ehdr;
1275 __be16 len;
1276
1277 skb_pull(skb, hdrlen);
1278 len = htons(skb->len);
1279 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1280 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1281 memcpy(ehdr->h_source, src, ETH_ALEN);
1282 ehdr->h_proto = len;
1283 }
1284 return 0;
1285 }
1286
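/*
 * Worked example (added for illustration): for a ToDS data frame from a
 * client to its AP the table above gives Addr1=BSSID, Addr2=SA, Addr3=DA,
 * so ieee80211_data_to_8023() builds the Ethernet header from addr3
 * (h_dest) and addr2 (h_source) and collapses the RFC 1042 SNAP header
 * into the EtherType.
 */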
1287 /*
1288 * requires that rx->skb is a frame with ethernet header
1289 */
1290 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1291 {
1292 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1293 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1294 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1295
1296 /*
1297 * Allow EAPOL frames to us/the PAE group address regardless
1298 * of whether the frame was encrypted or not.
1299 */
1300 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1301 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1302 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1303 return true;
1304
1305 if (ieee80211_802_1x_port_control(rx) ||
1306 ieee80211_drop_unencrypted(rx, fc))
1307 return false;
1308
1309 return true;
1310 }
1311
1312 /*
1313 * requires that rx->skb is a frame with ethernet header
1314 */
1315 static void
1316 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1317 {
1318 struct net_device *dev = rx->dev;
1319 struct ieee80211_local *local = rx->local;
1320 struct sk_buff *skb, *xmit_skb;
1321 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1322 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1323 struct sta_info *dsta;
1324
1325 skb = rx->skb;
1326 xmit_skb = NULL;
1327
1328 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1329 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1330 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1331 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1332 if (is_multicast_ether_addr(ehdr->h_dest)) {
1333 /*
1334 * send multicast frames both to higher layers in
1335 * local net stack and back to the wireless medium
1336 */
1337 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1338 if (!xmit_skb && net_ratelimit())
1339 printk(KERN_DEBUG "%s: failed to clone "
1340 "multicast frame\n", dev->name);
1341 } else {
1342 dsta = sta_info_get(local, skb->data);
1343 if (dsta && dsta->sdata->dev == dev) {
1344 /*
1345 * The destination station is associated to
1346 * this AP (in this VLAN), so send the frame
1347 * directly to it and do not pass it to local
1348 * net stack.
1349 */
1350 xmit_skb = skb;
1351 skb = NULL;
1352 }
1353 }
1354 }
1355
1356 if (skb) {
1357 int align __maybe_unused;
1358
1359 #if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1360 /*
1361 * 'align' will only take the values 0 or 2 here
1362 * since all frames are required to be aligned
1363 * to 2-byte boundaries when being passed to
1364 * mac80211. That also explains the __skb_push()
1365 * below.
1366 */
1367 align = (unsigned long)skb->data & 3;
1368 if (align) {
1369 if (WARN_ON(skb_headroom(skb) < 3)) {
1370 dev_kfree_skb(skb);
1371 skb = NULL;
1372 } else {
1373 u8 *data = skb->data;
1374 size_t len = skb->len;
1375 u8 *new = __skb_push(skb, align);
1376 memmove(new, data, len);
1377 __skb_trim(skb, len);
1378 }
1379 }
1380 #endif
1381
1382 if (skb) {
1383 /* deliver to local stack */
1384 skb->protocol = eth_type_trans(skb, dev);
1385 memset(skb->cb, 0, sizeof(skb->cb));
1386 netif_rx(skb);
1387 }
1388 }
1389
1390 if (xmit_skb) {
1391 /* send to wireless media */
1392 xmit_skb->protocol = htons(ETH_P_802_3);
1393 skb_reset_network_header(xmit_skb);
1394 skb_reset_mac_header(xmit_skb);
1395 dev_queue_xmit(xmit_skb);
1396 }
1397 }
1398
1399 static ieee80211_rx_result debug_noinline
1400 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1401 {
1402 struct net_device *dev = rx->dev;
1403 struct ieee80211_local *local = rx->local;
1404 u16 ethertype;
1405 u8 *payload;
1406 struct sk_buff *skb = rx->skb, *frame = NULL;
1407 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1408 __le16 fc = hdr->frame_control;
1409 const struct ethhdr *eth;
1410 int remaining, err;
1411 u8 dst[ETH_ALEN];
1412 u8 src[ETH_ALEN];
1413
1414 if (unlikely(!ieee80211_is_data(fc)))
1415 return RX_CONTINUE;
1416
1417 if (unlikely(!ieee80211_is_data_present(fc)))
1418 return RX_DROP_MONITOR;
1419
1420 if (!(rx->flags & IEEE80211_RX_AMSDU))
1421 return RX_CONTINUE;
1422
1423 err = ieee80211_data_to_8023(rx);
1424 if (unlikely(err))
1425 return RX_DROP_UNUSABLE;
1426
1427 skb->dev = dev;
1428
1429 dev->stats.rx_packets++;
1430 dev->stats.rx_bytes += skb->len;
1431
1432 /* skip the wrapping header */
1433 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1434 if (!eth)
1435 return RX_DROP_UNUSABLE;
1436
1437 while (skb != frame) {
1438 u8 padding;
1439 __be16 len = eth->h_proto;
1440 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1441
1442 remaining = skb->len;
1443 memcpy(dst, eth->h_dest, ETH_ALEN);
1444 memcpy(src, eth->h_source, ETH_ALEN);
1445
1446 padding = ((4 - subframe_len) & 0x3);
1447 /* the last MSDU has no padding */
1448 if (subframe_len > remaining)
1449 return RX_DROP_UNUSABLE;
1450
1451 skb_pull(skb, sizeof(struct ethhdr));
1452 /* if last subframe reuse skb */
1453 if (remaining <= subframe_len + padding)
1454 frame = skb;
1455 else {
1456 /*
1457 * Allocate and reserve two bytes more for payload
1458 * alignment since sizeof(struct ethhdr) is 14.
1459 */
1460 frame = dev_alloc_skb(
1461 ALIGN(local->hw.extra_tx_headroom, 4) +
1462 subframe_len + 2);
1463
1464 if (frame == NULL)
1465 return RX_DROP_UNUSABLE;
1466
1467 skb_reserve(frame,
1468 ALIGN(local->hw.extra_tx_headroom, 4) +
1469 sizeof(struct ethhdr) + 2);
1470 memcpy(skb_put(frame, ntohs(len)), skb->data,
1471 ntohs(len));
1472
1473 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1474 padding);
1475 if (!eth) {
1476 dev_kfree_skb(frame);
1477 return RX_DROP_UNUSABLE;
1478 }
1479 }
1480
1481 skb_reset_network_header(frame);
1482 frame->dev = dev;
1483 frame->priority = skb->priority;
1484 rx->skb = frame;
1485
1486 payload = frame->data;
1487 ethertype = (payload[6] << 8) | payload[7];
1488
1489 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1490 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1491 compare_ether_addr(payload,
1492 bridge_tunnel_header) == 0)) {
1493 /* remove RFC1042 or Bridge-Tunnel
1494 * encapsulation and replace EtherType */
1495 skb_pull(frame, 6);
1496 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1497 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1498 } else {
1499 memcpy(skb_push(frame, sizeof(__be16)),
1500 &len, sizeof(__be16));
1501 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1502 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1503 }
1504
1505 if (!ieee80211_frame_allowed(rx, fc)) {
1506 if (skb == frame) /* last frame */
1507 return RX_DROP_UNUSABLE;
1508 dev_kfree_skb(frame);
1509 continue;
1510 }
1511
1512 ieee80211_deliver_skb(rx);
1513 }
1514
1515 return RX_QUEUED;
1516 }
1517
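/*
 * For reference (illustrative): each A-MSDU subframe walked by the loop
 * above is laid out as
 *
 *	| DA (6) | SA (6) | length (2) | LLC/SNAP + payload | pad to 4 |
 *
 * which is why the code treats it as a struct ethhdr followed by
 * ntohs(len) bytes, then skips 0-3 bytes of padding (none after the
 * last subframe).
 */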
1518 #ifdef CONFIG_MAC80211_MESH
1519 static ieee80211_rx_result
1520 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1521 {
1522 struct ieee80211_hdr *hdr;
1523 struct ieee80211s_hdr *mesh_hdr;
1524 unsigned int hdrlen;
1525 struct sk_buff *skb = rx->skb, *fwd_skb;
1526
1527 hdr = (struct ieee80211_hdr *) skb->data;
1528 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1529 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1530
1531 if (!ieee80211_is_data(hdr->frame_control))
1532 return RX_CONTINUE;
1533
1534 if (!mesh_hdr->ttl)
1535 /* illegal frame */
1536 return RX_DROP_MONITOR;
1537
1538 if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
1539 struct ieee80211_sub_if_data *sdata;
1540 struct mesh_path *mppath;
1541
1542 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1543 rcu_read_lock();
1544 mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
1545 if (!mppath) {
1546 mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
1547 } else {
1548 spin_lock_bh(&mppath->state_lock);
1549 mppath->exp_time = jiffies;
1550 if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
1551 memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
1552 spin_unlock_bh(&mppath->state_lock);
1553 }
1554 rcu_read_unlock();
1555 }
1556
1557 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1558 return RX_CONTINUE;
1559
1560 mesh_hdr->ttl--;
1561
1562 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1563 if (!mesh_hdr->ttl)
1564 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1565 dropped_frames_ttl);
1566 else {
1567 struct ieee80211_hdr *fwd_hdr;
1568 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1569
1570 if (!fwd_skb) {
1571 if (net_ratelimit())
1572 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1573 rx->dev->name);
1574 } else {
1575 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1576 /* Save TA to addr1 to send TA a path error if a
1577 * suitable next hop is not found */
1578 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1579 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1580 fwd_skb->dev = rx->local->mdev;
1581 fwd_skb->iif = rx->dev->ifindex;
1582 dev_queue_xmit(fwd_skb);
1583 }
1584 }
1585 }
1586
1587 if (is_multicast_ether_addr(hdr->addr3) ||
1588 rx->dev->flags & IFF_PROMISC)
1589 return RX_CONTINUE;
1590 else
1591 return RX_DROP_MONITOR;
1592 }
1593 #endif
1594
1595 static ieee80211_rx_result debug_noinline
1596 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1597 {
1598 struct net_device *dev = rx->dev;
1599 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1600 __le16 fc = hdr->frame_control;
1601 int err;
1602
1603 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1604 return RX_CONTINUE;
1605
1606 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1607 return RX_DROP_MONITOR;
1608
1609 err = ieee80211_data_to_8023(rx);
1610 if (unlikely(err))
1611 return RX_DROP_UNUSABLE;
1612
1613 if (!ieee80211_frame_allowed(rx, fc))
1614 return RX_DROP_MONITOR;
1615
1616 rx->skb->dev = dev;
1617
1618 dev->stats.rx_packets++;
1619 dev->stats.rx_bytes += rx->skb->len;
1620
1621 ieee80211_deliver_skb(rx);
1622
1623 return RX_QUEUED;
1624 }
1625
1626 static ieee80211_rx_result debug_noinline
1627 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1628 {
1629 struct ieee80211_local *local = rx->local;
1630 struct ieee80211_hw *hw = &local->hw;
1631 struct sk_buff *skb = rx->skb;
1632 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1633 struct tid_ampdu_rx *tid_agg_rx;
1634 u16 start_seq_num;
1635 u16 tid;
1636
1637 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1638 return RX_CONTINUE;
1639
1640 if (ieee80211_is_back_req(bar->frame_control)) {
1641 if (!rx->sta)
1642 return RX_CONTINUE;
1643 tid = le16_to_cpu(bar->control) >> 12;
1644 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1645 != HT_AGG_STATE_OPERATIONAL)
1646 return RX_CONTINUE;
1647 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1648
1649 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1650
1651 /* reset session timer */
1652 if (tid_agg_rx->timeout) {
1653 unsigned long expires =
1654 jiffies + (tid_agg_rx->timeout / 1000) * HZ;
1655 mod_timer(&tid_agg_rx->session_timer, expires);
1656 }
1657
1658 /* manage reordering buffer according to requested */
1659 /* sequence number */
1660 rcu_read_lock();
1661 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1662 start_seq_num, 1);
1663 rcu_read_unlock();
1664 return RX_DROP_UNUSABLE;
1665 }
1666
1667 return RX_CONTINUE;
1668 }
1669
1670 void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1671 struct ieee80211_mgmt *mgmt,
1672 size_t len)
1673 {
1674 struct ieee80211_local *local = sdata->local;
1675 struct sk_buff *skb;
1676 struct ieee80211_mgmt *resp;
1677
1678 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1679 /* Not to own unicast address */
1680 return;
1681 }
1682
1683 if (compare_ether_addr(mgmt->sa, sdata->u.sta.bssid) != 0 ||
1684 compare_ether_addr(mgmt->bssid, sdata->u.sta.bssid) != 0) {
1685 /* Not from the current AP. */
1686 return;
1687 }
1688
1689 if (sdata->u.sta.state == IEEE80211_STA_MLME_ASSOCIATE) {
1690 /* Association in progress; ignore SA Query */
1691 return;
1692 }
1693
1694 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1695 /* Too short SA Query request frame */
1696 return;
1697 }
1698
1699 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1700 if (skb == NULL)
1701 return;
1702
1703 skb_reserve(skb, local->hw.extra_tx_headroom);
1704 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1705 memset(resp, 0, 24);
1706 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1707 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1708 memcpy(resp->bssid, sdata->u.sta.bssid, ETH_ALEN);
1709 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1710 IEEE80211_STYPE_ACTION);
1711 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1712 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1713 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1714 memcpy(resp->u.action.u.sa_query.trans_id,
1715 mgmt->u.action.u.sa_query.trans_id,
1716 WLAN_SA_QUERY_TR_ID_LEN);
1717
1718 ieee80211_tx_skb(sdata, skb, 1);
1719 }
1720
1721 static ieee80211_rx_result debug_noinline
1722 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1723 {
1724 struct ieee80211_local *local = rx->local;
1725 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1726 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1727 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1728 struct ieee80211_bss *bss;
1729 int len = rx->skb->len;
1730
1731 if (!ieee80211_is_action(mgmt->frame_control))
1732 return RX_CONTINUE;
1733
1734 if (!rx->sta)
1735 return RX_DROP_MONITOR;
1736
1737 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1738 return RX_DROP_MONITOR;
1739
1740 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1741 return RX_DROP_MONITOR;
1742
1743 /* all categories we currently handle have action_code */
1744 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1745 return RX_DROP_MONITOR;
1746
1747 switch (mgmt->u.action.category) {
1748 case WLAN_CATEGORY_BACK:
1749 switch (mgmt->u.action.u.addba_req.action_code) {
1750 case WLAN_ACTION_ADDBA_REQ:
1751 if (len < (IEEE80211_MIN_ACTION_SIZE +
1752 sizeof(mgmt->u.action.u.addba_req)))
1753 return RX_DROP_MONITOR;
1754 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1755 break;
1756 case WLAN_ACTION_ADDBA_RESP:
1757 if (len < (IEEE80211_MIN_ACTION_SIZE +
1758 sizeof(mgmt->u.action.u.addba_resp)))
1759 return RX_DROP_MONITOR;
1760 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1761 break;
1762 case WLAN_ACTION_DELBA:
1763 if (len < (IEEE80211_MIN_ACTION_SIZE +
1764 sizeof(mgmt->u.action.u.delba)))
1765 return RX_DROP_MONITOR;
1766 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1767 break;
1768 }
1769 break;
1770 case WLAN_CATEGORY_SPECTRUM_MGMT:
1771 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1772 return RX_DROP_MONITOR;
1773 switch (mgmt->u.action.u.measurement.action_code) {
1774 case WLAN_ACTION_SPCT_MSR_REQ:
1775 if (len < (IEEE80211_MIN_ACTION_SIZE +
1776 sizeof(mgmt->u.action.u.measurement)))
1777 return RX_DROP_MONITOR;
1778 ieee80211_process_measurement_req(sdata, mgmt, len);
1779 break;
1780 case WLAN_ACTION_SPCT_CHL_SWITCH:
1781 if (len < (IEEE80211_MIN_ACTION_SIZE +
1782 sizeof(mgmt->u.action.u.chan_switch)))
1783 return RX_DROP_MONITOR;
1784
1785 if (memcmp(mgmt->bssid, ifsta->bssid, ETH_ALEN) != 0)
1786 return RX_DROP_MONITOR;
1787
1788 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
1789 local->hw.conf.channel->center_freq,
1790 ifsta->ssid, ifsta->ssid_len);
1791 if (!bss)
1792 return RX_DROP_MONITOR;
1793
1794 ieee80211_process_chanswitch(sdata,
1795 &mgmt->u.action.u.chan_switch.sw_elem, bss);
1796 ieee80211_rx_bss_put(local, bss);
1797 break;
1798 }
1799 break;
1800 case WLAN_CATEGORY_SA_QUERY:
1801 if (len < (IEEE80211_MIN_ACTION_SIZE +
1802 sizeof(mgmt->u.action.u.sa_query)))
1803 return RX_DROP_MONITOR;
1804 switch (mgmt->u.action.u.sa_query.action) {
1805 case WLAN_ACTION_SA_QUERY_REQUEST:
1806 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1807 return RX_DROP_MONITOR;
1808 ieee80211_process_sa_query_req(sdata, mgmt, len);
1809 break;
1810 case WLAN_ACTION_SA_QUERY_RESPONSE:
1811 /*
1812 * SA Query response is currently only used in AP mode
1813 * and it is processed in user space.
1814 */
1815 return RX_CONTINUE;
1816 }
1817 break;
1818 default:
1819 return RX_CONTINUE;
1820 }
1821
1822 rx->sta->rx_packets++;
1823 dev_kfree_skb(rx->skb);
1824 return RX_QUEUED;
1825 }
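/*
 * Note on the dispatch above: every branch length-checks the category
 * specific body (addba_req/addba_resp/delba, measurement/chan_switch,
 * sa_query) before handing the frame off, and any category that is not
 * handled here returns RX_CONTINUE so later handlers (and ultimately
 * user space) still get to see the frame.  Frames that are handled are
 * accounted in rx_packets, freed and reported as RX_QUEUED.
 */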
1826
1827 static ieee80211_rx_result debug_noinline
1828 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1829 {
1830 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1831 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1832
1833 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1834 return RX_DROP_MONITOR;
1835
1836 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1837 return RX_DROP_MONITOR;
1838
1839 if (ieee80211_vif_is_mesh(&sdata->vif))
1840 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1841
1842 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1843 sdata->vif.type != NL80211_IFTYPE_ADHOC)
1844 return RX_DROP_MONITOR;
1845
1846 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
1847 return RX_DROP_MONITOR;
1848
1849 ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1850 return RX_QUEUED;
1851 }
1852
1853 static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1854 struct ieee80211_hdr *hdr,
1855 struct ieee80211_rx_data *rx)
1856 {
1857 int keyidx;
1858 unsigned int hdrlen;
1859
1860 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1861 if (rx->skb->len >= hdrlen + 4)
1862 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1863 else
1864 keyidx = -1;
1865
1866 if (!rx->sta) {
1867 /*
1868 * Some hardware seem to generate incorrect Michael MIC
1869 * reports; ignore them to avoid triggering countermeasures.
1870 */
1871 goto ignore;
1872 }
1873
1874 if (!ieee80211_has_protected(hdr->frame_control))
1875 goto ignore;
1876
1877 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1878 /*
1879 * APs with pairwise keys should never receive Michael MIC
1880 * errors for non-zero keyidx because these are reserved for
1881 * group keys and only the AP is sending real multicast
1882 * frames in the BSS.
1883 */
1884 goto ignore;
1885 }
1886
1887 if (!ieee80211_is_data(hdr->frame_control) &&
1888 !ieee80211_is_auth(hdr->frame_control))
1889 goto ignore;
1890
1891 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
1892 ignore:
1893 dev_kfree_skb(rx->skb);
1894 rx->skb = NULL;
1895 }
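/*
 * Worked example for the keyidx extraction above: for a protected frame
 * the key identifier sits in the two most significant bits of the fourth
 * IV/ExtIV octet following the 802.11 header, so with hdrlen pointing
 * just past the header, rx->skb->data[hdrlen + 3] >> 6 yields a value in
 * 0..3; keyidx stays -1 when the frame is too short to carry an IV.
 */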
1896
1897 /* TODO: use IEEE80211_RX_FRAGMENTED */
1898 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1899 {
1900 struct ieee80211_sub_if_data *sdata;
1901 struct ieee80211_local *local = rx->local;
1902 struct ieee80211_rtap_hdr {
1903 struct ieee80211_radiotap_header hdr;
1904 u8 flags;
1905 u8 rate;
1906 __le16 chan_freq;
1907 __le16 chan_flags;
1908 } __attribute__ ((packed)) *rthdr;
1909 struct sk_buff *skb = rx->skb, *skb2;
1910 struct net_device *prev_dev = NULL;
1911 struct ieee80211_rx_status *status = rx->status;
1912
1913 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1914 goto out_free_skb;
1915
1916 if (skb_headroom(skb) < sizeof(*rthdr) &&
1917 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1918 goto out_free_skb;
1919
1920 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1921 memset(rthdr, 0, sizeof(*rthdr));
1922 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1923 rthdr->hdr.it_present =
1924 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1925 (1 << IEEE80211_RADIOTAP_RATE) |
1926 (1 << IEEE80211_RADIOTAP_CHANNEL));
1927
1928 rthdr->rate = rx->rate->bitrate / 5;
1929 rthdr->chan_freq = cpu_to_le16(status->freq);
1930
1931 if (status->band == IEEE80211_BAND_5GHZ)
1932 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1933 IEEE80211_CHAN_5GHZ);
1934 else
1935 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1936 IEEE80211_CHAN_2GHZ);
1937
1938 skb_set_mac_header(skb, 0);
1939 skb->ip_summed = CHECKSUM_UNNECESSARY;
1940 skb->pkt_type = PACKET_OTHERHOST;
1941 skb->protocol = htons(ETH_P_802_2);
1942
1943 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1944 if (!netif_running(sdata->dev))
1945 continue;
1946
1947 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1948 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1949 continue;
1950
1951 if (prev_dev) {
1952 skb2 = skb_clone(skb, GFP_ATOMIC);
1953 if (skb2) {
1954 skb2->dev = prev_dev;
1955 netif_rx(skb2);
1956 }
1957 }
1958
1959 prev_dev = sdata->dev;
1960 sdata->dev->stats.rx_packets++;
1961 sdata->dev->stats.rx_bytes += skb->len;
1962 }
1963
1964 if (prev_dev) {
1965 skb->dev = prev_dev;
1966 netif_rx(skb);
1967 skb = NULL;
1968 } else
1969 goto out_free_skb;
1970
1971 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
1972 return;
1973
1974 out_free_skb:
1975 dev_kfree_skb(skb);
1976 }
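/*
 * The cooked monitor path above prepends only a minimal radiotap header
 * (flags, rate, channel).  The rate field is expressed in 500 kbps units
 * as radiotap requires, hence bitrate / 5 with mac80211's 100 kbps
 * bitrate units: a 54 Mbps frame has bitrate == 540 and is reported as
 * radiotap rate 108.  Every cooked-monitor interface except the last
 * gets an skb_clone(); the last one receives the original skb.
 */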
1977
1978
1979 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1980 struct ieee80211_rx_data *rx,
1981 struct sk_buff *skb)
1982 {
1983 ieee80211_rx_result res = RX_DROP_MONITOR;
1984
1985 rx->skb = skb;
1986 rx->sdata = sdata;
1987 rx->dev = sdata->dev;
1988
1989 #define CALL_RXH(rxh) \
1990 do { \
1991 res = rxh(rx); \
1992 if (res != RX_CONTINUE) \
1993 goto rxh_done; \
1994 } while (0);
1995
1996 CALL_RXH(ieee80211_rx_h_passive_scan)
1997 CALL_RXH(ieee80211_rx_h_check)
1998 CALL_RXH(ieee80211_rx_h_decrypt)
1999 CALL_RXH(ieee80211_rx_h_sta_process)
2000 CALL_RXH(ieee80211_rx_h_defragment)
2001 CALL_RXH(ieee80211_rx_h_ps_poll)
2002 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2003 /* must be after MMIC verify so header is counted in MPDU mic */
2004 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2005 CALL_RXH(ieee80211_rx_h_amsdu)
2006 #ifdef CONFIG_MAC80211_MESH
2007 if (ieee80211_vif_is_mesh(&sdata->vif))
2008 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2009 #endif
2010 CALL_RXH(ieee80211_rx_h_data)
2011 CALL_RXH(ieee80211_rx_h_ctrl)
2012 CALL_RXH(ieee80211_rx_h_action)
2013 CALL_RXH(ieee80211_rx_h_mgmt)
2014
2015 #undef CALL_RXH
2016
2017 rxh_done:
2018 switch (res) {
2019 case RX_DROP_MONITOR:
2020 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2021 if (rx->sta)
2022 rx->sta->rx_dropped++;
2023 /* fall through */
2024 case RX_CONTINUE:
2025 ieee80211_rx_cooked_monitor(rx);
2026 break;
2027 case RX_DROP_UNUSABLE:
2028 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2029 if (rx->sta)
2030 rx->sta->rx_dropped++;
2031 dev_kfree_skb(rx->skb);
2032 break;
2033 case RX_QUEUED:
2034 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
2035 break;
2036 }
2037 }
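/*
 * The CALL_RXH() chain above implements the rx handler pipeline: each
 * handler returns RX_CONTINUE to pass the frame on, and any other result
 * short-circuits to rxh_done, where RX_DROP_MONITOR and RX_CONTINUE
 * frames are fed to the cooked monitor, RX_DROP_UNUSABLE frames are
 * freed, and RX_QUEUED frames are considered consumed by the handler.
 * The macro body ends in "while (0);" with the semicolon included, so
 * the call sites can omit it.
 */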
2038
2039 /* main receive path */
2040
2041 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2042 u8 *bssid, struct ieee80211_rx_data *rx,
2043 struct ieee80211_hdr *hdr)
2044 {
2045 int multicast = is_multicast_ether_addr(hdr->addr1);
2046
2047 switch (sdata->vif.type) {
2048 case NL80211_IFTYPE_STATION:
2049 if (!bssid)
2050 return 0;
2051 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
2052 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2053 return 0;
2054 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2055 } else if (!multicast &&
2056 compare_ether_addr(sdata->dev->dev_addr,
2057 hdr->addr1) != 0) {
2058 if (!(sdata->dev->flags & IFF_PROMISC))
2059 return 0;
2060 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2061 }
2062 break;
2063 case NL80211_IFTYPE_ADHOC:
2064 if (!bssid)
2065 return 0;
2066 if (ieee80211_is_beacon(hdr->frame_control)) {
2067 return 1;
2068 }
2069 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
2070 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2071 return 0;
2072 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2073 } else if (!multicast &&
2074 compare_ether_addr(sdata->dev->dev_addr,
2075 hdr->addr1) != 0) {
2076 if (!(sdata->dev->flags & IFF_PROMISC))
2077 return 0;
2078 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2079 } else if (!rx->sta) {
2080 int rate_idx;
2081 if (rx->status->flag & RX_FLAG_HT)
2082 rate_idx = 0; /* TODO: HT rates */
2083 else
2084 rate_idx = rx->status->rate_idx;
2085 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
2086 BIT(rate_idx));
2087 }
2088 break;
2089 case NL80211_IFTYPE_MESH_POINT:
2090 if (!multicast &&
2091 compare_ether_addr(sdata->dev->dev_addr,
2092 hdr->addr1) != 0) {
2093 if (!(sdata->dev->flags & IFF_PROMISC))
2094 return 0;
2095
2096 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2097 }
2098 break;
2099 case NL80211_IFTYPE_AP_VLAN:
2100 case NL80211_IFTYPE_AP:
2101 if (!bssid) {
2102 if (compare_ether_addr(sdata->dev->dev_addr,
2103 hdr->addr1))
2104 return 0;
2105 } else if (!ieee80211_bssid_match(bssid,
2106 sdata->dev->dev_addr)) {
2107 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2108 return 0;
2109 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2110 }
2111 break;
2112 case NL80211_IFTYPE_WDS:
2113 if (bssid || !ieee80211_is_data(hdr->frame_control))
2114 return 0;
2115 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2116 return 0;
2117 break;
2118 case NL80211_IFTYPE_MONITOR:
2119 /* take everything */
2120 break;
2121 case NL80211_IFTYPE_UNSPECIFIED:
2122 case __NL80211_IFTYPE_AFTER_LAST:
2123 /* should never get here */
2124 WARN_ON(1);
2125 break;
2126 }
2127
2128 return 1;
2129 }
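/*
 * prepare_for_handlers() returns 0 when the frame is of no interest to
 * this interface at all and 1 when it should be run through the rx
 * handlers.  Frames that are only accepted because a scan is in progress
 * or the interface is promiscuous keep being processed, but have
 * IEEE80211_RX_RA_MATCH cleared so later handlers know the frame was not
 * actually addressed to us.
 */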
2130
2131 /*
2132 * This is the actual Rx frames handler. As it belongs to the Rx path it
2133 * must be called with rcu_read_lock protection.
2134 */
2135 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2136 struct sk_buff *skb,
2137 struct ieee80211_rx_status *status,
2138 struct ieee80211_rate *rate)
2139 {
2140 struct ieee80211_local *local = hw_to_local(hw);
2141 struct ieee80211_sub_if_data *sdata;
2142 struct ieee80211_hdr *hdr;
2143 struct ieee80211_rx_data rx;
2144 int prepares;
2145 struct ieee80211_sub_if_data *prev = NULL;
2146 struct sk_buff *skb_new;
2147 u8 *bssid;
2148
2149 hdr = (struct ieee80211_hdr *)skb->data;
2150 memset(&rx, 0, sizeof(rx));
2151 rx.skb = skb;
2152 rx.local = local;
2153
2154 rx.status = status;
2155 rx.rate = rate;
2156
2157 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
2158 local->dot11ReceivedFragmentCount++;
2159
2160 rx.sta = sta_info_get(local, hdr->addr2);
2161 if (rx.sta) {
2162 rx.sdata = rx.sta->sdata;
2163 rx.dev = rx.sta->sdata->dev;
2164 }
2165
2166 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
2167 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
2168 return;
2169 }
2170
2171 if (unlikely(local->sw_scanning || local->hw_scanning))
2172 rx.flags |= IEEE80211_RX_IN_SCAN;
2173
2174 ieee80211_parse_qos(&rx);
2175 ieee80211_verify_alignment(&rx);
2176
2177 skb = rx.skb;
2178
2179 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2180 if (!netif_running(sdata->dev))
2181 continue;
2182
2183 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
2184 continue;
2185
2186 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2187 rx.flags |= IEEE80211_RX_RA_MATCH;
2188 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr);
2189
2190 if (!prepares)
2191 continue;
2192
2193 /*
2194 * frame is destined for this interface; defer handling it until
2195 * we know whether a later interface wants it too, so the last
2196 * recipient gets the original SKB and we avoid one copy too many
2197 */
2198
2199 if (!prev) {
2200 prev = sdata;
2201 continue;
2202 }
2203
2204 /*
2205 * frame was destined for the previous interface
2206 * so invoke RX handlers for it
2207 */
2208
2209 skb_new = skb_copy(skb, GFP_ATOMIC);
2210 if (!skb_new) {
2211 if (net_ratelimit())
2212 printk(KERN_DEBUG "%s: failed to copy "
2213 "multicast frame for %s\n",
2214 wiphy_name(local->hw.wiphy),
2215 prev->dev->name);
2216 continue;
2217 }
2218 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2219 prev = sdata;
2220 }
2221 if (prev)
2222 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2223 else
2224 dev_kfree_skb(skb);
2225 }
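/*
 * The interface loop above avoids copying in the common case: the first
 * matching interface is only remembered in "prev", every further match
 * triggers an skb_copy() for the previously remembered one, and the
 * original skb is finally handed to the last match (or freed when
 * nothing matched).  A frame destined for a single interface therefore
 * never gets copied at all.
 */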
2226
2227 #define SEQ_MODULO 0x1000
2228 #define SEQ_MASK 0xfff
2229
2230 static inline int seq_less(u16 sq1, u16 sq2)
2231 {
2232 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
2233 }
2234
2235 static inline u16 seq_inc(u16 sq)
2236 {
2237 return (sq + 1) & SEQ_MASK;
2238 }
2239
2240 static inline u16 seq_sub(u16 sq1, u16 sq2)
2241 {
2242 return (sq1 - sq2) & SEQ_MASK;
2243 }
2244
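/*
 * These helpers do modulo-4096 arithmetic on the 12-bit 802.11 sequence
 * number space.  Worked examples:
 *	seq_less(5, 10)   -> (5 - 10) & 0xfff == 4091 > 2048 -> true
 *	seq_less(4090, 3) -> (4090 - 3) & 0xfff == 4087 > 2048 -> true
 *			     (4090 is "before" 3 across the wrap)
 *	seq_inc(4095)     -> 0
 *	seq_sub(3, 4090)  -> (3 - 4090) & 0xfff == 9
 */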
2245
2246 /*
2247 * As this function belongs to the Rx path it must be called with
2248 * the proper rcu_read_lock protection for its flow.
2249 */
2250 static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2251 struct tid_ampdu_rx *tid_agg_rx,
2252 struct sk_buff *skb,
2253 u16 mpdu_seq_num,
2254 int bar_req)
2255 {
2256 struct ieee80211_local *local = hw_to_local(hw);
2257 struct ieee80211_rx_status status;
2258 u16 head_seq_num, buf_size;
2259 int index;
2260 struct ieee80211_supported_band *sband;
2261 struct ieee80211_rate *rate;
2262
2263 buf_size = tid_agg_rx->buf_size;
2264 head_seq_num = tid_agg_rx->head_seq_num;
2265
2266 /* frame with out of date sequence number */
2267 if (seq_less(mpdu_seq_num, head_seq_num)) {
2268 dev_kfree_skb(skb);
2269 return 1;
2270 }
2271
2272 /* if the frame sequence number exceeds our buffering window size or a
2273 * block Ack Request arrived - release stored frames */
2274 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
2275 /* new head to the ordering buffer */
2276 if (bar_req)
2277 head_seq_num = mpdu_seq_num;
2278 else
2279 head_seq_num =
2280 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2281 /* release stored frames up to new head to stack */
2282 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2283 index = seq_sub(tid_agg_rx->head_seq_num,
2284 tid_agg_rx->ssn)
2285 % tid_agg_rx->buf_size;
2286
2287 if (tid_agg_rx->reorder_buf[index]) {
2288 /* release the reordered frames to stack */
2289 memcpy(&status,
2290 tid_agg_rx->reorder_buf[index]->cb,
2291 sizeof(status));
2292 sband = local->hw.wiphy->bands[status.band];
2293 if (status.flag & RX_FLAG_HT) {
2294 /* TODO: HT rates */
2295 rate = sband->bitrates;
2296 } else {
2297 rate = &sband->bitrates
2298 [status.rate_idx];
2299 }
2300 __ieee80211_rx_handle_packet(hw,
2301 tid_agg_rx->reorder_buf[index],
2302 &status, rate);
2303 tid_agg_rx->stored_mpdu_num--;
2304 tid_agg_rx->reorder_buf[index] = NULL;
2305 }
2306 tid_agg_rx->head_seq_num =
2307 seq_inc(tid_agg_rx->head_seq_num);
2308 }
2309 if (bar_req)
2310 return 1;
2311 }
2312
2313 /* now the new frame is always in the range of the
2314 * reordering buffer window */
2315 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
2316 % tid_agg_rx->buf_size;
2317 /* check if we already stored this frame */
2318 if (tid_agg_rx->reorder_buf[index]) {
2319 dev_kfree_skb(skb);
2320 return 1;
2321 }
2322
2323 /* if the arriving mpdu is in order and nothing else is stored,
2324 * release it immediately */
2325 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2326 tid_agg_rx->stored_mpdu_num == 0) {
2327 tid_agg_rx->head_seq_num =
2328 seq_inc(tid_agg_rx->head_seq_num);
2329 return 0;
2330 }
2331
2332 /* put the frame in the reordering buffer */
2333 tid_agg_rx->reorder_buf[index] = skb;
2334 tid_agg_rx->stored_mpdu_num++;
2335 /* release buffered frames up to the next missing frame */
2336 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2337 % tid_agg_rx->buf_size;
2338 while (tid_agg_rx->reorder_buf[index]) {
2339 /* release the reordered frame back to stack */
2340 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
2341 sizeof(status));
2342 sband = local->hw.wiphy->bands[status.band];
2343 if (status.flag & RX_FLAG_HT)
2344 rate = sband->bitrates; /* TODO: HT rates */
2345 else
2346 rate = &sband->bitrates[status.rate_idx];
2347 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2348 &status, rate);
2349 tid_agg_rx->stored_mpdu_num--;
2350 tid_agg_rx->reorder_buf[index] = NULL;
2351 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2352 index = seq_sub(tid_agg_rx->head_seq_num,
2353 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2354 }
2355 return 1;
2356 }
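/*
 * Worked example for the reordering above, each case starting from
 * ssn == head_seq_num == 100 and buf_size == 8 (hypothetical values):
 * an MPDU with sequence number 99 is stale and dropped; sequence 100
 * with an empty buffer is released right away and the head advances to
 * 101; sequence 103 is parked at index (103 - 100) % 8 == 3; and
 * sequence 108 (head + buf_size) first flushes stored frames and moves
 * the head to 108 - 8 + 1 == 101 before being buffered itself.
 */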
2357
2358 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2359 struct sk_buff *skb)
2360 {
2361 struct ieee80211_hw *hw = &local->hw;
2362 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2363 struct sta_info *sta;
2364 struct tid_ampdu_rx *tid_agg_rx;
2365 u16 sc;
2366 u16 mpdu_seq_num;
2367 u8 ret = 0;
2368 int tid;
2369
2370 sta = sta_info_get(local, hdr->addr2);
2371 if (!sta)
2372 return ret;
2373
2374 /* filter the QoS data rx stream according to
2375 * STA/TID and check if this STA/TID is on aggregation */
2376 if (!ieee80211_is_data_qos(hdr->frame_control))
2377 goto end_reorder;
2378
2379 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2380
2381 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2382 goto end_reorder;
2383
2384 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2385
2386 /* qos null data frames are excluded */
2387 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2388 goto end_reorder;
2389
2390 /* new un-ordered ampdu frame - process it */
2391
2392 /* reset session timer */
2393 if (tid_agg_rx->timeout) {
2394 unsigned long expires =
2395 jiffies + (tid_agg_rx->timeout / 1000) * HZ;
2396 mod_timer(&tid_agg_rx->session_timer, expires);
2397 }
2398
2399 /* if this mpdu is fragmented - terminate rx aggregation session */
2400 sc = le16_to_cpu(hdr->seq_ctrl);
2401 if (sc & IEEE80211_SCTL_FRAG) {
2402 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2403 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2404 ret = 1;
2405 goto end_reorder;
2406 }
2407
2408 /* hand the mpdu to the reordering buffer according to its sequence number */
2409 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2410 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2411 mpdu_seq_num, 0);
2412 end_reorder:
2413 return ret;
2414 }
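/*
 * ieee80211_rx_reorder_ampdu() returns non-zero when it consumed the
 * frame (buffered it, dropped it as stale or duplicate, or tore down the
 * aggregation session), in which case __ieee80211_rx() below must not
 * process the skb any further; a return of 0 means the frame takes the
 * normal receive path.  The sequence number itself sits in bits 4..15 of
 * the sequence control field, hence the ">> 4" above.
 */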
2415
2416 /*
2417 * This is the receive path handler. It is called by a low level driver when an
2418 * 802.11 MPDU is received from the hardware.
2419 */
2420 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2421 struct ieee80211_rx_status *status)
2422 {
2423 struct ieee80211_local *local = hw_to_local(hw);
2424 struct ieee80211_rate *rate = NULL;
2425 struct ieee80211_supported_band *sband;
2426
2427 if (status->band < 0 ||
2428 status->band >= IEEE80211_NUM_BANDS) {
2429 WARN_ON(1);
2430 return;
2431 }
2432
2433 sband = local->hw.wiphy->bands[status->band];
2434 if (!sband) {
2435 WARN_ON(1);
2436 return;
2437 }
2438
2439 if (status->flag & RX_FLAG_HT) {
2440 /* rate_idx is MCS index */
2441 if (WARN_ON(status->rate_idx < 0 ||
2442 status->rate_idx >= 76))
2443 return;
2444 /* HT rates are not in the table - use the highest legacy rate
2445 * for now since other parts of mac80211 may not yet be fully
2446 * MCS aware. */
2447 rate = &sband->bitrates[sband->n_bitrates - 1];
2448 } else {
2449 if (WARN_ON(status->rate_idx < 0 ||
2450 status->rate_idx >= sband->n_bitrates))
2451 return;
2452 rate = &sband->bitrates[status->rate_idx];
2453 }
2454
2455 /*
2456 * key references and virtual interfaces are protected using RCU
2457 * and this requires that we are in a read-side RCU section during
2458 * receive processing
2459 */
2460 rcu_read_lock();
2461
2462 /*
2463 * Frames with failed FCS/PLCP checksum are not returned,
2464 * all other frames are returned without radiotap header
2465 * if it was previously present.
2466 * Also, frames with less than 16 bytes are dropped.
2467 */
2468 skb = ieee80211_rx_monitor(local, skb, status, rate);
2469 if (!skb) {
2470 rcu_read_unlock();
2471 return;
2472 }
2473
2474 if (!ieee80211_rx_reorder_ampdu(local, skb))
2475 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2476
2477 rcu_read_unlock();
2478 }
2479 EXPORT_SYMBOL(__ieee80211_rx);
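/*
 * Receive entry flow: __ieee80211_rx() validates the reported band and
 * rate index, hands the frame to ieee80211_rx_monitor(), which delivers
 * it to monitor interfaces, drops bad FCS/PLCP frames and strips any
 * radiotap header, then runs A-MPDU reordering and only afterwards
 * dispatches to __ieee80211_rx_handle_packet(); all of this happens
 * inside rcu_read_lock() because keys and virtual interfaces are RCU
 * protected.
 */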
2480
2481 /* This is a version of the rx handler that can be called from hard irq
2482 * context. Post the skb on the queue and schedule the tasklet */
2483 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
2484 struct ieee80211_rx_status *status)
2485 {
2486 struct ieee80211_local *local = hw_to_local(hw);
2487
2488 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2489
2490 skb->dev = local->mdev;
2491 /* copy status into skb->cb for use by tasklet */
2492 memcpy(skb->cb, status, sizeof(*status));
2493 skb->pkt_type = IEEE80211_RX_MSG;
2494 skb_queue_tail(&local->skb_queue, skb);
2495 tasklet_schedule(&local->tasklet);
2496 }
2497 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
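/*
 * Usage sketch (hypothetical driver, not part of this file): a driver
 * that receives frames in its hard interrupt handler fills in an
 * ieee80211_rx_status on the stack and defers to mac80211 via
 * ieee80211_rx_irqsafe(), roughly:
 *
 *	static irqreturn_t mydrv_isr(int irq, void *dev_id)
 *	{
 *		struct mydrv_priv *priv = dev_id;
 *		struct ieee80211_rx_status status = {};
 *		struct sk_buff *skb = mydrv_fetch_frame(priv, &status);
 *
 *		if (skb)
 *			ieee80211_rx_irqsafe(priv->hw, skb, &status);
 *		return IRQ_HANDLED;
 *	}
 *
 * The mydrv_* names are made up for illustration; the only real API used
 * is ieee80211_rx_irqsafe() as defined above, which queues the skb and
 * lets the tasklet run the full receive path in softirq context.
 */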