]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/mac80211/rx.c
mac80211: remove encrypt parameter from ieee80211_tx_skb
[mirror_ubuntu-bionic-kernel.git] / net / mac80211 / rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "driver-ops.h"
23 #include "led.h"
24 #include "mesh.h"
25 #include "wep.h"
26 #include "wpa.h"
27 #include "tkip.h"
28 #include "wme.h"
29
30 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
31 struct tid_ampdu_rx *tid_agg_rx,
32 u16 head_seq_num);
33
34 /*
35 * monitor mode reception
36 *
37 * This function cleans up the SKB, i.e. it removes all the stuff
38 * only useful for monitoring.
39 */
40 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
41 struct sk_buff *skb)
42 {
43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
44 if (likely(skb->len > FCS_LEN))
45 skb_trim(skb, skb->len - FCS_LEN);
46 else {
47 /* driver bug */
48 WARN_ON(1);
49 dev_kfree_skb(skb);
50 skb = NULL;
51 }
52 }
53
54 return skb;
55 }
56
57 static inline int should_drop_frame(struct sk_buff *skb,
58 int present_fcs_len)
59 {
60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
62
63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
64 return 1;
65 if (unlikely(skb->len < 16 + present_fcs_len))
66 return 1;
67 if (ieee80211_is_ctl(hdr->frame_control) &&
68 !ieee80211_is_pspoll(hdr->frame_control) &&
69 !ieee80211_is_back_req(hdr->frame_control))
70 return 1;
71 return 0;
72 }
73
74 static int
75 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
76 struct ieee80211_rx_status *status)
77 {
78 int len;
79
80 /* always present fields */
81 len = sizeof(struct ieee80211_radiotap_header) + 9;
82
83 if (status->flag & RX_FLAG_TSFT)
84 len += 8;
85 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
86 len += 1;
87 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
88 len += 1;
89
90 if (len & 1) /* padding for RX_FLAGS if necessary */
91 len++;
92
93 return len;
94 }
95
/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 *
 * @rtap_len must be the value ieee80211_rx_radiotap_len() computed for the
 * same @status, since the optional fields written here must exactly fill
 * the space reserved there. The header is pushed in front of the frame
 * data, so the skb needs @rtap_len bytes of headroom.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	u16 rx_flags = 0;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len);

	/* radiotap header, set always present flags */
	rthdr->it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_CHANNEL) |
			    (1 << IEEE80211_RADIOTAP_ANTENNA) |
			    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
	rthdr->it_len = cpu_to_le16(rtap_len);

	/* data fields start right after the fixed header */
	pos = (unsigned char *)(rthdr+1);

	/*
	 * the order of the following fields is important: radiotap data
	 * fields must appear in the order of their it_present bits
	 */

	/* IEEE80211_RADIOTAP_TSFT */
	if (status->flag & RX_FLAG_TSFT) {
		put_unaligned_le64(status->mactime, pos);
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (status->flag & RX_FLAG_HT) {
		/*
		 * TODO: add following information into radiotap header once
		 * suitable fields are defined for it:
		 * - MCS index (status->rate_idx)
		 * - HT40 (status->flag & RX_FLAG_40MHZ)
		 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
		 */
		*pos = 0;
	} else {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		/* radiotap rate unit is 500 kbps, bitrate is in 100 kbps */
		*pos = rate->bitrate / 5;
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->band == IEEE80211_BAND_5GHZ)
		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
				   pos);
	else if (status->flag & RX_FLAG_HT)
		put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
				   pos);
	else if (rate->flags & IEEE80211_RATE_ERP_G)
		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
				   pos);
	else
		put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
				   pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_DBM_ANTNOISE */
	if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
		*pos = status->noise;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	/* IEEE80211_RADIOTAP_ANTENNA */
	*pos = status->antenna;
	pos++;

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		pos++;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;
}
210
/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 *
 * Ownership of @origskb is taken over in all cases: the return value is
 * the skb the caller should continue processing with (possibly the
 * original one), or NULL when the frame was consumed or dropped.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	int needed_headroom = 0;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	/* room for the radiotap header based on driver features */
	needed_headroom = ieee80211_rx_radiotap_len(local, status);

	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		present_fcs_len = FCS_LEN;

	/* fast path: no monitor interfaces exist at all */
	if (!local->monitors) {
		if (should_drop_frame(origskb, present_fcs_len)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb);
	}

	if (should_drop_frame(origskb, present_fcs_len)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb);

		/* copy failed: at least return the cleaned-up original */
		if (!skb)
			return origskb;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	/*
	 * Deliver a clone to every active monitor interface; the last
	 * one receives the skb itself so one clone is saved.
	 */
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!netif_running(sdata->dev))
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		/* skip interfaces that only want "cooked" frames */
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_rx(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}
320
321
322 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
323 {
324 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
325 int tid;
326
327 /* does the frame have a qos control field? */
328 if (ieee80211_is_data_qos(hdr->frame_control)) {
329 u8 *qc = ieee80211_get_qos_ctl(hdr);
330 /* frame has qos control */
331 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
332 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
333 rx->flags |= IEEE80211_RX_AMSDU;
334 else
335 rx->flags &= ~IEEE80211_RX_AMSDU;
336 } else {
337 /*
338 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
339 *
340 * Sequence numbers for management frames, QoS data
341 * frames with a broadcast/multicast address in the
342 * Address 1 field, and all non-QoS data frames sent
343 * by QoS STAs are assigned using an additional single
344 * modulo-4096 counter, [...]
345 *
346 * We also use that counter for non-QoS STAs.
347 */
348 tid = NUM_RX_DATA_QUEUES - 1;
349 }
350
351 rx->queue = tid;
352 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
353 * For now, set skb->priority to 0 for other cases. */
354 rx->skb->priority = (tid > 7) ? 0 : tid;
355 }
356
357 /**
358 * DOC: Packet alignment
359 *
360 * Drivers always need to pass packets that are aligned to two-byte boundaries
361 * to the stack.
362 *
363 * Additionally, should, if possible, align the payload data in a way that
364 * guarantees that the contained IP header is aligned to a four-byte
365 * boundary. In the case of regular frames, this simply means aligning the
366 * payload to a four-byte boundary (because either the IP header is directly
367 * contained, or IV/RFC1042 headers that have a length divisible by four are
368 * in front of it).
369 *
370 * With A-MSDU frames, however, the payload data address must yield two modulo
371 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
372 * push the IP header further back to a multiple of four again. Thankfully, the
373 * specs were sane enough this time around to require padding each A-MSDU
374 * subframe to a length that is a multiple of four.
375 *
376 * Padding like Atheros hardware adds which is inbetween the 802.11 header and
377 * the payload is not supported, the driver is required to move the 802.11
378 * header to be directly in front of the payload in that case.
379 */
/*
 * Warn (once per condition) when a driver hands us a frame violating
 * the alignment rules described in the "Packet alignment" DOC section
 * above. Compiled to a no-op unless
 * CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is enabled.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	int hdrlen;

	/* checks below are debug-only */
#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
	return;
#endif

	/* the header itself must start on a two-byte boundary */
	if (WARN_ONCE((unsigned long)rx->skb->data & 1,
		      "unaligned packet at 0x%p\n", rx->skb->data))
		return;

	if (!ieee80211_is_data_present(hdr->frame_control))
		return;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/*
	 * For A-MSDUs the payload proper sits behind a 14-byte 802.3
	 * subframe header, so shift the expected alignment point.
	 */
	if (rx->flags & IEEE80211_RX_AMSDU)
		hdrlen += ETH_HLEN;
	WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
		  "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
}
402
403
404 /* rx handlers */
405
/*
 * RX handler: divert or drop frames while a scan is in progress.
 *
 * During a hardware scan every frame is handed to the scan code.
 * During a software scan only frames flagged IEEE80211_RX_IN_SCAN are
 * offered to it (and freed if it doesn't keep them); such frames are
 * otherwise dropped once scanning has finished.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct sk_buff *skb = rx->skb;

	if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning)))
		return ieee80211_scan_rx(rx->sdata, skb);

	if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) &&
		     (rx->flags & IEEE80211_RX_IN_SCAN))) {
		/* drop all the other packets during a software scan anyway */
		if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
			dev_kfree_skb(skb);
		return RX_QUEUED;
	}

	if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
		/* scanning finished during invoking of handlers */
		I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
		return RX_DROP_UNUSABLE;
	}

	return RX_CONTINUE;
}
431
432
433 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
434 {
435 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
436
437 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
438 return 0;
439
440 return ieee80211_is_robust_mgmt_frame(hdr);
441 }
442
443
444 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
445 {
446 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
447
448 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
449 return 0;
450
451 return ieee80211_is_robust_mgmt_frame(hdr);
452 }
453
454
455 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
456 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
457 {
458 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
459 struct ieee80211_mmie *mmie;
460
461 if (skb->len < 24 + sizeof(*mmie) ||
462 !is_multicast_ether_addr(hdr->da))
463 return -1;
464
465 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
466 return -1; /* not a robust management frame */
467
468 mmie = (struct ieee80211_mmie *)
469 (skb->data + skb->len - sizeof(*mmie));
470 if (mmie->element_id != WLAN_EID_MMIE ||
471 mmie->length != sizeof(*mmie) - 2)
472 return -1;
473
474 return le16_to_cpu(mmie->key_id);
475 }
476
477
/*
 * Mesh-specific validity checks for received frames.
 *
 * Drops (to monitor) data frames whose addressing is invalid for a mesh
 * interface or that we originated ourselves, all frames from peers
 * without an established peer link (except peer-link establishment
 * actions, beacons and probe req/resp), and multicast data frames the
 * recent-multicast cache has already seen.
 */
static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	char *dev_addr = rx->sdata->dev->dev_addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			/* multicast data must be FromDS, never ToDS */
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			/* drop our own multicast frames coming back to us */
			if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
				return RX_DROP_MONITOR;
		} else {
			/* unicast mesh data must use the 4-address format */
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			/* drop frames we sent ourselves */
			if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			mgmt = (struct ieee80211_mgmt *)hdr;
			/* only peer link establishment actions pass */
			if (mgmt->u.action.category != MESH_PLINK_CATEGORY)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;

	}

/* locate the mesh header right behind the 802.11 header */
#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))

	/* multicast duplicate suppression via the recent-multicast cache */
	if (ieee80211_is_data(hdr->frame_control) &&
	    is_multicast_ether_addr(hdr->addr1) &&
	    mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
		return RX_DROP_MONITOR;
#undef msh_h_get

	return RX_CONTINUE;
}
536
537
/*
 * RX handler: generic validity checks.
 *
 * Drops duplicate retransmissions (tracked per station and RX queue),
 * frames too short for a minimal header, and frames of a class not
 * permitted by the station's association state. Mesh interfaces get
 * their own set of checks instead.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
			     rx->sta->last_seq_ctrl[rx->queue] ==
			     hdr->seq_ctrl)) {
			if (rx->flags & IEEE80211_RX_RA_MATCH) {
				/* only count frames actually meant for us */
				rx->local->dot11FrameDuplicateCount++;
				rx->sta->num_duplicates++;
			}
			return RX_DROP_MONITOR;
		} else
			rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
	}

	if (unlikely(rx->skb->len < 16)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
		return RX_DROP_MONITOR;
	}

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
		if ((!ieee80211_has_fromds(hdr->frame_control) &&
		     !ieee80211_has_tods(hdr->frame_control) &&
		     ieee80211_is_data(hdr->frame_control)) ||
		    !(rx->flags & IEEE80211_RX_RA_MATCH)) {
			/* Drop IBSS frames and frames for other hosts
			 * silently. */
			return RX_DROP_MONITOR;
		}

		/*
		 * NOTE(review): both branches return RX_DROP_MONITOR -
		 * the inner condition only distinguishes which drops are
		 * considered "silent"; the outcome is the same.
		 */
		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}
592
593
/*
 * RX handler: select the decryption key and decrypt the frame.
 *
 * Picks rx->key (pairwise key, BIP/IGTK via the MMIE key index, WEP key
 * index, or a default key for unprotected frames that might have been
 * expected to be encrypted) and then dispatches to the per-algorithm
 * decrypt routine. Returns RX_CONTINUE when no decryption is needed,
 * a drop code when no usable key exists or decryption fails, or the
 * decrypt routine's result otherwise.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *stakey = NULL;
	int mmie_keyidx = -1;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 * - GTK (group keys)
	 * - IGTK (group keys for management frames)
	 * - PTK (pairwise keys)
	 * - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/*
	 * No point in finding a key and decrypting if the frame is neither
	 * addressed to us nor a multicast frame.
	 */
	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (rx->sta)
		stakey = rcu_dereference(rx->sta->key);

	/* only look for an MMIE on frames without the Protected bit */
	if (!ieee80211_has_protected(hdr->frame_control))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
		/* unicast with a pairwise key available */
		rx->key = stakey;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(hdr->frame_control))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		/* BIP key indices live after the default (WEP) key range */
		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(hdr->frame_control)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		if (ieee80211_is_mgmt(hdr->frame_control) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else if ((key = rcu_dereference(rx->sdata->default_key)))
			rx->key = key;
		return RX_CONTINUE;
	} else {
		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(hdr->frame_control);

		/* need at least the full IV behind the header */
		if (rx->skb->len < 8 + hdrlen)
			return RX_DROP_UNUSABLE; /* TODO: count this? */

		/*
		 * no need to call ieee80211_wep_get_keyidx,
		 * it verifies a bunch of things we've done already
		 */
		keyidx = rx->skb->data[hdrlen + 3] >> 6;

		rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

		/*
		 * RSNA-protected unicast frames should always be sent with
		 * pairwise or station-to-station keys, but for WEP we allow
		 * using a key index as well.
		 */
		if (rx->key && rx->key->conf.alg != ALG_WEP &&
		    !is_multicast_ether_addr(hdr->addr1))
			rx->key = NULL;
	}

	if (rx->key) {
		rx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */
	} else {
		/* protected frame without a usable key */
		return RX_DROP_MONITOR;
	}

	/* Check for weak IVs if possible */
	if (rx->sta && rx->key->conf.alg == ALG_WEP &&
	    ieee80211_is_data(hdr->frame_control) &&
	    (!(status->flag & RX_FLAG_IV_STRIPPED) ||
	     !(status->flag & RX_FLAG_DECRYPTED)) &&
	    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
		rx->sta->wep_weak_iv_count++;

	/* dispatch to the algorithm-specific decrypt routine */
	switch (rx->key->conf.alg) {
	case ALG_WEP:
		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case ALG_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case ALG_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(rx);
		break;
	case ALG_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	}

	/* either the frame has been decrypted or will be dropped */
	status->flag |= RX_FLAG_DECRYPTED;

	return result;
}
748
749 static ieee80211_rx_result debug_noinline
750 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
751 {
752 struct ieee80211_local *local;
753 struct ieee80211_hdr *hdr;
754 struct sk_buff *skb;
755
756 local = rx->local;
757 skb = rx->skb;
758 hdr = (struct ieee80211_hdr *) skb->data;
759
760 if (!local->pspolling)
761 return RX_CONTINUE;
762
763 if (!ieee80211_has_fromds(hdr->frame_control))
764 /* this is not from AP */
765 return RX_CONTINUE;
766
767 if (!ieee80211_is_data(hdr->frame_control))
768 return RX_CONTINUE;
769
770 if (!ieee80211_has_moredata(hdr->frame_control)) {
771 /* AP has no more frames buffered for us */
772 local->pspolling = false;
773 return RX_CONTINUE;
774 }
775
776 /* more data bit is set, let's request a new frame from the AP */
777 ieee80211_send_pspoll(local, rx->sdata);
778
779 return RX_CONTINUE;
780 }
781
/*
 * Transition @sta into power-save mode: bump the BSS PS station count,
 * mark the station, and notify the driver that it went to sleep.
 */
static void ap_sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	atomic_inc(&sdata->bss->num_sta_ps);
	set_sta_flags(sta, WLAN_STA_PS_STA);
	drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
	       sdata->dev->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
795
/*
 * Transition @sta out of power-save mode and deliver its buffered
 * frames - unless the driver keeps its own PS block on the station
 * (WLAN_STA_PS_DRIVER), in which case wakeup delivery is skipped here.
 */
static void ap_sta_ps_end(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	atomic_dec(&sdata->bss->num_sta_ps);

	clear_sta_flags(sta, WLAN_STA_PS_STA);

#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
	       sdata->dev->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

	if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
		/* driver-side PS block still active - don't wake yet */
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
		printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
		       sdata->dev->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
		return;
	}

	ieee80211_sta_ps_deliver_wakeup(sta);
}
819
/*
 * RX handler: per-station bookkeeping.
 *
 * Updates activity timestamps and RX statistics for rx->sta, tracks
 * doze/wake power-save transitions signalled by the PM bit on AP
 * interfaces, and consumes (qos-)nullfunc frames, which exist only to
 * signal power-save state.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID to avoid keeping the current IBSS network alive in cases
	 * where other STAs start using different BSSID.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
			sta->last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when if they are found to
		 * match the current local configuration when processed.
		 */
		sta->last_rx = jiffies;
	}

	/* the remaining bookkeeping is only for frames addressed to us */
	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_fragments++;
	sta->rx_bytes += rx->skb->len;
	sta->last_signal = status->signal;
	sta->last_noise = status->noise;

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
			/*
			 * Ignore doze->wake transitions that are
			 * indicated by non-data frames, the standard
			 * is unclear here, but for example going to
			 * PS mode and then scanning would cause a
			 * doze->wake transition for the probe request,
			 * and that is clearly undesirable.
			 */
			if (ieee80211_is_data(hdr->frame_control) &&
			    !ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_start(sta);
		}
	}

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packed.
		 */
		sta->rx_packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */
903
904 static inline struct ieee80211_fragment_entry *
905 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
906 unsigned int frag, unsigned int seq, int rx_queue,
907 struct sk_buff **skb)
908 {
909 struct ieee80211_fragment_entry *entry;
910 int idx;
911
912 idx = sdata->fragment_next;
913 entry = &sdata->fragments[sdata->fragment_next++];
914 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
915 sdata->fragment_next = 0;
916
917 if (!skb_queue_empty(&entry->skb_list)) {
918 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
919 struct ieee80211_hdr *hdr =
920 (struct ieee80211_hdr *) entry->skb_list.next->data;
921 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
922 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
923 "addr1=%pM addr2=%pM\n",
924 sdata->dev->name, idx,
925 jiffies - entry->first_frag_time, entry->seq,
926 entry->last_frag, hdr->addr1, hdr->addr2);
927 #endif
928 __skb_queue_purge(&entry->skb_list);
929 }
930
931 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
932 *skb = NULL;
933 entry->first_frag_time = jiffies;
934 entry->seq = seq;
935 entry->rx_queue = rx_queue;
936 entry->last_frag = frag;
937 entry->ccmp = 0;
938 entry->extra_len = 0;
939
940 return entry;
941 }
942
/*
 * Look up the fragment-cache entry that fragment number @frag of
 * sequence @seq should be appended to. The cache is scanned starting
 * from the most recently used slot, matching on sequence number, RX
 * queue, expected next fragment number, frame type and both addresses;
 * entries older than two seconds are purged on the way. Returns NULL
 * when no matching entry exists.
 */
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;

		/* walk backwards from the most recently added entry */
		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
		    compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
			continue;

		/* stale entry (older than two seconds) - throw it away */
		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}
985
/*
 * Reassemble fragmented MPDUs.
 *
 * The first fragment of a frame opens a new entry in the per-interface
 * fragment cache; later fragments are matched against a pending entry
 * and appended.  When the final fragment (no "more fragments" bit)
 * arrives, all queued fragments are merged back into rx->skb.  For
 * CCMP-protected frames the per-fragment packet numbers must be
 * strictly sequential (IEEE 802.11i, 8.3.3.4.5) or the frame is
 * discarded.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;
	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
		   (rx->skb)->len < 24 ||
		   is_multicast_ether_addr(hdr->addr1))) {
		/* not fragmented */
		goto out;
	}
	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->queue, &(rx->skb));
		if (rx->key && rx->key->conf.alg == ALG_CCMP &&
		    ieee80211_has_protected(fc)) {
			/* Store CCMP PN so that we can verify that the next
			 * fragment has a sequential PN value. */
			entry->ccmp = 1;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[rx->queue],
			       CCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* Verify that MPDUs within one MSDU have sequential PN values.
	 * (IEEE 802.11i, 8.3.3.4.5) */
	if (entry->ccmp) {
		int i;
		u8 pn[CCMP_PN_LEN], *rpn;
		if (!rx->key || rx->key->conf.alg != ALG_CCMP)
			return RX_DROP_UNUSABLE;
		memcpy(pn, entry->last_pn, CCMP_PN_LEN);
		/* increment the stored PN (big-endian byte array) by one */
		for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
			pn[i]++;
			if (pn[i])
				break;
		}
		rpn = rx->key->u.ccmp.rx_pn[rx->queue];
		if (memcmp(pn, rpn, CCMP_PN_LEN))
			return RX_DROP_UNUSABLE;
		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
	}

	/* queue this fragment's payload (802.11 header stripped) */
	skb_pull(rx->skb, ieee80211_hdrlen(fc));
	__skb_queue_tail(&entry->skb_list, rx->skb);
	entry->last_frag = frag;
	entry->extra_len += rx->skb->len;
	if (ieee80211_has_morefrags(fc)) {
		/* more fragments expected; skb ownership moved to the cache */
		rx->skb = NULL;
		return RX_QUEUED;
	}

	/* last fragment seen: merge all queued fragments into the first skb */
	rx->skb = __skb_dequeue(&entry->skb_list);
	if (skb_tailroom(rx->skb) < entry->extra_len) {
		I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
					      GFP_ATOMIC))) {
			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
			__skb_queue_purge(&entry->skb_list);
			return RX_DROP_UNUSABLE;
		}
	}
	while ((skb = __skb_dequeue(&entry->skb_list))) {
		memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
		dev_kfree_skb(skb);
	}

	/* Complete frame has been reassembled - process it now */
	rx->flags |= IEEE80211_RX_FRAGMENTED;

 out:
	if (rx->sta)
		rx->sta->rx_packets++;
	if (is_multicast_ether_addr(hdr->addr1))
		rx->local->dot11MulticastReceivedFrameCount++;
	else
		ieee80211_led_rx(rx->local);
	return RX_CONTINUE;
}
1091
1092 static ieee80211_rx_result debug_noinline
1093 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1094 {
1095 struct ieee80211_sub_if_data *sdata = rx->sdata;
1096 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1097
1098 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1099 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1100 return RX_CONTINUE;
1101
1102 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1103 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1104 return RX_DROP_UNUSABLE;
1105
1106 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1107 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1108 else
1109 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1110
1111 /* Free PS Poll skb here instead of returning RX_DROP that would
1112 * count as an dropped frame. */
1113 dev_kfree_skb(rx->skb);
1114
1115 return RX_QUEUED;
1116 }
1117
1118 static ieee80211_rx_result debug_noinline
1119 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1120 {
1121 u8 *data = rx->skb->data;
1122 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1123
1124 if (!ieee80211_is_data_qos(hdr->frame_control))
1125 return RX_CONTINUE;
1126
1127 /* remove the qos control field, update frame type and meta-data */
1128 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1129 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1130 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1131 /* change frame type to non QOS */
1132 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1133
1134 return RX_CONTINUE;
1135 }
1136
1137 static int
1138 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1139 {
1140 if (unlikely(!rx->sta ||
1141 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1142 return -EACCES;
1143
1144 return 0;
1145 }
1146
1147 static int
1148 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1149 {
1150 struct sk_buff *skb = rx->skb;
1151 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1152
1153 /*
1154 * Pass through unencrypted frames if the hardware has
1155 * decrypted them already.
1156 */
1157 if (status->flag & RX_FLAG_DECRYPTED)
1158 return 0;
1159
1160 /* Drop unencrypted frames if key is set. */
1161 if (unlikely(!ieee80211_has_protected(fc) &&
1162 !ieee80211_is_nullfunc(fc) &&
1163 ieee80211_is_data(fc) &&
1164 (rx->key || rx->sdata->drop_unencrypted)))
1165 return -EACCES;
1166 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1167 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1168 rx->key))
1169 return -EACCES;
1170 /* BIP does not use Protected field, so need to check MMIE */
1171 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
1172 && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
1173 rx->key))
1174 return -EACCES;
1175 /*
1176 * When using MFP, Action frames are not allowed prior to
1177 * having configured keys.
1178 */
1179 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1180 ieee80211_is_robust_mgmt_frame(
1181 (struct ieee80211_hdr *) rx->skb->data)))
1182 return -EACCES;
1183 }
1184
1185 return 0;
1186 }
1187
1188 static int
1189 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1190 {
1191 struct ieee80211_sub_if_data *sdata = rx->sdata;
1192 struct net_device *dev = sdata->dev;
1193 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1194
1195 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->use_4addr &&
1196 ieee80211_has_a4(hdr->frame_control))
1197 return -1;
1198 if (sdata->use_4addr && is_multicast_ether_addr(hdr->addr1))
1199 return -1;
1200
1201 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
1202 }
1203
1204 /*
1205 * requires that rx->skb is a frame with ethernet header
1206 */
1207 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1208 {
1209 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1210 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1211 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1212
1213 /*
1214 * Allow EAPOL frames to us/the PAE group address regardless
1215 * of whether the frame was encrypted or not.
1216 */
1217 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1218 (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 ||
1219 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1220 return true;
1221
1222 if (ieee80211_802_1x_port_control(rx) ||
1223 ieee80211_drop_unencrypted(rx, fc))
1224 return false;
1225
1226 return true;
1227 }
1228
1229 /*
1230 * requires that rx->skb is a frame with ethernet header
1231 */
/*
 * Deliver an 802.3-framed skb: pass it to the local network stack
 * and/or, on AP interfaces, bridge it back onto the wireless medium
 * towards another associated station.
 */
static void
ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct net_device *dev = sdata->dev;
	struct ieee80211_local *local = rx->local;
	struct sk_buff *skb, *xmit_skb;
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
	struct sta_info *dsta;

	skb = rx->skb;
	xmit_skb = NULL;

	/*
	 * AP/AP-VLAN interfaces bridge frames between associated stations
	 * unless bridging is disabled; 4-addr mode is excluded.
	 */
	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
	    (rx->flags & IEEE80211_RX_RA_MATCH) && !rx->sdata->use_4addr) {
		if (is_multicast_ether_addr(ehdr->h_dest)) {
			/*
			 * send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
			if (!xmit_skb && net_ratelimit())
				printk(KERN_DEBUG "%s: failed to clone "
				       "multicast frame\n", dev->name);
		} else {
			/* skb->data starts with the ethernet header, so
			 * this looks the station up by h_dest */
			dsta = sta_info_get(local, skb->data);
			if (dsta && dsta->sdata->dev == dev) {
				/*
				 * The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}

	if (skb) {
		int align __maybe_unused;

#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
		/*
		 * 'align' will only take the values 0 or 2 here
		 * since all frames are required to be aligned
		 * to 2-byte boundaries when being passed to
		 * mac80211. That also explains the manual
		 * skb->data adjustment below.
		 */
		align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
		if (align) {
			if (WARN_ON(skb_headroom(skb) < 3)) {
				dev_kfree_skb(skb);
				skb = NULL;
			} else {
				u8 *data = skb->data;
				size_t len = skb_headlen(skb);
				skb->data -= align;
				memmove(skb->data, data, len);
				skb_set_tail_pointer(skb, len);
			}
		}
#endif

		if (skb) {
			/* deliver to local stack */
			skb->protocol = eth_type_trans(skb, dev);
			memset(skb->cb, 0, sizeof(skb->cb));
			netif_rx(skb);
		}
	}

	if (xmit_skb) {
		/* send to wireless media */
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		dev_queue_xmit(xmit_skb);
	}
}
1315
/*
 * Split an A-MSDU (aggregated MSDU) into its individual subframes and
 * deliver each one.  The skb is first converted to 802.3 framing; each
 * subframe consists of an ethernet-style header (with the subframe
 * length in h_proto), payload, and padding to a 4-byte boundary for
 * every subframe except the last.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
	struct net_device *dev = rx->sdata->dev;
	struct ieee80211_local *local = rx->local;
	u16 ethertype;
	u8 *payload;
	struct sk_buff *skb = rx->skb, *frame = NULL;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	const struct ethhdr *eth;
	int remaining, err;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];

	if (unlikely(!ieee80211_is_data(fc)))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data_present(fc)))
		return RX_DROP_MONITOR;

	if (!(rx->flags & IEEE80211_RX_AMSDU))
		return RX_CONTINUE;

	err = __ieee80211_data_to_8023(rx);
	if (unlikely(err))
		return RX_DROP_UNUSABLE;

	skb->dev = dev;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* skip the wrapping header */
	eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
	if (!eth)
		return RX_DROP_UNUSABLE;

	/* loop ends once the reused skb has been assigned to 'frame' */
	while (skb != frame) {
		u8 padding;
		__be16 len = eth->h_proto;
		unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);

		remaining = skb->len;
		memcpy(dst, eth->h_dest, ETH_ALEN);
		memcpy(src, eth->h_source, ETH_ALEN);

		padding = ((4 - subframe_len) & 0x3);
		/* the last MSDU has no padding */
		if (subframe_len > remaining)
			return RX_DROP_UNUSABLE;

		skb_pull(skb, sizeof(struct ethhdr));
		/* if last subframe reuse skb */
		if (remaining <= subframe_len + padding)
			frame = skb;
		else {
			/*
			 * Allocate and reserve two bytes more for payload
			 * alignment since sizeof(struct ethhdr) is 14.
			 */
			frame = dev_alloc_skb(
				ALIGN(local->hw.extra_tx_headroom, 4) +
				subframe_len + 2);

			if (frame == NULL)
				return RX_DROP_UNUSABLE;

			skb_reserve(frame,
				    ALIGN(local->hw.extra_tx_headroom, 4) +
				    sizeof(struct ethhdr) + 2);
			memcpy(skb_put(frame, ntohs(len)), skb->data,
			       ntohs(len));

			/* advance to the next subframe header */
			eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
							 padding);
			if (!eth) {
				dev_kfree_skb(frame);
				return RX_DROP_UNUSABLE;
			}
		}

		skb_reset_network_header(frame);
		frame->dev = dev;
		frame->priority = skb->priority;
		rx->skb = frame;

		payload = frame->data;
		ethertype = (payload[6] << 8) | payload[7];

		if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
			    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
			   compare_ether_addr(payload,
					      bridge_tunnel_header) == 0)) {
			/* remove RFC1042 or Bridge-Tunnel
			 * encapsulation and replace EtherType */
			skb_pull(frame, 6);
			memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
			memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
		} else {
			/* keep the length field as an 802.3 header */
			memcpy(skb_push(frame, sizeof(__be16)),
			       &len, sizeof(__be16));
			memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
			memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
		}

		if (!ieee80211_frame_allowed(rx, fc)) {
			if (skb == frame) /* last frame */
				return RX_DROP_UNUSABLE;
			dev_kfree_skb(frame);
			continue;
		}

		ieee80211_deliver_skb(rx);
	}

	return RX_QUEUED;
}
1434
1435 #ifdef CONFIG_MAC80211_MESH
1436 static ieee80211_rx_result
1437 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1438 {
1439 struct ieee80211_hdr *hdr;
1440 struct ieee80211s_hdr *mesh_hdr;
1441 unsigned int hdrlen;
1442 struct sk_buff *skb = rx->skb, *fwd_skb;
1443 struct ieee80211_local *local = rx->local;
1444 struct ieee80211_sub_if_data *sdata = rx->sdata;
1445
1446 hdr = (struct ieee80211_hdr *) skb->data;
1447 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1448 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1449
1450 if (!ieee80211_is_data(hdr->frame_control))
1451 return RX_CONTINUE;
1452
1453 if (!mesh_hdr->ttl)
1454 /* illegal frame */
1455 return RX_DROP_MONITOR;
1456
1457 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1458 struct mesh_path *mppath;
1459 char *proxied_addr;
1460 char *mpp_addr;
1461
1462 if (is_multicast_ether_addr(hdr->addr1)) {
1463 mpp_addr = hdr->addr3;
1464 proxied_addr = mesh_hdr->eaddr1;
1465 } else {
1466 mpp_addr = hdr->addr4;
1467 proxied_addr = mesh_hdr->eaddr2;
1468 }
1469
1470 rcu_read_lock();
1471 mppath = mpp_path_lookup(proxied_addr, sdata);
1472 if (!mppath) {
1473 mpp_path_add(proxied_addr, mpp_addr, sdata);
1474 } else {
1475 spin_lock_bh(&mppath->state_lock);
1476 mppath->exp_time = jiffies;
1477 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1478 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1479 spin_unlock_bh(&mppath->state_lock);
1480 }
1481 rcu_read_unlock();
1482 }
1483
1484 /* Frame has reached destination. Don't forward */
1485 if (!is_multicast_ether_addr(hdr->addr1) &&
1486 compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0)
1487 return RX_CONTINUE;
1488
1489 mesh_hdr->ttl--;
1490
1491 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1492 if (!mesh_hdr->ttl)
1493 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1494 dropped_frames_ttl);
1495 else {
1496 struct ieee80211_hdr *fwd_hdr;
1497 struct ieee80211_tx_info *info;
1498
1499 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1500
1501 if (!fwd_skb && net_ratelimit())
1502 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1503 sdata->dev->name);
1504
1505 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1506 memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN);
1507 info = IEEE80211_SKB_CB(fwd_skb);
1508 memset(info, 0, sizeof(*info));
1509 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1510 info->control.vif = &rx->sdata->vif;
1511 ieee80211_select_queue(local, fwd_skb);
1512 if (is_multicast_ether_addr(fwd_hdr->addr1))
1513 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1514 fwded_mcast);
1515 else {
1516 int err;
1517 /*
1518 * Save TA to addr1 to send TA a path error if a
1519 * suitable next hop is not found
1520 */
1521 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1522 ETH_ALEN);
1523 err = mesh_nexthop_lookup(fwd_skb, sdata);
1524 /* Failed to immediately resolve next hop:
1525 * fwded frame was dropped or will be added
1526 * later to the pending skb queue. */
1527 if (err)
1528 return RX_DROP_MONITOR;
1529
1530 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1531 fwded_unicast);
1532 }
1533 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1534 fwded_frames);
1535 ieee80211_add_pending_skb(local, fwd_skb);
1536 }
1537 }
1538
1539 if (is_multicast_ether_addr(hdr->addr1) ||
1540 sdata->dev->flags & IFF_PROMISC)
1541 return RX_CONTINUE;
1542 else
1543 return RX_DROP_MONITOR;
1544 }
1545 #endif
1546
1547 static ieee80211_rx_result debug_noinline
1548 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1549 {
1550 struct ieee80211_sub_if_data *sdata = rx->sdata;
1551 struct net_device *dev = sdata->dev;
1552 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1553 __le16 fc = hdr->frame_control;
1554 int err;
1555
1556 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1557 return RX_CONTINUE;
1558
1559 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1560 return RX_DROP_MONITOR;
1561
1562 /*
1563 * Allow the cooked monitor interface of an AP to see 4-addr frames so
1564 * that a 4-addr station can be detected and moved into a separate VLAN
1565 */
1566 if (ieee80211_has_a4(hdr->frame_control) &&
1567 sdata->vif.type == NL80211_IFTYPE_AP)
1568 return RX_DROP_MONITOR;
1569
1570 err = __ieee80211_data_to_8023(rx);
1571 if (unlikely(err))
1572 return RX_DROP_UNUSABLE;
1573
1574 if (!ieee80211_frame_allowed(rx, fc))
1575 return RX_DROP_MONITOR;
1576
1577 rx->skb->dev = dev;
1578
1579 dev->stats.rx_packets++;
1580 dev->stats.rx_bytes += rx->skb->len;
1581
1582 ieee80211_deliver_skb(rx);
1583
1584 return RX_QUEUED;
1585 }
1586
/*
 * Handle control frames.  Only Block Ack Requests (BAR) are processed
 * here: frames buffered in the reorder window are released up to the
 * BAR's starting sequence number and the aggregation session timer is
 * refreshed.  All other control frames pass through unchanged.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 start_seq_num;
	u16 tid;

	if (likely(!ieee80211_is_ctl(bar->frame_control)))
		return RX_CONTINUE;

	if (ieee80211_is_back_req(bar->frame_control)) {
		if (!rx->sta)
			return RX_DROP_MONITOR;
		/* the TID sits in the top 4 bits of the BAR control field */
		tid = le16_to_cpu(bar->control) >> 12;
		/* ignore BARs for TIDs without an active RX aggregation session */
		if (rx->sta->ampdu_mlme.tid_state_rx[tid]
					!= HT_AGG_STATE_OPERATIONAL)
			return RX_DROP_MONITOR;
		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];

		/* sequence number occupies the upper 12 bits */
		start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;

		/* reset session timer */
		if (tid_agg_rx->timeout)
			mod_timer(&tid_agg_rx->session_timer,
				  TU_TO_EXP_TIME(tid_agg_rx->timeout));

		/* release stored frames up to start of BAR */
		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
		kfree_skb(skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
}
1625
1626 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1627 struct ieee80211_mgmt *mgmt,
1628 size_t len)
1629 {
1630 struct ieee80211_local *local = sdata->local;
1631 struct sk_buff *skb;
1632 struct ieee80211_mgmt *resp;
1633
1634 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
1635 /* Not to own unicast address */
1636 return;
1637 }
1638
1639 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1640 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1641 /* Not from the current AP or not associated yet. */
1642 return;
1643 }
1644
1645 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1646 /* Too short SA Query request frame */
1647 return;
1648 }
1649
1650 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1651 if (skb == NULL)
1652 return;
1653
1654 skb_reserve(skb, local->hw.extra_tx_headroom);
1655 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1656 memset(resp, 0, 24);
1657 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1658 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
1659 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1660 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1661 IEEE80211_STYPE_ACTION);
1662 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1663 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1664 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1665 memcpy(resp->u.action.u.sa_query.trans_id,
1666 mgmt->u.action.u.sa_query.trans_id,
1667 WLAN_SA_QUERY_TR_ID_LEN);
1668
1669 ieee80211_tx_skb(sdata, skb);
1670 }
1671
/*
 * Handle Action management frames for the categories mac80211 itself
 * implements (Block Ack, spectrum management, SA Query).  Unknown
 * categories fall through as RX_CONTINUE so later handlers and
 * userspace can still see them.
 */
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
	int len = rx->skb->len;

	if (!ieee80211_is_action(mgmt->frame_control))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_DROP_MONITOR;

	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
		return RX_DROP_MONITOR;

	/* enforce MFP/encryption policy before acting on the frame */
	if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
		return RX_DROP_MONITOR;

	/* all categories we currently handle have action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
		return RX_DROP_MONITOR;

	switch (mgmt->u.action.category) {
	case WLAN_CATEGORY_BACK:
		/*
		 * The aggregation code is not prepared to handle
		 * anything but STA/AP due to the BSSID handling;
		 * IBSS could work in the code but isn't supported
		 * by drivers or the standard.
		 */
		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_AP)
			return RX_DROP_MONITOR;

		/* each action code imposes its own minimum body length */
		switch (mgmt->u.action.u.addba_req.action_code) {
		case WLAN_ACTION_ADDBA_REQ:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.addba_req)))
				return RX_DROP_MONITOR;
			ieee80211_process_addba_request(local, rx->sta, mgmt, len);
			break;
		case WLAN_ACTION_ADDBA_RESP:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.addba_resp)))
				return RX_DROP_MONITOR;
			ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
			break;
		case WLAN_ACTION_DELBA:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.delba)))
				return RX_DROP_MONITOR;
			ieee80211_process_delba(sdata, rx->sta, mgmt, len);
			break;
		}
		break;
	case WLAN_CATEGORY_SPECTRUM_MGMT:
		/* spectrum management is only handled on the 5 GHz band */
		if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
			return RX_DROP_MONITOR;

		if (sdata->vif.type != NL80211_IFTYPE_STATION)
			return RX_DROP_MONITOR;

		switch (mgmt->u.action.u.measurement.action_code) {
		case WLAN_ACTION_SPCT_MSR_REQ:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.measurement)))
				return RX_DROP_MONITOR;
			ieee80211_process_measurement_req(sdata, mgmt, len);
			break;
		case WLAN_ACTION_SPCT_CHL_SWITCH:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
				   sizeof(mgmt->u.action.u.chan_switch)))
				return RX_DROP_MONITOR;

			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				return RX_DROP_MONITOR;

			/* only accept a channel switch from our own AP */
			if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
				return RX_DROP_MONITOR;

			return ieee80211_sta_rx_mgmt(sdata, rx->skb);
		}
		break;
	case WLAN_CATEGORY_SA_QUERY:
		if (len < (IEEE80211_MIN_ACTION_SIZE +
			   sizeof(mgmt->u.action.u.sa_query)))
			return RX_DROP_MONITOR;
		switch (mgmt->u.action.u.sa_query.action) {
		case WLAN_ACTION_SA_QUERY_REQUEST:
			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				return RX_DROP_MONITOR;
			ieee80211_process_sa_query_req(sdata, mgmt, len);
			break;
		case WLAN_ACTION_SA_QUERY_RESPONSE:
			/*
			 * SA Query response is currently only used in AP mode
			 * and it is processed in user space.
			 */
			return RX_CONTINUE;
		}
		break;
	default:
		return RX_CONTINUE;
	}

	/* the frame was handled here: account for it and consume the skb */
	rx->sta->rx_packets++;
	dev_kfree_skb(rx->skb);
	return RX_QUEUED;
}
1784
1785 static ieee80211_rx_result debug_noinline
1786 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1787 {
1788 struct ieee80211_sub_if_data *sdata = rx->sdata;
1789 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1790
1791 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1792 return RX_DROP_MONITOR;
1793
1794 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
1795 return RX_DROP_MONITOR;
1796
1797 if (ieee80211_vif_is_mesh(&sdata->vif))
1798 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
1799
1800 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1801 return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
1802
1803 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1804 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1805
1806 return RX_DROP_MONITOR;
1807 }
1808
1809 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
1810 struct ieee80211_rx_data *rx)
1811 {
1812 int keyidx;
1813 unsigned int hdrlen;
1814
1815 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1816 if (rx->skb->len >= hdrlen + 4)
1817 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1818 else
1819 keyidx = -1;
1820
1821 if (!rx->sta) {
1822 /*
1823 * Some hardware seem to generate incorrect Michael MIC
1824 * reports; ignore them to avoid triggering countermeasures.
1825 */
1826 return;
1827 }
1828
1829 if (!ieee80211_has_protected(hdr->frame_control))
1830 return;
1831
1832 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1833 /*
1834 * APs with pairwise keys should never receive Michael MIC
1835 * errors for non-zero keyidx because these are reserved for
1836 * group keys and only the AP is sending real multicast
1837 * frames in the BSS.
1838 */
1839 return;
1840 }
1841
1842 if (!ieee80211_is_data(hdr->frame_control) &&
1843 !ieee80211_is_auth(hdr->frame_control))
1844 return;
1845
1846 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
1847 GFP_ATOMIC);
1848 }
1849
/* TODO: use IEEE80211_RX_FRAGMENTED */
/*
 * Report a frame on all "cooked" monitor interfaces: prepend a minimal
 * radiotap header (flags, optional rate, channel) and hand a copy to
 * every monitor netdev with MONITOR_FLAG_COOK_FRAMES set.  The frame
 * is reported at most once (IEEE80211_RX_CMNTR_REPORTED).
 */
static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
					struct ieee80211_rate *rate)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_rtap_hdr {
		struct ieee80211_radiotap_header hdr;
		u8 flags;
		u8 rate_or_pad;
		__le16 chan_freq;
		__le16 chan_flags;
	} __attribute__ ((packed)) *rthdr;
	struct sk_buff *skb = rx->skb, *skb2;
	struct net_device *prev_dev = NULL;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
		goto out_free_skb;

	/* make sure there is headroom for the radiotap header */
	if (skb_headroom(skb) < sizeof(*rthdr) &&
	    pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
		goto out_free_skb;

	rthdr = (void *)skb_push(skb, sizeof(*rthdr));
	memset(rthdr, 0, sizeof(*rthdr));
	rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
	rthdr->hdr.it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_CHANNEL));

	if (rate) {
		/* radiotap rate unit is 500 kbit/s; bitrate is 100 kbit/s */
		rthdr->rate_or_pad = rate->bitrate / 5;
		rthdr->hdr.it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
	}
	rthdr->chan_freq = cpu_to_le16(status->freq);

	if (status->band == IEEE80211_BAND_5GHZ)
		rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
						IEEE80211_CHAN_5GHZ);
	else
		rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
						IEEE80211_CHAN_2GHZ);

	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	/* clone for every monitor after the first; the original skb is
	 * handed to the last matching interface below */
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!netif_running(sdata->dev))
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
		    !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_rx(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	} else
		goto out_free_skb;

	rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
	return;

 out_free_skb:
	dev_kfree_skb(skb);
}
1934
1935
/*
 * Run the RX handler pipeline on one frame for one interface.  Each
 * handler either passes the frame on (RX_CONTINUE) or terminates the
 * pipeline with a result that decides the frame's fate below.
 */
static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
					 struct ieee80211_rx_data *rx,
					 struct sk_buff *skb,
					 struct ieee80211_rate *rate)
{
	ieee80211_rx_result res = RX_DROP_MONITOR;

	rx->skb = skb;
	rx->sdata = sdata;

/*
 * Note the trailing semicolon inside the macro: it lets the
 * invocations below omit their own, so the mesh call (which does
 * have one) still expands correctly inside its if-statement.
 */
#define CALL_RXH(rxh) \
	do { \
		res = rxh(rx); \
		if (res != RX_CONTINUE) \
			goto rxh_done; \
	} while (0);

	CALL_RXH(ieee80211_rx_h_passive_scan)
	CALL_RXH(ieee80211_rx_h_check)
	CALL_RXH(ieee80211_rx_h_decrypt)
	CALL_RXH(ieee80211_rx_h_check_more_data)
	CALL_RXH(ieee80211_rx_h_sta_process)
	CALL_RXH(ieee80211_rx_h_defragment)
	CALL_RXH(ieee80211_rx_h_ps_poll)
	CALL_RXH(ieee80211_rx_h_michael_mic_verify)
	/* must be after MMIC verify so header is counted in MPDU mic */
	CALL_RXH(ieee80211_rx_h_remove_qos_control)
	CALL_RXH(ieee80211_rx_h_amsdu)
#ifdef CONFIG_MAC80211_MESH
	if (ieee80211_vif_is_mesh(&sdata->vif))
		CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
	CALL_RXH(ieee80211_rx_h_data)
	CALL_RXH(ieee80211_rx_h_ctrl)
	CALL_RXH(ieee80211_rx_h_action)
	CALL_RXH(ieee80211_rx_h_mgmt)

#undef CALL_RXH

 rxh_done:
	switch (res) {
	case RX_DROP_MONITOR:
		I802_DEBUG_INC(sdata->local->rx_handlers_drop);
		if (rx->sta)
			rx->sta->rx_dropped++;
		/* fall through */
	case RX_CONTINUE:
		/* dropped-to-monitor and unhandled frames go to cooked monitor */
		ieee80211_rx_cooked_monitor(rx, rate);
		break;
	case RX_DROP_UNUSABLE:
		I802_DEBUG_INC(sdata->local->rx_handlers_drop);
		if (rx->sta)
			rx->sta->rx_dropped++;
		dev_kfree_skb(rx->skb);
		break;
	case RX_QUEUED:
		/* a handler consumed the skb; nothing left to free */
		I802_DEBUG_INC(sdata->local->rx_handlers_queued);
		break;
	}
}
1996
1997 /* main receive path */
1998
/*
 * Decide whether this interface should process the frame at all.
 *
 * Returns 0 to skip the interface, 1 to proceed.  When the frame is
 * accepted only because of promiscuous mode or an active scan, the
 * IEEE80211_RX_RA_MATCH flag is cleared so later handlers treat it
 * as not addressed to us.
 */
static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
				struct ieee80211_rx_data *rx,
				struct ieee80211_hdr *hdr)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
	int multicast = is_multicast_ether_addr(hdr->addr1);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		if (!bssid && !sdata->use_4addr)
			return 0;
		if (!multicast &&
		    compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) {
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;
			rx->flags &= ~IEEE80211_RX_RA_MATCH;
		}
		break;
	case NL80211_IFTYPE_ADHOC:
		if (!bssid)
			return 0;
		if (ieee80211_is_beacon(hdr->frame_control)) {
			/* beacons are always interesting for IBSS merging */
			return 1;
		}
		else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
			if (!(rx->flags & IEEE80211_RX_IN_SCAN))
				return 0;
			rx->flags &= ~IEEE80211_RX_RA_MATCH;
		} else if (!multicast &&
			   compare_ether_addr(sdata->dev->dev_addr,
					      hdr->addr1) != 0) {
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;
			rx->flags &= ~IEEE80211_RX_RA_MATCH;
		} else if (!rx->sta) {
			/* create a station entry for a new IBSS peer */
			int rate_idx;
			if (status->flag & RX_FLAG_HT)
				rate_idx = 0; /* TODO: HT rates */
			else
				rate_idx = status->rate_idx;
			rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
							 BIT(rate_idx));
		}
		break;
	case NL80211_IFTYPE_MESH_POINT:
		if (!multicast &&
		    compare_ether_addr(sdata->dev->dev_addr,
				       hdr->addr1) != 0) {
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;

			rx->flags &= ~IEEE80211_RX_RA_MATCH;
		}
		break;
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_AP:
		if (!bssid) {
			if (compare_ether_addr(sdata->dev->dev_addr,
					       hdr->addr1))
				return 0;
		} else if (!ieee80211_bssid_match(bssid,
					sdata->dev->dev_addr)) {
			if (!(rx->flags & IEEE80211_RX_IN_SCAN))
				return 0;
			rx->flags &= ~IEEE80211_RX_RA_MATCH;
		}
		break;
	case NL80211_IFTYPE_WDS:
		/* WDS accepts only data frames from its configured peer */
		if (bssid || !ieee80211_is_data(hdr->frame_control))
			return 0;
		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
			return 0;
		break;
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_UNSPECIFIED:
	case __NL80211_IFTYPE_AFTER_LAST:
		/* should never get here */
		WARN_ON(1);
		break;
	}

	return 1;
}
2084
/*
 * This is the actual Rx frames handler. As it belongs to the Rx path it
 * must be called with rcu_read_lock protection.
 */
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
					 struct sk_buff *skb,
					 struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_data rx;
	int prepares;
	struct ieee80211_sub_if_data *prev = NULL;
	struct sk_buff *skb_new;

	hdr = (struct ieee80211_hdr *)skb->data;
	memset(&rx, 0, sizeof(rx));
	rx.skb = skb;
	rx.local = local;

	/* data and management frames count towards the MIB fragment counter */
	if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
		local->dot11ReceivedFragmentCount++;

	if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
		     test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
		rx.flags |= IEEE80211_RX_IN_SCAN;

	ieee80211_parse_qos(&rx);
	ieee80211_verify_alignment(&rx);

	/* look the transmitter (addr2) up in the station table */
	rx.sta = sta_info_get(local, hdr->addr2);
	if (rx.sta)
		rx.sdata = rx.sta->sdata;

	if (rx.sdata && ieee80211_is_data(hdr->frame_control)) {
		/*
		 * Fast path: a data frame from a known station only concerns
		 * the interface that station is attached to.
		 */
		rx.flags |= IEEE80211_RX_RA_MATCH;
		prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
		if (prepares) {
			if (status->flag & RX_FLAG_MMIC_ERROR) {
				/* Michael MIC failure: report it instead of
				 * delivering the frame */
				if (rx.flags & IEEE80211_RX_RA_MATCH)
					ieee80211_rx_michael_mic_report(hdr, &rx);
			} else
				prev = rx.sdata;
		}
	} else list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* slow path: offer the frame to every running interface
		 * (monitor and AP-VLAN interfaces never take frames here) */
		if (!netif_running(sdata->dev))
			continue;

		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			continue;

		rx.flags |= IEEE80211_RX_RA_MATCH;
		prepares = prepare_for_handlers(sdata, &rx, hdr);

		if (!prepares)
			continue;

		if (status->flag & RX_FLAG_MMIC_ERROR) {
			rx.sdata = sdata;
			if (rx.flags & IEEE80211_RX_RA_MATCH)
				ieee80211_rx_michael_mic_report(hdr, &rx);
			continue;
		}

		/*
		 * frame is destined for this interface, but if it's not
		 * also for the previous one we handle that after the
		 * loop to avoid copying the SKB once too much
		 */

		if (!prev) {
			prev = sdata;
			continue;
		}

		/*
		 * frame was destined for the previous interface
		 * so invoke RX handlers for it
		 */

		skb_new = skb_copy(skb, GFP_ATOMIC);
		if (!skb_new) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: failed to copy "
				       "multicast frame for %s\n",
				       wiphy_name(local->hw.wiphy),
				       prev->dev->name);
			continue;
		}
		ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
		prev = sdata;
	}
	/* the last matching interface consumes the original skb; if no
	 * interface wanted the frame, free it here */
	if (prev)
		ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
	else
		dev_kfree_skb(skb);
}
2185
2186 #define SEQ_MODULO 0x1000
2187 #define SEQ_MASK 0xfff
2188
2189 static inline int seq_less(u16 sq1, u16 sq2)
2190 {
2191 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
2192 }
2193
2194 static inline u16 seq_inc(u16 sq)
2195 {
2196 return (sq + 1) & SEQ_MASK;
2197 }
2198
2199 static inline u16 seq_sub(u16 sq1, u16 sq2)
2200 {
2201 return (sq1 - sq2) & SEQ_MASK;
2202 }
2203
2204
/*
 * Release the frame stored at @index in the reorder buffer (if any) to
 * the RX path and advance the reorder window head by one; the head is
 * advanced even when the slot is empty.
 */
static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate = NULL;
	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
	struct ieee80211_rx_status *status;

	if (!skb)
		goto no_frame;

	status = IEEE80211_SKB_RXCB(skb);

	/* release the reordered frames to stack */
	sband = hw->wiphy->bands[status->band];
	/* rate_idx indexes the band's bitrate table only for non-HT frames;
	 * for HT frames the rate stays NULL */
	if (!(status->flag & RX_FLAG_HT))
		rate = &sband->bitrates[status->rate_idx];
	__ieee80211_rx_handle_packet(hw, skb, rate);
	tid_agg_rx->stored_mpdu_num--;
	tid_agg_rx->reorder_buf[index] = NULL;

 no_frame:
	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
}
2230
2231 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
2232 struct tid_ampdu_rx *tid_agg_rx,
2233 u16 head_seq_num)
2234 {
2235 int index;
2236
2237 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2238 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
2239 tid_agg_rx->buf_size;
2240 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
2241 }
2242 }
2243
2244 /*
2245 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
2246 * the skb was added to the buffer longer than this time ago, the earlier
2247 * frames that have not yet been received are assumed to be lost and the skb
2248 * can be released for processing. This may also release other skb's from the
2249 * reorder buffer if there are no additional gaps between the frames.
2250 */
2251 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
2252
2253 /*
2254 * As this function belongs to the RX path it must be under
2255 * rcu_read_lock protection. It returns false if the frame
2256 * can be processed immediately, true if it was consumed.
2257 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	/* 12-bit sequence number from the sequence control field */
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number: drop as duplicate/stale */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		return true;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;

	/* check if we already stored this frame: drop the duplicate */
	if (tid_agg_rx->reorder_buf[index]) {
		dev_kfree_skb(skb);
		return true;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
		return false;
	}

	/* put the frame in the reordering buffer */
	tid_agg_rx->reorder_buf[index] = skb;
	tid_agg_rx->reorder_time[index] = jiffies;
	tid_agg_rx->stored_mpdu_num++;
	/* release the buffer until next missing frame */
	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
						tid_agg_rx->buf_size;
	if (!tid_agg_rx->reorder_buf[index] &&
	    tid_agg_rx->stored_mpdu_num > 1) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int j;
		/* starts at 1: the head slot itself is known to be empty */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!tid_agg_rx->reorder_buf[j]) {
				skipped++;
				continue;
			}
			/* stop at the first buffered frame that has not yet
			 * timed out */
			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				break;

#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: release an RX reorder "
					"frame due to timeout on earlier "
					"frames\n",
					wiphy_name(hw->wiphy));
#endif
			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
			skipped = 0;
		}
	} else while (tid_agg_rx->reorder_buf[index]) {
		/* the head slot is filled: release the now-contiguous run
		 * of frames starting at the window head */
		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
		index =	seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
							tid_agg_rx->buf_size;
	}

	/* the skb was consumed (buffered, released, or dropped) */
	return true;
}
2356
2357 /*
2358 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
2359 * true if the MPDU was buffered, false if it should be processed.
2360 */
static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
				       struct sk_buff *skb)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	int tid;

	/* only QoS data frames can be part of an aggregation session */
	if (!ieee80211_is_data_qos(hdr->frame_control))
		return false;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	/* look the transmitter (addr2) up in the station table */
	sta = sta_info_get(local, hdr->addr2);
	if (!sta)
		return false;

	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;

	/* no operational RX BA session for this STA/TID: process normally */
	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
		return false;

	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];

	/* qos null data frames are excluded (bitwise subtype-bit test;
	 * the frame is already known to be QoS data here) */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		return false;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		mod_timer(&tid_agg_rx->session_timer,
			  TU_TO_EXP_TIME(tid_agg_rx->timeout));

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
			tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
		dev_kfree_skb(skb);
		return true;
	}

	/* hand the frame to the reorder buffer; true means it was consumed */
	return ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb);
}
2412
2413 /*
2414 * This is the receive path handler. It is called by a low level driver when an
2415 * 802.11 MPDU is received from the hardware.
2416 */
void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/* must be called in softirq (BH) context, not hard irq —
	 * drivers in irq context use ieee80211_rx_irqsafe() instead */
	WARN_ON_ONCE(softirq_count() == 0);

	/* sanity-check the driver-supplied rx status before using it */
	if (WARN_ON(status->band < 0 ||
		    status->band >= IEEE80211_NUM_BANDS))
		goto drop;

	sband = local->hw.wiphy->bands[status->band];
	if (WARN_ON(!sband))
		goto drop;

	/*
	 * If we're suspending, it is possible although not too likely
	 * that we'd be receiving frames after having already partially
	 * quiesced the stack. We can't process such frames then since
	 * that might, for example, cause stations to be added or other
	 * driver callbacks be invoked.
	 */
	if (unlikely(local->quiescing || local->suspended))
		goto drop;

	/*
	 * The same happens when we're not even started,
	 * but that's worth a warning.
	 */
	if (WARN_ON(!local->started))
		goto drop;

	if (status->flag & RX_FLAG_HT) {
		/*
		 * rate_idx is MCS index, which can be [0-76] as documented on:
		 *
		 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
		 *
		 * Anything else would be some sort of driver or hardware error.
		 * The driver should catch hardware errors.
		 */
		if (WARN((status->rate_idx < 0 ||
			  status->rate_idx > 76),
			 "Rate marked as an HT rate but passed "
			 "status->rate_idx is not "
			 "an MCS index [0-76]: %d (0x%02x)\n",
			 status->rate_idx,
			 status->rate_idx))
			goto drop;
	} else {
		/* legacy rate: index must point into the band's bitrate table */
		if (WARN_ON(status->rate_idx < 0 ||
			    status->rate_idx >= sband->n_bitrates))
			goto drop;
		rate = &sband->bitrates[status->rate_idx];
	}

	/*
	 * key references and virtual interfaces are protected using RCU
	 * and this requires that we are in a read-side RCU section during
	 * receive processing
	 */
	rcu_read_lock();

	/*
	 * Frames with failed FCS/PLCP checksum are not returned,
	 * all other frames are returned without radiotap header
	 * if it was previously present.
	 * Also, frames with less than 16 bytes are dropped.
	 */
	skb = ieee80211_rx_monitor(local, skb, rate);
	if (!skb) {
		/* skb was consumed by the monitor path */
		rcu_read_unlock();
		return;
	}

	/*
	 * In theory, the block ack reordering should happen after duplicate
	 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
	 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
	 * happen as a new RX handler between ieee80211_rx_h_check and
	 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
	 * the time being, the call can be here since RX reorder buf processing
	 * will implicitly skip duplicates. We could, in theory at least,
	 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
	 * frames from other than operational channel), but that should not
	 * happen in normal networks.
	 */
	if (!ieee80211_rx_reorder_ampdu(local, skb))
		__ieee80211_rx_handle_packet(hw, skb, rate);

	rcu_read_unlock();

	return;
 drop:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_rx);
2516
2517 /* This is a version of the rx handler that can be called from hard irq
2518 * context. Post the skb on the queue and schedule the tasklet */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	/* rx status travels in skb->cb; make sure it actually fits there */
	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

	/* tag the skb so the tasklet recognizes it as an RX frame */
	skb->pkt_type = IEEE80211_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);