net/mac80211/rx.c (mirror_ubuntu-zesty-kernel.git, commit: mac80211: split off mesh handling entirely)
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/skbuff.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/rcupdate.h>
18 #include <net/mac80211.h>
19 #include <net/ieee80211_radiotap.h>
20
21 #include "ieee80211_i.h"
22 #include "led.h"
23 #include "mesh.h"
24 #include "wep.h"
25 #include "wpa.h"
26 #include "tkip.h"
27 #include "wme.h"
28
29 u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
30 struct tid_ampdu_rx *tid_agg_rx,
31 struct sk_buff *skb, u16 mpdu_seq_num,
32 int bar_req);
33 /*
34 * monitor mode reception
35 *
36 * This function cleans up the SKB, i.e. it removes all the stuff
37 * only useful for monitoring.
38 */
39 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
40 struct sk_buff *skb,
41 int rtap_len)
42 {
43 skb_pull(skb, rtap_len);
44
45 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
46 if (likely(skb->len > FCS_LEN))
47 skb_trim(skb, skb->len - FCS_LEN);
48 else {
49 /* driver bug */
50 WARN_ON(1);
51 dev_kfree_skb(skb);
52 skb = NULL;
53 }
54 }
55
56 return skb;
57 }
58
59 static inline int should_drop_frame(struct ieee80211_rx_status *status,
60 struct sk_buff *skb,
61 int present_fcs_len,
62 int radiotap_len)
63 {
64 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
65
66 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
67 return 1;
68 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
69 return 1;
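/* drop control frames, except PS-Poll and BlockAck requests,
 * which later RX handlers still need to see */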
70 if (ieee80211_is_ctl(hdr->frame_control) &&
71 !ieee80211_is_pspoll(hdr->frame_control) &&
72 !ieee80211_is_back_req(hdr->frame_control))
73 return 1;
74 return 0;
75 }
76
77 static int
78 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
79 struct ieee80211_rx_status *status)
80 {
81 int len;
82
83 /* always present fields */
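/* the 9 bytes cover flags (1), rate (1), channel freq + flags (2 + 2),
 * antenna (1) and the 16-bit RX flags field */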
84 len = sizeof(struct ieee80211_radiotap_header) + 9;
85
86 if (status->flag & RX_FLAG_TSFT)
87 len += 8;
88 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB ||
89 local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
90 len += 1;
91 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
92 len += 1;
93
94 if (len & 1) /* padding for RX_FLAGS if necessary */
95 len++;
96
97 /* make sure radiotap starts at a naturally aligned address */
98 if (len % 8)
99 len = roundup(len, 8);
100
101 return len;
102 }
103
104 /**
105 * ieee80211_add_rx_radiotap_header - add radiotap header
106 *
107 * add a radiotap header containing all the fields which the hardware provided.
108 */
109 static void
110 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
111 struct sk_buff *skb,
112 struct ieee80211_rx_status *status,
113 struct ieee80211_rate *rate,
114 int rtap_len)
115 {
116 struct ieee80211_radiotap_header *rthdr;
117 unsigned char *pos;
118
119 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
120 memset(rthdr, 0, rtap_len);
121
122 /* radiotap header, set always present flags */
123 rthdr->it_present =
124 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
125 (1 << IEEE80211_RADIOTAP_RATE) |
126 (1 << IEEE80211_RADIOTAP_CHANNEL) |
127 (1 << IEEE80211_RADIOTAP_ANTENNA) |
128 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
129 rthdr->it_len = cpu_to_le16(rtap_len);
130
131 pos = (unsigned char *)(rthdr+1);
132
133 /* the order of the following fields is important */
134
135 /* IEEE80211_RADIOTAP_TSFT */
136 if (status->flag & RX_FLAG_TSFT) {
137 *(__le64 *)pos = cpu_to_le64(status->mactime);
138 rthdr->it_present |=
139 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
140 pos += 8;
141 }
142
143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 if (status->flag & RX_FLAG_SHORTPRE)
147 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
148 pos++;
149
150 /* IEEE80211_RADIOTAP_RATE */
151 *pos = rate->bitrate / 5;
152 pos++;
153
154 /* IEEE80211_RADIOTAP_CHANNEL */
155 *(__le16 *)pos = cpu_to_le16(status->freq);
156 pos += 2;
157 if (status->band == IEEE80211_BAND_5GHZ)
158 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
159 IEEE80211_CHAN_5GHZ);
160 else if (rate->flags & IEEE80211_RATE_ERP_G)
161 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
162 IEEE80211_CHAN_2GHZ);
163 else
164 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
165 IEEE80211_CHAN_2GHZ);
166 pos += 2;
167
168 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
169 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
170 *pos = status->signal;
171 rthdr->it_present |=
172 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
173 pos++;
174 }
175
176 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
177 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
178 *pos = status->noise;
179 rthdr->it_present |=
180 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
181 pos++;
182 }
183
184 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
185
186 /* IEEE80211_RADIOTAP_ANTENNA */
187 *pos = status->antenna;
188 pos++;
189
190 /* IEEE80211_RADIOTAP_DB_ANTSIGNAL */
191 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB) {
192 *pos = status->signal;
193 rthdr->it_present |=
194 cpu_to_le32(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL);
195 pos++;
196 }
197
198 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
199
200 /* IEEE80211_RADIOTAP_RX_FLAGS */
201 /* ensure 2 byte alignment for the 2 byte field as required */
202 if ((pos - (unsigned char *)rthdr) & 1)
203 pos++;
204 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
205 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
206 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
207 pos += 2;
208 }
209
210 /*
211 * This function copies a received frame to all monitor interfaces and
212 * returns a cleaned-up SKB that no longer includes the FCS nor the
213 * radiotap header the driver might have added.
214 */
215 static struct sk_buff *
216 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
217 struct ieee80211_rx_status *status,
218 struct ieee80211_rate *rate)
219 {
220 struct ieee80211_sub_if_data *sdata;
221 int needed_headroom = 0;
222 struct sk_buff *skb, *skb2;
223 struct net_device *prev_dev = NULL;
224 int present_fcs_len = 0;
225 int rtap_len = 0;
226
227 /*
228 * First, we may need to make a copy of the skb because
229 * (1) we need to modify it for radiotap (if not present), and
230 * (2) the other RX handlers will modify the skb we got.
231 *
232 * We don't need to, of course, if we aren't going to return
233 * the SKB because it has a bad FCS/PLCP checksum.
234 */
235 if (status->flag & RX_FLAG_RADIOTAP)
236 rtap_len = ieee80211_get_radiotap_len(origskb->data);
237 else
238 /* room for the radiotap header based on driver features */
239 needed_headroom = ieee80211_rx_radiotap_len(local, status);
240
241 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
242 present_fcs_len = FCS_LEN;
243
244 if (!local->monitors) {
245 if (should_drop_frame(status, origskb, present_fcs_len,
246 rtap_len)) {
247 dev_kfree_skb(origskb);
248 return NULL;
249 }
250
251 return remove_monitor_info(local, origskb, rtap_len);
252 }
253
254 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
255 /* only need to expand headroom if necessary */
256 skb = origskb;
257 origskb = NULL;
258
259 /*
260 * This shouldn't trigger often because most devices have an
261 * RX header they pull before we get here, and that should
262 * be big enough for our radiotap information. We should
263 * probably export the length to drivers so that we can have
264 * them allocate enough headroom to start with.
265 */
266 if (skb_headroom(skb) < needed_headroom &&
267 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
268 dev_kfree_skb(skb);
269 return NULL;
270 }
271 } else {
272 /*
273 * Need to make a copy and possibly remove radiotap header
274 * and FCS from the original.
275 */
276 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
277
278 origskb = remove_monitor_info(local, origskb, rtap_len);
279
280 if (!skb)
281 return origskb;
282 }
283
284 /* if necessary, prepend radiotap information */
285 if (!(status->flag & RX_FLAG_RADIOTAP))
286 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
287 needed_headroom);
288
289 skb_reset_mac_header(skb);
290 skb->ip_summed = CHECKSUM_UNNECESSARY;
291 skb->pkt_type = PACKET_OTHERHOST;
292 skb->protocol = htons(ETH_P_802_2);
293
294 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
295 if (!netif_running(sdata->dev))
296 continue;
297
298 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR)
299 continue;
300
301 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
302 continue;
303
304 if (prev_dev) {
305 skb2 = skb_clone(skb, GFP_ATOMIC);
306 if (skb2) {
307 skb2->dev = prev_dev;
308 netif_rx(skb2);
309 }
310 }
311
312 prev_dev = sdata->dev;
313 sdata->dev->stats.rx_packets++;
314 sdata->dev->stats.rx_bytes += skb->len;
315 }
316
317 if (prev_dev) {
318 skb->dev = prev_dev;
319 netif_rx(skb);
320 } else
321 dev_kfree_skb(skb);
322
323 return origskb;
324 }
325
326
327 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
328 {
329 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
330 int tid;
331
332 /* does the frame have a qos control field? */
333 if (ieee80211_is_data_qos(hdr->frame_control)) {
334 u8 *qc = ieee80211_get_qos_ctl(hdr);
335 /* frame has qos control */
336 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
337 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
338 rx->flags |= IEEE80211_RX_AMSDU;
339 else
340 rx->flags &= ~IEEE80211_RX_AMSDU;
341 } else {
342 /*
343 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
344 *
345 * Sequence numbers for management frames, QoS data
346 * frames with a broadcast/multicast address in the
347 * Address 1 field, and all non-QoS data frames sent
348 * by QoS STAs are assigned using an additional single
349 * modulo-4096 counter, [...]
350 *
351 * We also use that counter for non-QoS STAs.
352 */
353 tid = NUM_RX_DATA_QUEUES - 1;
354 }
355
356 rx->queue = tid;
357 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
358 * For now, set skb->priority to 0 for other cases. */
359 rx->skb->priority = (tid > 7) ? 0 : tid;
360 }
361
362 static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx)
363 {
364 #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
366 int hdrlen;
367
368 if (!ieee80211_is_data_present(hdr->frame_control))
369 return;
370
371 /*
372 * Drivers are required to align the payload data in a way that
373 * guarantees that the contained IP header is aligned to a four-
374 * byte boundary. In the case of regular frames, this simply means
375 * aligning the payload to a four-byte boundary (because either
376 * the IP header is directly contained, or IV/RFC1042 headers that
377 * have a length divisible by four are in front of it.
378 * have a length divisible by four are in front of it).
379 * With A-MSDU frames, however, the payload data address must
380 * yield two modulo four because there are 14-byte 802.3 headers
381 * within the A-MSDU frames that push the IP header further back
382 * to a multiple of four again. Thankfully, the specs were sane
383 * enough this time around to require padding each A-MSDU subframe
384 * to a length that is a multiple of four.
385 *
386 * Padding like Atheros hardware adds, which is in between the 802.11
387 * header and the payload, is not supported; the driver is required
388 * to move the 802.11 header further back in that case.
389 */
390 hdrlen = ieee80211_hdrlen(hdr->frame_control);
391 if (rx->flags & IEEE80211_RX_AMSDU)
392 hdrlen += ETH_HLEN;
393 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3);
394 #endif
395 }
396
397
398 /* rx handlers */
399
400 static ieee80211_rx_result debug_noinline
401 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
402 {
403 struct ieee80211_local *local = rx->local;
404 struct sk_buff *skb = rx->skb;
405
406 if (unlikely(local->sta_hw_scanning))
407 return ieee80211_sta_rx_scan(rx->sdata, skb, rx->status);
408
409 if (unlikely(local->sta_sw_scanning)) {
410 /* drop all the other packets during a software scan anyway */
411 if (ieee80211_sta_rx_scan(rx->sdata, skb, rx->status)
412 != RX_QUEUED)
413 dev_kfree_skb(skb);
414 return RX_QUEUED;
415 }
416
417 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
418 /* scanning finished during invoking of handlers */
419 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
420 return RX_DROP_UNUSABLE;
421 }
422
423 return RX_CONTINUE;
424 }
425
426 static ieee80211_rx_result
427 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
428 {
429 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
430 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
431
432 if (ieee80211_is_data(hdr->frame_control)) {
433 if (!ieee80211_has_a4(hdr->frame_control))
434 return RX_DROP_MONITOR;
435 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
436 return RX_DROP_MONITOR;
437 }
438
439 /* If there is not an established peer link and this is not a peer link
440 * establishment frame, beacon or probe, drop the frame.
441 */
442
443 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
444 struct ieee80211_mgmt *mgmt;
445
446 if (!ieee80211_is_mgmt(hdr->frame_control))
447 return RX_DROP_MONITOR;
448
449 if (ieee80211_is_action(hdr->frame_control)) {
450 mgmt = (struct ieee80211_mgmt *)hdr;
451 if (mgmt->u.action.category != PLINK_CATEGORY)
452 return RX_DROP_MONITOR;
453 return RX_CONTINUE;
454 }
455
456 if (ieee80211_is_probe_req(hdr->frame_control) ||
457 ieee80211_is_probe_resp(hdr->frame_control) ||
458 ieee80211_is_beacon(hdr->frame_control))
459 return RX_CONTINUE;
460
461 return RX_DROP_MONITOR;
462
463 }
464
465 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
466
467 if (ieee80211_is_data(hdr->frame_control) &&
468 is_multicast_ether_addr(hdr->addr1) &&
469 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
470 return RX_DROP_MONITOR;
471 #undef msh_h_get
472
473 return RX_CONTINUE;
474 }
475
476
477 static ieee80211_rx_result debug_noinline
478 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
479 {
480 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
481
482 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
483 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
484 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
485 rx->sta->last_seq_ctrl[rx->queue] ==
486 hdr->seq_ctrl)) {
487 if (rx->flags & IEEE80211_RX_RA_MATCH) {
488 rx->local->dot11FrameDuplicateCount++;
489 rx->sta->num_duplicates++;
490 }
491 return RX_DROP_MONITOR;
492 } else
493 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
494 }
495
496 if (unlikely(rx->skb->len < 16)) {
497 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
498 return RX_DROP_MONITOR;
499 }
500
501 /* Drop disallowed frame classes based on STA auth/assoc state;
502 * IEEE 802.11, Chap 5.5.
503 *
504 * 80211.o does filtering only based on association state, i.e., it
505 * drops Class 3 frames from not associated stations. hostapd sends
506 * deauth/disassoc frames when needed. In addition, hostapd is
507 * responsible for filtering on both auth and assoc states.
508 */
509
510 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
511 return ieee80211_rx_mesh_check(rx);
512
513 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
514 ieee80211_is_pspoll(hdr->frame_control)) &&
515 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
516 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
517 if ((!ieee80211_has_fromds(hdr->frame_control) &&
518 !ieee80211_has_tods(hdr->frame_control) &&
519 ieee80211_is_data(hdr->frame_control)) ||
520 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
521 /* Drop IBSS frames and frames for other hosts
522 * silently. */
523 return RX_DROP_MONITOR;
524 }
525
526 return RX_DROP_MONITOR;
527 }
528
529 return RX_CONTINUE;
530 }
531
532
533 static ieee80211_rx_result debug_noinline
534 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
535 {
536 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
537 int keyidx;
538 int hdrlen;
539 ieee80211_rx_result result = RX_DROP_UNUSABLE;
540 struct ieee80211_key *stakey = NULL;
541
542 /*
543 * Key selection 101
544 *
545 * There are three types of keys:
546 * - GTK (group keys)
547 * - PTK (pairwise keys)
548 * - STK (station-to-station pairwise keys)
549 *
550 * When selecting a key, we have to distinguish between multicast
551 * (including broadcast) and unicast frames, the latter can only
552 * use PTKs and STKs while the former always use GTKs. Unless, of
553 * course, actual WEP keys ("pre-RSNA") are used, then unicast
554 * frames can also use key indices like GTKs. Hence, if we don't
555 * have a PTK/STK we check the key index for a WEP key.
556 *
557 * Note that in a regular BSS, multicast frames are sent by the
558 * AP only, associated stations unicast the frame to the AP first
559 * which then multicasts it on their behalf.
560 *
561 * There is also a slight problem in IBSS mode: GTKs are negotiated
562 * with each station; that is something we don't currently handle.
563 * The spec seems to expect that one negotiates the same key with
564 * every station but there's no such requirement; VLANs could be
565 * possible.
566 */
567
568 if (!ieee80211_has_protected(hdr->frame_control))
569 return RX_CONTINUE;
570
571 /*
572 * No point in finding a key and decrypting if the frame is neither
573 * addressed to us nor a multicast frame.
574 */
575 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
576 return RX_CONTINUE;
577
578 if (rx->sta)
579 stakey = rcu_dereference(rx->sta->key);
580
581 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
582 rx->key = stakey;
583 } else {
584 /*
585 * The device doesn't give us the IV so we won't be
586 * able to look up the key. That's ok though, we
587 * don't need to decrypt the frame, we just won't
588 * be able to keep statistics accurate.
589 * Except for key threshold notifications, should
590 * we somehow allow the driver to tell us which key
591 * the hardware used if this flag is set?
592 */
593 if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
594 (rx->status->flag & RX_FLAG_IV_STRIPPED))
595 return RX_CONTINUE;
596
597 hdrlen = ieee80211_hdrlen(hdr->frame_control);
598
599 if (rx->skb->len < 8 + hdrlen)
600 return RX_DROP_UNUSABLE; /* TODO: count this? */
601
602 /*
603 * no need to call ieee80211_wep_get_keyidx,
604 * it verifies a bunch of things we've done already
605 */
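/* the key index sits in the top two bits of the fourth IV octet */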
606 keyidx = rx->skb->data[hdrlen + 3] >> 6;
607
608 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
609
610 /*
611 * RSNA-protected unicast frames should always be sent with
612 * pairwise or station-to-station keys, but for WEP we allow
613 * using a key index as well.
614 */
615 if (rx->key && rx->key->conf.alg != ALG_WEP &&
616 !is_multicast_ether_addr(hdr->addr1))
617 rx->key = NULL;
618 }
619
620 if (rx->key) {
621 rx->key->tx_rx_count++;
622 /* TODO: add threshold stuff again */
623 } else {
624 return RX_DROP_MONITOR;
625 }
626
627 /* Check for weak IVs if possible */
628 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
629 ieee80211_is_data(hdr->frame_control) &&
630 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
631 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
632 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
633 rx->sta->wep_weak_iv_count++;
634
635 switch (rx->key->conf.alg) {
636 case ALG_WEP:
637 result = ieee80211_crypto_wep_decrypt(rx);
638 break;
639 case ALG_TKIP:
640 result = ieee80211_crypto_tkip_decrypt(rx);
641 break;
642 case ALG_CCMP:
643 result = ieee80211_crypto_ccmp_decrypt(rx);
644 break;
645 }
646
647 /* either the frame has been decrypted or will be dropped */
648 rx->status->flag |= RX_FLAG_DECRYPTED;
649
650 return result;
651 }
652
653 static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
654 {
655 struct ieee80211_sub_if_data *sdata;
656 DECLARE_MAC_BUF(mac);
657
658 sdata = sta->sdata;
659
660 atomic_inc(&sdata->bss->num_sta_ps);
661 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
662 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
663 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
664 dev->name, print_mac(mac, sta->addr), sta->aid);
665 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
666 }
667
668 static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
669 {
670 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
671 struct sk_buff *skb;
672 int sent = 0;
673 struct ieee80211_sub_if_data *sdata;
674 struct ieee80211_tx_info *info;
675 DECLARE_MAC_BUF(mac);
676
677 sdata = sta->sdata;
678
679 atomic_dec(&sdata->bss->num_sta_ps);
680
681 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
682
683 if (!skb_queue_empty(&sta->ps_tx_buf))
684 sta_info_clear_tim_bit(sta);
685
686 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
687 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
688 dev->name, print_mac(mac, sta->addr), sta->aid);
689 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
690
691 /* Send all buffered frames to the station */
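/* frames the hardware rejected (filtered) while the STA slept are
 * flushed first, then the frames buffered in software in ps_tx_buf */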
692 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
693 info = IEEE80211_SKB_CB(skb);
694 sent++;
695 info->flags |= IEEE80211_TX_CTL_REQUEUE;
696 dev_queue_xmit(skb);
697 }
698 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
699 info = IEEE80211_SKB_CB(skb);
700 local->total_ps_buffered--;
701 sent++;
702 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
703 printk(KERN_DEBUG "%s: STA %s aid %d send PS frame "
704 "since STA not sleeping anymore\n", dev->name,
705 print_mac(mac, sta->addr), sta->aid);
706 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
707 info->flags |= IEEE80211_TX_CTL_REQUEUE;
708 dev_queue_xmit(skb);
709 }
710
711 return sent;
712 }
713
714 static ieee80211_rx_result debug_noinline
715 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
716 {
717 struct sta_info *sta = rx->sta;
718 struct net_device *dev = rx->dev;
719 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
720
721 if (!sta)
722 return RX_CONTINUE;
723
724 /* Update last_rx only for IBSS packets which are for the current
725 * BSSID to avoid keeping the current IBSS network alive in cases where
726 * other STAs are using different BSSID. */
727 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
728 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
729 IEEE80211_IF_TYPE_IBSS);
730 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0)
731 sta->last_rx = jiffies;
732 } else
733 if (!is_multicast_ether_addr(hdr->addr1) ||
734 rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) {
735 /* Update last_rx only for unicast frames in order to prevent
736 * the Probe Request frames (the only broadcast frames from a
737 * STA in infrastructure mode) from keeping a connection alive.
738 * Mesh beacons will update last_rx if they are found to
739 * match the current local configuration when processed.
740 */
741 sta->last_rx = jiffies;
742 }
743
744 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
745 return RX_CONTINUE;
746
747 sta->rx_fragments++;
748 sta->rx_bytes += rx->skb->len;
749 sta->last_signal = rx->status->signal;
750 sta->last_qual = rx->status->qual;
751 sta->last_noise = rx->status->noise;
752
753 if (!ieee80211_has_morefrags(hdr->frame_control) &&
754 (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP ||
755 rx->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) {
756 /* Change STA power saving mode only in the end of a frame
757 * exchange sequence */
758 if (test_sta_flags(sta, WLAN_STA_PS) &&
759 !ieee80211_has_pm(hdr->frame_control))
760 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta);
761 else if (!test_sta_flags(sta, WLAN_STA_PS) &&
762 ieee80211_has_pm(hdr->frame_control))
763 ap_sta_ps_start(dev, sta);
764 }
765
766 /* Drop data::nullfunc frames silently, since they are used only to
767 * control station power saving mode. */
768 if (ieee80211_is_nullfunc(hdr->frame_control)) {
769 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
770 /* Update counter and free packet here to avoid counting this
771 * as a dropped packet. */
772 sta->rx_packets++;
773 dev_kfree_skb(rx->skb);
774 return RX_QUEUED;
775 }
776
777 return RX_CONTINUE;
778 } /* ieee80211_rx_h_sta_process */
779
780 static inline struct ieee80211_fragment_entry *
781 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
782 unsigned int frag, unsigned int seq, int rx_queue,
783 struct sk_buff **skb)
784 {
785 struct ieee80211_fragment_entry *entry;
786 int idx;
787
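/* take the next slot in the small per-interface ring buffer of pending
 * reassemblies, evicting any fragment chain still queued there */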
788 idx = sdata->fragment_next;
789 entry = &sdata->fragments[sdata->fragment_next++];
790 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
791 sdata->fragment_next = 0;
792
793 if (!skb_queue_empty(&entry->skb_list)) {
794 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
795 struct ieee80211_hdr *hdr =
796 (struct ieee80211_hdr *) entry->skb_list.next->data;
797 DECLARE_MAC_BUF(mac);
798 DECLARE_MAC_BUF(mac2);
799 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
800 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
801 "addr1=%s addr2=%s\n",
802 sdata->dev->name, idx,
803 jiffies - entry->first_frag_time, entry->seq,
804 entry->last_frag, print_mac(mac, hdr->addr1),
805 print_mac(mac2, hdr->addr2));
806 #endif
807 __skb_queue_purge(&entry->skb_list);
808 }
809
810 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
811 *skb = NULL;
812 entry->first_frag_time = jiffies;
813 entry->seq = seq;
814 entry->rx_queue = rx_queue;
815 entry->last_frag = frag;
816 entry->ccmp = 0;
817 entry->extra_len = 0;
818
819 return entry;
820 }
821
822 static inline struct ieee80211_fragment_entry *
823 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
824 unsigned int frag, unsigned int seq,
825 int rx_queue, struct ieee80211_hdr *hdr)
826 {
827 struct ieee80211_fragment_entry *entry;
828 int i, idx;
829
830 idx = sdata->fragment_next;
831 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
832 struct ieee80211_hdr *f_hdr;
833
834 idx--;
835 if (idx < 0)
836 idx = IEEE80211_FRAGMENT_MAX - 1;
837
838 entry = &sdata->fragments[idx];
839 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
840 entry->rx_queue != rx_queue ||
841 entry->last_frag + 1 != frag)
842 continue;
843
844 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
845
846 /*
847 * Check ftype and addresses are equal, else check next fragment
848 */
849 if (((hdr->frame_control ^ f_hdr->frame_control) &
850 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
851 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
852 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
853 continue;
854
855 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
856 __skb_queue_purge(&entry->skb_list);
857 continue;
858 }
859 return entry;
860 }
861
862 return NULL;
863 }
864
865 static ieee80211_rx_result debug_noinline
866 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
867 {
868 struct ieee80211_hdr *hdr;
869 u16 sc;
870 __le16 fc;
871 unsigned int frag, seq;
872 struct ieee80211_fragment_entry *entry;
873 struct sk_buff *skb;
874 DECLARE_MAC_BUF(mac);
875
876 hdr = (struct ieee80211_hdr *)rx->skb->data;
877 fc = hdr->frame_control;
878 sc = le16_to_cpu(hdr->seq_ctrl);
879 frag = sc & IEEE80211_SCTL_FRAG;
880
881 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
882 (rx->skb)->len < 24 ||
883 is_multicast_ether_addr(hdr->addr1))) {
884 /* not fragmented */
885 goto out;
886 }
887 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
888
889 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
890
891 if (frag == 0) {
892 /* This is the first fragment of a new frame. */
893 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
894 rx->queue, &(rx->skb));
895 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
896 ieee80211_has_protected(fc)) {
897 /* Store CCMP PN so that we can verify that the next
898 * fragment has a sequential PN value. */
899 entry->ccmp = 1;
900 memcpy(entry->last_pn,
901 rx->key->u.ccmp.rx_pn[rx->queue],
902 CCMP_PN_LEN);
903 }
904 return RX_QUEUED;
905 }
906
907 /* This is a fragment for a frame that should already be pending in
908 * fragment cache. Add this fragment to the end of the pending entry.
909 */
910 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
911 if (!entry) {
912 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
913 return RX_DROP_MONITOR;
914 }
915
916 /* Verify that MPDUs within one MSDU have sequential PN values.
917 * (IEEE 802.11i, 8.3.3.4.5) */
918 if (entry->ccmp) {
919 int i;
920 u8 pn[CCMP_PN_LEN], *rpn;
921 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
922 return RX_DROP_UNUSABLE;
923 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
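/* advance our copy of the PN by one; the last byte is the least
 * significant, so the carry propagates towards index 0 */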
924 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
925 pn[i]++;
926 if (pn[i])
927 break;
928 }
929 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
930 if (memcmp(pn, rpn, CCMP_PN_LEN))
931 return RX_DROP_UNUSABLE;
932 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
933 }
934
935 skb_pull(rx->skb, ieee80211_hdrlen(fc));
936 __skb_queue_tail(&entry->skb_list, rx->skb);
937 entry->last_frag = frag;
938 entry->extra_len += rx->skb->len;
939 if (ieee80211_has_morefrags(fc)) {
940 rx->skb = NULL;
941 return RX_QUEUED;
942 }
943
944 rx->skb = __skb_dequeue(&entry->skb_list);
945 if (skb_tailroom(rx->skb) < entry->extra_len) {
946 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
947 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
948 GFP_ATOMIC))) {
949 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
950 __skb_queue_purge(&entry->skb_list);
951 return RX_DROP_UNUSABLE;
952 }
953 }
954 while ((skb = __skb_dequeue(&entry->skb_list))) {
955 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
956 dev_kfree_skb(skb);
957 }
958
959 /* Complete frame has been reassembled - process it now */
960 rx->flags |= IEEE80211_RX_FRAGMENTED;
961
962 out:
963 if (rx->sta)
964 rx->sta->rx_packets++;
965 if (is_multicast_ether_addr(hdr->addr1))
966 rx->local->dot11MulticastReceivedFrameCount++;
967 else
968 ieee80211_led_rx(rx->local);
969 return RX_CONTINUE;
970 }
971
972 static ieee80211_rx_result debug_noinline
973 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
974 {
975 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
976 struct sk_buff *skb;
977 int no_pending_pkts;
978 DECLARE_MAC_BUF(mac);
979 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
980
981 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
982 !(rx->flags & IEEE80211_RX_RA_MATCH)))
983 return RX_CONTINUE;
984
985 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) &&
986 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
987 return RX_DROP_UNUSABLE;
988
989 skb = skb_dequeue(&rx->sta->tx_filtered);
990 if (!skb) {
991 skb = skb_dequeue(&rx->sta->ps_tx_buf);
992 if (skb)
993 rx->local->total_ps_buffered--;
994 }
995 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
996 skb_queue_empty(&rx->sta->ps_tx_buf);
997
998 if (skb) {
999 struct ieee80211_hdr *hdr =
1000 (struct ieee80211_hdr *) skb->data;
1001
1002 /*
1003 * Tell TX path to send one frame even though the STA may
1004 * still remain in PS mode after this frame exchange.
1005 */
1006 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1007
1008 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1009 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
1010 print_mac(mac, rx->sta->addr), rx->sta->aid,
1011 skb_queue_len(&rx->sta->ps_tx_buf));
1012 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1013
1014 /* Use MoreData flag to indicate whether there are more
1015 * buffered frames for this STA */
1016 if (no_pending_pkts)
1017 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1018 else
1019 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1020
1021 dev_queue_xmit(skb);
1022
1023 if (no_pending_pkts)
1024 sta_info_clear_tim_bit(rx->sta);
1025 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1026 } else if (!rx->sent_ps_buffered) {
1027 /*
1028 * FIXME: This can be the result of a race condition between
1029 * us expiring a frame and the station polling for it.
1030 * Should we send it a null-func frame indicating we
1031 * have nothing buffered for it?
1032 */
1033 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
1034 "though there are no buffered frames for it\n",
1035 rx->dev->name, print_mac(mac, rx->sta->addr));
1036 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1037 }
1038
1039 /* Free PS Poll skb here instead of returning RX_DROP that would
1040 * count as a dropped frame. */
1041 dev_kfree_skb(rx->skb);
1042
1043 return RX_QUEUED;
1044 }
1045
1046 static ieee80211_rx_result debug_noinline
1047 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1048 {
1049 u8 *data = rx->skb->data;
1050 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1051
1052 if (!ieee80211_is_data_qos(hdr->frame_control))
1053 return RX_CONTINUE;
1054
1055 /* remove the qos control field, update frame type and meta-data */
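/* the bytes in front of the QoS field are shifted forward by two,
 * overwriting it; skb_pull() below then drops the stale start of the buffer */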
1056 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1057 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1058 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1059 /* change frame type to non QOS */
1060 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1061
1062 return RX_CONTINUE;
1063 }
1064
1065 static int
1066 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1067 {
1068 if (unlikely(!rx->sta ||
1069 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1070 return -EACCES;
1071
1072 return 0;
1073 }
1074
1075 static int
1076 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1077 {
1078 /*
1079 * Pass through unencrypted frames if the hardware has
1080 * decrypted them already.
1081 */
1082 if (rx->status->flag & RX_FLAG_DECRYPTED)
1083 return 0;
1084
1085 /* Drop unencrypted frames if key is set. */
1086 if (unlikely(!ieee80211_has_protected(fc) &&
1087 !ieee80211_is_nullfunc(fc) &&
1088 (rx->key || rx->sdata->drop_unencrypted)))
1089 return -EACCES;
1090
1091 return 0;
1092 }
1093
1094 static int
1095 ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1096 {
1097 struct net_device *dev = rx->dev;
1098 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1099 u16 hdrlen, ethertype;
1100 u8 *payload;
1101 u8 dst[ETH_ALEN];
1102 u8 src[ETH_ALEN] __aligned(2);
1103 struct sk_buff *skb = rx->skb;
1104 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1105 DECLARE_MAC_BUF(mac);
1106 DECLARE_MAC_BUF(mac2);
1107 DECLARE_MAC_BUF(mac3);
1108 DECLARE_MAC_BUF(mac4);
1109
1110 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1111 return -1;
1112
1113 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1114
1115 if (ieee80211_vif_is_mesh(&sdata->vif))
1116 hdrlen += ieee80211_get_mesh_hdrlen(
1117 (struct ieee80211s_hdr *) (skb->data + hdrlen));
1118
1119 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1120 * header
1121 * IEEE 802.11 address fields:
1122 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
1123 * 0 0 DA SA BSSID n/a
1124 * 0 1 DA BSSID SA n/a
1125 * 1 0 BSSID SA DA n/a
1126 * 1 1 RA TA DA SA
1127 */
1128 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1129 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1130
1131 switch (hdr->frame_control &
1132 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1133 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS):
1134 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
1135 sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
1136 return -1;
1137 break;
1138 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1139 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1140 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
1141 return -1;
1142 break;
1143 case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS):
1144 if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
1145 (is_multicast_ether_addr(dst) &&
1146 !compare_ether_addr(src, dev->dev_addr)))
1147 return -1;
1148 break;
1149 case __constant_cpu_to_le16(0):
1150 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1151 return -1;
1152 break;
1153 }
1154
1155 if (unlikely(skb->len - hdrlen < 8))
1156 return -1;
1157
1158 payload = skb->data + hdrlen;
1159 ethertype = (payload[6] << 8) | payload[7];
1160
1161 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1162 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1163 compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
1164 /* remove RFC1042 or Bridge-Tunnel encapsulation and
1165 * replace EtherType */
1166 skb_pull(skb, hdrlen + 6);
1167 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
1168 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
1169 } else {
1170 struct ethhdr *ehdr;
1171 __be16 len;
1172
1173 skb_pull(skb, hdrlen);
1174 len = htons(skb->len);
1175 ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
1176 memcpy(ehdr->h_dest, dst, ETH_ALEN);
1177 memcpy(ehdr->h_source, src, ETH_ALEN);
1178 ehdr->h_proto = len;
1179 }
1180 return 0;
1181 }
1182
1183 /*
1184 * requires that rx->skb is a frame with an ethernet header
1185 */
1186 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1187 {
1188 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1189 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1190 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1191
1192 /*
1193 * Allow EAPOL frames to us/the PAE group address regardless
1194 * of whether the frame was encrypted or not.
1195 */
1196 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1197 (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
1198 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1199 return true;
1200
1201 if (ieee80211_802_1x_port_control(rx) ||
1202 ieee80211_drop_unencrypted(rx, fc))
1203 return false;
1204
1205 return true;
1206 }
1207
1208 /*
1209 * requires that rx->skb is a frame with an ethernet header
1210 */
1211 static void
1212 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1213 {
1214 struct net_device *dev = rx->dev;
1215 struct ieee80211_local *local = rx->local;
1216 struct sk_buff *skb, *xmit_skb;
1217 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1218 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1219 struct sta_info *dsta;
1220
1221 skb = rx->skb;
1222 xmit_skb = NULL;
1223
1224 if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP ||
1225 sdata->vif.type == IEEE80211_IF_TYPE_VLAN) &&
1226 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1227 if (is_multicast_ether_addr(ehdr->h_dest)) {
1228 /*
1229 * send multicast frames both to higher layers in
1230 * local net stack and back to the wireless medium
1231 */
1232 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1233 if (!xmit_skb && net_ratelimit())
1234 printk(KERN_DEBUG "%s: failed to clone "
1235 "multicast frame\n", dev->name);
1236 } else {
1237 dsta = sta_info_get(local, skb->data);
1238 if (dsta && dsta->sdata->dev == dev) {
1239 /*
1240 * The destination station is associated to
1241 * this AP (in this VLAN), so send the frame
1242 * directly to it and do not pass it to local
1243 * net stack.
1244 */
1245 xmit_skb = skb;
1246 skb = NULL;
1247 }
1248 }
1249 }
1250
1251 if (skb) {
1252 /* deliver to local stack */
1253 skb->protocol = eth_type_trans(skb, dev);
1254 memset(skb->cb, 0, sizeof(skb->cb));
1255 netif_rx(skb);
1256 }
1257
1258 if (xmit_skb) {
1259 /* send to wireless media */
1260 xmit_skb->protocol = htons(ETH_P_802_3);
1261 skb_reset_network_header(xmit_skb);
1262 skb_reset_mac_header(xmit_skb);
1263 dev_queue_xmit(xmit_skb);
1264 }
1265 }
1266
1267 static ieee80211_rx_result debug_noinline
1268 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1269 {
1270 struct net_device *dev = rx->dev;
1271 struct ieee80211_local *local = rx->local;
1272 u16 ethertype;
1273 u8 *payload;
1274 struct sk_buff *skb = rx->skb, *frame = NULL;
1275 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1276 __le16 fc = hdr->frame_control;
1277 const struct ethhdr *eth;
1278 int remaining, err;
1279 u8 dst[ETH_ALEN];
1280 u8 src[ETH_ALEN];
1281 DECLARE_MAC_BUF(mac);
1282
1283 if (unlikely(!ieee80211_is_data(fc)))
1284 return RX_CONTINUE;
1285
1286 if (unlikely(!ieee80211_is_data_present(fc)))
1287 return RX_DROP_MONITOR;
1288
1289 if (!(rx->flags & IEEE80211_RX_AMSDU))
1290 return RX_CONTINUE;
1291
1292 err = ieee80211_data_to_8023(rx);
1293 if (unlikely(err))
1294 return RX_DROP_UNUSABLE;
1295
1296 skb->dev = dev;
1297
1298 dev->stats.rx_packets++;
1299 dev->stats.rx_bytes += skb->len;
1300
1301 /* skip the wrapping header */
1302 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1303 if (!eth)
1304 return RX_DROP_UNUSABLE;
1305
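/* walk the A-MSDU subframes: each carries its own 14-byte 802.3 header
 * and is handed to the normal delivery path as an individual frame */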
1306 while (skb != frame) {
1307 u8 padding;
1308 __be16 len = eth->h_proto;
1309 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1310
1311 remaining = skb->len;
1312 memcpy(dst, eth->h_dest, ETH_ALEN);
1313 memcpy(src, eth->h_source, ETH_ALEN);
1314
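/* each subframe is padded out to a multiple of four bytes; work out
 * how many pad bytes follow this one */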
1315 padding = ((4 - subframe_len) & 0x3);
1316 /* the last MSDU has no padding */
1317 if (subframe_len > remaining)
1318 return RX_DROP_UNUSABLE;
1319
1320 skb_pull(skb, sizeof(struct ethhdr));
1321 /* if last subframe reuse skb */
1322 if (remaining <= subframe_len + padding)
1323 frame = skb;
1324 else {
1325 frame = dev_alloc_skb(local->hw.extra_tx_headroom +
1326 subframe_len);
1327
1328 if (frame == NULL)
1329 return RX_DROP_UNUSABLE;
1330
1331 skb_reserve(frame, local->hw.extra_tx_headroom +
1332 sizeof(struct ethhdr));
1333 memcpy(skb_put(frame, ntohs(len)), skb->data,
1334 ntohs(len));
1335
1336 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1337 padding);
1338 if (!eth) {
1339 dev_kfree_skb(frame);
1340 return RX_DROP_UNUSABLE;
1341 }
1342 }
1343
1344 skb_reset_network_header(frame);
1345 frame->dev = dev;
1346 frame->priority = skb->priority;
1347 rx->skb = frame;
1348
1349 payload = frame->data;
1350 ethertype = (payload[6] << 8) | payload[7];
1351
1352 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1353 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1354 compare_ether_addr(payload,
1355 bridge_tunnel_header) == 0)) {
1356 /* remove RFC1042 or Bridge-Tunnel
1357 * encapsulation and replace EtherType */
1358 skb_pull(frame, 6);
1359 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1360 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1361 } else {
1362 memcpy(skb_push(frame, sizeof(__be16)),
1363 &len, sizeof(__be16));
1364 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1365 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1366 }
1367
1368 if (!ieee80211_frame_allowed(rx, fc)) {
1369 if (skb == frame) /* last frame */
1370 return RX_DROP_UNUSABLE;
1371 dev_kfree_skb(frame);
1372 continue;
1373 }
1374
1375 ieee80211_deliver_skb(rx);
1376 }
1377
1378 return RX_QUEUED;
1379 }
1380
1381 static ieee80211_rx_result debug_noinline
1382 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1383 {
1384 struct ieee80211_hdr *hdr;
1385 struct ieee80211s_hdr *mesh_hdr;
1386 unsigned int hdrlen;
1387 struct sk_buff *skb = rx->skb, *fwd_skb;
1388
1389 hdr = (struct ieee80211_hdr *) skb->data;
1390 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1391 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1392
1393 if (!ieee80211_is_data(hdr->frame_control))
1394 return RX_CONTINUE;
1395
1396 if (!mesh_hdr->ttl)
1397 /* illegal frame */
1398 return RX_DROP_MONITOR;
1399
1400 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1401 return RX_CONTINUE;
1402
1403 mesh_hdr->ttl--;
1404
1405 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1406 if (!mesh_hdr->ttl)
1407 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1408 dropped_frames_ttl);
1409 else {
1410 struct ieee80211_hdr *fwd_hdr;
1411 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1412
1413 if (!fwd_skb && net_ratelimit())
1414 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1415 rx->dev->name);
1416 
1417 if (fwd_skb) {
1418 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1419 /* Save TA to addr1 to send TA a path error
1420 * if a suitable next hop is not found */
1421 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
1422 memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
1423 fwd_skb->dev = rx->local->mdev;
1424 fwd_skb->iif = rx->dev->ifindex;
1425 dev_queue_xmit(fwd_skb);
1426 }
1427 }
1428 }
1429
1430 if (is_multicast_ether_addr(hdr->addr3) ||
1431 rx->dev->flags & IFF_PROMISC)
1432 return RX_CONTINUE;
1433 else
1434 return RX_DROP_MONITOR;
1435 }
1436
1437
1438 static ieee80211_rx_result debug_noinline
1439 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1440 {
1441 struct net_device *dev = rx->dev;
1442 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1443 __le16 fc = hdr->frame_control;
1444 int err;
1445
1446 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1447 return RX_CONTINUE;
1448
1449 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1450 return RX_DROP_MONITOR;
1451
1452 err = ieee80211_data_to_8023(rx);
1453 if (unlikely(err))
1454 return RX_DROP_UNUSABLE;
1455
1456 if (!ieee80211_frame_allowed(rx, fc))
1457 return RX_DROP_MONITOR;
1458
1459 rx->skb->dev = dev;
1460
1461 dev->stats.rx_packets++;
1462 dev->stats.rx_bytes += rx->skb->len;
1463
1464 ieee80211_deliver_skb(rx);
1465
1466 return RX_QUEUED;
1467 }
1468
1469 static ieee80211_rx_result debug_noinline
1470 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1471 {
1472 struct ieee80211_local *local = rx->local;
1473 struct ieee80211_hw *hw = &local->hw;
1474 struct sk_buff *skb = rx->skb;
1475 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1476 struct tid_ampdu_rx *tid_agg_rx;
1477 u16 start_seq_num;
1478 u16 tid;
1479
1480 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1481 return RX_CONTINUE;
1482
1483 if (ieee80211_is_back_req(bar->frame_control)) {
1484 if (!rx->sta)
1485 return RX_CONTINUE;
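/* the TID occupies the top four bits of the BAR control field */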
1486 tid = le16_to_cpu(bar->control) >> 12;
1487 if (rx->sta->ampdu_mlme.tid_state_rx[tid]
1488 != HT_AGG_STATE_OPERATIONAL)
1489 return RX_CONTINUE;
1490 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1491
1492 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1493
1494 /* reset session timer */
1495 if (tid_agg_rx->timeout) {
1496 unsigned long expires =
1497 jiffies + (tid_agg_rx->timeout / 1000) * HZ;
1498 mod_timer(&tid_agg_rx->session_timer, expires);
1499 }
1500
1501 /* manage the reordering buffer according to the
1502 * requested sequence number */
1503 rcu_read_lock();
1504 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1505 start_seq_num, 1);
1506 rcu_read_unlock();
1507 return RX_DROP_UNUSABLE;
1508 }
1509
1510 return RX_CONTINUE;
1511 }
1512
1513 static ieee80211_rx_result debug_noinline
1514 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1515 {
1516 struct ieee80211_local *local = rx->local;
1517 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1518 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1519 int len = rx->skb->len;
1520
1521 if (!ieee80211_is_action(mgmt->frame_control))
1522 return RX_CONTINUE;
1523
1524 if (!rx->sta)
1525 return RX_DROP_MONITOR;
1526
1527 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1528 return RX_DROP_MONITOR;
1529
1530 /* all categories we currently handle have action_code */
1531 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1532 return RX_DROP_MONITOR;
1533
1534 /*
1535 * FIXME: revisit this, I'm sure we should handle most
1536 * of these frames in other modes as well!
1537 */
1538 if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
1539 sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1540 return RX_DROP_MONITOR;
1541
1542 switch (mgmt->u.action.category) {
1543 case WLAN_CATEGORY_BACK:
1544 switch (mgmt->u.action.u.addba_req.action_code) {
1545 case WLAN_ACTION_ADDBA_REQ:
1546 if (len < (IEEE80211_MIN_ACTION_SIZE +
1547 sizeof(mgmt->u.action.u.addba_req)))
1548 return RX_DROP_MONITOR;
1549 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1550 break;
1551 case WLAN_ACTION_ADDBA_RESP:
1552 if (len < (IEEE80211_MIN_ACTION_SIZE +
1553 sizeof(mgmt->u.action.u.addba_resp)))
1554 return RX_DROP_MONITOR;
1555 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1556 break;
1557 case WLAN_ACTION_DELBA:
1558 if (len < (IEEE80211_MIN_ACTION_SIZE +
1559 sizeof(mgmt->u.action.u.delba)))
1560 return RX_DROP_MONITOR;
1561 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1562 break;
1563 }
1564 break;
1565 case WLAN_CATEGORY_SPECTRUM_MGMT:
1566 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1567 return RX_DROP_MONITOR;
1568 switch (mgmt->u.action.u.measurement.action_code) {
1569 case WLAN_ACTION_SPCT_MSR_REQ:
1570 if (len < (IEEE80211_MIN_ACTION_SIZE +
1571 sizeof(mgmt->u.action.u.measurement)))
1572 return RX_DROP_MONITOR;
1573 ieee80211_process_measurement_req(sdata, mgmt, len);
1574 break;
1575 }
1576 break;
1577 default:
1578 return RX_CONTINUE;
1579 }
1580
1581 rx->sta->rx_packets++;
1582 dev_kfree_skb(rx->skb);
1583 return RX_QUEUED;
1584 }
1585
1586 static ieee80211_rx_result debug_noinline
1587 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1588 {
1589 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1590
1591 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1592 return RX_DROP_MONITOR;
1593
1594 if (ieee80211_vif_is_mesh(&sdata->vif))
1595 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1596
1597 if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
1598 sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1599 return RX_DROP_MONITOR;
1600
1601 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
1602 return RX_DROP_MONITOR;
1603
1604 ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1605 return RX_QUEUED;
1606 }
1607
1608 static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1609 struct ieee80211_hdr *hdr,
1610 struct ieee80211_rx_data *rx)
1611 {
1612 int keyidx;
1613 unsigned int hdrlen;
1614 DECLARE_MAC_BUF(mac);
1615 DECLARE_MAC_BUF(mac2);
1616
1617 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1618 if (rx->skb->len >= hdrlen + 4)
1619 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1620 else
1621 keyidx = -1;
1622
1623 if (!rx->sta) {
1624 /*
1625 * Some hardware seem to generate incorrect Michael MIC
1626 * reports; ignore them to avoid triggering countermeasures.
1627 */
1628 goto ignore;
1629 }
1630
1631 if (!ieee80211_has_protected(hdr->frame_control))
1632 goto ignore;
1633
1634 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) {
1635 /*
1636 * APs with pairwise keys should never receive Michael MIC
1637 * errors for non-zero keyidx because these are reserved for
1638 * group keys and only the AP is sending real multicast
1639 * frames in the BSS.
1640 */
1641 goto ignore;
1642 }
1643
1644 if (!ieee80211_is_data(hdr->frame_control) &&
1645 !ieee80211_is_auth(hdr->frame_control))
1646 goto ignore;
1647
1648 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
1649 ignore:
1650 dev_kfree_skb(rx->skb);
1651 rx->skb = NULL;
1652 }
1653
1654 /* TODO: use IEEE80211_RX_FRAGMENTED */
1655 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1656 {
1657 struct ieee80211_sub_if_data *sdata;
1658 struct ieee80211_local *local = rx->local;
1659 struct ieee80211_rtap_hdr {
1660 struct ieee80211_radiotap_header hdr;
1661 u8 flags;
1662 u8 rate;
1663 __le16 chan_freq;
1664 __le16 chan_flags;
1665 } __attribute__ ((packed)) *rthdr;
1666 struct sk_buff *skb = rx->skb, *skb2;
1667 struct net_device *prev_dev = NULL;
1668 struct ieee80211_rx_status *status = rx->status;
1669
1670 if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
1671 goto out_free_skb;
1672
1673 if (skb_headroom(skb) < sizeof(*rthdr) &&
1674 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1675 goto out_free_skb;
1676
1677 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1678 memset(rthdr, 0, sizeof(*rthdr));
1679 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1680 rthdr->hdr.it_present =
1681 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1682 (1 << IEEE80211_RADIOTAP_RATE) |
1683 (1 << IEEE80211_RADIOTAP_CHANNEL));
1684
1685 rthdr->rate = rx->rate->bitrate / 5;
1686 rthdr->chan_freq = cpu_to_le16(status->freq);
1687
1688 if (status->band == IEEE80211_BAND_5GHZ)
1689 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1690 IEEE80211_CHAN_5GHZ);
1691 else
1692 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1693 IEEE80211_CHAN_2GHZ);
1694
1695 skb_set_mac_header(skb, 0);
1696 skb->ip_summed = CHECKSUM_UNNECESSARY;
1697 skb->pkt_type = PACKET_OTHERHOST;
1698 skb->protocol = htons(ETH_P_802_2);
1699
1700 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1701 if (!netif_running(sdata->dev))
1702 continue;
1703
1704 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR ||
1705 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1706 continue;
1707
1708 if (prev_dev) {
1709 skb2 = skb_clone(skb, GFP_ATOMIC);
1710 if (skb2) {
1711 skb2->dev = prev_dev;
1712 netif_rx(skb2);
1713 }
1714 }
1715
1716 prev_dev = sdata->dev;
1717 sdata->dev->stats.rx_packets++;
1718 sdata->dev->stats.rx_bytes += skb->len;
1719 }
1720
1721 if (prev_dev) {
1722 skb->dev = prev_dev;
1723 netif_rx(skb);
1724 skb = NULL;
1725 } else
1726 goto out_free_skb;
1727
1728 rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
1729 return;
1730
1731 out_free_skb:
1732 dev_kfree_skb(skb);
1733 }
1734
1735
1736 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1737 struct ieee80211_rx_data *rx,
1738 struct sk_buff *skb)
1739 {
1740 ieee80211_rx_result res = RX_DROP_MONITOR;
1741
1742 rx->skb = skb;
1743 rx->sdata = sdata;
1744 rx->dev = sdata->dev;
1745
1746 #define CALL_RXH(rxh) \
1747 do { \
1748 res = rxh(rx); \
1749 if (res != RX_CONTINUE) \
1750 goto rxh_done; \
1751 } while (0);
1752
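/* the handlers below run in order; the first result other than
 * RX_CONTINUE ends the chain, and rxh_done decides the frame's fate */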
1753 CALL_RXH(ieee80211_rx_h_passive_scan)
1754 CALL_RXH(ieee80211_rx_h_check)
1755 CALL_RXH(ieee80211_rx_h_decrypt)
1756 CALL_RXH(ieee80211_rx_h_sta_process)
1757 CALL_RXH(ieee80211_rx_h_defragment)
1758 CALL_RXH(ieee80211_rx_h_ps_poll)
1759 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
1760 /* must be after MMIC verify so header is counted in MPDU mic */
1761 CALL_RXH(ieee80211_rx_h_remove_qos_control)
1762 CALL_RXH(ieee80211_rx_h_amsdu)
1763 if (ieee80211_vif_is_mesh(&sdata->vif))
1764 CALL_RXH(ieee80211_rx_h_mesh_fwding);
1765 CALL_RXH(ieee80211_rx_h_data)
1766 CALL_RXH(ieee80211_rx_h_ctrl)
1767 CALL_RXH(ieee80211_rx_h_action)
1768 CALL_RXH(ieee80211_rx_h_mgmt)
1769
1770 #undef CALL_RXH
1771
1772 rxh_done:
1773 switch (res) {
1774 case RX_DROP_MONITOR:
1775 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1776 if (rx->sta)
1777 rx->sta->rx_dropped++;
1778 /* fall through */
1779 case RX_CONTINUE:
1780 ieee80211_rx_cooked_monitor(rx);
1781 break;
1782 case RX_DROP_UNUSABLE:
1783 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1784 if (rx->sta)
1785 rx->sta->rx_dropped++;
1786 dev_kfree_skb(rx->skb);
1787 break;
1788 case RX_QUEUED:
1789 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
1790 break;
1791 }
1792 }
1793
1794 /* main receive path */
1795
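/*
 * Decide whether a frame is of interest to this interface: return 0 when the
 * interface should ignore it entirely, 1 otherwise.  When the frame is kept
 * only for scanning or promiscuous reasons, IEEE80211_RX_RA_MATCH is cleared
 * so that later handlers know it was not actually addressed to us.
 */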
1796 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1797 u8 *bssid, struct ieee80211_rx_data *rx,
1798 struct ieee80211_hdr *hdr)
1799 {
1800 int multicast = is_multicast_ether_addr(hdr->addr1);
1801
1802 switch (sdata->vif.type) {
1803 case IEEE80211_IF_TYPE_STA:
1804 if (!bssid)
1805 return 0;
1806 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1807 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1808 return 0;
1809 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1810 } else if (!multicast &&
1811 compare_ether_addr(sdata->dev->dev_addr,
1812 hdr->addr1) != 0) {
1813 if (!(sdata->dev->flags & IFF_PROMISC))
1814 return 0;
1815 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1816 }
1817 break;
1818 case IEEE80211_IF_TYPE_IBSS:
1819 if (!bssid)
1820 return 0;
1821 if (ieee80211_is_beacon(hdr->frame_control)) {
1822 return 1;
1823 }
1824 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1825 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1826 return 0;
1827 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1828 } else if (!multicast &&
1829 compare_ether_addr(sdata->dev->dev_addr,
1830 hdr->addr1) != 0) {
1831 if (!(sdata->dev->flags & IFF_PROMISC))
1832 return 0;
1833 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1834 } else if (!rx->sta)
1835 rx->sta = ieee80211_ibss_add_sta(sdata, rx->skb,
1836 bssid, hdr->addr2,
1837 BIT(rx->status->rate_idx));
1838 break;
1839 case IEEE80211_IF_TYPE_MESH_POINT:
1840 if (!multicast &&
1841 compare_ether_addr(sdata->dev->dev_addr,
1842 hdr->addr1) != 0) {
1843 if (!(sdata->dev->flags & IFF_PROMISC))
1844 return 0;
1845
1846 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1847 }
1848 break;
1849 case IEEE80211_IF_TYPE_VLAN:
1850 case IEEE80211_IF_TYPE_AP:
1851 if (!bssid) {
1852 if (compare_ether_addr(sdata->dev->dev_addr,
1853 hdr->addr1))
1854 return 0;
1855 } else if (!ieee80211_bssid_match(bssid,
1856 sdata->dev->dev_addr)) {
1857 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1858 return 0;
1859 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1860 }
1861 break;
1862 case IEEE80211_IF_TYPE_WDS:
1863 if (bssid || !ieee80211_is_data(hdr->frame_control))
1864 return 0;
1865 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
1866 return 0;
1867 break;
1868 case IEEE80211_IF_TYPE_MNTR:
1869 /* take everything */
1870 break;
1871 case IEEE80211_IF_TYPE_INVALID:
1872 /* should never get here */
1873 WARN_ON(1);
1874 break;
1875 }
1876
1877 return 1;
1878 }
1879
1880 /*
1881 * This is the actual Rx frames handler. As it belongs to the Rx path it
1882 * must be called with rcu_read_lock protection.
1883 */
1884 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1885 struct sk_buff *skb,
1886 struct ieee80211_rx_status *status,
1887 struct ieee80211_rate *rate)
1888 {
1889 struct ieee80211_local *local = hw_to_local(hw);
1890 struct ieee80211_sub_if_data *sdata;
1891 struct ieee80211_hdr *hdr;
1892 struct ieee80211_rx_data rx;
1893 int prepares;
1894 struct ieee80211_sub_if_data *prev = NULL;
1895 struct sk_buff *skb_new;
1896 u8 *bssid;
1897
1898 hdr = (struct ieee80211_hdr *)skb->data;
1899 memset(&rx, 0, sizeof(rx));
1900 rx.skb = skb;
1901 rx.local = local;
1902
1903 rx.status = status;
1904 rx.rate = rate;
1905
1906 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
1907 local->dot11ReceivedFragmentCount++;
1908
1909 rx.sta = sta_info_get(local, hdr->addr2);
1910 if (rx.sta) {
1911 rx.sdata = rx.sta->sdata;
1912 rx.dev = rx.sta->sdata->dev;
1913 }
1914
1915 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
1916 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
1917 return;
1918 }
1919
1920 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning))
1921 rx.flags |= IEEE80211_RX_IN_SCAN;
1922
1923 ieee80211_parse_qos(&rx);
1924 ieee80211_verify_ip_alignment(&rx);
1925
1926 skb = rx.skb;
1927
1928 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1929 if (!netif_running(sdata->dev))
1930 continue;
1931
1932 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR)
1933 continue;
1934
1935 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
1936 rx.flags |= IEEE80211_RX_RA_MATCH;
1937 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr);
1938
1939 if (!prepares)
1940 continue;
1941
1942 /*
1943 * frame is destined for this interface; defer handling it until we
1944 * know whether a later interface also wants it, so the last matching
1945 * interface can take the original SKB without an extra copy
1946 */
1947
1948 if (!prev) {
1949 prev = sdata;
1950 continue;
1951 }
1952
1953 /*
1954 * frame was destined for the previous interface
1955 * so invoke RX handlers for it
1956 */
1957
1958 skb_new = skb_copy(skb, GFP_ATOMIC);
1959 if (!skb_new) {
1960 if (net_ratelimit())
1961 printk(KERN_DEBUG "%s: failed to copy "
1962 "multicast frame for %s\n",
1963 wiphy_name(local->hw.wiphy),
1964 prev->dev->name);
1965 continue;
1966 }
1967 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
1968 prev = sdata;
1969 }
1970 if (prev)
1971 ieee80211_invoke_rx_handlers(prev, &rx, skb);
1972 else
1973 dev_kfree_skb(skb);
1974 }
1975
1976 #define SEQ_MODULO 0x1000
1977 #define SEQ_MASK 0xfff
1978
1979 static inline int seq_less(u16 sq1, u16 sq2)
1980 {
1981 return (((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1));
1982 }
1983
1984 static inline u16 seq_inc(u16 sq)
1985 {
1986 return ((sq + 1) & SEQ_MASK);
1987 }
1988
1989 static inline u16 seq_sub(u16 sq1, u16 sq2)
1990 {
1991 return ((sq1 - sq2) & SEQ_MASK);
1992 }
1993
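/*
 * Illustration of the modulo-4096 sequence number arithmetic above
 * (SEQ_MODULO == 0x1000, SEQ_MASK == 0xfff):
 *
 *	seq_inc(0xfff)         == 0x000	(wraps around)
 *	seq_sub(0x002, 0xffe)  == 0x004	(distance across the wrap)
 *	seq_less(0xffe, 0x001) == 1	(0xffe is "before" 0x001)
 *	seq_less(0x001, 0xffe) == 0
 *
 * sq1 is taken to be less than sq2 when sq2 is ahead of sq1 by fewer
 * than half of the 4096-entry sequence number space.
 */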
1994
1995 /*
1996 * As this function belongs to the Rx path it must be called with
1997 * the proper rcu_read_lock protection for its flow.
1998 */
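/*
 * Example of the window handling below, assuming buf_size == 8 and
 * head_seq_num == 10:
 *  - an MPDU with sequence number 9 is behind the window and is dropped;
 *  - sequence numbers 10..17 fall inside the window and are released
 *    immediately if in order, or buffered otherwise;
 *  - sequence number 20 lies beyond the window, so the head moves up to
 *    seq_inc(seq_sub(20, 8)) == 13 and any frames buffered for 10..12
 *    are flushed to the stack first.
 */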
1999 u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2000 struct tid_ampdu_rx *tid_agg_rx,
2001 struct sk_buff *skb, u16 mpdu_seq_num,
2002 int bar_req)
2003 {
2004 struct ieee80211_local *local = hw_to_local(hw);
2005 struct ieee80211_rx_status status;
2006 u16 head_seq_num, buf_size;
2007 int index;
2008 struct ieee80211_supported_band *sband;
2009 struct ieee80211_rate *rate;
2010
2011 buf_size = tid_agg_rx->buf_size;
2012 head_seq_num = tid_agg_rx->head_seq_num;
2013
2014 /* frame with an out-of-date sequence number - drop it */
2015 if (seq_less(mpdu_seq_num, head_seq_num)) {
2016 dev_kfree_skb(skb);
2017 return 1;
2018 }
2019
2020 /* if the frame sequence number exceeds our buffering window size or a
2021 * Block Ack Request arrived - release the stored frames */
2022 if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
2023 /* new head to the ordering buffer */
2024 if (bar_req)
2025 head_seq_num = mpdu_seq_num;
2026 else
2027 head_seq_num =
2028 seq_inc(seq_sub(mpdu_seq_num, buf_size));
2029 /* release stored frames up to new head to stack */
2030 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
2031 index = seq_sub(tid_agg_rx->head_seq_num,
2032 tid_agg_rx->ssn)
2033 % tid_agg_rx->buf_size;
2034
2035 if (tid_agg_rx->reorder_buf[index]) {
2036 /* release the reordered frames to stack */
2037 memcpy(&status,
2038 tid_agg_rx->reorder_buf[index]->cb,
2039 sizeof(status));
2040 sband = local->hw.wiphy->bands[status.band];
2041 rate = &sband->bitrates[status.rate_idx];
2042 __ieee80211_rx_handle_packet(hw,
2043 tid_agg_rx->reorder_buf[index],
2044 &status, rate);
2045 tid_agg_rx->stored_mpdu_num--;
2046 tid_agg_rx->reorder_buf[index] = NULL;
2047 }
2048 tid_agg_rx->head_seq_num =
2049 seq_inc(tid_agg_rx->head_seq_num);
2050 }
2051 if (bar_req)
2052 return 1;
2053 }
2054
2055 /* now the new frame is always within the range of the
2056 * reordering buffer window */
2057 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
2058 % tid_agg_rx->buf_size;
2059 /* check if we already stored this frame */
2060 if (tid_agg_rx->reorder_buf[index]) {
2061 dev_kfree_skb(skb);
2062 return 1;
2063 }
2064
2065 /* if the arriving MPDU is in order and nothing else is buffered,
2066 * release it immediately */
2067 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
2068 tid_agg_rx->stored_mpdu_num == 0) {
2069 tid_agg_rx->head_seq_num =
2070 seq_inc(tid_agg_rx->head_seq_num);
2071 return 0;
2072 }
2073
2074 /* put the frame in the reordering buffer */
2075 tid_agg_rx->reorder_buf[index] = skb;
2076 tid_agg_rx->stored_mpdu_num++;
2077 /* release buffered frames up to the next missing frame */
2078 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
2079 % tid_agg_rx->buf_size;
2080 while (tid_agg_rx->reorder_buf[index]) {
2081 /* release the reordered frame back to stack */
2082 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
2083 sizeof(status));
2084 sband = local->hw.wiphy->bands[status.band];
2085 rate = &sband->bitrates[status.rate_idx];
2086 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2087 &status, rate);
2088 tid_agg_rx->stored_mpdu_num--;
2089 tid_agg_rx->reorder_buf[index] = NULL;
2090 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
2091 index = seq_sub(tid_agg_rx->head_seq_num,
2092 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
2093 }
2094 return 1;
2095 }
2096
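/*
 * Feed a received frame into the RX reordering machinery if it belongs to
 * an active A-MPDU session. Returns nonzero when the frame was consumed by
 * the reordering code (buffered, dropped or already delivered through the
 * reorder buffer) and 0 when the caller should process it as a normal frame.
 */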
2097 static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2098 struct sk_buff *skb)
2099 {
2100 struct ieee80211_hw *hw = &local->hw;
2101 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2102 struct sta_info *sta;
2103 struct tid_ampdu_rx *tid_agg_rx;
2104 u16 sc;
2105 u16 mpdu_seq_num;
2106 u8 ret = 0;
2107 int tid;
2108
2109 sta = sta_info_get(local, hdr->addr2);
2110 if (!sta)
2111 return ret;
2112
2113 /* filter the QoS data rx stream according to STA/TID and check
2114 * if this STA/TID has an aggregation session open */
2115 if (!ieee80211_is_data_qos(hdr->frame_control))
2116 goto end_reorder;
2117
2118 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2119
2120 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2121 goto end_reorder;
2122
2123 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2124
2125 /* qos null data frames are excluded */
2126 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2127 goto end_reorder;
2128
2129 /* new, possibly out-of-order A-MPDU frame - process it */
2130
2131 /* reset session timer */
2132 if (tid_agg_rx->timeout) {
2133 unsigned long expires =
2134 jiffies + (tid_agg_rx->timeout / 1000) * HZ;
2135 mod_timer(&tid_agg_rx->session_timer, expires);
2136 }
2137
2138 /* if this mpdu is fragmented - terminate rx aggregation session */
2139 sc = le16_to_cpu(hdr->seq_ctrl);
2140 if (sc & IEEE80211_SCTL_FRAG) {
2141 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->addr,
2142 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2143 ret = 1;
2144 goto end_reorder;
2145 }
2146
2147 /* hand the MPDU to the reordering buffer according to its sequence number */
2148 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2149 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
2150 mpdu_seq_num, 0);
2151 end_reorder:
2152 return ret;
2153 }
2154
2155 /*
2156 * This is the receive path handler. It is called by a low-level driver when
2157 * an 802.11 MPDU is received from the hardware.
2158 */
2159 void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2160 struct ieee80211_rx_status *status)
2161 {
2162 struct ieee80211_local *local = hw_to_local(hw);
2163 struct ieee80211_rate *rate = NULL;
2164 struct ieee80211_supported_band *sband;
2165
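/*
 * Sanity-check the band and rate index reported by the driver before
 * indexing the bitrate table; bogus values indicate a driver bug.
 */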
2166 if (status->band < 0 ||
2167 status->band >= IEEE80211_NUM_BANDS) {
2168 WARN_ON(1);
2169 return;
2170 }
2171
2172 sband = local->hw.wiphy->bands[status->band];
2173
2174 if (!sband ||
2175 status->rate_idx < 0 ||
2176 status->rate_idx >= sband->n_bitrates) {
2177 WARN_ON(1);
2178 return;
2179 }
2180
2181 rate = &sband->bitrates[status->rate_idx];
2182
2183 /*
2184 * key references and virtual interfaces are protected using RCU
2185 * and this requires that we are in a read-side RCU section during
2186 * receive processing
2187 */
2188 rcu_read_lock();
2189
2190 /*
2191 * Frames with a failed FCS/PLCP checksum are not returned;
2192 * all other frames are returned with the radiotap header
2193 * removed if one was previously present.
2194 * Also, frames shorter than 16 bytes are dropped.
2195 */
2196 skb = ieee80211_rx_monitor(local, skb, status, rate);
2197 if (!skb) {
2198 rcu_read_unlock();
2199 return;
2200 }
2201
2202 if (!ieee80211_rx_reorder_ampdu(local, skb))
2203 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2204
2205 rcu_read_unlock();
2206 }
2207 EXPORT_SYMBOL(__ieee80211_rx);
2208
2209 /* This is a version of the rx handler that can be called from hard irq
2210 * context. Post the skb on the queue and schedule the tasklet */
2211 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
2212 struct ieee80211_rx_status *status)
2213 {
2214 struct ieee80211_local *local = hw_to_local(hw);
2215
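/*
 * The RX status is stashed in the skb control buffer (skb->cb) until the
 * tasklet picks the frame up again, so it must fit in there.
 */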
2216 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2217
2218 skb->dev = local->mdev;
2219 /* copy status into skb->cb for use by tasklet */
2220 memcpy(skb->cb, status, sizeof(*status));
2221 skb->pkt_type = IEEE80211_RX_MSG;
2222 skb_queue_tail(&local->skb_queue, skb);
2223 tasklet_schedule(&local->tasklet);
2224 }
2225 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
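/*
 * Illustrative sketch (not part of this file): how a low-level driver's
 * receive path might hand a frame to mac80211 through ieee80211_rx_irqsafe().
 * The driver-side names (my_driver_rx_isr and the hard-coded field values)
 * are hypothetical; only the mac80211 call and the ieee80211_rx_status
 * fields already used in this file are assumed to be real.
 */
#if 0
static void my_driver_rx_isr(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_rx_status status;

	memset(&status, 0, sizeof(status));
	status.band = IEEE80211_BAND_2GHZ;	/* band the frame was received on */
	status.rate_idx = 0;			/* index into that band's bitrate table */
	status.flag = 0;			/* e.g. RX_FLAG_FAILED_FCS_CRC on a bad FCS */

	/* safe from hard irq context: the frame is queued and the RX tasklet
	 * later calls __ieee80211_rx() under rcu_read_lock() */
	ieee80211_rx_irqsafe(hw, skb, &status);
}
#endif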