1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <net/mac80211.h>
21 #include <net/ieee80211_radiotap.h>
22
23 #include "ieee80211_i.h"
24 #include "driver-ops.h"
25 #include "led.h"
26 #include "mesh.h"
27 #include "wep.h"
28 #include "wpa.h"
29 #include "tkip.h"
30 #include "wme.h"
31
32 /*
33 * monitor mode reception
34 *
35 * This function cleans up the SKB, i.e. it removes all the stuff
36 * only useful for monitoring.
37 */
38 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
39 struct sk_buff *skb)
40 {
41 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
42 if (likely(skb->len > FCS_LEN))
43 __pskb_trim(skb, skb->len - FCS_LEN);
44 else {
45 /* driver bug */
46 WARN_ON(1);
47 dev_kfree_skb(skb);
48 skb = NULL;
49 }
50 }
51
52 return skb;
53 }
54
55 static inline int should_drop_frame(struct sk_buff *skb,
56 int present_fcs_len)
57 {
58 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
59 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
60
61 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
62 return 1;
63 if (unlikely(skb->len < 16 + present_fcs_len))
64 return 1;
65 if (ieee80211_is_ctl(hdr->frame_control) &&
66 !ieee80211_is_pspoll(hdr->frame_control) &&
67 !ieee80211_is_back_req(hdr->frame_control))
68 return 1;
69 return 0;
70 }
71
72 static int
73 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
74 struct ieee80211_rx_status *status)
75 {
76 int len;
77
78 /* always present fields */
79 len = sizeof(struct ieee80211_radiotap_header) + 9;
80
81 if (status->flag & RX_FLAG_MACTIME_MPDU)
82 len += 8;
83 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
84 len += 1;
85
86 if (len & 1) /* padding for RX_FLAGS if necessary */
87 len++;
88
89 if (status->flag & RX_FLAG_HT) /* HT info */
90 len += 3;
91
92 return len;
93 }
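/*
 * For example: with the 8-byte radiotap header plus the nine always-present
 * bytes accounted for above (flags 1, rate/padding 1, channel 4, antenna 1,
 * RX flags 2), a frame carrying a dBm signal but no mactime needs
 * 8 + 9 + 1 = 18 bytes; 18 is already even, so no RX_FLAGS alignment byte is
 * added, and an HT frame would need 3 more bytes for the MCS field.
 */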
94
95 /*
96 * ieee80211_add_rx_radiotap_header - add radiotap header
97 *
98 * add a radiotap header containing all the fields which the hardware provided.
99 */
100 static void
101 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
102 struct sk_buff *skb,
103 struct ieee80211_rate *rate,
104 int rtap_len)
105 {
106 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
107 struct ieee80211_radiotap_header *rthdr;
108 unsigned char *pos;
109 u16 rx_flags = 0;
110
111 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
112 memset(rthdr, 0, rtap_len);
113
114 /* radiotap header, set always present flags */
115 rthdr->it_present =
116 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
117 (1 << IEEE80211_RADIOTAP_CHANNEL) |
118 (1 << IEEE80211_RADIOTAP_ANTENNA) |
119 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
120 rthdr->it_len = cpu_to_le16(rtap_len);
121
122 pos = (unsigned char *)(rthdr+1);
123
124 /* the order of the following fields is important */
125
126 /* IEEE80211_RADIOTAP_TSFT */
127 if (status->flag & RX_FLAG_MACTIME_MPDU) {
128 put_unaligned_le64(status->mactime, pos);
129 rthdr->it_present |=
130 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
131 pos += 8;
132 }
133
134 /* IEEE80211_RADIOTAP_FLAGS */
135 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
136 *pos |= IEEE80211_RADIOTAP_F_FCS;
137 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
138 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
139 if (status->flag & RX_FLAG_SHORTPRE)
140 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
141 pos++;
142
143 /* IEEE80211_RADIOTAP_RATE */
144 if (!rate || status->flag & RX_FLAG_HT) {
145 /*
146 * Without rate information don't add it. If we have an HT
147 * rate, the MCS information is a separate radiotap field,
148 * added below. The byte here is still needed as padding
149 * for the channel though, so initialise it to 0.
150 */
151 *pos = 0;
152 } else {
153 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
154 *pos = rate->bitrate / 5;
155 }
156 pos++;
157
158 /* IEEE80211_RADIOTAP_CHANNEL */
159 put_unaligned_le16(status->freq, pos);
160 pos += 2;
161 if (status->band == IEEE80211_BAND_5GHZ)
162 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
163 pos);
164 else if (status->flag & RX_FLAG_HT)
165 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
166 pos);
167 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
168 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
169 pos);
170 else if (rate)
171 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
172 pos);
173 else
174 put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
175 pos += 2;
176
177 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
178 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
179 *pos = status->signal;
180 rthdr->it_present |=
181 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
182 pos++;
183 }
184
185 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
186
187 /* IEEE80211_RADIOTAP_ANTENNA */
188 *pos = status->antenna;
189 pos++;
190
191 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
192
193 /* IEEE80211_RADIOTAP_RX_FLAGS */
194 /* ensure 2 byte alignment for the 2 byte field as required */
195 if ((pos - (u8 *)rthdr) & 1)
196 pos++;
197 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
198 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
199 put_unaligned_le16(rx_flags, pos);
200 pos += 2;
201
202 if (status->flag & RX_FLAG_HT) {
203 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
204 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
205 IEEE80211_RADIOTAP_MCS_HAVE_GI |
206 IEEE80211_RADIOTAP_MCS_HAVE_BW;
207 *pos = 0;
208 if (status->flag & RX_FLAG_SHORT_GI)
209 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
210 if (status->flag & RX_FLAG_40MHZ)
211 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
212 pos++;
213 *pos++ = status->rate_idx;
214 }
215 }
216
217 /*
218 * This function copies a received frame to all monitor interfaces and
219 * returns a cleaned-up SKB that no longer includes the FCS nor the
220 * radiotap header the driver might have added.
221 */
222 static struct sk_buff *
223 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
224 struct ieee80211_rate *rate)
225 {
226 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
227 struct ieee80211_sub_if_data *sdata;
228 int needed_headroom = 0;
229 struct sk_buff *skb, *skb2;
230 struct net_device *prev_dev = NULL;
231 int present_fcs_len = 0;
232
233 /*
234 * First, we may need to make a copy of the skb because
235 * (1) we need to modify it for radiotap (if not present), and
236 * (2) the other RX handlers will modify the skb we got.
237 *
238 * We don't need to, of course, if we aren't going to return
239 * the SKB because it has a bad FCS/PLCP checksum.
240 */
241
242 /* room for the radiotap header based on driver features */
243 needed_headroom = ieee80211_rx_radiotap_len(local, status);
244
245 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
246 present_fcs_len = FCS_LEN;
247
248 /* make sure hdr->frame_control is on the linear part */
249 if (!pskb_may_pull(origskb, 2)) {
250 dev_kfree_skb(origskb);
251 return NULL;
252 }
253
254 if (!local->monitors) {
255 if (should_drop_frame(origskb, present_fcs_len)) {
256 dev_kfree_skb(origskb);
257 return NULL;
258 }
259
260 return remove_monitor_info(local, origskb);
261 }
262
263 if (should_drop_frame(origskb, present_fcs_len)) {
264 /* only need to expand headroom if necessary */
265 skb = origskb;
266 origskb = NULL;
267
268 /*
269 * This shouldn't trigger often because most devices have an
270 * RX header they pull before we get here, and that should
271 * be big enough for our radiotap information. We should
272 * probably export the length to drivers so that we can have
273 * them allocate enough headroom to start with.
274 */
275 if (skb_headroom(skb) < needed_headroom &&
276 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
277 dev_kfree_skb(skb);
278 return NULL;
279 }
280 } else {
281 /*
282 * Need to make a copy and possibly remove radiotap header
283 * and FCS from the original.
284 */
285 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
286
287 origskb = remove_monitor_info(local, origskb);
288
289 if (!skb)
290 return origskb;
291 }
292
293 /* prepend radiotap information */
294 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
295
296 skb_reset_mac_header(skb);
297 skb->ip_summed = CHECKSUM_UNNECESSARY;
298 skb->pkt_type = PACKET_OTHERHOST;
299 skb->protocol = htons(ETH_P_802_2);
300
301 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
302 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
303 continue;
304
305 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
306 continue;
307
308 if (!ieee80211_sdata_running(sdata))
309 continue;
310
311 if (prev_dev) {
312 skb2 = skb_clone(skb, GFP_ATOMIC);
313 if (skb2) {
314 skb2->dev = prev_dev;
315 netif_receive_skb(skb2);
316 }
317 }
318
319 prev_dev = sdata->dev;
320 sdata->dev->stats.rx_packets++;
321 sdata->dev->stats.rx_bytes += skb->len;
322 }
323
324 if (prev_dev) {
325 skb->dev = prev_dev;
326 netif_receive_skb(skb);
327 } else
328 dev_kfree_skb(skb);
329
330 return origskb;
331 }
332
333
334 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
335 {
336 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
337 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
338 int tid, seqno_idx, security_idx;
339
340 /* does the frame have a qos control field? */
341 if (ieee80211_is_data_qos(hdr->frame_control)) {
342 u8 *qc = ieee80211_get_qos_ctl(hdr);
343 /* frame has qos control */
344 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
345 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
346 status->rx_flags |= IEEE80211_RX_AMSDU;
347
348 seqno_idx = tid;
349 security_idx = tid;
350 } else {
351 /*
352 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
353 *
354 * Sequence numbers for management frames, QoS data
355 * frames with a broadcast/multicast address in the
356 * Address 1 field, and all non-QoS data frames sent
357 * by QoS STAs are assigned using an additional single
358 * modulo-4096 counter, [...]
359 *
360 * We also use that counter for non-QoS STAs.
361 */
362 seqno_idx = NUM_RX_DATA_QUEUES;
363 security_idx = 0;
364 if (ieee80211_is_mgmt(hdr->frame_control))
365 security_idx = NUM_RX_DATA_QUEUES;
366 tid = 0;
367 }
368
369 rx->seqno_idx = seqno_idx;
370 rx->security_idx = security_idx;
371 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
372 * For now, set skb->priority to 0 for other cases. */
373 rx->skb->priority = (tid > 7) ? 0 : tid;
374 }
375
376 /**
377 * DOC: Packet alignment
378 *
379 * Drivers always need to pass packets that are aligned to two-byte boundaries
380 * to the stack.
381 *
382 * Additionally, drivers should, if possible, align the payload data in a way that
383 * guarantees that the contained IP header is aligned to a four-byte
384 * boundary. In the case of regular frames, this simply means aligning the
385 * payload to a four-byte boundary (because either the IP header is directly
386 * contained, or IV/RFC1042 headers that have a length divisible by four are
387 * in front of it). If the payload data is not properly aligned and the
388 * architecture doesn't support efficient unaligned operations, mac80211
389 * will align the data.
390 *
391 * With A-MSDU frames, however, the payload data address must be two modulo
392 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
393 * push the IP header further back to a multiple of four again. Thankfully, the
394 * specs were sane enough this time around to require padding each A-MSDU
395 * subframe to a length that is a multiple of four.
396 *
397 * Padding such as that added by Atheros hardware between the 802.11 header
398 * and the payload is not supported; in that case the driver is required to
399 * move the 802.11 header so that it is directly in front of the payload.
400 */
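/*
 * For example: after a CCMP IV (8 bytes) and an RFC 1042/SNAP header
 * (8 bytes), the IP header starts 16 bytes into the payload, so a payload
 * start address that is a multiple of four keeps the IP header four-byte
 * aligned. Within an A-MSDU, each subframe begins with a 14-byte 802.3
 * header, so a payload address of two modulo four puts that IP header at
 * 2 + 14 = 16 bytes into the payload, again a multiple of four.
 */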
401 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
402 {
403 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
404 WARN_ONCE((unsigned long)rx->skb->data & 1,
405 "unaligned packet at 0x%p\n", rx->skb->data);
406 #endif
407 }
408
409
410 /* rx handlers */
411
412 static ieee80211_rx_result debug_noinline
413 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
414 {
415 struct ieee80211_local *local = rx->local;
416 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
417 struct sk_buff *skb = rx->skb;
418
419 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
420 !local->sched_scanning))
421 return RX_CONTINUE;
422
423 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
424 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
425 local->sched_scanning)
426 return ieee80211_scan_rx(rx->sdata, skb);
427
428         /* scanning finished while the handlers were being invoked */
429 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
430 return RX_DROP_UNUSABLE;
431 }
432
433
434 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
435 {
436 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
437
438 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
439 return 0;
440
441 return ieee80211_is_robust_mgmt_frame(hdr);
442 }
443
444
445 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
446 {
447 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
448
449 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
450 return 0;
451
452 return ieee80211_is_robust_mgmt_frame(hdr);
453 }
454
455
456 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
457 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
458 {
459 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
460 struct ieee80211_mmie *mmie;
461
462 if (skb->len < 24 + sizeof(*mmie) ||
463 !is_multicast_ether_addr(hdr->da))
464 return -1;
465
466 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
467 return -1; /* not a robust management frame */
468
469 mmie = (struct ieee80211_mmie *)
470 (skb->data + skb->len - sizeof(*mmie));
471 if (mmie->element_id != WLAN_EID_MMIE ||
472 mmie->length != sizeof(*mmie) - 2)
473 return -1;
474
475 return le16_to_cpu(mmie->key_id);
476 }
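/*
 * The MMIE checked above is the 18-byte Management MIC element that BIP
 * appends to the end of a protected broadcast/multicast management frame:
 * element ID (WLAN_EID_MMIE), length (16), a 2-byte key ID, a 6-byte packet
 * number and an 8-byte MIC, which is why the key ID can simply be read from
 * the last sizeof(*mmie) bytes of the frame.
 */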
477
478
479 static ieee80211_rx_result
480 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
481 {
482 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
483 char *dev_addr = rx->sdata->vif.addr;
484
485 if (ieee80211_is_data(hdr->frame_control)) {
486 if (is_multicast_ether_addr(hdr->addr1)) {
487 if (ieee80211_has_tods(hdr->frame_control) ||
488 !ieee80211_has_fromds(hdr->frame_control))
489 return RX_DROP_MONITOR;
490 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
491 return RX_DROP_MONITOR;
492 } else {
493 if (!ieee80211_has_a4(hdr->frame_control))
494 return RX_DROP_MONITOR;
495 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
496 return RX_DROP_MONITOR;
497 }
498 }
499
500 /* If there is not an established peer link and this is not a peer link
501          * establishment frame, beacon or probe, drop the frame.
502 */
503
504 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
505 struct ieee80211_mgmt *mgmt;
506
507 if (!ieee80211_is_mgmt(hdr->frame_control))
508 return RX_DROP_MONITOR;
509
510 if (ieee80211_is_action(hdr->frame_control)) {
511 u8 category;
512 mgmt = (struct ieee80211_mgmt *)hdr;
513 category = mgmt->u.action.category;
514 if (category != WLAN_CATEGORY_MESH_ACTION &&
515 category != WLAN_CATEGORY_SELF_PROTECTED)
516 return RX_DROP_MONITOR;
517 return RX_CONTINUE;
518 }
519
520 if (ieee80211_is_probe_req(hdr->frame_control) ||
521 ieee80211_is_probe_resp(hdr->frame_control) ||
522 ieee80211_is_beacon(hdr->frame_control) ||
523 ieee80211_is_auth(hdr->frame_control))
524 return RX_CONTINUE;
525
526 return RX_DROP_MONITOR;
527
528 }
529
530 return RX_CONTINUE;
531 }
532
533 #define SEQ_MODULO 0x1000
534 #define SEQ_MASK 0xfff
535
536 static inline int seq_less(u16 sq1, u16 sq2)
537 {
538 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
539 }
540
541 static inline u16 seq_inc(u16 sq)
542 {
543 return (sq + 1) & SEQ_MASK;
544 }
545
546 static inline u16 seq_sub(u16 sq1, u16 sq2)
547 {
548 return (sq1 - sq2) & SEQ_MASK;
549 }
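/*
 * These helpers implement IEEE 802.11 sequence-number arithmetic modulo
 * 4096. For example, seq_less(4090, 5) is true because (4090 - 5) & 0xfff
 * = 4085, which is more than half the sequence space, i.e. 4090 lies behind
 * 5 across the wrap-around; correspondingly seq_sub(5, 4090) = 11.
 */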
550
551
552 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
553 struct tid_ampdu_rx *tid_agg_rx,
554 int index)
555 {
556 struct ieee80211_local *local = hw_to_local(hw);
557 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
558 struct ieee80211_rx_status *status;
559
560 lockdep_assert_held(&tid_agg_rx->reorder_lock);
561
562 if (!skb)
563 goto no_frame;
564
565 /* release the frame from the reorder ring buffer */
566 tid_agg_rx->stored_mpdu_num--;
567 tid_agg_rx->reorder_buf[index] = NULL;
568 status = IEEE80211_SKB_RXCB(skb);
569 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
570 skb_queue_tail(&local->rx_skb_queue, skb);
571
572 no_frame:
573 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
574 }
575
576 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
577 struct tid_ampdu_rx *tid_agg_rx,
578 u16 head_seq_num)
579 {
580 int index;
581
582 lockdep_assert_held(&tid_agg_rx->reorder_lock);
583
584 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
585 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
586 tid_agg_rx->buf_size;
587 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
588 }
589 }
590
591 /*
592 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
593 * the skb was added to the buffer longer than this time ago, the earlier
594 * frames that have not yet been received are assumed to be lost and the skb
595 * can be released for processing. This may also release other skb's from the
596 * reorder buffer if there are no additional gaps between the frames.
597 *
598 * Callers must hold tid_agg_rx->reorder_lock.
599 */
600 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
601
602 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
603 struct tid_ampdu_rx *tid_agg_rx)
604 {
605 int index, j;
606
607 lockdep_assert_held(&tid_agg_rx->reorder_lock);
608
609 /* release the buffer until next missing frame */
610 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
611 tid_agg_rx->buf_size;
612 if (!tid_agg_rx->reorder_buf[index] &&
613 tid_agg_rx->stored_mpdu_num > 1) {
614 /*
615 * No buffers ready to be released, but check whether any
616 * frames in the reorder buffer have timed out.
617 */
618 int skipped = 1;
619 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
620 j = (j + 1) % tid_agg_rx->buf_size) {
621 if (!tid_agg_rx->reorder_buf[j]) {
622 skipped++;
623 continue;
624 }
625 if (skipped &&
626 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
627 HT_RX_REORDER_BUF_TIMEOUT))
628 goto set_release_timer;
629
630 #ifdef CONFIG_MAC80211_HT_DEBUG
631 if (net_ratelimit())
632 wiphy_debug(hw->wiphy,
633 "release an RX reorder frame due to timeout on earlier frames\n");
634 #endif
635 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
636
637 /*
638 * Increment the head seq# also for the skipped slots.
639 */
640 tid_agg_rx->head_seq_num =
641 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
642 skipped = 0;
643 }
644 } else while (tid_agg_rx->reorder_buf[index]) {
645 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
646 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
647 tid_agg_rx->buf_size;
648 }
649
650 if (tid_agg_rx->stored_mpdu_num) {
651 j = index = seq_sub(tid_agg_rx->head_seq_num,
652 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
653
654 for (; j != (index - 1) % tid_agg_rx->buf_size;
655 j = (j + 1) % tid_agg_rx->buf_size) {
656 if (tid_agg_rx->reorder_buf[j])
657 break;
658 }
659
660 set_release_timer:
661
662 mod_timer(&tid_agg_rx->reorder_timer,
663 tid_agg_rx->reorder_time[j] + 1 +
664 HT_RX_REORDER_BUF_TIMEOUT);
665 } else {
666 del_timer(&tid_agg_rx->reorder_timer);
667 }
668 }
669
670 /*
671 * As this function belongs to the RX path it must be under
672 * rcu_read_lock protection. It returns false if the frame
673 * can be processed immediately, true if it was consumed.
674 */
675 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
676 struct tid_ampdu_rx *tid_agg_rx,
677 struct sk_buff *skb)
678 {
679 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
680 u16 sc = le16_to_cpu(hdr->seq_ctrl);
681 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
682 u16 head_seq_num, buf_size;
683 int index;
684 bool ret = true;
685
686 spin_lock(&tid_agg_rx->reorder_lock);
687
688 buf_size = tid_agg_rx->buf_size;
689 head_seq_num = tid_agg_rx->head_seq_num;
690
691 /* frame with out of date sequence number */
692 if (seq_less(mpdu_seq_num, head_seq_num)) {
693 dev_kfree_skb(skb);
694 goto out;
695 }
696
697 /*
698          * If the frame's sequence number exceeds our buffering window
699          * size, release some previous frames to make room for this one.
700 */
701 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
702 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
703 /* release stored frames up to new head to stack */
704 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
705 }
706
707 /* Now the new frame is always in the range of the reordering buffer */
708
709 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
710
711 /* check if we already stored this frame */
712 if (tid_agg_rx->reorder_buf[index]) {
713 dev_kfree_skb(skb);
714 goto out;
715 }
716
717 /*
718 * If the current MPDU is in the right order and nothing else
719 * is stored we can process it directly, no need to buffer it.
720 * If it is first but there's something stored, we may be able
721 * to release frames after this one.
722 */
723 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
724 tid_agg_rx->stored_mpdu_num == 0) {
725 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
726 ret = false;
727 goto out;
728 }
729
730 /* put the frame in the reordering buffer */
731 tid_agg_rx->reorder_buf[index] = skb;
732 tid_agg_rx->reorder_time[index] = jiffies;
733 tid_agg_rx->stored_mpdu_num++;
734 ieee80211_sta_reorder_release(hw, tid_agg_rx);
735
736 out:
737 spin_unlock(&tid_agg_rx->reorder_lock);
738 return ret;
739 }
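/*
 * For example, with buf_size 64 and head_seq_num 100, the reorder window
 * covers sequence numbers 100..163. An incoming MPDU with sequence number
 * 200 falls outside that window, so head_seq_num is first advanced to
 * 200 - 64 + 1 = 137 (releasing everything older to the stack) before the
 * new frame is stored in, or released from, its slot.
 */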
740
741 /*
742  * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that need
743  * no reordering are queued directly for further RX processing.
744 */
745 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
746 {
747 struct sk_buff *skb = rx->skb;
748 struct ieee80211_local *local = rx->local;
749 struct ieee80211_hw *hw = &local->hw;
750 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
751 struct sta_info *sta = rx->sta;
752 struct tid_ampdu_rx *tid_agg_rx;
753 u16 sc;
754 int tid;
755
756 if (!ieee80211_is_data_qos(hdr->frame_control))
757 goto dont_reorder;
758
759 /*
760 * filter the QoS data rx stream according to
761 * STA/TID and check if this STA/TID is on aggregation
762 */
763
764 if (!sta)
765 goto dont_reorder;
766
767 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
768
769 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
770 if (!tid_agg_rx)
771 goto dont_reorder;
772
773 /* qos null data frames are excluded */
774 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
775 goto dont_reorder;
776
777 /* new, potentially un-ordered, ampdu frame - process it */
778
779 /* reset session timer */
780 if (tid_agg_rx->timeout)
781 mod_timer(&tid_agg_rx->session_timer,
782 TU_TO_EXP_TIME(tid_agg_rx->timeout));
783
784 /* if this mpdu is fragmented - terminate rx aggregation session */
785 sc = le16_to_cpu(hdr->seq_ctrl);
786 if (sc & IEEE80211_SCTL_FRAG) {
787 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
788 skb_queue_tail(&rx->sdata->skb_queue, skb);
789 ieee80211_queue_work(&local->hw, &rx->sdata->work);
790 return;
791 }
792
793 /*
794 * No locking needed -- we will only ever process one
795 * RX packet at a time, and thus own tid_agg_rx. All
796 * other code manipulating it needs to (and does) make
797 * sure that we cannot get to it any more before doing
798 * anything with it.
799 */
800 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
801 return;
802
803 dont_reorder:
804 skb_queue_tail(&local->rx_skb_queue, skb);
805 }
806
807 static ieee80211_rx_result debug_noinline
808 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
809 {
810 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
811 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
812
813 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
814 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
815 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
816 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
817 hdr->seq_ctrl)) {
818 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
819 rx->local->dot11FrameDuplicateCount++;
820 rx->sta->num_duplicates++;
821 }
822 return RX_DROP_UNUSABLE;
823 } else
824 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
825 }
826
827 if (unlikely(rx->skb->len < 16)) {
828 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
829 return RX_DROP_MONITOR;
830 }
831
832 /* Drop disallowed frame classes based on STA auth/assoc state;
833 * IEEE 802.11, Chap 5.5.
834 *
835 * mac80211 filters only based on association state, i.e. it drops
836 * Class 3 frames from not associated stations. hostapd sends
837 * deauth/disassoc frames when needed. In addition, hostapd is
838 * responsible for filtering on both auth and assoc states.
839 */
840
841 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
842 return ieee80211_rx_mesh_check(rx);
843
844 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
845 ieee80211_is_pspoll(hdr->frame_control)) &&
846 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
847 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
848 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
849 if (rx->sta && rx->sta->dummy &&
850 ieee80211_is_data_present(hdr->frame_control)) {
851 u16 ethertype;
852 u8 *payload;
853
854 payload = rx->skb->data +
855 ieee80211_hdrlen(hdr->frame_control);
856 ethertype = (payload[6] << 8) | payload[7];
857 if (cpu_to_be16(ethertype) ==
858 rx->sdata->control_port_protocol)
859 return RX_CONTINUE;
860 }
861 return RX_DROP_MONITOR;
862 }
863
864 return RX_CONTINUE;
865 }
866
867
868 static ieee80211_rx_result debug_noinline
869 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
870 {
871 struct sk_buff *skb = rx->skb;
872 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
873 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
874 int keyidx;
875 int hdrlen;
876 ieee80211_rx_result result = RX_DROP_UNUSABLE;
877 struct ieee80211_key *sta_ptk = NULL;
878 int mmie_keyidx = -1;
879 __le16 fc;
880
881 /*
882 * Key selection 101
883 *
884 * There are four types of keys:
885 * - GTK (group keys)
886 * - IGTK (group keys for management frames)
887 * - PTK (pairwise keys)
888 * - STK (station-to-station pairwise keys)
889 *
890 * When selecting a key, we have to distinguish between multicast
891 * (including broadcast) and unicast frames, the latter can only
892 * use PTKs and STKs while the former always use GTKs and IGTKs.
893 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
894 * unicast frames can also use key indices like GTKs. Hence, if we
895 * don't have a PTK/STK we check the key index for a WEP key.
896 *
897 * Note that in a regular BSS, multicast frames are sent by the
898 * AP only, associated stations unicast the frame to the AP first
899 * which then multicasts it on their behalf.
900 *
901 * There is also a slight problem in IBSS mode: GTKs are negotiated
902 * with each station, that is something we don't currently handle.
903 * The spec seems to expect that one negotiates the same key with
904 * every station but there's no such requirement; VLANs could be
905 * possible.
906 */
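/*
 * In code terms, the selection below tries, in order: the station's PTK for
 * unicast frames, the IGTK indicated by an MMIE for BIP-protected group
 * management frames, a "possible" key for unprotected frames (only so that
 * ieee80211_drop_unencrypted*() can later decide whether encryption was
 * expected), and finally the key index taken from the IV of protected
 * frames, checking any per-station GTK before the interface's default keys.
 */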
907
908 /*
909 * No point in finding a key and decrypting if the frame is neither
910 * addressed to us nor a multicast frame.
911 */
912 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
913 return RX_CONTINUE;
914
915 /* start without a key */
916 rx->key = NULL;
917
918 if (rx->sta)
919 sta_ptk = rcu_dereference(rx->sta->ptk);
920
921 fc = hdr->frame_control;
922
923 if (!ieee80211_has_protected(fc))
924 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
925
926 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
927 rx->key = sta_ptk;
928 if ((status->flag & RX_FLAG_DECRYPTED) &&
929 (status->flag & RX_FLAG_IV_STRIPPED))
930 return RX_CONTINUE;
931 /* Skip decryption if the frame is not protected. */
932 if (!ieee80211_has_protected(fc))
933 return RX_CONTINUE;
934 } else if (mmie_keyidx >= 0) {
935 /* Broadcast/multicast robust management frame / BIP */
936 if ((status->flag & RX_FLAG_DECRYPTED) &&
937 (status->flag & RX_FLAG_IV_STRIPPED))
938 return RX_CONTINUE;
939
940 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
941 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
942 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
943 if (rx->sta)
944 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
945 if (!rx->key)
946 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
947 } else if (!ieee80211_has_protected(fc)) {
948 /*
949 * The frame was not protected, so skip decryption. However, we
950 * need to set rx->key if there is a key that could have been
951 * used so that the frame may be dropped if encryption would
952 * have been expected.
953 */
954 struct ieee80211_key *key = NULL;
955 struct ieee80211_sub_if_data *sdata = rx->sdata;
956 int i;
957
958 if (ieee80211_is_mgmt(fc) &&
959 is_multicast_ether_addr(hdr->addr1) &&
960 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
961 rx->key = key;
962 else {
963 if (rx->sta) {
964 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
965 key = rcu_dereference(rx->sta->gtk[i]);
966 if (key)
967 break;
968 }
969 }
970 if (!key) {
971 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
972 key = rcu_dereference(sdata->keys[i]);
973 if (key)
974 break;
975 }
976 }
977 if (key)
978 rx->key = key;
979 }
980 return RX_CONTINUE;
981 } else {
982 u8 keyid;
983 /*
984 * The device doesn't give us the IV so we won't be
985 * able to look up the key. That's ok though, we
986 * don't need to decrypt the frame, we just won't
987 * be able to keep statistics accurate.
988 * Except for key threshold notifications, should
989 * we somehow allow the driver to tell us which key
990 * the hardware used if this flag is set?
991 */
992 if ((status->flag & RX_FLAG_DECRYPTED) &&
993 (status->flag & RX_FLAG_IV_STRIPPED))
994 return RX_CONTINUE;
995
996 hdrlen = ieee80211_hdrlen(fc);
997
998 if (rx->skb->len < 8 + hdrlen)
999 return RX_DROP_UNUSABLE; /* TODO: count this? */
1000
1001 /*
1002 * no need to call ieee80211_wep_get_keyidx,
1003 * it verifies a bunch of things we've done already
1004 */
1005 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1006 keyidx = keyid >> 6;
1007
1008 /* check per-station GTK first, if multicast packet */
1009 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1010 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1011
1012 /* if not found, try default key */
1013 if (!rx->key) {
1014 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1015
1016 /*
1017 * RSNA-protected unicast frames should always be
1018 * sent with pairwise or station-to-station keys,
1019 * but for WEP we allow using a key index as well.
1020 */
1021 if (rx->key &&
1022 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1023 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1024 !is_multicast_ether_addr(hdr->addr1))
1025 rx->key = NULL;
1026 }
1027 }
1028
1029 if (rx->key) {
1030 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1031 return RX_DROP_MONITOR;
1032
1033 rx->key->tx_rx_count++;
1034 /* TODO: add threshold stuff again */
1035 } else {
1036 return RX_DROP_MONITOR;
1037 }
1038
1039 if (skb_linearize(rx->skb))
1040 return RX_DROP_UNUSABLE;
1041 /* the hdr variable is invalid now! */
1042
1043 switch (rx->key->conf.cipher) {
1044 case WLAN_CIPHER_SUITE_WEP40:
1045 case WLAN_CIPHER_SUITE_WEP104:
1046 /* Check for weak IVs if possible */
1047 if (rx->sta && ieee80211_is_data(fc) &&
1048 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1049 !(status->flag & RX_FLAG_DECRYPTED)) &&
1050 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1051 rx->sta->wep_weak_iv_count++;
1052
1053 result = ieee80211_crypto_wep_decrypt(rx);
1054 break;
1055 case WLAN_CIPHER_SUITE_TKIP:
1056 result = ieee80211_crypto_tkip_decrypt(rx);
1057 break;
1058 case WLAN_CIPHER_SUITE_CCMP:
1059 result = ieee80211_crypto_ccmp_decrypt(rx);
1060 break;
1061 case WLAN_CIPHER_SUITE_AES_CMAC:
1062 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1063 break;
1064 default:
1065 /*
1066 * We can reach here only with HW-only algorithms
1067 * but why didn't it decrypt the frame?!
1068 */
1069 return RX_DROP_UNUSABLE;
1070 }
1071
1072 /* either the frame has been decrypted or will be dropped */
1073 status->flag |= RX_FLAG_DECRYPTED;
1074
1075 return result;
1076 }
1077
1078 static ieee80211_rx_result debug_noinline
1079 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1080 {
1081 struct ieee80211_local *local;
1082 struct ieee80211_hdr *hdr;
1083 struct sk_buff *skb;
1084
1085 local = rx->local;
1086 skb = rx->skb;
1087 hdr = (struct ieee80211_hdr *) skb->data;
1088
1089 if (!local->pspolling)
1090 return RX_CONTINUE;
1091
1092 if (!ieee80211_has_fromds(hdr->frame_control))
1093 /* this is not from AP */
1094 return RX_CONTINUE;
1095
1096 if (!ieee80211_is_data(hdr->frame_control))
1097 return RX_CONTINUE;
1098
1099 if (!ieee80211_has_moredata(hdr->frame_control)) {
1100 /* AP has no more frames buffered for us */
1101 local->pspolling = false;
1102 return RX_CONTINUE;
1103 }
1104
1105 /* more data bit is set, let's request a new frame from the AP */
1106 ieee80211_send_pspoll(local, rx->sdata);
1107
1108 return RX_CONTINUE;
1109 }
1110
1111 static void ap_sta_ps_start(struct sta_info *sta)
1112 {
1113 struct ieee80211_sub_if_data *sdata = sta->sdata;
1114 struct ieee80211_local *local = sdata->local;
1115
1116 atomic_inc(&sdata->bss->num_sta_ps);
1117 set_sta_flag(sta, WLAN_STA_PS_STA);
1118 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1119 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1120 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1121 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1122 sdata->name, sta->sta.addr, sta->sta.aid);
1123 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1124 }
1125
1126 static void ap_sta_ps_end(struct sta_info *sta)
1127 {
1128 struct ieee80211_sub_if_data *sdata = sta->sdata;
1129
1130 atomic_dec(&sdata->bss->num_sta_ps);
1131
1132 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1133 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1134 sdata->name, sta->sta.addr, sta->sta.aid);
1135 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1136
1137 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1138 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1139 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1140 sdata->name, sta->sta.addr, sta->sta.aid);
1141 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1142 return;
1143 }
1144
1145 ieee80211_sta_ps_deliver_wakeup(sta);
1146 }
1147
1148 int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1149 {
1150 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1151 bool in_ps;
1152
1153 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1154
1155 /* Don't let the same PS state be set twice */
1156 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1157 if ((start && in_ps) || (!start && !in_ps))
1158 return -EINVAL;
1159
1160 if (start)
1161 ap_sta_ps_start(sta_inf);
1162 else
1163 ap_sta_ps_end(sta_inf);
1164
1165 return 0;
1166 }
1167 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
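/*
 * Drivers that set IEEE80211_HW_AP_LINK_PS track powersave transitions in
 * hardware/firmware and report them with the helper above, roughly
 * (hypothetical driver snippet, not part of mac80211):
 *
 *	ret = ieee80211_sta_ps_transition(pubsta, sleeping);
 *	if (ret == -EINVAL)
 *		... the station was already in that PS state ...
 */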
1168
1169 static ieee80211_rx_result debug_noinline
1170 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1171 {
1172 struct ieee80211_sub_if_data *sdata = rx->sdata;
1173 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1174 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1175 int tid, ac;
1176
1177 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1178 return RX_CONTINUE;
1179
1180 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1181 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1182 return RX_CONTINUE;
1183
1184 /*
1185 * The device handles station powersave, so don't do anything about
1186          * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1187          * to mac80211 since the device handles them.)
1188 */
1189 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1190 return RX_CONTINUE;
1191
1192 /*
1193 * Don't do anything if the station isn't already asleep. In
1194 * the uAPSD case, the station will probably be marked asleep,
1195 * in the PS-Poll case the station must be confused ...
1196 */
1197 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1198 return RX_CONTINUE;
1199
1200 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1201 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1202 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1203 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1204 else
1205 set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1206 }
1207
1208 /* Free PS Poll skb here instead of returning RX_DROP that would
1209                  * count as a dropped frame. */
1210 dev_kfree_skb(rx->skb);
1211
1212 return RX_QUEUED;
1213 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1214 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1215 ieee80211_has_pm(hdr->frame_control) &&
1216 (ieee80211_is_data_qos(hdr->frame_control) ||
1217 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1218 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1219 ac = ieee802_1d_to_ac[tid & 7];
1220
1221 /*
1222 * If this AC is not trigger-enabled do nothing.
1223 *
1224 * NB: This could/should check a separate bitmap of trigger-
1225 * enabled queues, but for now we only implement uAPSD w/o
1226 * TSPEC changes to the ACs, so they're always the same.
1227 */
1228 if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1229 return RX_CONTINUE;
1230
1231 /* if we are in a service period, do nothing */
1232 if (test_sta_flag(rx->sta, WLAN_STA_SP))
1233 return RX_CONTINUE;
1234
1235 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1236 ieee80211_sta_ps_deliver_uapsd(rx->sta);
1237 else
1238 set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1239 }
1240
1241 return RX_CONTINUE;
1242 }
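/*
 * As an example of the trigger-frame path above: a QoS data frame with the
 * PM bit set and TID 6 maps via ieee802_1d_to_ac[] to the voice AC; if that
 * AC's bit is set in the station's uapsd_queues bitmap and no service
 * period is already in progress, ieee80211_sta_ps_deliver_uapsd() starts
 * delivering buffered frames for this unscheduled service period.
 */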
1243
1244 static ieee80211_rx_result debug_noinline
1245 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1246 {
1247 struct sta_info *sta = rx->sta;
1248 struct sk_buff *skb = rx->skb;
1249 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1250 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1251
1252 if (!sta)
1253 return RX_CONTINUE;
1254
1255 /*
1256 * Update last_rx only for IBSS packets which are for the current
1257 * BSSID to avoid keeping the current IBSS network alive in cases
1258          * where other STAs start using a different BSSID.
1259 */
1260 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1261 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1262 NL80211_IFTYPE_ADHOC);
1263 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
1264 sta->last_rx = jiffies;
1265 if (ieee80211_is_data(hdr->frame_control)) {
1266 sta->last_rx_rate_idx = status->rate_idx;
1267 sta->last_rx_rate_flag = status->flag;
1268 }
1269 }
1270 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1271 /*
1272                  * Mesh beacons will update last_rx if they are found to
1273 * match the current local configuration when processed.
1274 */
1275 sta->last_rx = jiffies;
1276 if (ieee80211_is_data(hdr->frame_control)) {
1277 sta->last_rx_rate_idx = status->rate_idx;
1278 sta->last_rx_rate_flag = status->flag;
1279 }
1280 }
1281
1282 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1283 return RX_CONTINUE;
1284
1285 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1286 ieee80211_sta_rx_notify(rx->sdata, hdr);
1287
1288 sta->rx_fragments++;
1289 sta->rx_bytes += rx->skb->len;
1290 sta->last_signal = status->signal;
1291 ewma_add(&sta->avg_signal, -status->signal);
1292
1293 /*
1294 * Change STA power saving mode only at the end of a frame
1295 * exchange sequence.
1296 */
1297 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1298 !ieee80211_has_morefrags(hdr->frame_control) &&
1299 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1300 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1301 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1302 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1303 /*
1304 * Ignore doze->wake transitions that are
1305 * indicated by non-data frames, the standard
1306 * is unclear here, but for example going to
1307 * PS mode and then scanning would cause a
1308 * doze->wake transition for the probe request,
1309 * and that is clearly undesirable.
1310 */
1311 if (ieee80211_is_data(hdr->frame_control) &&
1312 !ieee80211_has_pm(hdr->frame_control))
1313 ap_sta_ps_end(sta);
1314 } else {
1315 if (ieee80211_has_pm(hdr->frame_control))
1316 ap_sta_ps_start(sta);
1317 }
1318 }
1319
1320 /*
1321 * Drop (qos-)data::nullfunc frames silently, since they
1322 * are used only to control station power saving mode.
1323 */
1324 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1325 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1326 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1327
1328 /*
1329 * If we receive a 4-addr nullfunc frame from a STA
1330 * that was not moved to a 4-addr STA vlan yet, drop
1331 * the frame to the monitor interface, to make sure
1332 * that hostapd sees it
1333 */
1334 if (ieee80211_has_a4(hdr->frame_control) &&
1335 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1336 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1337 !rx->sdata->u.vlan.sta)))
1338 return RX_DROP_MONITOR;
1339 /*
1340 * Update counter and free packet here to avoid
1341                  * counting this as a dropped packet.
1342 */
1343 sta->rx_packets++;
1344 dev_kfree_skb(rx->skb);
1345 return RX_QUEUED;
1346 }
1347
1348 return RX_CONTINUE;
1349 } /* ieee80211_rx_h_sta_process */
1350
1351 static inline struct ieee80211_fragment_entry *
1352 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1353 unsigned int frag, unsigned int seq, int rx_queue,
1354 struct sk_buff **skb)
1355 {
1356 struct ieee80211_fragment_entry *entry;
1357 int idx;
1358
1359 idx = sdata->fragment_next;
1360 entry = &sdata->fragments[sdata->fragment_next++];
1361 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1362 sdata->fragment_next = 0;
1363
1364 if (!skb_queue_empty(&entry->skb_list)) {
1365 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1366 struct ieee80211_hdr *hdr =
1367 (struct ieee80211_hdr *) entry->skb_list.next->data;
1368 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1369 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1370 "addr1=%pM addr2=%pM\n",
1371 sdata->name, idx,
1372 jiffies - entry->first_frag_time, entry->seq,
1373 entry->last_frag, hdr->addr1, hdr->addr2);
1374 #endif
1375 __skb_queue_purge(&entry->skb_list);
1376 }
1377
1378 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1379 *skb = NULL;
1380 entry->first_frag_time = jiffies;
1381 entry->seq = seq;
1382 entry->rx_queue = rx_queue;
1383 entry->last_frag = frag;
1384 entry->ccmp = 0;
1385 entry->extra_len = 0;
1386
1387 return entry;
1388 }
1389
1390 static inline struct ieee80211_fragment_entry *
1391 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1392 unsigned int frag, unsigned int seq,
1393 int rx_queue, struct ieee80211_hdr *hdr)
1394 {
1395 struct ieee80211_fragment_entry *entry;
1396 int i, idx;
1397
1398 idx = sdata->fragment_next;
1399 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1400 struct ieee80211_hdr *f_hdr;
1401
1402 idx--;
1403 if (idx < 0)
1404 idx = IEEE80211_FRAGMENT_MAX - 1;
1405
1406 entry = &sdata->fragments[idx];
1407 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1408 entry->rx_queue != rx_queue ||
1409 entry->last_frag + 1 != frag)
1410 continue;
1411
1412 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1413
1414 /*
1415 * Check ftype and addresses are equal, else check next fragment
1416 */
1417 if (((hdr->frame_control ^ f_hdr->frame_control) &
1418 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1419 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1420 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1421 continue;
1422
1423 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1424 __skb_queue_purge(&entry->skb_list);
1425 continue;
1426 }
1427 return entry;
1428 }
1429
1430 return NULL;
1431 }
1432
1433 static ieee80211_rx_result debug_noinline
1434 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1435 {
1436 struct ieee80211_hdr *hdr;
1437 u16 sc;
1438 __le16 fc;
1439 unsigned int frag, seq;
1440 struct ieee80211_fragment_entry *entry;
1441 struct sk_buff *skb;
1442 struct ieee80211_rx_status *status;
1443
1444 hdr = (struct ieee80211_hdr *)rx->skb->data;
1445 fc = hdr->frame_control;
1446 sc = le16_to_cpu(hdr->seq_ctrl);
1447 frag = sc & IEEE80211_SCTL_FRAG;
1448
1449 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1450 (rx->skb)->len < 24 ||
1451 is_multicast_ether_addr(hdr->addr1))) {
1452 /* not fragmented */
1453 goto out;
1454 }
1455 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1456
1457 if (skb_linearize(rx->skb))
1458 return RX_DROP_UNUSABLE;
1459
1460 /*
1461 * skb_linearize() might change the skb->data and
1462 * previously cached variables (in this case, hdr) need to
1463 * be refreshed with the new data.
1464 */
1465 hdr = (struct ieee80211_hdr *)rx->skb->data;
1466 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1467
1468 if (frag == 0) {
1469 /* This is the first fragment of a new frame. */
1470 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1471 rx->seqno_idx, &(rx->skb));
1472 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1473 ieee80211_has_protected(fc)) {
1474 int queue = rx->security_idx;
1475 /* Store CCMP PN so that we can verify that the next
1476 * fragment has a sequential PN value. */
1477 entry->ccmp = 1;
1478 memcpy(entry->last_pn,
1479 rx->key->u.ccmp.rx_pn[queue],
1480 CCMP_PN_LEN);
1481 }
1482 return RX_QUEUED;
1483 }
1484
1485 /* This is a fragment for a frame that should already be pending in
1486          * the fragment cache. Add this fragment to the end of the pending entry.
1487 */
1488 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1489 rx->seqno_idx, hdr);
1490 if (!entry) {
1491 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1492 return RX_DROP_MONITOR;
1493 }
1494
1495 /* Verify that MPDUs within one MSDU have sequential PN values.
1496 * (IEEE 802.11i, 8.3.3.4.5) */
1497 if (entry->ccmp) {
1498 int i;
1499 u8 pn[CCMP_PN_LEN], *rpn;
1500 int queue;
1501 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1502 return RX_DROP_UNUSABLE;
1503 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1504 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1505 pn[i]++;
1506 if (pn[i])
1507 break;
1508 }
1509 queue = rx->security_idx;
1510 rpn = rx->key->u.ccmp.rx_pn[queue];
1511 if (memcmp(pn, rpn, CCMP_PN_LEN))
1512 return RX_DROP_UNUSABLE;
1513 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1514 }
1515
1516 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1517 __skb_queue_tail(&entry->skb_list, rx->skb);
1518 entry->last_frag = frag;
1519 entry->extra_len += rx->skb->len;
1520 if (ieee80211_has_morefrags(fc)) {
1521 rx->skb = NULL;
1522 return RX_QUEUED;
1523 }
1524
1525 rx->skb = __skb_dequeue(&entry->skb_list);
1526 if (skb_tailroom(rx->skb) < entry->extra_len) {
1527 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1528 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1529 GFP_ATOMIC))) {
1530 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1531 __skb_queue_purge(&entry->skb_list);
1532 return RX_DROP_UNUSABLE;
1533 }
1534 }
1535 while ((skb = __skb_dequeue(&entry->skb_list))) {
1536 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1537 dev_kfree_skb(skb);
1538 }
1539
1540 /* Complete frame has been reassembled - process it now */
1541 status = IEEE80211_SKB_RXCB(rx->skb);
1542 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1543
1544 out:
1545 if (rx->sta)
1546 rx->sta->rx_packets++;
1547 if (is_multicast_ether_addr(hdr->addr1))
1548 rx->local->dot11MulticastReceivedFrameCount++;
1549 else
1550 ieee80211_led_rx(rx->local);
1551 return RX_CONTINUE;
1552 }
1553
1554 static ieee80211_rx_result debug_noinline
1555 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1556 {
1557 u8 *data = rx->skb->data;
1558 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1559
1560 if (!ieee80211_is_data_qos(hdr->frame_control))
1561 return RX_CONTINUE;
1562
1563 /* remove the qos control field, update frame type and meta-data */
1564 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1565 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1566 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1567 /* change frame type to non QOS */
1568 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1569
1570 return RX_CONTINUE;
1571 }
1572
1573 static int
1574 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1575 {
1576 if (unlikely(!rx->sta ||
1577 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1578 return -EACCES;
1579
1580 return 0;
1581 }
1582
1583 static int
1584 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1585 {
1586 struct sk_buff *skb = rx->skb;
1587 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1588
1589 /*
1590 * Pass through unencrypted frames if the hardware has
1591 * decrypted them already.
1592 */
1593 if (status->flag & RX_FLAG_DECRYPTED)
1594 return 0;
1595
1596 /* Drop unencrypted frames if key is set. */
1597 if (unlikely(!ieee80211_has_protected(fc) &&
1598 !ieee80211_is_nullfunc(fc) &&
1599 ieee80211_is_data(fc) &&
1600 (rx->key || rx->sdata->drop_unencrypted)))
1601 return -EACCES;
1602
1603 return 0;
1604 }
1605
1606 static int
1607 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1608 {
1609 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1610 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1611 __le16 fc = hdr->frame_control;
1612
1613 /*
1614 * Pass through unencrypted frames if the hardware has
1615 * decrypted them already.
1616 */
1617 if (status->flag & RX_FLAG_DECRYPTED)
1618 return 0;
1619
1620 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1621 if (unlikely(!ieee80211_has_protected(fc) &&
1622 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1623 rx->key)) {
1624 if (ieee80211_is_deauth(fc))
1625 cfg80211_send_unprot_deauth(rx->sdata->dev,
1626 rx->skb->data,
1627 rx->skb->len);
1628 else if (ieee80211_is_disassoc(fc))
1629 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1630 rx->skb->data,
1631 rx->skb->len);
1632 return -EACCES;
1633 }
1634 /* BIP does not use Protected field, so need to check MMIE */
1635 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1636 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1637 if (ieee80211_is_deauth(fc))
1638 cfg80211_send_unprot_deauth(rx->sdata->dev,
1639 rx->skb->data,
1640 rx->skb->len);
1641 else if (ieee80211_is_disassoc(fc))
1642 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1643 rx->skb->data,
1644 rx->skb->len);
1645 return -EACCES;
1646 }
1647 /*
1648 * When using MFP, Action frames are not allowed prior to
1649 * having configured keys.
1650 */
1651 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1652 ieee80211_is_robust_mgmt_frame(
1653 (struct ieee80211_hdr *) rx->skb->data)))
1654 return -EACCES;
1655 }
1656
1657 return 0;
1658 }
1659
1660 static int
1661 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1662 {
1663 struct ieee80211_sub_if_data *sdata = rx->sdata;
1664 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1665 bool check_port_control = false;
1666 struct ethhdr *ehdr;
1667 int ret;
1668
1669 *port_control = false;
1670 if (ieee80211_has_a4(hdr->frame_control) &&
1671 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1672 return -1;
1673
1674 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1675 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1676
1677 if (!sdata->u.mgd.use_4addr)
1678 return -1;
1679 else
1680 check_port_control = true;
1681 }
1682
1683 if (is_multicast_ether_addr(hdr->addr1) &&
1684 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1685 return -1;
1686
1687 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1688 if (ret < 0)
1689 return ret;
1690
1691 ehdr = (struct ethhdr *) rx->skb->data;
1692 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1693 *port_control = true;
1694 else if (check_port_control)
1695 return -1;
1696
1697 return 0;
1698 }
1699
1700 /*
1701 * requires that rx->skb is a frame with ethernet header
1702 */
1703 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1704 {
1705 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1706 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1707 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1708
1709 /*
1710 * Allow EAPOL frames to us/the PAE group address regardless
1711 * of whether the frame was encrypted or not.
1712 */
1713 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1714 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1715 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1716 return true;
1717
1718 if (ieee80211_802_1x_port_control(rx) ||
1719 ieee80211_drop_unencrypted(rx, fc))
1720 return false;
1721
1722 return true;
1723 }
1724
1725 /*
1726 * requires that rx->skb is a frame with ethernet header
1727 */
1728 static void
1729 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1730 {
1731 struct ieee80211_sub_if_data *sdata = rx->sdata;
1732 struct net_device *dev = sdata->dev;
1733 struct sk_buff *skb, *xmit_skb;
1734 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1735 struct sta_info *dsta;
1736 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1737
1738 skb = rx->skb;
1739 xmit_skb = NULL;
1740
1741 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1742 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1743 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1744 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1745 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1746 if (is_multicast_ether_addr(ehdr->h_dest)) {
1747 /*
1748 * send multicast frames both to higher layers in
1749 * local net stack and back to the wireless medium
1750 */
1751 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1752 if (!xmit_skb && net_ratelimit())
1753 printk(KERN_DEBUG "%s: failed to clone "
1754 "multicast frame\n", dev->name);
1755 } else {
1756 dsta = sta_info_get(sdata, skb->data);
1757 if (dsta) {
1758 /*
1759 * The destination station is associated to
1760 * this AP (in this VLAN), so send the frame
1761 * directly to it and do not pass it to local
1762 * net stack.
1763 */
1764 xmit_skb = skb;
1765 skb = NULL;
1766 }
1767 }
1768 }
1769
1770 if (skb) {
1771 int align __maybe_unused;
1772
1773 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1774 /*
1775 * 'align' will only take the values 0 or 2 here
1776 * since all frames are required to be aligned
1777 * to 2-byte boundaries when being passed to
1778 * mac80211. That also explains the __skb_push()
1779 * below.
1780 */
1781 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1782 if (align) {
1783 if (WARN_ON(skb_headroom(skb) < 3)) {
1784 dev_kfree_skb(skb);
1785 skb = NULL;
1786 } else {
1787 u8 *data = skb->data;
1788 size_t len = skb_headlen(skb);
1789 skb->data -= align;
1790 memmove(skb->data, data, len);
1791 skb_set_tail_pointer(skb, len);
1792 }
1793 }
1794 #endif
1795
1796 if (skb) {
1797 /* deliver to local stack */
1798 skb->protocol = eth_type_trans(skb, dev);
1799 memset(skb->cb, 0, sizeof(skb->cb));
1800 netif_receive_skb(skb);
1801 }
1802 }
1803
1804 if (xmit_skb) {
1805 /* send to wireless media */
1806 xmit_skb->protocol = htons(ETH_P_802_3);
1807 skb_reset_network_header(xmit_skb);
1808 skb_reset_mac_header(xmit_skb);
1809 dev_queue_xmit(xmit_skb);
1810 }
1811 }
1812
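/*
 * A-MSDU handler: split an aggregated MSDU into individual 802.3
 * frames and run each of them through the allowed-frame check and
 * ieee80211_deliver_skb().
 */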
1813 static ieee80211_rx_result debug_noinline
1814 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1815 {
1816 struct net_device *dev = rx->sdata->dev;
1817 struct sk_buff *skb = rx->skb;
1818 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1819 __le16 fc = hdr->frame_control;
1820 struct sk_buff_head frame_list;
1821 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1822
1823 if (unlikely(!ieee80211_is_data(fc)))
1824 return RX_CONTINUE;
1825
1826 if (unlikely(!ieee80211_is_data_present(fc)))
1827 return RX_DROP_MONITOR;
1828
1829 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1830 return RX_CONTINUE;
1831
1832 if (ieee80211_has_a4(hdr->frame_control) &&
1833 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1834 !rx->sdata->u.vlan.sta)
1835 return RX_DROP_UNUSABLE;
1836
1837 if (is_multicast_ether_addr(hdr->addr1) &&
1838 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1839 rx->sdata->u.vlan.sta) ||
1840 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1841 rx->sdata->u.mgd.use_4addr)))
1842 return RX_DROP_UNUSABLE;
1843
1844 skb->dev = dev;
1845 __skb_queue_head_init(&frame_list);
1846
1847 if (skb_linearize(skb))
1848 return RX_DROP_UNUSABLE;
1849
1850 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1851 rx->sdata->vif.type,
1852 rx->local->hw.extra_tx_headroom, true);
1853
1854 while (!skb_queue_empty(&frame_list)) {
1855 rx->skb = __skb_dequeue(&frame_list);
1856
1857 if (!ieee80211_frame_allowed(rx, fc)) {
1858 dev_kfree_skb(rx->skb);
1859 continue;
1860 }
1861 dev->stats.rx_packets++;
1862 dev->stats.rx_bytes += rx->skb->len;
1863
1864 ieee80211_deliver_skb(rx);
1865 }
1866
1867 return RX_QUEUED;
1868 }
1869
1870 #ifdef CONFIG_MAC80211_MESH
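/*
 * Mesh forwarding: check the recent multicast cache, learn proxied
 * (AE) addresses, and forward frames that have not yet reached their
 * destination, decrementing the mesh TTL and updating the interface's
 * forwarding counters.
 */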
1871 static ieee80211_rx_result
1872 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1873 {
1874 struct ieee80211_hdr *hdr;
1875 struct ieee80211s_hdr *mesh_hdr;
1876 unsigned int hdrlen;
1877 struct sk_buff *skb = rx->skb, *fwd_skb;
1878 struct ieee80211_local *local = rx->local;
1879 struct ieee80211_sub_if_data *sdata = rx->sdata;
1880 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1881
1882 hdr = (struct ieee80211_hdr *) skb->data;
1883 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1884 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1885
1886 /* frame is in RMC, don't forward */
1887 if (ieee80211_is_data(hdr->frame_control) &&
1888 is_multicast_ether_addr(hdr->addr1) &&
1889 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1890 return RX_DROP_MONITOR;
1891
1892 if (!ieee80211_is_data(hdr->frame_control))
1893 return RX_CONTINUE;
1894
1895 if (!mesh_hdr->ttl)
1896 /* illegal frame */
1897 return RX_DROP_MONITOR;
1898
1899 if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
1900 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1901 dropped_frames_congestion);
1902 return RX_DROP_MONITOR;
1903 }
1904
1905 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1906 struct mesh_path *mppath;
1907 char *proxied_addr;
1908 char *mpp_addr;
1909
1910 if (is_multicast_ether_addr(hdr->addr1)) {
1911 mpp_addr = hdr->addr3;
1912 proxied_addr = mesh_hdr->eaddr1;
1913 } else {
1914 mpp_addr = hdr->addr4;
1915 proxied_addr = mesh_hdr->eaddr2;
1916 }
1917
1918 rcu_read_lock();
1919 mppath = mpp_path_lookup(proxied_addr, sdata);
1920 if (!mppath) {
1921 mpp_path_add(proxied_addr, mpp_addr, sdata);
1922 } else {
1923 spin_lock_bh(&mppath->state_lock);
1924 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1925 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1926 spin_unlock_bh(&mppath->state_lock);
1927 }
1928 rcu_read_unlock();
1929 }
1930
1931 /* Frame has reached destination. Don't forward */
1932 if (!is_multicast_ether_addr(hdr->addr1) &&
1933 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1934 return RX_CONTINUE;
1935
1936 mesh_hdr->ttl--;
1937
1938 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1939 if (!mesh_hdr->ttl)
1940 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1941 dropped_frames_ttl);
1942 else {
1943 struct ieee80211_hdr *fwd_hdr;
1944 struct ieee80211_tx_info *info;
1945
1946 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1947
1948 if (!fwd_skb && net_ratelimit())
1949 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1950 sdata->name);
1951 if (!fwd_skb)
1952 goto out;
1953
1954 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1955 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1956 info = IEEE80211_SKB_CB(fwd_skb);
1957 memset(info, 0, sizeof(*info));
1958 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1959 info->control.vif = &rx->sdata->vif;
1960 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1961 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1962 fwded_mcast);
1963 skb_set_queue_mapping(fwd_skb,
1964 ieee80211_select_queue(sdata, fwd_skb));
1965 ieee80211_set_qos_hdr(sdata, fwd_skb);
1966 } else {
1967 int err;
1968 /*
1969 * Save TA to addr1 to send TA a path error if a
1970 * suitable next hop is not found
1971 */
1972 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1973 ETH_ALEN);
1974 err = mesh_nexthop_lookup(fwd_skb, sdata);
1975 /* Failed to immediately resolve next hop:
1976 * fwded frame was dropped or will be added
1977 * later to the pending skb queue. */
1978 if (err)
1979 return RX_DROP_MONITOR;
1980
1981 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1982 fwded_unicast);
1983 }
1984 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1985 fwded_frames);
1986 ieee80211_add_pending_skb(local, fwd_skb);
1987 }
1988 }
1989
1990 out:
1991 if (is_multicast_ether_addr(hdr->addr1) ||
1992 sdata->dev->flags & IFF_PROMISC)
1993 return RX_CONTINUE;
1994 else
1995 return RX_DROP_MONITOR;
1996 }
1997 #endif
1998
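/*
 * Data frame handler: convert the 802.11 frame to 802.3, apply the
 * port-control and encryption policy checks, update interface
 * statistics and the dynamic powersave timer, then hand the frame to
 * ieee80211_deliver_skb().
 */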
1999 static ieee80211_rx_result debug_noinline
2000 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2001 {
2002 struct ieee80211_sub_if_data *sdata = rx->sdata;
2003 struct ieee80211_local *local = rx->local;
2004 struct net_device *dev = sdata->dev;
2005 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2006 __le16 fc = hdr->frame_control;
2007 bool port_control;
2008 int err;
2009
2010 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2011 return RX_CONTINUE;
2012
2013 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2014 return RX_DROP_MONITOR;
2015
2016 /*
2017 * Allow the cooked monitor interface of an AP to see 4-addr frames so
2018 * that a 4-addr station can be detected and moved into a separate VLAN
2019 */
2020 if (ieee80211_has_a4(hdr->frame_control) &&
2021 sdata->vif.type == NL80211_IFTYPE_AP)
2022 return RX_DROP_MONITOR;
2023
2024 err = __ieee80211_data_to_8023(rx, &port_control);
2025 if (unlikely(err))
2026 return RX_DROP_UNUSABLE;
2027
2028 if (!ieee80211_frame_allowed(rx, fc))
2029 return RX_DROP_MONITOR;
2030
2031 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2032 unlikely(port_control) && sdata->bss) {
2033 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2034 u.ap);
2035 dev = sdata->dev;
2036 rx->sdata = sdata;
2037 }
2038
2039 rx->skb->dev = dev;
2040
2041 dev->stats.rx_packets++;
2042 dev->stats.rx_bytes += rx->skb->len;
2043
2044 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2045 !is_multicast_ether_addr(
2046 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2047 (!local->scanning &&
2048 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
2049 mod_timer(&local->dynamic_ps_timer, jiffies +
2050 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2051 }
2052
2053 ieee80211_deliver_skb(rx);
2054
2055 return RX_QUEUED;
2056 }
2057
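/*
 * Control frame handler: only BlockAck requests (BAR) are processed
 * here; stored frames up to the BAR start sequence number are released
 * from the reorder buffer and the aggregation session timer is reset.
 */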
2058 static ieee80211_rx_result debug_noinline
2059 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2060 {
2061 struct ieee80211_local *local = rx->local;
2062 struct ieee80211_hw *hw = &local->hw;
2063 struct sk_buff *skb = rx->skb;
2064 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2065 struct tid_ampdu_rx *tid_agg_rx;
2066 u16 start_seq_num;
2067 u16 tid;
2068
2069 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2070 return RX_CONTINUE;
2071
2072 if (ieee80211_is_back_req(bar->frame_control)) {
2073 struct {
2074 __le16 control, start_seq_num;
2075 } __packed bar_data;
2076
2077 if (!rx->sta)
2078 return RX_DROP_MONITOR;
2079
2080 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2081 &bar_data, sizeof(bar_data)))
2082 return RX_DROP_MONITOR;
2083
2084 tid = le16_to_cpu(bar_data.control) >> 12;
2085
2086 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2087 if (!tid_agg_rx)
2088 return RX_DROP_MONITOR;
2089
2090 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2091
2092 /* reset session timer */
2093 if (tid_agg_rx->timeout)
2094 mod_timer(&tid_agg_rx->session_timer,
2095 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2096
2097 spin_lock(&tid_agg_rx->reorder_lock);
2098 /* release stored frames up to start of BAR */
2099 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
2100 spin_unlock(&tid_agg_rx->reorder_lock);
2101
2102 kfree_skb(skb);
2103 return RX_QUEUED;
2104 }
2105
2106 /*
2107 * After this point, we only want management frames,
2108 * so we can drop all remaining control frames to
2109 * cooked monitor interfaces.
2110 */
2111 return RX_DROP_MONITOR;
2112 }
2113
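/*
 * Respond to an SA Query request from our current AP by transmitting
 * an SA Query response that echoes the received transaction ID.
 */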
2114 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2115 struct ieee80211_mgmt *mgmt,
2116 size_t len)
2117 {
2118 struct ieee80211_local *local = sdata->local;
2119 struct sk_buff *skb;
2120 struct ieee80211_mgmt *resp;
2121
2122 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
2123 /* Not addressed to our own unicast address */
2124 return;
2125 }
2126
2127 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
2128 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
2129 /* Not from the current AP or not associated yet. */
2130 return;
2131 }
2132
2133 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2134 /* Too short SA Query request frame */
2135 return;
2136 }
2137
2138 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2139 if (skb == NULL)
2140 return;
2141
2142 skb_reserve(skb, local->hw.extra_tx_headroom);
2143 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2144 memset(resp, 0, 24);
2145 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2146 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2147 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2148 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2149 IEEE80211_STYPE_ACTION);
2150 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2151 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2152 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2153 memcpy(resp->u.action.u.sa_query.trans_id,
2154 mgmt->u.action.u.sa_query.trans_id,
2155 WLAN_SA_QUERY_TR_ID_LEN);
2156
2157 ieee80211_tx_skb(sdata, skb);
2158 }
2159
2160 static ieee80211_rx_result debug_noinline
2161 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2162 {
2163 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2164 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2165
2166 /*
2167 * From here on, look only at management frames.
2168 * Data and control frames are already handled,
2169 * and unknown (reserved) frames are useless.
2170 */
2171 if (rx->skb->len < 24)
2172 return RX_DROP_MONITOR;
2173
2174 if (!ieee80211_is_mgmt(mgmt->frame_control))
2175 return RX_DROP_MONITOR;
2176
2177 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2178 return RX_DROP_MONITOR;
2179
2180 if (ieee80211_drop_unencrypted_mgmt(rx))
2181 return RX_DROP_UNUSABLE;
2182
2183 return RX_CONTINUE;
2184 }
2185
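/*
 * Action frame handler: validate length and category, handle spectrum
 * measurement and SA Query requests directly, and queue BlockAck,
 * channel switch, mesh peering and mesh path selection frames to the
 * interface work queue for further processing.
 */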
2186 static ieee80211_rx_result debug_noinline
2187 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2188 {
2189 struct ieee80211_local *local = rx->local;
2190 struct ieee80211_sub_if_data *sdata = rx->sdata;
2191 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2192 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2193 int len = rx->skb->len;
2194
2195 if (!ieee80211_is_action(mgmt->frame_control))
2196 return RX_CONTINUE;
2197
2198 /* drop too small frames */
2199 if (len < IEEE80211_MIN_ACTION_SIZE)
2200 return RX_DROP_UNUSABLE;
2201
2202 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2203 return RX_DROP_UNUSABLE;
2204
2205 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2206 return RX_DROP_UNUSABLE;
2207
2208 switch (mgmt->u.action.category) {
2209 case WLAN_CATEGORY_BACK:
2210 /*
2211 * The aggregation code is not prepared to handle
2212 * anything but STA/AP due to the BSSID handling;
2213 * IBSS could work in the code but isn't supported
2214 * by drivers or the standard.
2215 */
2216 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2217 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2218 sdata->vif.type != NL80211_IFTYPE_AP)
2219 break;
2220
2221 /* verify action_code is present */
2222 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2223 break;
2224
2225 switch (mgmt->u.action.u.addba_req.action_code) {
2226 case WLAN_ACTION_ADDBA_REQ:
2227 if (len < (IEEE80211_MIN_ACTION_SIZE +
2228 sizeof(mgmt->u.action.u.addba_req)))
2229 goto invalid;
2230 break;
2231 case WLAN_ACTION_ADDBA_RESP:
2232 if (len < (IEEE80211_MIN_ACTION_SIZE +
2233 sizeof(mgmt->u.action.u.addba_resp)))
2234 goto invalid;
2235 break;
2236 case WLAN_ACTION_DELBA:
2237 if (len < (IEEE80211_MIN_ACTION_SIZE +
2238 sizeof(mgmt->u.action.u.delba)))
2239 goto invalid;
2240 break;
2241 default:
2242 goto invalid;
2243 }
2244
2245 goto queue;
2246 case WLAN_CATEGORY_SPECTRUM_MGMT:
2247 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2248 break;
2249
2250 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2251 break;
2252
2253 /* verify action_code is present */
2254 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2255 break;
2256
2257 switch (mgmt->u.action.u.measurement.action_code) {
2258 case WLAN_ACTION_SPCT_MSR_REQ:
2259 if (len < (IEEE80211_MIN_ACTION_SIZE +
2260 sizeof(mgmt->u.action.u.measurement)))
2261 break;
2262 ieee80211_process_measurement_req(sdata, mgmt, len);
2263 goto handled;
2264 case WLAN_ACTION_SPCT_CHL_SWITCH:
2265 if (len < (IEEE80211_MIN_ACTION_SIZE +
2266 sizeof(mgmt->u.action.u.chan_switch)))
2267 break;
2268
2269 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2270 break;
2271
2272 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2273 break;
2274
2275 goto queue;
2276 }
2277 break;
2278 case WLAN_CATEGORY_SA_QUERY:
2279 if (len < (IEEE80211_MIN_ACTION_SIZE +
2280 sizeof(mgmt->u.action.u.sa_query)))
2281 break;
2282
2283 switch (mgmt->u.action.u.sa_query.action) {
2284 case WLAN_ACTION_SA_QUERY_REQUEST:
2285 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2286 break;
2287 ieee80211_process_sa_query_req(sdata, mgmt, len);
2288 goto handled;
2289 }
2290 break;
2291 case WLAN_CATEGORY_SELF_PROTECTED:
2292 switch (mgmt->u.action.u.self_prot.action_code) {
2293 case WLAN_SP_MESH_PEERING_OPEN:
2294 case WLAN_SP_MESH_PEERING_CLOSE:
2295 case WLAN_SP_MESH_PEERING_CONFIRM:
2296 if (!ieee80211_vif_is_mesh(&sdata->vif))
2297 goto invalid;
2298 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2299 /* userspace handles this frame */
2300 break;
2301 goto queue;
2302 case WLAN_SP_MGK_INFORM:
2303 case WLAN_SP_MGK_ACK:
2304 if (!ieee80211_vif_is_mesh(&sdata->vif))
2305 goto invalid;
2306 break;
2307 }
2308 break;
2309 case WLAN_CATEGORY_MESH_ACTION:
2310 if (!ieee80211_vif_is_mesh(&sdata->vif))
2311 break;
2312 if (mesh_action_is_path_sel(mgmt) &&
2313 (!mesh_path_sel_is_hwmp(sdata)))
2314 break;
2315 goto queue;
2316 }
2317
2318 return RX_CONTINUE;
2319
2320 invalid:
2321 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2322 /* will return in the next handlers */
2323 return RX_CONTINUE;
2324
2325 handled:
2326 if (rx->sta)
2327 rx->sta->rx_packets++;
2328 dev_kfree_skb(rx->skb);
2329 return RX_QUEUED;
2330
2331 queue:
2332 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2333 skb_queue_tail(&sdata->skb_queue, rx->skb);
2334 ieee80211_queue_work(&local->hw, &sdata->work);
2335 if (rx->sta)
2336 rx->sta->rx_packets++;
2337 return RX_QUEUED;
2338 }
2339
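/*
 * Hand management frames that the kernel did not consume to userspace
 * via cfg80211_rx_mgmt(); if a registered listener accepts the frame
 * it is counted and freed here, otherwise processing continues.
 */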
2340 static ieee80211_rx_result debug_noinline
2341 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2342 {
2343 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2344
2345 /* skip known-bad action frames and return them in the next handler */
2346 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2347 return RX_CONTINUE;
2348
2349 /*
2350 * Getting here means the kernel doesn't know how to handle
2351 * it, but maybe userspace does ... include returned frames
2352 * so userspace can register for those and learn whether frames
2353 * it transmitted were processed or returned.
2354 */
2355
2356 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2357 rx->skb->data, rx->skb->len,
2358 GFP_ATOMIC)) {
2359 if (rx->sta)
2360 rx->sta->rx_packets++;
2361 dev_kfree_skb(rx->skb);
2362 return RX_QUEUED;
2363 }
2364
2366 return RX_CONTINUE;
2367 }
2368
2369 static ieee80211_rx_result debug_noinline
2370 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2371 {
2372 struct ieee80211_local *local = rx->local;
2373 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2374 struct sk_buff *nskb;
2375 struct ieee80211_sub_if_data *sdata = rx->sdata;
2376 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2377
2378 if (!ieee80211_is_action(mgmt->frame_control))
2379 return RX_CONTINUE;
2380
2381 /*
2382 * For AP mode, hostapd is responsible for handling any action
2383 * frames that we didn't handle, including returning unknown
2384 * ones. For all other modes we will return them to the sender,
2385 * setting the 0x80 bit in the action category, as required by
2386 * 802.11-2007 7.3.1.11.
2387 * Newer versions of hostapd shall also use the management frame
2388 * registration mechanisms, but older ones still use cooked
2389 * monitor interfaces so push all frames there.
2390 */
2391 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2392 (sdata->vif.type == NL80211_IFTYPE_AP ||
2393 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2394 return RX_DROP_MONITOR;
2395
2396 /* do not return rejected action frames */
2397 if (mgmt->u.action.category & 0x80)
2398 return RX_DROP_UNUSABLE;
2399
2400 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2401 GFP_ATOMIC);
2402 if (nskb) {
2403 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2404
2405 nmgmt->u.action.category |= 0x80;
2406 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2407 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2408
2409 memset(nskb->cb, 0, sizeof(nskb->cb));
2410
2411 ieee80211_tx_skb(rx->sdata, nskb);
2412 }
2413 dev_kfree_skb(rx->skb);
2414 return RX_QUEUED;
2415 }
2416
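/*
 * Remaining management frames: give the work framework the first look,
 * then filter by frame subtype and interface type and queue accepted
 * frames to the interface work queue.
 */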
2417 static ieee80211_rx_result debug_noinline
2418 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2419 {
2420 struct ieee80211_sub_if_data *sdata = rx->sdata;
2421 ieee80211_rx_result rxs;
2422 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2423 __le16 stype;
2424
2425 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2426 if (rxs != RX_CONTINUE)
2427 return rxs;
2428
2429 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2430
2431 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2432 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2433 sdata->vif.type != NL80211_IFTYPE_STATION)
2434 return RX_DROP_MONITOR;
2435
2436 switch (stype) {
2437 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2438 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2439 /* process for all: mesh, mlme, ibss */
2440 break;
2441 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2442 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2443 if (is_multicast_ether_addr(mgmt->da) &&
2444 !is_broadcast_ether_addr(mgmt->da))
2445 return RX_DROP_MONITOR;
2446
2447 /* process only for station */
2448 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2449 return RX_DROP_MONITOR;
2450 break;
2451 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2452 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2453 /* process only for ibss */
2454 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2455 return RX_DROP_MONITOR;
2456 break;
2457 default:
2458 return RX_DROP_MONITOR;
2459 }
2460
2461 /* queue up frame and kick off work to process it */
2462 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2463 skb_queue_tail(&sdata->skb_queue, rx->skb);
2464 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2465 if (rx->sta)
2466 rx->sta->rx_packets++;
2467
2468 return RX_QUEUED;
2469 }
2470
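/*
 * Deliver a frame to all cooked monitor interfaces, prefixed with a
 * minimal radiotap header (flags, optional rate and channel).
 */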
2471 /* TODO: use IEEE80211_RX_FRAGMENTED */
2472 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2473 struct ieee80211_rate *rate)
2474 {
2475 struct ieee80211_sub_if_data *sdata;
2476 struct ieee80211_local *local = rx->local;
2477 struct ieee80211_rtap_hdr {
2478 struct ieee80211_radiotap_header hdr;
2479 u8 flags;
2480 u8 rate_or_pad;
2481 __le16 chan_freq;
2482 __le16 chan_flags;
2483 } __packed *rthdr;
2484 struct sk_buff *skb = rx->skb, *skb2;
2485 struct net_device *prev_dev = NULL;
2486 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2487
2488 /*
2489 * If cooked monitor has been processed already, then
2490 * don't do it again. If not, set the flag.
2491 */
2492 if (rx->flags & IEEE80211_RX_CMNTR)
2493 goto out_free_skb;
2494 rx->flags |= IEEE80211_RX_CMNTR;
2495
2496 if (skb_headroom(skb) < sizeof(*rthdr) &&
2497 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2498 goto out_free_skb;
2499
2500 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2501 memset(rthdr, 0, sizeof(*rthdr));
2502 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2503 rthdr->hdr.it_present =
2504 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2505 (1 << IEEE80211_RADIOTAP_CHANNEL));
2506
2507 if (rate) {
2508 rthdr->rate_or_pad = rate->bitrate / 5;
2509 rthdr->hdr.it_present |=
2510 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2511 }
2512 rthdr->chan_freq = cpu_to_le16(status->freq);
2513
2514 if (status->band == IEEE80211_BAND_5GHZ)
2515 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2516 IEEE80211_CHAN_5GHZ);
2517 else
2518 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2519 IEEE80211_CHAN_2GHZ);
2520
2521 skb_set_mac_header(skb, 0);
2522 skb->ip_summed = CHECKSUM_UNNECESSARY;
2523 skb->pkt_type = PACKET_OTHERHOST;
2524 skb->protocol = htons(ETH_P_802_2);
2525
2526 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2527 if (!ieee80211_sdata_running(sdata))
2528 continue;
2529
2530 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2531 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2532 continue;
2533
2534 if (prev_dev) {
2535 skb2 = skb_clone(skb, GFP_ATOMIC);
2536 if (skb2) {
2537 skb2->dev = prev_dev;
2538 netif_receive_skb(skb2);
2539 }
2540 }
2541
2542 prev_dev = sdata->dev;
2543 sdata->dev->stats.rx_packets++;
2544 sdata->dev->stats.rx_bytes += skb->len;
2545 }
2546
2547 if (prev_dev) {
2548 skb->dev = prev_dev;
2549 netif_receive_skb(skb);
2550 return;
2551 }
2552
2553 out_free_skb:
2554 dev_kfree_skb(skb);
2555 }
2556
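/*
 * Act on the final RX handler result: RX_DROP_MONITOR (falling through
 * to RX_CONTINUE) hands the frame to the cooked monitor interfaces,
 * RX_DROP_UNUSABLE frees it, and RX_QUEUED frames have already been
 * consumed by a handler and are only counted.
 */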
2557 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2558 ieee80211_rx_result res)
2559 {
2560 switch (res) {
2561 case RX_DROP_MONITOR:
2562 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2563 if (rx->sta)
2564 rx->sta->rx_dropped++;
2565 /* fall through */
2566 case RX_CONTINUE: {
2567 struct ieee80211_rate *rate = NULL;
2568 struct ieee80211_supported_band *sband;
2569 struct ieee80211_rx_status *status;
2570
2571 status = IEEE80211_SKB_RXCB((rx->skb));
2572
2573 sband = rx->local->hw.wiphy->bands[status->band];
2574 if (!(status->flag & RX_FLAG_HT))
2575 rate = &sband->bitrates[status->rate_idx];
2576
2577 ieee80211_rx_cooked_monitor(rx, rate);
2578 break;
2579 }
2580 case RX_DROP_UNUSABLE:
2581 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2582 if (rx->sta)
2583 rx->sta->rx_dropped++;
2584 dev_kfree_skb(rx->skb);
2585 break;
2586 case RX_QUEUED:
2587 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2588 break;
2589 }
2590 }
2591
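/*
 * Run the RX handler chain on every frame on the local rx_skb_queue.
 * The running_rx_handler flag ensures that only one context executes
 * the handlers at a time; if another context is already running them,
 * newly queued frames are simply left for it to pick up.
 */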
2592 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2593 {
2594 ieee80211_rx_result res = RX_DROP_MONITOR;
2595 struct sk_buff *skb;
2596
2597 #define CALL_RXH(rxh) \
2598 do { \
2599 res = rxh(rx); \
2600 if (res != RX_CONTINUE) \
2601 goto rxh_next; \
2602 } while (0);
2603
2604 spin_lock(&rx->local->rx_skb_queue.lock);
2605 if (rx->local->running_rx_handler)
2606 goto unlock;
2607
2608 rx->local->running_rx_handler = true;
2609
2610 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2611 spin_unlock(&rx->local->rx_skb_queue.lock);
2612
2613 /*
2614 * all the other fields are valid across frames
2615 * that belong to an aMPDU since they are on the
2616 * same TID from the same station
2617 */
2618 rx->skb = skb;
2619
2620 CALL_RXH(ieee80211_rx_h_decrypt)
2621 CALL_RXH(ieee80211_rx_h_check_more_data)
2622 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2623 CALL_RXH(ieee80211_rx_h_sta_process)
2624 CALL_RXH(ieee80211_rx_h_defragment)
2625 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2626 /* must be after MMIC verify so header is counted in MPDU mic */
2627 #ifdef CONFIG_MAC80211_MESH
2628 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2629 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2630 #endif
2631 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2632 CALL_RXH(ieee80211_rx_h_amsdu)
2633 CALL_RXH(ieee80211_rx_h_data)
2634 CALL_RXH(ieee80211_rx_h_ctrl);
2635 CALL_RXH(ieee80211_rx_h_mgmt_check)
2636 CALL_RXH(ieee80211_rx_h_action)
2637 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2638 CALL_RXH(ieee80211_rx_h_action_return)
2639 CALL_RXH(ieee80211_rx_h_mgmt)
2640
2641 rxh_next:
2642 ieee80211_rx_handlers_result(rx, res);
2643 spin_lock(&rx->local->rx_skb_queue.lock);
2644 #undef CALL_RXH
2645 }
2646
2647 rx->local->running_rx_handler = false;
2648
2649 unlock:
2650 spin_unlock(&rx->local->rx_skb_queue.lock);
2651 }
2652
2653 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2654 {
2655 ieee80211_rx_result res = RX_DROP_MONITOR;
2656
2657 #define CALL_RXH(rxh) \
2658 do { \
2659 res = rxh(rx); \
2660 if (res != RX_CONTINUE) \
2661 goto rxh_next; \
2662 } while (0);
2663
2664 CALL_RXH(ieee80211_rx_h_passive_scan)
2665 CALL_RXH(ieee80211_rx_h_check)
2666
2667 ieee80211_rx_reorder_ampdu(rx);
2668
2669 ieee80211_rx_handlers(rx);
2670 return;
2671
2672 rxh_next:
2673 ieee80211_rx_handlers_result(rx, res);
2674
2675 #undef CALL_RXH
2676 }
2677
2678 /*
2679 * This function makes calls into the RX path, therefore
2680 * it has to be invoked under RCU read lock.
2681 */
2682 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2683 {
2684 struct ieee80211_rx_data rx = {
2685 .sta = sta,
2686 .sdata = sta->sdata,
2687 .local = sta->local,
2688 /* This is OK -- must be QoS data frame */
2689 .security_idx = tid,
2690 .seqno_idx = tid,
2691 .flags = 0,
2692 };
2693 struct tid_ampdu_rx *tid_agg_rx;
2694
2695 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2696 if (!tid_agg_rx)
2697 return;
2698
2699 spin_lock(&tid_agg_rx->reorder_lock);
2700 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
2701 spin_unlock(&tid_agg_rx->reorder_lock);
2702
2703 ieee80211_rx_handlers(&rx);
2704 }
2705
2706 /* main receive path */
2707
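/*
 * Check whether the frame is of interest to this interface and set up
 * rx accordingly: returns 0 if the frame should be ignored and 1 if it
 * should be processed; frames accepted only due to promiscuous mode or
 * scanning have IEEE80211_RX_RA_MATCH cleared, and unknown IBSS peers
 * are added as stations here.
 */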
2708 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2709 struct ieee80211_hdr *hdr)
2710 {
2711 struct ieee80211_sub_if_data *sdata = rx->sdata;
2712 struct sk_buff *skb = rx->skb;
2713 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2714 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2715 int multicast = is_multicast_ether_addr(hdr->addr1);
2716
2717 switch (sdata->vif.type) {
2718 case NL80211_IFTYPE_STATION:
2719 if (!bssid && !sdata->u.mgd.use_4addr)
2720 return 0;
2721 if (!multicast &&
2722 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2723 if (!(sdata->dev->flags & IFF_PROMISC) ||
2724 sdata->u.mgd.use_4addr)
2725 return 0;
2726 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2727 }
2728 break;
2729 case NL80211_IFTYPE_ADHOC:
2730 if (!bssid)
2731 return 0;
2732 if (ieee80211_is_beacon(hdr->frame_control)) {
2733 return 1;
2734 }
2735 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2736 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2737 return 0;
2738 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2739 } else if (!multicast &&
2740 compare_ether_addr(sdata->vif.addr,
2741 hdr->addr1) != 0) {
2742 if (!(sdata->dev->flags & IFF_PROMISC))
2743 return 0;
2744 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2745 } else if (!rx->sta) {
2746 int rate_idx;
2747 if (status->flag & RX_FLAG_HT)
2748 rate_idx = 0; /* TODO: HT rates */
2749 else
2750 rate_idx = status->rate_idx;
2751 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2752 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2753 }
2754 break;
2755 case NL80211_IFTYPE_MESH_POINT:
2756 if (!multicast &&
2757 compare_ether_addr(sdata->vif.addr,
2758 hdr->addr1) != 0) {
2759 if (!(sdata->dev->flags & IFF_PROMISC))
2760 return 0;
2761
2762 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2763 }
2764 break;
2765 case NL80211_IFTYPE_AP_VLAN:
2766 case NL80211_IFTYPE_AP:
2767 if (!bssid) {
2768 if (compare_ether_addr(sdata->vif.addr,
2769 hdr->addr1))
2770 return 0;
2771 } else if (!ieee80211_bssid_match(bssid,
2772 sdata->vif.addr)) {
2773 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2774 !ieee80211_is_beacon(hdr->frame_control) &&
2775 !(ieee80211_is_action(hdr->frame_control) &&
2776 sdata->vif.p2p))
2777 return 0;
2778 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2779 }
2780 break;
2781 case NL80211_IFTYPE_WDS:
2782 if (bssid || !ieee80211_is_data(hdr->frame_control))
2783 return 0;
2784 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2785 return 0;
2786 break;
2787 default:
2788 /* should never get here */
2789 WARN_ON(1);
2790 break;
2791 }
2792
2793 return 1;
2794 }
2795
2796 /*
2797 * This function returns whether or not the SKB
2798 * was destined for RX processing, which,
2799 * if consume is true, is equivalent to whether
2800 * or not the skb was consumed.
2801 */
2802 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2803 struct sk_buff *skb, bool consume)
2804 {
2805 struct ieee80211_local *local = rx->local;
2806 struct ieee80211_sub_if_data *sdata = rx->sdata;
2807 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2808 struct ieee80211_hdr *hdr = (void *)skb->data;
2809 int prepares;
2810
2811 rx->skb = skb;
2812 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2813 prepares = prepare_for_handlers(rx, hdr);
2814
2815 if (!prepares)
2816 return false;
2817
2818 if (!consume) {
2819 skb = skb_copy(skb, GFP_ATOMIC);
2820 if (!skb) {
2821 if (net_ratelimit())
2822 wiphy_debug(local->hw.wiphy,
2823 "failed to copy skb for %s\n",
2824 sdata->name);
2825 return true;
2826 }
2827
2828 rx->skb = skb;
2829 }
2830
2831 ieee80211_invoke_rx_handlers(rx);
2832 return true;
2833 }
2834
2835 /*
2836 * This is the actual Rx frames handler. As it belongs to the Rx path it
2837 * must be called with rcu_read_lock protection.
2838 */
2839 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2840 struct sk_buff *skb)
2841 {
2842 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2843 struct ieee80211_local *local = hw_to_local(hw);
2844 struct ieee80211_sub_if_data *sdata;
2845 struct ieee80211_hdr *hdr;
2846 __le16 fc;
2847 struct ieee80211_rx_data rx;
2848 struct ieee80211_sub_if_data *prev;
2849 struct sta_info *sta, *tmp, *prev_sta;
2850 int err = 0;
2851
2852 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2853 memset(&rx, 0, sizeof(rx));
2854 rx.skb = skb;
2855 rx.local = local;
2856
2857 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2858 local->dot11ReceivedFragmentCount++;
2859
2860 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2861 test_bit(SCAN_SW_SCANNING, &local->scanning)))
2862 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2863
2864 if (ieee80211_is_mgmt(fc))
2865 err = skb_linearize(skb);
2866 else
2867 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2868
2869 if (err) {
2870 dev_kfree_skb(skb);
2871 return;
2872 }
2873
2874 hdr = (struct ieee80211_hdr *)skb->data;
2875 ieee80211_parse_qos(&rx);
2876 ieee80211_verify_alignment(&rx);
2877
2878 if (ieee80211_is_data(fc)) {
2879 prev_sta = NULL;
2880
2881 for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
2882 if (!prev_sta) {
2883 prev_sta = sta;
2884 continue;
2885 }
2886
2887 rx.sta = prev_sta;
2888 rx.sdata = prev_sta->sdata;
2889 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2890
2891 prev_sta = sta;
2892 }
2893
2894 if (prev_sta) {
2895 rx.sta = prev_sta;
2896 rx.sdata = prev_sta->sdata;
2897
2898 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2899 return;
2900 goto out;
2901 }
2902 }
2903
2904 prev = NULL;
2905
2906 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2907 if (!ieee80211_sdata_running(sdata))
2908 continue;
2909
2910 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2911 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2912 continue;
2913
2914 /*
2915 * frame is destined for this interface, but if it's
2916 * not also for the previous one we handle that after
2917 * the loop to avoid copying the SKB one time too many
2918 */
2919
2920 if (!prev) {
2921 prev = sdata;
2922 continue;
2923 }
2924
2925 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2926 rx.sdata = prev;
2927 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2928
2929 prev = sdata;
2930 }
2931
2932 if (prev) {
2933 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2934 rx.sdata = prev;
2935
2936 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2937 return;
2938 }
2939
2940 out:
2941 dev_kfree_skb(skb);
2942 }
2943
2944 /*
2945 * This is the receive path handler. It is called by a low level driver when an
2946 * 802.11 MPDU is received from the hardware.
2947 */
2948 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2949 {
2950 struct ieee80211_local *local = hw_to_local(hw);
2951 struct ieee80211_rate *rate = NULL;
2952 struct ieee80211_supported_band *sband;
2953 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2954
2955 WARN_ON_ONCE(softirq_count() == 0);
2956
2957 if (WARN_ON(status->band < 0 ||
2958 status->band >= IEEE80211_NUM_BANDS))
2959 goto drop;
2960
2961 sband = local->hw.wiphy->bands[status->band];
2962 if (WARN_ON(!sband))
2963 goto drop;
2964
2965 /*
2966 * If we're suspending, it is possible although not too likely
2967 * that we'd be receiving frames after having already partially
2968 * quiesced the stack. We can't process such frames then since
2969 * that might, for example, cause stations to be added or other
2970 * driver callbacks be invoked.
2971 */
2972 if (unlikely(local->quiescing || local->suspended))
2973 goto drop;
2974
2975 /*
2976 * The same happens when we're not even started,
2977 * but that's worth a warning.
2978 */
2979 if (WARN_ON(!local->started))
2980 goto drop;
2981
2982 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
2983 /*
2984 * Validate the rate, unless a PLCP error means that
2985 * we probably can't have a valid rate here anyway.
2986 */
2987
2988 if (status->flag & RX_FLAG_HT) {
2989 /*
2990 * rate_idx is MCS index, which can be [0-76]
2991 * as documented on:
2992 *
2993 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2994 *
2995 * Anything else would be some sort of driver or
2996 * hardware error. The driver should catch hardware
2997 * errors.
2998 */
2999 if (WARN((status->rate_idx < 0 ||
3000 status->rate_idx > 76),
3001 "Rate marked as an HT rate but passed "
3002 "status->rate_idx is not "
3003 "an MCS index [0-76]: %d (0x%02x)\n",
3004 status->rate_idx,
3005 status->rate_idx))
3006 goto drop;
3007 } else {
3008 if (WARN_ON(status->rate_idx < 0 ||
3009 status->rate_idx >= sband->n_bitrates))
3010 goto drop;
3011 rate = &sband->bitrates[status->rate_idx];
3012 }
3013 }
3014
3015 status->rx_flags = 0;
3016
3017 /*
3018 * key references and virtual interfaces are protected using RCU
3019 * and this requires that we are in a read-side RCU section during
3020 * receive processing
3021 */
3022 rcu_read_lock();
3023
3024 /*
3025 * Frames with failed FCS/PLCP checksum are not returned,
3026 * all other frames are returned without radiotap header
3027 * if it was previously present.
3028 * Also, frames with less than 16 bytes are dropped.
3029 */
3030 skb = ieee80211_rx_monitor(local, skb, rate);
3031 if (!skb) {
3032 rcu_read_unlock();
3033 return;
3034 }
3035
3036 ieee80211_tpt_led_trig_rx(local,
3037 ((struct ieee80211_hdr *)skb->data)->frame_control,
3038 skb->len);
3039 __ieee80211_rx_handle_packet(hw, skb);
3040
3041 rcu_read_unlock();
3042
3043 return;
3044 drop:
3045 kfree_skb(skb);
3046 }
3047 EXPORT_SYMBOL(ieee80211_rx);
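
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a driver typically fills in the ieee80211_rx_status area in skb->cb
 * and then hands the MPDU to mac80211.  The driver-side names below
 * (my_hw, my_band, my_center_freq, my_rate_idx, my_rssi_dbm) are
 * hypothetical.
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = my_band;
 *	status->freq = my_center_freq;
 *	status->rate_idx = my_rate_idx;
 *	status->signal = my_rssi_dbm;
 *
 *	ieee80211_rx(my_hw, skb);		from softirq context, or
 *	ieee80211_rx_irqsafe(my_hw, skb);	from hard interrupt context
 */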
3048
3049 /* This is a version of the rx handler that can be called from hard irq
3050 * context. Post the skb on the queue and schedule the tasklet */
3051 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
3052 {
3053 struct ieee80211_local *local = hw_to_local(hw);
3054
3055 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
3056
3057 skb->pkt_type = IEEE80211_RX_MSG;
3058 skb_queue_tail(&local->skb_queue, skb);
3059 tasklet_schedule(&local->tasklet);
3060 }
3061 EXPORT_SYMBOL(ieee80211_rx_irqsafe);