1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <net/mac80211.h>
21 #include <net/ieee80211_radiotap.h>
22 #include <asm/unaligned.h>
23
24 #include "ieee80211_i.h"
25 #include "driver-ops.h"
26 #include "led.h"
27 #include "mesh.h"
28 #include "wep.h"
29 #include "wpa.h"
30 #include "tkip.h"
31 #include "wme.h"
32 #include "rate.h"
33
34 /*
35 * monitor mode reception
36 *
37 * This function cleans up the SKB, i.e. it removes all the stuff
38 * only useful for monitoring.
39 */
40 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
41 struct sk_buff *skb)
42 {
43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
44 if (likely(skb->len > FCS_LEN))
45 __pskb_trim(skb, skb->len - FCS_LEN);
46 else {
47 /* driver bug */
48 WARN_ON(1);
49 dev_kfree_skb(skb);
50 skb = NULL;
51 }
52 }
53
54 return skb;
55 }
56
57 static inline int should_drop_frame(struct sk_buff *skb,
58 int present_fcs_len)
59 {
60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
62
63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
64 return 1;
65 if (unlikely(skb->len < 16 + present_fcs_len))
66 return 1;
67 if (ieee80211_is_ctl(hdr->frame_control) &&
68 !ieee80211_is_pspoll(hdr->frame_control) &&
69 !ieee80211_is_back_req(hdr->frame_control))
70 return 1;
71 return 0;
72 }
73
74 static int
75 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
76 struct ieee80211_rx_status *status)
77 {
78 int len;
79
80 /* always present fields */
81 len = sizeof(struct ieee80211_radiotap_header) + 9;
82
83 if (status->flag & RX_FLAG_MACTIME_MPDU)
84 len += 8;
85 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
86 len += 1;
87
88 if (len & 1) /* padding for RX_FLAGS if necessary */
89 len++;
90
91 if (status->flag & RX_FLAG_HT) /* HT info */
92 len += 3;
93
94 return len;
95 }
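/*
 * Worked example of the computation above: the 9 always-present bytes
 * are flags (1), rate (1), channel (4), antenna (1) and RX flags (2).
 * For hardware that reports a dBm signal and the MPDU timestamp, a
 * non-HT frame thus needs 8 (header) + 9 + 8 (TSFT) + 1 (signal) = 26
 * bytes, and an HT frame 26 + 3 (MCS) = 29.  Without the signal byte
 * the running total would be odd (25), so one padding byte is added to
 * keep the RX flags field two-byte aligned.
 */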
96
97 /*
98 * ieee80211_add_rx_radiotap_header - add radiotap header
99 *
100 * add a radiotap header containing all the fields which the hardware provided.
101 */
102 static void
103 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
104 struct sk_buff *skb,
105 struct ieee80211_rate *rate,
106 int rtap_len, bool has_fcs)
107 {
108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
109 struct ieee80211_radiotap_header *rthdr;
110 unsigned char *pos;
111 u16 rx_flags = 0;
112
113 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
114 memset(rthdr, 0, rtap_len);
115
116 /* radiotap header, set always present flags */
117 rthdr->it_present =
118 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
119 (1 << IEEE80211_RADIOTAP_CHANNEL) |
120 (1 << IEEE80211_RADIOTAP_ANTENNA) |
121 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
122 rthdr->it_len = cpu_to_le16(rtap_len);
123
124 pos = (unsigned char *)(rthdr+1);
125
126 /* the order of the following fields is important */
127
128 /* IEEE80211_RADIOTAP_TSFT */
129 if (status->flag & RX_FLAG_MACTIME_MPDU) {
130 put_unaligned_le64(status->mactime, pos);
131 rthdr->it_present |=
132 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
133 pos += 8;
134 }
135
136 /* IEEE80211_RADIOTAP_FLAGS */
137 if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
138 *pos |= IEEE80211_RADIOTAP_F_FCS;
139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
140 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
141 if (status->flag & RX_FLAG_SHORTPRE)
142 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
143 pos++;
144
145 /* IEEE80211_RADIOTAP_RATE */
146 if (!rate || status->flag & RX_FLAG_HT) {
147 /*
148 * Without rate information, don't add it. For HT frames,
149 * the MCS information goes into a separate radiotap field,
150 * added below. The byte here is still needed as padding
151 * for the channel though, so initialise it to 0.
152 */
153 *pos = 0;
154 } else {
155 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
156 *pos = rate->bitrate / 5;
157 }
158 pos++;
159
160 /* IEEE80211_RADIOTAP_CHANNEL */
161 put_unaligned_le16(status->freq, pos);
162 pos += 2;
163 if (status->band == IEEE80211_BAND_5GHZ)
164 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
165 pos);
166 else if (status->flag & RX_FLAG_HT)
167 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
168 pos);
169 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
170 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
171 pos);
172 else if (rate)
173 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
174 pos);
175 else
176 put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
177 pos += 2;
178
179 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
180 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
181 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
182 *pos = status->signal;
183 rthdr->it_present |=
184 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
185 pos++;
186 }
187
188 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
189
190 /* IEEE80211_RADIOTAP_ANTENNA */
191 *pos = status->antenna;
192 pos++;
193
194 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
195
196 /* IEEE80211_RADIOTAP_RX_FLAGS */
197 /* ensure 2 byte alignment for the 2 byte field as required */
198 if ((pos - (u8 *)rthdr) & 1)
199 pos++;
200 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
201 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
202 put_unaligned_le16(rx_flags, pos);
203 pos += 2;
204
205 if (status->flag & RX_FLAG_HT) {
206 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
207 *pos++ = local->hw.radiotap_mcs_details;
208 *pos = 0;
209 if (status->flag & RX_FLAG_SHORT_GI)
210 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
211 if (status->flag & RX_FLAG_40MHZ)
212 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
213 if (status->flag & RX_FLAG_HT_GF)
214 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
215 pos++;
216 *pos++ = status->rate_idx;
217 }
218 }
219
220 /*
221 * This function copies a received frame to all monitor interfaces and
222 * returns a cleaned-up SKB that no longer includes the FCS nor the
223 * radiotap header the driver might have added.
224 */
225 static struct sk_buff *
226 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
227 struct ieee80211_rate *rate)
228 {
229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
230 struct ieee80211_sub_if_data *sdata;
231 int needed_headroom;
232 struct sk_buff *skb, *skb2;
233 struct net_device *prev_dev = NULL;
234 int present_fcs_len = 0;
235
236 /*
237 * First, we may need to make a copy of the skb because
238 * (1) we need to modify it for radiotap (if not present), and
239 * (2) the other RX handlers will modify the skb we got.
240 *
241 * We don't need to, of course, if we aren't going to return
242 * the SKB because it has a bad FCS/PLCP checksum.
243 */
244
245 /* room for the radiotap header based on driver features */
246 needed_headroom = ieee80211_rx_radiotap_len(local, status);
247
248 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
249 present_fcs_len = FCS_LEN;
250
251 /* make sure hdr->frame_control is on the linear part */
252 if (!pskb_may_pull(origskb, 2)) {
253 dev_kfree_skb(origskb);
254 return NULL;
255 }
256
257 if (!local->monitors) {
258 if (should_drop_frame(origskb, present_fcs_len)) {
259 dev_kfree_skb(origskb);
260 return NULL;
261 }
262
263 return remove_monitor_info(local, origskb);
264 }
265
266 if (should_drop_frame(origskb, present_fcs_len)) {
267 /* only need to expand headroom if necessary */
268 skb = origskb;
269 origskb = NULL;
270
271 /*
272 * This shouldn't trigger often because most devices have an
273 * RX header they pull before we get here, and that should
274 * be big enough for our radiotap information. We should
275 * probably export the length to drivers so that we can have
276 * them allocate enough headroom to start with.
277 */
278 if (skb_headroom(skb) < needed_headroom &&
279 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
280 dev_kfree_skb(skb);
281 return NULL;
282 }
283 } else {
284 /*
285 * Need to make a copy and possibly remove radiotap header
286 * and FCS from the original.
287 */
288 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
289
290 origskb = remove_monitor_info(local, origskb);
291
292 if (!skb)
293 return origskb;
294 }
295
296 /* prepend radiotap information */
297 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
298 true);
299
300 skb_reset_mac_header(skb);
301 skb->ip_summed = CHECKSUM_UNNECESSARY;
302 skb->pkt_type = PACKET_OTHERHOST;
303 skb->protocol = htons(ETH_P_802_2);
304
305 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
306 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
307 continue;
308
309 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
310 continue;
311
312 if (!ieee80211_sdata_running(sdata))
313 continue;
314
315 if (prev_dev) {
316 skb2 = skb_clone(skb, GFP_ATOMIC);
317 if (skb2) {
318 skb2->dev = prev_dev;
319 netif_receive_skb(skb2);
320 }
321 }
322
323 prev_dev = sdata->dev;
324 sdata->dev->stats.rx_packets++;
325 sdata->dev->stats.rx_bytes += skb->len;
326 }
327
328 if (prev_dev) {
329 skb->dev = prev_dev;
330 netif_receive_skb(skb);
331 } else
332 dev_kfree_skb(skb);
333
334 return origskb;
335 }
336
337
338 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
339 {
340 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
341 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
342 int tid, seqno_idx, security_idx;
343
344 /* does the frame have a qos control field? */
345 if (ieee80211_is_data_qos(hdr->frame_control)) {
346 u8 *qc = ieee80211_get_qos_ctl(hdr);
347 /* frame has qos control */
348 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
349 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
350 status->rx_flags |= IEEE80211_RX_AMSDU;
351
352 seqno_idx = tid;
353 security_idx = tid;
354 } else {
355 /*
356 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
357 *
358 * Sequence numbers for management frames, QoS data
359 * frames with a broadcast/multicast address in the
360 * Address 1 field, and all non-QoS data frames sent
361 * by QoS STAs are assigned using an additional single
362 * modulo-4096 counter, [...]
363 *
364 * We also use that counter for non-QoS STAs.
365 */
366 seqno_idx = NUM_RX_DATA_QUEUES;
367 security_idx = 0;
368 if (ieee80211_is_mgmt(hdr->frame_control))
369 security_idx = NUM_RX_DATA_QUEUES;
370 tid = 0;
371 }
372
373 rx->seqno_idx = seqno_idx;
374 rx->security_idx = security_idx;
375 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
376 * For now, set skb->priority to 0 for other cases. */
377 rx->skb->priority = (tid > 7) ? 0 : tid;
378 }
379
380 /**
381 * DOC: Packet alignment
382 *
383 * Drivers always need to pass packets that are aligned to two-byte boundaries
384 * to the stack.
385 *
386 * Additionally, drivers should, if possible, align the payload data in a way that
387 * guarantees that the contained IP header is aligned to a four-byte
388 * boundary. In the case of regular frames, this simply means aligning the
389 * payload to a four-byte boundary (because either the IP header is directly
390 * contained, or IV/RFC1042 headers that have a length divisible by four are
391 * in front of it). If the payload data is not properly aligned and the
392 * architecture doesn't support efficient unaligned operations, mac80211
393 * will align the data.
394 *
395 * With A-MSDU frames, however, the payload data address must be two modulo
396 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
397 * push the IP header further back to a multiple of four again. Thankfully, the
398 * specs were sane enough this time around to require padding each A-MSDU
399 * subframe to a length that is a multiple of four.
400 *
401 * Padding such as that added by Atheros hardware between the 802.11 header and
402 * the payload is not supported; in that case the driver is required to move the
403 * 802.11 header so that it sits directly in front of the payload.
404 */
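/*
 * For example (following the description above): each A-MSDU subframe
 * starts with a 14-byte 802.3 header and is padded to a multiple of
 * four, so if the A-MSDU payload starts at an address that is two
 * modulo four, the IP header inside every subframe lands on a
 * four-byte boundary (2 + 14 = 16, plus RFC 1042 headers whose length
 * is divisible by four).
 */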
405 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
406 {
407 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
408 WARN_ONCE((unsigned long)rx->skb->data & 1,
409 "unaligned packet at 0x%p\n", rx->skb->data);
410 #endif
411 }
412
413
414 /* rx handlers */
415
416 static ieee80211_rx_result debug_noinline
417 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
418 {
419 struct ieee80211_local *local = rx->local;
420 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
421 struct sk_buff *skb = rx->skb;
422
423 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
424 !local->sched_scanning))
425 return RX_CONTINUE;
426
427 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
428 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
429 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
430 local->sched_scanning)
431 return ieee80211_scan_rx(rx->sdata, skb);
432
433 /* scanning finished while the handlers were being invoked */
434 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
435 return RX_DROP_UNUSABLE;
436 }
437
438
439 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
440 {
441 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
442
443 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
444 return 0;
445
446 return ieee80211_is_robust_mgmt_frame(hdr);
447 }
448
449
450 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
451 {
452 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
453
454 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
455 return 0;
456
457 return ieee80211_is_robust_mgmt_frame(hdr);
458 }
459
460
461 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
462 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
463 {
464 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
465 struct ieee80211_mmie *mmie;
466
467 if (skb->len < 24 + sizeof(*mmie) ||
468 !is_multicast_ether_addr(hdr->da))
469 return -1;
470
471 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
472 return -1; /* not a robust management frame */
473
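/* the MMIE is appended as the last element of the frame: element id,
 * length (16), 2-byte key id, 6-byte IPN and 8-byte MIC, 18 bytes in
 * all, so it is read from the tail of the skb below */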
474 mmie = (struct ieee80211_mmie *)
475 (skb->data + skb->len - sizeof(*mmie));
476 if (mmie->element_id != WLAN_EID_MMIE ||
477 mmie->length != sizeof(*mmie) - 2)
478 return -1;
479
480 return le16_to_cpu(mmie->key_id);
481 }
482
483
484 static ieee80211_rx_result
485 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
486 {
487 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
488 char *dev_addr = rx->sdata->vif.addr;
489
490 if (ieee80211_is_data(hdr->frame_control)) {
491 if (is_multicast_ether_addr(hdr->addr1)) {
492 if (ieee80211_has_tods(hdr->frame_control) ||
493 !ieee80211_has_fromds(hdr->frame_control))
494 return RX_DROP_MONITOR;
495 if (ether_addr_equal(hdr->addr3, dev_addr))
496 return RX_DROP_MONITOR;
497 } else {
498 if (!ieee80211_has_a4(hdr->frame_control))
499 return RX_DROP_MONITOR;
500 if (ether_addr_equal(hdr->addr4, dev_addr))
501 return RX_DROP_MONITOR;
502 }
503 }
504
505 /* If there is not an established peer link and this is not a peer link
506 * establishment frame, beacon or probe, drop the frame.
507 */
508
509 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
510 struct ieee80211_mgmt *mgmt;
511
512 if (!ieee80211_is_mgmt(hdr->frame_control))
513 return RX_DROP_MONITOR;
514
515 if (ieee80211_is_action(hdr->frame_control)) {
516 u8 category;
517 mgmt = (struct ieee80211_mgmt *)hdr;
518 category = mgmt->u.action.category;
519 if (category != WLAN_CATEGORY_MESH_ACTION &&
520 category != WLAN_CATEGORY_SELF_PROTECTED)
521 return RX_DROP_MONITOR;
522 return RX_CONTINUE;
523 }
524
525 if (ieee80211_is_probe_req(hdr->frame_control) ||
526 ieee80211_is_probe_resp(hdr->frame_control) ||
527 ieee80211_is_beacon(hdr->frame_control) ||
528 ieee80211_is_auth(hdr->frame_control))
529 return RX_CONTINUE;
530
531 return RX_DROP_MONITOR;
532
533 }
534
535 return RX_CONTINUE;
536 }
537
538 #define SEQ_MODULO 0x1000
539 #define SEQ_MASK 0xfff
540
541 static inline int seq_less(u16 sq1, u16 sq2)
542 {
543 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
544 }
545
546 static inline u16 seq_inc(u16 sq)
547 {
548 return (sq + 1) & SEQ_MASK;
549 }
550
551 static inline u16 seq_sub(u16 sq1, u16 sq2)
552 {
553 return (sq1 - sq2) & SEQ_MASK;
554 }
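/*
 * These helpers implement modulo-4096 sequence number arithmetic, so
 * comparisons stay correct across the wrap-around: seq_inc(4095) == 0,
 * seq_less(4090, 5) is true (4090 counts as older than 5 after the
 * wrap) and seq_sub(5, 4090) == 11.
 */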
555
556
557 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
558 struct tid_ampdu_rx *tid_agg_rx,
559 int index)
560 {
561 struct ieee80211_local *local = sdata->local;
562 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
563 struct ieee80211_rx_status *status;
564
565 lockdep_assert_held(&tid_agg_rx->reorder_lock);
566
567 if (!skb)
568 goto no_frame;
569
570 /* release the frame from the reorder ring buffer */
571 tid_agg_rx->stored_mpdu_num--;
572 tid_agg_rx->reorder_buf[index] = NULL;
573 status = IEEE80211_SKB_RXCB(skb);
574 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
575 skb_queue_tail(&local->rx_skb_queue, skb);
576
577 no_frame:
578 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
579 }
580
581 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
582 struct tid_ampdu_rx *tid_agg_rx,
583 u16 head_seq_num)
584 {
585 int index;
586
587 lockdep_assert_held(&tid_agg_rx->reorder_lock);
588
589 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
590 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
591 tid_agg_rx->buf_size;
592 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
593 }
594 }
595
596 /*
597 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
598 * the skb was added to the buffer longer than this time ago, the earlier
599 * frames that have not yet been received are assumed to be lost and the skb
600 * can be released for processing. This may also release other skb's from the
601 * reorder buffer if there are no additional gaps between the frames.
602 *
603 * Callers must hold tid_agg_rx->reorder_lock.
604 */
605 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
606
607 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
608 struct tid_ampdu_rx *tid_agg_rx)
609 {
610 int index, j;
611
612 lockdep_assert_held(&tid_agg_rx->reorder_lock);
613
614 /* release the buffer up to the next missing frame */
615 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
616 tid_agg_rx->buf_size;
617 if (!tid_agg_rx->reorder_buf[index] &&
618 tid_agg_rx->stored_mpdu_num) {
619 /*
620 * No buffers ready to be released, but check whether any
621 * frames in the reorder buffer have timed out.
622 */
623 int skipped = 1;
624 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
625 j = (j + 1) % tid_agg_rx->buf_size) {
626 if (!tid_agg_rx->reorder_buf[j]) {
627 skipped++;
628 continue;
629 }
630 if (skipped &&
631 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
632 HT_RX_REORDER_BUF_TIMEOUT))
633 goto set_release_timer;
634
635 ht_dbg_ratelimited(sdata,
636 "release an RX reorder frame due to timeout on earlier frames\n");
637 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
638
639 /*
640 * Increment the head seq# also for the skipped slots.
641 */
642 tid_agg_rx->head_seq_num =
643 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
644 skipped = 0;
645 }
646 } else while (tid_agg_rx->reorder_buf[index]) {
647 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
648 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
649 tid_agg_rx->buf_size;
650 }
651
652 if (tid_agg_rx->stored_mpdu_num) {
653 j = index = seq_sub(tid_agg_rx->head_seq_num,
654 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
655
656 for (; j != (index - 1) % tid_agg_rx->buf_size;
657 j = (j + 1) % tid_agg_rx->buf_size) {
658 if (tid_agg_rx->reorder_buf[j])
659 break;
660 }
661
662 set_release_timer:
663
664 mod_timer(&tid_agg_rx->reorder_timer,
665 tid_agg_rx->reorder_time[j] + 1 +
666 HT_RX_REORDER_BUF_TIMEOUT);
667 } else {
668 del_timer(&tid_agg_rx->reorder_timer);
669 }
670 }
671
672 /*
673 * As this function belongs to the RX path it must be under
674 * rcu_read_lock protection. It returns false if the frame
675 * can be processed immediately, true if it was consumed.
676 */
677 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
678 struct tid_ampdu_rx *tid_agg_rx,
679 struct sk_buff *skb)
680 {
681 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
682 u16 sc = le16_to_cpu(hdr->seq_ctrl);
683 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
684 u16 head_seq_num, buf_size;
685 int index;
686 bool ret = true;
687
688 spin_lock(&tid_agg_rx->reorder_lock);
689
690 buf_size = tid_agg_rx->buf_size;
691 head_seq_num = tid_agg_rx->head_seq_num;
692
693 /* frame with out of date sequence number */
694 if (seq_less(mpdu_seq_num, head_seq_num)) {
695 dev_kfree_skb(skb);
696 goto out;
697 }
698
699 /*
700 * If the frame sequence number exceeds our buffering window
701 * size, release some previous frames to make room for this one.
702 */
703 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
704 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
705 /* release stored frames up to new head to stack */
706 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
707 head_seq_num);
708 }
709
710 /* Now the new frame is always in the range of the reordering buffer */
711
712 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
713
714 /* check if we already stored this frame */
715 if (tid_agg_rx->reorder_buf[index]) {
716 dev_kfree_skb(skb);
717 goto out;
718 }
719
720 /*
721 * If the current MPDU is in the right order and nothing else
722 * is stored we can process it directly, no need to buffer it.
723 * If it is first but there's something stored, we may be able
724 * to release frames after this one.
725 */
726 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
727 tid_agg_rx->stored_mpdu_num == 0) {
728 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
729 ret = false;
730 goto out;
731 }
732
733 /* put the frame in the reordering buffer */
734 tid_agg_rx->reorder_buf[index] = skb;
735 tid_agg_rx->reorder_time[index] = jiffies;
736 tid_agg_rx->stored_mpdu_num++;
737 ieee80211_sta_reorder_release(sdata, tid_agg_rx);
738
739 out:
740 spin_unlock(&tid_agg_rx->reorder_lock);
741 return ret;
742 }
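/*
 * Example of the window handling above: with head_seq_num == 100 and
 * buf_size == 64, an incoming MPDU with sequence number 170 lies
 * outside the window (not less than 100 + 64), so the head moves to
 * seq_inc(seq_sub(170, 64)) == 107 and any frames stored for sequence
 * numbers 100..106 are released before the new frame is slotted into
 * the reorder buffer.
 */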
743
744 /*
745 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that are
746 * not consumed by the reorder logic are queued for immediate processing.
747 */
748 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
749 {
750 struct sk_buff *skb = rx->skb;
751 struct ieee80211_local *local = rx->local;
752 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
753 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
754 struct sta_info *sta = rx->sta;
755 struct tid_ampdu_rx *tid_agg_rx;
756 u16 sc;
757 u8 tid, ack_policy;
758
759 if (!ieee80211_is_data_qos(hdr->frame_control))
760 goto dont_reorder;
761
762 /*
763 * filter the QoS data rx stream according to
764 * STA/TID and check if this STA/TID is on aggregation
765 */
766
767 if (!sta)
768 goto dont_reorder;
769
770 ack_policy = *ieee80211_get_qos_ctl(hdr) &
771 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
772 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
773
774 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
775 if (!tid_agg_rx)
776 goto dont_reorder;
777
778 /* qos null data frames are excluded */
779 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
780 goto dont_reorder;
781
782 /* not part of a BA session */
783 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
784 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
785 goto dont_reorder;
786
787 /* not actually part of this BA session */
788 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
789 goto dont_reorder;
790
791 /* new, potentially un-ordered, ampdu frame - process it */
792
793 /* reset session timer */
794 if (tid_agg_rx->timeout)
795 tid_agg_rx->last_rx = jiffies;
796
797 /* if this mpdu is fragmented - terminate rx aggregation session */
798 sc = le16_to_cpu(hdr->seq_ctrl);
799 if (sc & IEEE80211_SCTL_FRAG) {
800 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
801 skb_queue_tail(&rx->sdata->skb_queue, skb);
802 ieee80211_queue_work(&local->hw, &rx->sdata->work);
803 return;
804 }
805
806 /*
807 * No locking needed -- we will only ever process one
808 * RX packet at a time, and thus own tid_agg_rx. All
809 * other code manipulating it needs to (and does) make
810 * sure that we cannot get to it any more before doing
811 * anything with it.
812 */
813 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
814 return;
815
816 dont_reorder:
817 skb_queue_tail(&local->rx_skb_queue, skb);
818 }
819
820 static ieee80211_rx_result debug_noinline
821 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
822 {
823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
824 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
825
826 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
827 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
828 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
829 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
830 hdr->seq_ctrl)) {
831 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
832 rx->local->dot11FrameDuplicateCount++;
833 rx->sta->num_duplicates++;
834 }
835 return RX_DROP_UNUSABLE;
836 } else
837 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
838 }
839
840 if (unlikely(rx->skb->len < 16)) {
841 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
842 return RX_DROP_MONITOR;
843 }
844
845 /* Drop disallowed frame classes based on STA auth/assoc state;
846 * IEEE 802.11, Chap 5.5.
847 *
848 * mac80211 filters only based on association state, i.e. it drops
849 * Class 3 frames from non-associated stations. hostapd sends
850 * deauth/disassoc frames when needed. In addition, hostapd is
851 * responsible for filtering on both auth and assoc states.
852 */
853
854 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
855 return ieee80211_rx_mesh_check(rx);
856
857 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
858 ieee80211_is_pspoll(hdr->frame_control)) &&
859 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
860 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
861 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
862 /*
863 * accept port control frames from the AP even when it's not
864 * yet marked ASSOC to prevent a race where we don't set the
865 * assoc bit quickly enough before it sends the first frame
866 */
867 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
868 ieee80211_is_data_present(hdr->frame_control)) {
869 u16 ethertype;
870 u8 *payload;
871
872 payload = rx->skb->data +
873 ieee80211_hdrlen(hdr->frame_control);
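/* payload now points at the LLC/SNAP (RFC 1042) header;
 * bytes 6 and 7 carry the EtherType */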
874 ethertype = (payload[6] << 8) | payload[7];
875 if (cpu_to_be16(ethertype) ==
876 rx->sdata->control_port_protocol)
877 return RX_CONTINUE;
878 }
879
880 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
881 cfg80211_rx_spurious_frame(rx->sdata->dev,
882 hdr->addr2,
883 GFP_ATOMIC))
884 return RX_DROP_UNUSABLE;
885
886 return RX_DROP_MONITOR;
887 }
888
889 return RX_CONTINUE;
890 }
891
892
893 static ieee80211_rx_result debug_noinline
894 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
895 {
896 struct sk_buff *skb = rx->skb;
897 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
898 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
899 int keyidx;
900 int hdrlen;
901 ieee80211_rx_result result = RX_DROP_UNUSABLE;
902 struct ieee80211_key *sta_ptk = NULL;
903 int mmie_keyidx = -1;
904 __le16 fc;
905
906 /*
907 * Key selection 101
908 *
909 * There are four types of keys:
910 * - GTK (group keys)
911 * - IGTK (group keys for management frames)
912 * - PTK (pairwise keys)
913 * - STK (station-to-station pairwise keys)
914 *
915 * When selecting a key, we have to distinguish between multicast
916 * (including broadcast) and unicast frames, the latter can only
917 * use PTKs and STKs while the former always use GTKs and IGTKs.
918 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
919 * unicast frames can also use key indices like GTKs. Hence, if we
920 * don't have a PTK/STK we check the key index for a WEP key.
921 *
922 * Note that in a regular BSS, multicast frames are sent by the
923 * AP only, associated stations unicast the frame to the AP first
924 * which then multicasts it on their behalf.
925 *
926 * There is also a slight problem in IBSS mode: GTKs are negotiated
927 * with each station, which is something we don't currently handle.
928 * The spec seems to expect that one negotiates the same key with
929 * every station but there's no such requirement; VLANs could be
930 * possible.
931 */
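/*
 * In short, the selection below works out to: unicast frames use the
 * station's PTK when one exists; BIP-protected multicast management
 * frames select an IGTK via the MMIE key index; unprotected frames
 * only pick up a key so that frames that should have been encrypted
 * can be dropped later; for everything else the key index is taken
 * from the IV (keyid byte >> 6) and used to look up a per-station
 * GTK or a default/WEP key.
 */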
932
933 /*
934 * No point in finding a key and decrypting if the frame is neither
935 * addressed to us nor a multicast frame.
936 */
937 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
938 return RX_CONTINUE;
939
940 /* start without a key */
941 rx->key = NULL;
942
943 if (rx->sta)
944 sta_ptk = rcu_dereference(rx->sta->ptk);
945
946 fc = hdr->frame_control;
947
948 if (!ieee80211_has_protected(fc))
949 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
950
951 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
952 rx->key = sta_ptk;
953 if ((status->flag & RX_FLAG_DECRYPTED) &&
954 (status->flag & RX_FLAG_IV_STRIPPED))
955 return RX_CONTINUE;
956 /* Skip decryption if the frame is not protected. */
957 if (!ieee80211_has_protected(fc))
958 return RX_CONTINUE;
959 } else if (mmie_keyidx >= 0) {
960 /* Broadcast/multicast robust management frame / BIP */
961 if ((status->flag & RX_FLAG_DECRYPTED) &&
962 (status->flag & RX_FLAG_IV_STRIPPED))
963 return RX_CONTINUE;
964
965 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
966 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
967 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
968 if (rx->sta)
969 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
970 if (!rx->key)
971 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
972 } else if (!ieee80211_has_protected(fc)) {
973 /*
974 * The frame was not protected, so skip decryption. However, we
975 * need to set rx->key if there is a key that could have been
976 * used so that the frame may be dropped if encryption would
977 * have been expected.
978 */
979 struct ieee80211_key *key = NULL;
980 struct ieee80211_sub_if_data *sdata = rx->sdata;
981 int i;
982
983 if (ieee80211_is_mgmt(fc) &&
984 is_multicast_ether_addr(hdr->addr1) &&
985 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
986 rx->key = key;
987 else {
988 if (rx->sta) {
989 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
990 key = rcu_dereference(rx->sta->gtk[i]);
991 if (key)
992 break;
993 }
994 }
995 if (!key) {
996 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
997 key = rcu_dereference(sdata->keys[i]);
998 if (key)
999 break;
1000 }
1001 }
1002 if (key)
1003 rx->key = key;
1004 }
1005 return RX_CONTINUE;
1006 } else {
1007 u8 keyid;
1008 /*
1009 * The device doesn't give us the IV so we won't be
1010 * able to look up the key. That's ok though, we
1011 * don't need to decrypt the frame, we just won't
1012 * be able to keep statistics accurate.
1013 * Except for key threshold notifications, should
1014 * we somehow allow the driver to tell us which key
1015 * the hardware used if this flag is set?
1016 */
1017 if ((status->flag & RX_FLAG_DECRYPTED) &&
1018 (status->flag & RX_FLAG_IV_STRIPPED))
1019 return RX_CONTINUE;
1020
1021 hdrlen = ieee80211_hdrlen(fc);
1022
1023 if (rx->skb->len < 8 + hdrlen)
1024 return RX_DROP_UNUSABLE; /* TODO: count this? */
1025
1026 /*
1027 * no need to call ieee80211_wep_get_keyidx,
1028 * it verifies a bunch of things we've done already
1029 */
1030 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1031 keyidx = keyid >> 6;
1032
1033 /* check per-station GTK first, if multicast packet */
1034 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1035 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1036
1037 /* if not found, try default key */
1038 if (!rx->key) {
1039 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1040
1041 /*
1042 * RSNA-protected unicast frames should always be
1043 * sent with pairwise or station-to-station keys,
1044 * but for WEP we allow using a key index as well.
1045 */
1046 if (rx->key &&
1047 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1048 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1049 !is_multicast_ether_addr(hdr->addr1))
1050 rx->key = NULL;
1051 }
1052 }
1053
1054 if (rx->key) {
1055 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1056 return RX_DROP_MONITOR;
1057
1058 rx->key->tx_rx_count++;
1059 /* TODO: add threshold stuff again */
1060 } else {
1061 return RX_DROP_MONITOR;
1062 }
1063
1064 switch (rx->key->conf.cipher) {
1065 case WLAN_CIPHER_SUITE_WEP40:
1066 case WLAN_CIPHER_SUITE_WEP104:
1067 result = ieee80211_crypto_wep_decrypt(rx);
1068 break;
1069 case WLAN_CIPHER_SUITE_TKIP:
1070 result = ieee80211_crypto_tkip_decrypt(rx);
1071 break;
1072 case WLAN_CIPHER_SUITE_CCMP:
1073 result = ieee80211_crypto_ccmp_decrypt(rx);
1074 break;
1075 case WLAN_CIPHER_SUITE_AES_CMAC:
1076 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1077 break;
1078 default:
1079 /*
1080 * We can reach here only with HW-only algorithms
1081 * but why didn't it decrypt the frame?!
1082 */
1083 return RX_DROP_UNUSABLE;
1084 }
1085
1086 /* the hdr variable is invalid after the decrypt handlers */
1087
1088 /* either the frame has been decrypted or will be dropped */
1089 status->flag |= RX_FLAG_DECRYPTED;
1090
1091 return result;
1092 }
1093
1094 static ieee80211_rx_result debug_noinline
1095 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1096 {
1097 struct ieee80211_local *local;
1098 struct ieee80211_hdr *hdr;
1099 struct sk_buff *skb;
1100
1101 local = rx->local;
1102 skb = rx->skb;
1103 hdr = (struct ieee80211_hdr *) skb->data;
1104
1105 if (!local->pspolling)
1106 return RX_CONTINUE;
1107
1108 if (!ieee80211_has_fromds(hdr->frame_control))
1109 /* this is not from AP */
1110 return RX_CONTINUE;
1111
1112 if (!ieee80211_is_data(hdr->frame_control))
1113 return RX_CONTINUE;
1114
1115 if (!ieee80211_has_moredata(hdr->frame_control)) {
1116 /* AP has no more frames buffered for us */
1117 local->pspolling = false;
1118 return RX_CONTINUE;
1119 }
1120
1121 /* more data bit is set, let's request a new frame from the AP */
1122 ieee80211_send_pspoll(local, rx->sdata);
1123
1124 return RX_CONTINUE;
1125 }
1126
1127 static void ap_sta_ps_start(struct sta_info *sta)
1128 {
1129 struct ieee80211_sub_if_data *sdata = sta->sdata;
1130 struct ieee80211_local *local = sdata->local;
1131
1132 atomic_inc(&sdata->bss->num_sta_ps);
1133 set_sta_flag(sta, WLAN_STA_PS_STA);
1134 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1135 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1136 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1137 sta->sta.addr, sta->sta.aid);
1138 }
1139
1140 static void ap_sta_ps_end(struct sta_info *sta)
1141 {
1142 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1143 sta->sta.addr, sta->sta.aid);
1144
1145 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1146 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1147 sta->sta.addr, sta->sta.aid);
1148 return;
1149 }
1150
1151 ieee80211_sta_ps_deliver_wakeup(sta);
1152 }
1153
1154 int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1155 {
1156 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1157 bool in_ps;
1158
1159 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1160
1161 /* Don't let the same PS state be set twice */
1162 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1163 if ((start && in_ps) || (!start && !in_ps))
1164 return -EINVAL;
1165
1166 if (start)
1167 ap_sta_ps_start(sta_inf);
1168 else
1169 ap_sta_ps_end(sta_inf);
1170
1171 return 0;
1172 }
1173 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
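/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver that advertises IEEE80211_HW_AP_LINK_PS and detects a peer
 * entering or leaving power save would report it with
 *
 *	ret = ieee80211_sta_ps_transition(pubsta, entering_ps);
 *
 * where -EINVAL means the station was already in the reported state.
 */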
1174
1175 static ieee80211_rx_result debug_noinline
1176 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1177 {
1178 struct ieee80211_sub_if_data *sdata = rx->sdata;
1179 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1180 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1181 int tid, ac;
1182
1183 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1184 return RX_CONTINUE;
1185
1186 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1187 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1188 return RX_CONTINUE;
1189
1190 /*
1191 * The device handles station powersave, so don't do anything about
1192 * uAPSD and PS-Poll frames (the latter shouldn't even be passed
1193 * up to mac80211 by the device since they're handled there).
1194 */
1195 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1196 return RX_CONTINUE;
1197
1198 /*
1199 * Don't do anything if the station isn't already asleep. In
1200 * the uAPSD case, the station will probably be marked asleep,
1201 * in the PS-Poll case the station must be confused ...
1202 */
1203 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1204 return RX_CONTINUE;
1205
1206 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1207 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1208 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1209 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1210 else
1211 set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1212 }
1213
1214 /* Free PS Poll skb here instead of returning RX_DROP that would
1215 * count as a dropped frame. */
1216 dev_kfree_skb(rx->skb);
1217
1218 return RX_QUEUED;
1219 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1220 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1221 ieee80211_has_pm(hdr->frame_control) &&
1222 (ieee80211_is_data_qos(hdr->frame_control) ||
1223 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1224 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1225 ac = ieee802_1d_to_ac[tid & 7];
1226
1227 /*
1228 * If this AC is not trigger-enabled do nothing.
1229 *
1230 * NB: This could/should check a separate bitmap of trigger-
1231 * enabled queues, but for now we only implement uAPSD w/o
1232 * TSPEC changes to the ACs, so they're always the same.
1233 */
1234 if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1235 return RX_CONTINUE;
1236
1237 /* if we are in a service period, do nothing */
1238 if (test_sta_flag(rx->sta, WLAN_STA_SP))
1239 return RX_CONTINUE;
1240
1241 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1242 ieee80211_sta_ps_deliver_uapsd(rx->sta);
1243 else
1244 set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1245 }
1246
1247 return RX_CONTINUE;
1248 }
1249
1250 static ieee80211_rx_result debug_noinline
1251 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1252 {
1253 struct sta_info *sta = rx->sta;
1254 struct sk_buff *skb = rx->skb;
1255 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1256 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1257
1258 if (!sta)
1259 return RX_CONTINUE;
1260
1261 /*
1262 * Update last_rx only for IBSS packets which are for the current
1263 * BSSID to avoid keeping the current IBSS network alive in cases
1264 * where other STAs start using a different BSSID.
1265 */
1266 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1267 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1268 NL80211_IFTYPE_ADHOC);
1269 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
1270 sta->last_rx = jiffies;
1271 if (ieee80211_is_data(hdr->frame_control)) {
1272 sta->last_rx_rate_idx = status->rate_idx;
1273 sta->last_rx_rate_flag = status->flag;
1274 }
1275 }
1276 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1277 /*
1278 * Mesh beacons will update last_rx if they are found to
1279 * match the current local configuration when processed.
1280 */
1281 sta->last_rx = jiffies;
1282 if (ieee80211_is_data(hdr->frame_control)) {
1283 sta->last_rx_rate_idx = status->rate_idx;
1284 sta->last_rx_rate_flag = status->flag;
1285 }
1286 }
1287
1288 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1289 return RX_CONTINUE;
1290
1291 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1292 ieee80211_sta_rx_notify(rx->sdata, hdr);
1293
1294 sta->rx_fragments++;
1295 sta->rx_bytes += rx->skb->len;
1296 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1297 sta->last_signal = status->signal;
1298 ewma_add(&sta->avg_signal, -status->signal);
1299 }
1300
1301 /*
1302 * Change STA power saving mode only at the end of a frame
1303 * exchange sequence.
1304 */
1305 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1306 !ieee80211_has_morefrags(hdr->frame_control) &&
1307 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1308 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1309 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1310 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1311 /*
1312 * Ignore doze->wake transitions that are
1313 * indicated by non-data frames, the standard
1314 * is unclear here, but for example going to
1315 * PS mode and then scanning would cause a
1316 * doze->wake transition for the probe request,
1317 * and that is clearly undesirable.
1318 */
1319 if (ieee80211_is_data(hdr->frame_control) &&
1320 !ieee80211_has_pm(hdr->frame_control))
1321 ap_sta_ps_end(sta);
1322 } else {
1323 if (ieee80211_has_pm(hdr->frame_control))
1324 ap_sta_ps_start(sta);
1325 }
1326 }
1327
1328 /*
1329 * Drop (qos-)data::nullfunc frames silently, since they
1330 * are used only to control station power saving mode.
1331 */
1332 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1333 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1334 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1335
1336 /*
1337 * If we receive a 4-addr nullfunc frame from a STA
1338 * that has not been moved to a 4-addr STA vlan yet, send
1339 * the event to userspace and, for older hostapd, drop
1340 * the frame to the monitor interface.
1341 */
1342 if (ieee80211_has_a4(hdr->frame_control) &&
1343 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1344 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1345 !rx->sdata->u.vlan.sta))) {
1346 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1347 cfg80211_rx_unexpected_4addr_frame(
1348 rx->sdata->dev, sta->sta.addr,
1349 GFP_ATOMIC);
1350 return RX_DROP_MONITOR;
1351 }
1352 /*
1353 * Update counter and free packet here to avoid
1354 * counting this as a dropped packet.
1355 */
1356 sta->rx_packets++;
1357 dev_kfree_skb(rx->skb);
1358 return RX_QUEUED;
1359 }
1360
1361 return RX_CONTINUE;
1362 } /* ieee80211_rx_h_sta_process */
1363
1364 static inline struct ieee80211_fragment_entry *
1365 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1366 unsigned int frag, unsigned int seq, int rx_queue,
1367 struct sk_buff **skb)
1368 {
1369 struct ieee80211_fragment_entry *entry;
1370 int idx;
1371
1372 idx = sdata->fragment_next;
1373 entry = &sdata->fragments[sdata->fragment_next++];
1374 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1375 sdata->fragment_next = 0;
1376
1377 if (!skb_queue_empty(&entry->skb_list))
1378 __skb_queue_purge(&entry->skb_list);
1379
1380 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1381 *skb = NULL;
1382 entry->first_frag_time = jiffies;
1383 entry->seq = seq;
1384 entry->rx_queue = rx_queue;
1385 entry->last_frag = frag;
1386 entry->ccmp = 0;
1387 entry->extra_len = 0;
1388
1389 return entry;
1390 }
1391
1392 static inline struct ieee80211_fragment_entry *
1393 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1394 unsigned int frag, unsigned int seq,
1395 int rx_queue, struct ieee80211_hdr *hdr)
1396 {
1397 struct ieee80211_fragment_entry *entry;
1398 int i, idx;
1399
1400 idx = sdata->fragment_next;
1401 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1402 struct ieee80211_hdr *f_hdr;
1403
1404 idx--;
1405 if (idx < 0)
1406 idx = IEEE80211_FRAGMENT_MAX - 1;
1407
1408 entry = &sdata->fragments[idx];
1409 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1410 entry->rx_queue != rx_queue ||
1411 entry->last_frag + 1 != frag)
1412 continue;
1413
1414 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1415
1416 /*
1417 * Check ftype and addresses are equal, else check next fragment
1418 */
1419 if (((hdr->frame_control ^ f_hdr->frame_control) &
1420 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1421 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
1422 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
1423 continue;
1424
1425 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1426 __skb_queue_purge(&entry->skb_list);
1427 continue;
1428 }
1429 return entry;
1430 }
1431
1432 return NULL;
1433 }
1434
1435 static ieee80211_rx_result debug_noinline
1436 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1437 {
1438 struct ieee80211_hdr *hdr;
1439 u16 sc;
1440 __le16 fc;
1441 unsigned int frag, seq;
1442 struct ieee80211_fragment_entry *entry;
1443 struct sk_buff *skb;
1444 struct ieee80211_rx_status *status;
1445
1446 hdr = (struct ieee80211_hdr *)rx->skb->data;
1447 fc = hdr->frame_control;
1448 sc = le16_to_cpu(hdr->seq_ctrl);
1449 frag = sc & IEEE80211_SCTL_FRAG;
1450
1451 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1452 (rx->skb)->len < 24 ||
1453 is_multicast_ether_addr(hdr->addr1))) {
1454 /* not fragmented */
1455 goto out;
1456 }
1457 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1458
1459 if (skb_linearize(rx->skb))
1460 return RX_DROP_UNUSABLE;
1461
1462 /*
1463 * skb_linearize() might change the skb->data and
1464 * previously cached variables (in this case, hdr) need to
1465 * be refreshed with the new data.
1466 */
1467 hdr = (struct ieee80211_hdr *)rx->skb->data;
1468 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1469
1470 if (frag == 0) {
1471 /* This is the first fragment of a new frame. */
1472 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1473 rx->seqno_idx, &(rx->skb));
1474 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1475 ieee80211_has_protected(fc)) {
1476 int queue = rx->security_idx;
1477 /* Store CCMP PN so that we can verify that the next
1478 * fragment has a sequential PN value. */
1479 entry->ccmp = 1;
1480 memcpy(entry->last_pn,
1481 rx->key->u.ccmp.rx_pn[queue],
1482 CCMP_PN_LEN);
1483 }
1484 return RX_QUEUED;
1485 }
1486
1487 /* This is a fragment for a frame that should already be pending in
1488 * the fragment cache. Add this fragment to the end of the pending entry.
1489 */
1490 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1491 rx->seqno_idx, hdr);
1492 if (!entry) {
1493 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1494 return RX_DROP_MONITOR;
1495 }
1496
1497 /* Verify that MPDUs within one MSDU have sequential PN values.
1498 * (IEEE 802.11i, 8.3.3.4.5) */
1499 if (entry->ccmp) {
1500 int i;
1501 u8 pn[CCMP_PN_LEN], *rpn;
1502 int queue;
1503 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1504 return RX_DROP_UNUSABLE;
1505 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
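/* increment the expected 48-bit PN by one; the PN is stored with its
 * most significant byte first, so the carry starts at the last byte */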
1506 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1507 pn[i]++;
1508 if (pn[i])
1509 break;
1510 }
1511 queue = rx->security_idx;
1512 rpn = rx->key->u.ccmp.rx_pn[queue];
1513 if (memcmp(pn, rpn, CCMP_PN_LEN))
1514 return RX_DROP_UNUSABLE;
1515 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1516 }
1517
1518 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1519 __skb_queue_tail(&entry->skb_list, rx->skb);
1520 entry->last_frag = frag;
1521 entry->extra_len += rx->skb->len;
1522 if (ieee80211_has_morefrags(fc)) {
1523 rx->skb = NULL;
1524 return RX_QUEUED;
1525 }
1526
1527 rx->skb = __skb_dequeue(&entry->skb_list);
1528 if (skb_tailroom(rx->skb) < entry->extra_len) {
1529 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1530 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1531 GFP_ATOMIC))) {
1532 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1533 __skb_queue_purge(&entry->skb_list);
1534 return RX_DROP_UNUSABLE;
1535 }
1536 }
1537 while ((skb = __skb_dequeue(&entry->skb_list))) {
1538 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1539 dev_kfree_skb(skb);
1540 }
1541
1542 /* Complete frame has been reassembled - process it now */
1543 status = IEEE80211_SKB_RXCB(rx->skb);
1544 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1545
1546 out:
1547 if (rx->sta)
1548 rx->sta->rx_packets++;
1549 if (is_multicast_ether_addr(hdr->addr1))
1550 rx->local->dot11MulticastReceivedFrameCount++;
1551 else
1552 ieee80211_led_rx(rx->local);
1553 return RX_CONTINUE;
1554 }
1555
1556 static int
1557 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1558 {
1559 if (unlikely(!rx->sta ||
1560 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1561 return -EACCES;
1562
1563 return 0;
1564 }
1565
1566 static int
1567 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1568 {
1569 struct sk_buff *skb = rx->skb;
1570 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1571
1572 /*
1573 * Pass through unencrypted frames if the hardware has
1574 * decrypted them already.
1575 */
1576 if (status->flag & RX_FLAG_DECRYPTED)
1577 return 0;
1578
1579 /* Drop unencrypted frames if key is set. */
1580 if (unlikely(!ieee80211_has_protected(fc) &&
1581 !ieee80211_is_nullfunc(fc) &&
1582 ieee80211_is_data(fc) &&
1583 (rx->key || rx->sdata->drop_unencrypted)))
1584 return -EACCES;
1585
1586 return 0;
1587 }
1588
1589 static int
1590 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1591 {
1592 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1593 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1594 __le16 fc = hdr->frame_control;
1595
1596 /*
1597 * Pass through unencrypted frames if the hardware has
1598 * decrypted them already.
1599 */
1600 if (status->flag & RX_FLAG_DECRYPTED)
1601 return 0;
1602
1603 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1604 if (unlikely(!ieee80211_has_protected(fc) &&
1605 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1606 rx->key)) {
1607 if (ieee80211_is_deauth(fc))
1608 cfg80211_send_unprot_deauth(rx->sdata->dev,
1609 rx->skb->data,
1610 rx->skb->len);
1611 else if (ieee80211_is_disassoc(fc))
1612 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1613 rx->skb->data,
1614 rx->skb->len);
1615 return -EACCES;
1616 }
1617 /* BIP does not use the Protected field, so we need to check the MMIE */
1618 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1619 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1620 if (ieee80211_is_deauth(fc))
1621 cfg80211_send_unprot_deauth(rx->sdata->dev,
1622 rx->skb->data,
1623 rx->skb->len);
1624 else if (ieee80211_is_disassoc(fc))
1625 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1626 rx->skb->data,
1627 rx->skb->len);
1628 return -EACCES;
1629 }
1630 /*
1631 * When using MFP, Action frames are not allowed prior to
1632 * having configured keys.
1633 */
1634 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1635 ieee80211_is_robust_mgmt_frame(
1636 (struct ieee80211_hdr *) rx->skb->data)))
1637 return -EACCES;
1638 }
1639
1640 return 0;
1641 }
1642
1643 static int
1644 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1645 {
1646 struct ieee80211_sub_if_data *sdata = rx->sdata;
1647 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1648 bool check_port_control = false;
1649 struct ethhdr *ehdr;
1650 int ret;
1651
1652 *port_control = false;
1653 if (ieee80211_has_a4(hdr->frame_control) &&
1654 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1655 return -1;
1656
1657 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1658 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1659
1660 if (!sdata->u.mgd.use_4addr)
1661 return -1;
1662 else
1663 check_port_control = true;
1664 }
1665
1666 if (is_multicast_ether_addr(hdr->addr1) &&
1667 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1668 return -1;
1669
1670 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1671 if (ret < 0)
1672 return ret;
1673
1674 ehdr = (struct ethhdr *) rx->skb->data;
1675 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1676 *port_control = true;
1677 else if (check_port_control)
1678 return -1;
1679
1680 return 0;
1681 }
1682
1683 /*
1684 * requires that rx->skb is a frame with ethernet header
1685 */
1686 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1687 {
1688 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1689 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1690 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1691
1692 /*
1693 * Allow EAPOL frames to us/the PAE group address regardless
1694 * of whether the frame was encrypted or not.
1695 */
1696 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1697 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
1698 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
1699 return true;
1700
1701 if (ieee80211_802_1x_port_control(rx) ||
1702 ieee80211_drop_unencrypted(rx, fc))
1703 return false;
1704
1705 return true;
1706 }
1707
1708 /*
1709 * requires that rx->skb is a frame with ethernet header
1710 */
1711 static void
1712 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1713 {
1714 struct ieee80211_sub_if_data *sdata = rx->sdata;
1715 struct net_device *dev = sdata->dev;
1716 struct sk_buff *skb, *xmit_skb;
1717 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1718 struct sta_info *dsta;
1719 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1720
1721 skb = rx->skb;
1722 xmit_skb = NULL;
1723
1724 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1725 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1726 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1727 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1728 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1729 if (is_multicast_ether_addr(ehdr->h_dest)) {
1730 /*
1731 * send multicast frames both to higher layers in
1732 * local net stack and back to the wireless medium
1733 */
1734 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1735 if (!xmit_skb)
1736 net_info_ratelimited("%s: failed to clone multicast frame\n",
1737 dev->name);
1738 } else {
1739 dsta = sta_info_get(sdata, skb->data);
1740 if (dsta) {
1741 /*
1742 * The destination station is associated to
1743 * this AP (in this VLAN), so send the frame
1744 * directly to it and do not pass it to local
1745 * net stack.
1746 */
1747 xmit_skb = skb;
1748 skb = NULL;
1749 }
1750 }
1751 }
1752
1753 if (skb) {
1754 int align __maybe_unused;
1755
1756 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1757 /*
1758 * 'align' will only take the values 0 or 2 here
1759 * since all frames are required to be aligned
1760 * to 2-byte boundaries when being passed to
1761 * mac80211. That also explains the __skb_push()
1762 * below.
1763 */
1764 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1765 if (align) {
1766 if (WARN_ON(skb_headroom(skb) < 3)) {
1767 dev_kfree_skb(skb);
1768 skb = NULL;
1769 } else {
1770 u8 *data = skb->data;
1771 size_t len = skb_headlen(skb);
1772 skb->data -= align;
1773 memmove(skb->data, data, len);
1774 skb_set_tail_pointer(skb, len);
1775 }
1776 }
1777 #endif
1778
1779 if (skb) {
1780 /* deliver to local stack */
1781 skb->protocol = eth_type_trans(skb, dev);
1782 memset(skb->cb, 0, sizeof(skb->cb));
1783 netif_receive_skb(skb);
1784 }
1785 }
1786
1787 if (xmit_skb) {
1788 /*
1789 * Send to wireless media and increase priority by 256 to
1790 * keep the received priority instead of reclassifying
1791 * the frame (see cfg80211_classify8021d).
1792 */
1793 xmit_skb->priority += 256;
1794 xmit_skb->protocol = htons(ETH_P_802_3);
1795 skb_reset_network_header(xmit_skb);
1796 skb_reset_mac_header(xmit_skb);
1797 dev_queue_xmit(xmit_skb);
1798 }
1799 }
1800
1801 static ieee80211_rx_result debug_noinline
1802 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1803 {
1804 struct net_device *dev = rx->sdata->dev;
1805 struct sk_buff *skb = rx->skb;
1806 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1807 __le16 fc = hdr->frame_control;
1808 struct sk_buff_head frame_list;
1809 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1810
1811 if (unlikely(!ieee80211_is_data(fc)))
1812 return RX_CONTINUE;
1813
1814 if (unlikely(!ieee80211_is_data_present(fc)))
1815 return RX_DROP_MONITOR;
1816
1817 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1818 return RX_CONTINUE;
1819
1820 if (ieee80211_has_a4(hdr->frame_control) &&
1821 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1822 !rx->sdata->u.vlan.sta)
1823 return RX_DROP_UNUSABLE;
1824
1825 if (is_multicast_ether_addr(hdr->addr1) &&
1826 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1827 rx->sdata->u.vlan.sta) ||
1828 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1829 rx->sdata->u.mgd.use_4addr)))
1830 return RX_DROP_UNUSABLE;
1831
1832 skb->dev = dev;
1833 __skb_queue_head_init(&frame_list);
1834
1835 if (skb_linearize(skb))
1836 return RX_DROP_UNUSABLE;
1837
1838 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1839 rx->sdata->vif.type,
1840 rx->local->hw.extra_tx_headroom, true);
1841
1842 while (!skb_queue_empty(&frame_list)) {
1843 rx->skb = __skb_dequeue(&frame_list);
1844
1845 if (!ieee80211_frame_allowed(rx, fc)) {
1846 dev_kfree_skb(rx->skb);
1847 continue;
1848 }
1849 dev->stats.rx_packets++;
1850 dev->stats.rx_bytes += rx->skb->len;
1851
1852 ieee80211_deliver_skb(rx);
1853 }
1854
1855 return RX_QUEUED;
1856 }
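/*
 * ieee80211_amsdu_to_8023s() above splits the A-MSDU payload into
 * individual 802.3 frames.  As a rough illustration of the container it
 * walks: each subframe begins with a DA/SA/length triple and is padded
 * to a 4-byte boundary (the last subframe carries no padding).  The
 * struct and helper below are hypothetical sketches, not the parser
 * cfg80211 actually uses.
 */
#if 0
struct example_amsdu_subframe {
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	__be16 len;		/* payload length, padding not included */
	/* payload follows */
} __packed;

static void example_walk_amsdu(const u8 *buf, size_t buflen)
{
	while (buflen > sizeof(struct example_amsdu_subframe)) {
		const struct example_amsdu_subframe *sf = (const void *)buf;
		size_t sublen = sizeof(*sf) + be16_to_cpu(sf->len);
		size_t padded = round_up(sublen, 4);

		if (sublen > buflen)
			break;			/* truncated subframe */

		/* ... build an skb from sf->da/sf->sa and the payload ... */

		if (padded >= buflen)
			break;			/* that was the last one */
		buf += padded;
		buflen -= padded;
	}
}
#endif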
1857
1858 #ifdef CONFIG_MAC80211_MESH
1859 static ieee80211_rx_result
1860 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1861 {
1862 struct ieee80211_hdr *fwd_hdr, *hdr;
1863 struct ieee80211_tx_info *info;
1864 struct ieee80211s_hdr *mesh_hdr;
1865 struct sk_buff *skb = rx->skb, *fwd_skb;
1866 struct ieee80211_local *local = rx->local;
1867 struct ieee80211_sub_if_data *sdata = rx->sdata;
1868 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1869 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1870 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
1871 u16 q, hdrlen;
1872
1873 hdr = (struct ieee80211_hdr *) skb->data;
1874 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1875 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1876
1877 /* frame is in RMC, don't forward */
1878 if (ieee80211_is_data(hdr->frame_control) &&
1879 is_multicast_ether_addr(hdr->addr1) &&
1880 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1881 return RX_DROP_MONITOR;
1882
1883 if (!ieee80211_is_data(hdr->frame_control))
1884 return RX_CONTINUE;
1885
1886 if (!mesh_hdr->ttl)
1887 return RX_DROP_MONITOR;
1888
1889 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1890 struct mesh_path *mppath;
1891 char *proxied_addr;
1892 char *mpp_addr;
1893
1894 if (is_multicast_ether_addr(hdr->addr1)) {
1895 mpp_addr = hdr->addr3;
1896 proxied_addr = mesh_hdr->eaddr1;
1897 } else {
1898 mpp_addr = hdr->addr4;
1899 proxied_addr = mesh_hdr->eaddr2;
1900 }
1901
1902 rcu_read_lock();
1903 mppath = mpp_path_lookup(proxied_addr, sdata);
1904 if (!mppath) {
1905 mpp_path_add(proxied_addr, mpp_addr, sdata);
1906 } else {
1907 spin_lock_bh(&mppath->state_lock);
1908 if (!ether_addr_equal(mppath->mpp, mpp_addr))
1909 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1910 spin_unlock_bh(&mppath->state_lock);
1911 }
1912 rcu_read_unlock();
1913 }
1914
1915 /* Frame has reached destination. Don't forward */
1916 if (!is_multicast_ether_addr(hdr->addr1) &&
1917 ether_addr_equal(sdata->vif.addr, hdr->addr3))
1918 return RX_CONTINUE;
1919
1920 q = ieee80211_select_queue_80211(sdata, skb, hdr);
1921 if (ieee80211_queue_stopped(&local->hw, q)) {
1922 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
1923 return RX_DROP_MONITOR;
1924 }
1925 skb_set_queue_mapping(skb, q);
1926
1927 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1928 goto out;
1929
1930 if (!--mesh_hdr->ttl) {
1931 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
1932 return RX_DROP_MONITOR;
1933 }
1934
1935 if (!ifmsh->mshcfg.dot11MeshForwarding)
1936 goto out;
1937
1938 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1939 if (!fwd_skb) {
1940 net_info_ratelimited("%s: failed to clone mesh frame\n",
1941 sdata->name);
1942 goto out;
1943 }
1944
1945 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1946 info = IEEE80211_SKB_CB(fwd_skb);
1947 memset(info, 0, sizeof(*info));
1948 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1949 info->control.vif = &rx->sdata->vif;
1950 info->control.jiffies = jiffies;
1951 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1952 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
1953 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1954 } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
1955 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
1956 } else {
1957 /* unable to resolve next hop */
1958 mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
1959 0, reason, fwd_hdr->addr2, sdata);
1960 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
1961 kfree_skb(fwd_skb);
1962 return RX_DROP_MONITOR;
1963 }
1964
1965 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
1966 ieee80211_add_pending_skb(local, fwd_skb);
1967 out:
1968 if (is_multicast_ether_addr(hdr->addr1) ||
1969 sdata->dev->flags & IFF_PROMISC)
1970 return RX_CONTINUE;
1971 else
1972 return RX_DROP_MONITOR;
1973 }
1974 #endif
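/*
 * In the address-extension (MESH_FLAGS_AE) case above, the proxied
 * (external) address travels in the mesh header while the mesh STA that
 * proxies it sits in addr3 (group-addressed) or addr4 (individually
 * addressed).  A condensed sketch of just that selection, mirroring the
 * code above (helper name hypothetical):
 */
#if 0
static void example_mesh_ae_addresses(struct ieee80211_hdr *hdr,
				      struct ieee80211s_hdr *mesh_hdr,
				      u8 **mpp_addr, u8 **proxied_addr)
{
	if (is_multicast_ether_addr(hdr->addr1)) {
		/* group addressed: mesh SA in addr3, external SA in eaddr1 */
		*mpp_addr = hdr->addr3;
		*proxied_addr = mesh_hdr->eaddr1;
	} else {
		/* 4-address unicast: mesh SA in addr4, external SA in eaddr2 */
		*mpp_addr = hdr->addr4;
		*proxied_addr = mesh_hdr->eaddr2;
	}
}
#endif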
1975
1976 static ieee80211_rx_result debug_noinline
1977 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1978 {
1979 struct ieee80211_sub_if_data *sdata = rx->sdata;
1980 struct ieee80211_local *local = rx->local;
1981 struct net_device *dev = sdata->dev;
1982 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1983 __le16 fc = hdr->frame_control;
1984 bool port_control;
1985 int err;
1986
1987 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1988 return RX_CONTINUE;
1989
1990 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1991 return RX_DROP_MONITOR;
1992
1993 /*
1994 * Send unexpected-4addr-frame event to hostapd. For older versions,
1995 * also drop the frame to cooked monitor interfaces.
1996 */
1997 if (ieee80211_has_a4(hdr->frame_control) &&
1998 sdata->vif.type == NL80211_IFTYPE_AP) {
1999 if (rx->sta &&
2000 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2001 cfg80211_rx_unexpected_4addr_frame(
2002 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2003 return RX_DROP_MONITOR;
2004 }
2005
2006 err = __ieee80211_data_to_8023(rx, &port_control);
2007 if (unlikely(err))
2008 return RX_DROP_UNUSABLE;
2009
2010 if (!ieee80211_frame_allowed(rx, fc))
2011 return RX_DROP_MONITOR;
2012
2013 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2014 unlikely(port_control) && sdata->bss) {
2015 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2016 u.ap);
2017 dev = sdata->dev;
2018 rx->sdata = sdata;
2019 }
2020
2021 rx->skb->dev = dev;
2022
2023 dev->stats.rx_packets++;
2024 dev->stats.rx_bytes += rx->skb->len;
2025
2026 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2027 !is_multicast_ether_addr(
2028 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2029 (!local->scanning &&
2030 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
2031 mod_timer(&local->dynamic_ps_timer, jiffies +
2032 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2033 }
2034
2035 ieee80211_deliver_skb(rx);
2036
2037 return RX_QUEUED;
2038 }
2039
2040 static ieee80211_rx_result debug_noinline
2041 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2042 {
2043 struct sk_buff *skb = rx->skb;
2044 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2045 struct tid_ampdu_rx *tid_agg_rx;
2046 u16 start_seq_num;
2047 u16 tid;
2048
2049 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2050 return RX_CONTINUE;
2051
2052 if (ieee80211_is_back_req(bar->frame_control)) {
2053 struct {
2054 __le16 control, start_seq_num;
2055 } __packed bar_data;
2056
2057 if (!rx->sta)
2058 return RX_DROP_MONITOR;
2059
2060 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2061 &bar_data, sizeof(bar_data)))
2062 return RX_DROP_MONITOR;
2063
2064 tid = le16_to_cpu(bar_data.control) >> 12;
2065
2066 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2067 if (!tid_agg_rx)
2068 return RX_DROP_MONITOR;
2069
2070 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2071
2072 /* reset session timer */
2073 if (tid_agg_rx->timeout)
2074 mod_timer(&tid_agg_rx->session_timer,
2075 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2076
2077 spin_lock(&tid_agg_rx->reorder_lock);
2078 /* release stored frames up to start of BAR */
2079 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2080 start_seq_num);
2081 spin_unlock(&tid_agg_rx->reorder_lock);
2082
2083 kfree_skb(skb);
2084 return RX_QUEUED;
2085 }
2086
2087 /*
2088 * After this point, we only want management frames,
2089 * so we can drop all remaining control frames to
2090 * cooked monitor interfaces.
2091 */
2092 return RX_DROP_MONITOR;
2093 }
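/*
 * In the BAR handling above the TID lives in the top four bits of the
 * BAR control field and the start sequence number in bits 4-15 of the
 * sequence control, hence the two shifts.  A worked decode with made-up
 * example values (helper name hypothetical):
 */
#if 0
static void example_decode_bar(const struct ieee80211_bar *bar)
{
	u16 control = le16_to_cpu(bar->control);	/* e.g. 0x5004 */
	u16 ssc = le16_to_cpu(bar->start_seq_num);	/* e.g. 0x07b2 */
	u8 tid = control >> 12;			/* 0x5004 >> 12 == 5 */
	u16 ssn = ssc >> 4;			/* 0x07b2 >> 4 == 123 */
	u8 frag = ssc & 0xf;			/* fragment number == 2 */

	(void)tid; (void)ssn; (void)frag;
}
#endif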
2094
2095 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2096 struct ieee80211_mgmt *mgmt,
2097 size_t len)
2098 {
2099 struct ieee80211_local *local = sdata->local;
2100 struct sk_buff *skb;
2101 struct ieee80211_mgmt *resp;
2102
2103 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
2104 /* Not to own unicast address */
2105 return;
2106 }
2107
2108 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
2109 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
2110 /* Not from the current AP or not associated yet. */
2111 return;
2112 }
2113
2114 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2115 /* Too short SA Query request frame */
2116 return;
2117 }
2118
2119 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2120 if (skb == NULL)
2121 return;
2122
2123 skb_reserve(skb, local->hw.extra_tx_headroom);
2124 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2125 memset(resp, 0, 24);
2126 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2127 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2128 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2129 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2130 IEEE80211_STYPE_ACTION);
2131 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2132 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2133 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2134 memcpy(resp->u.action.u.sa_query.trans_id,
2135 mgmt->u.action.u.sa_query.trans_id,
2136 WLAN_SA_QUERY_TR_ID_LEN);
2137
2138 ieee80211_tx_skb(sdata, skb);
2139 }
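/*
 * The SA Query response built above is a bare 24-byte management header
 * followed by a 4-byte action body: category, action code and the
 * 2-byte transaction identifier echoed from the request.  A sketch of
 * the length bookkeeping (function name hypothetical):
 */
#if 0
static void example_sa_query_resp_len(void)
{
	size_t len = 24				/* management header */
		   + 1				/* WLAN_CATEGORY_SA_QUERY */
		   + 1				/* WLAN_ACTION_SA_QUERY_RESPONSE */
		   + WLAN_SA_QUERY_TR_ID_LEN;	/* 2-byte transaction id */

	/* matches skb_put(skb, 24) + skb_put(skb, 1 + sizeof(sa_query)) above */
	WARN_ON(len != 28);
}
#endif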
2140
2141 static ieee80211_rx_result debug_noinline
2142 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2143 {
2144 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2145 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2146
2147 /*
2148 * From here on, look only at management frames.
2149 * Data and control frames are already handled,
2150 * and unknown (reserved) frames are useless.
2151 */
2152 if (rx->skb->len < 24)
2153 return RX_DROP_MONITOR;
2154
2155 if (!ieee80211_is_mgmt(mgmt->frame_control))
2156 return RX_DROP_MONITOR;
2157
2158 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2159 ieee80211_is_beacon(mgmt->frame_control) &&
2160 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2161 int sig = 0;
2162
2163 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2164 sig = status->signal;
2165
2166 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2167 rx->skb->data, rx->skb->len,
2168 status->freq, sig, GFP_ATOMIC);
2169 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2170 }
2171
2172 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2173 return RX_DROP_MONITOR;
2174
2175 if (ieee80211_drop_unencrypted_mgmt(rx))
2176 return RX_DROP_UNUSABLE;
2177
2178 return RX_CONTINUE;
2179 }
2180
2181 static ieee80211_rx_result debug_noinline
2182 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2183 {
2184 struct ieee80211_local *local = rx->local;
2185 struct ieee80211_sub_if_data *sdata = rx->sdata;
2186 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2187 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2188 int len = rx->skb->len;
2189
2190 if (!ieee80211_is_action(mgmt->frame_control))
2191 return RX_CONTINUE;
2192
2193 /* drop too small frames */
2194 if (len < IEEE80211_MIN_ACTION_SIZE)
2195 return RX_DROP_UNUSABLE;
2196
2197 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2198 return RX_DROP_UNUSABLE;
2199
2200 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2201 return RX_DROP_UNUSABLE;
2202
2203 switch (mgmt->u.action.category) {
2204 case WLAN_CATEGORY_HT:
2205 /* reject HT action frames from stations not supporting HT */
2206 if (!rx->sta->sta.ht_cap.ht_supported)
2207 goto invalid;
2208
2209 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2210 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2211 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2212 sdata->vif.type != NL80211_IFTYPE_AP &&
2213 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2214 break;
2215
2216 /* verify action & smps_control are present */
2217 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2218 goto invalid;
2219
2220 switch (mgmt->u.action.u.ht_smps.action) {
2221 case WLAN_HT_ACTION_SMPS: {
2222 struct ieee80211_supported_band *sband;
2223 u8 smps;
2224
2225 /* convert to HT capability */
2226 switch (mgmt->u.action.u.ht_smps.smps_control) {
2227 case WLAN_HT_SMPS_CONTROL_DISABLED:
2228 smps = WLAN_HT_CAP_SM_PS_DISABLED;
2229 break;
2230 case WLAN_HT_SMPS_CONTROL_STATIC:
2231 smps = WLAN_HT_CAP_SM_PS_STATIC;
2232 break;
2233 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
2234 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
2235 break;
2236 default:
2237 goto invalid;
2238 }
2239 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
2240
2241 /* if no change do nothing */
2242 if ((rx->sta->sta.ht_cap.cap &
2243 IEEE80211_HT_CAP_SM_PS) == smps)
2244 goto handled;
2245
2246 rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS;
2247 rx->sta->sta.ht_cap.cap |= smps;
2248
2249 sband = rx->local->hw.wiphy->bands[status->band];
2250
2251 rate_control_rate_update(local, sband, rx->sta,
2252 IEEE80211_RC_SMPS_CHANGED);
2253 goto handled;
2254 }
2255 default:
2256 goto invalid;
2257 }
2258
2259 break;
2260 case WLAN_CATEGORY_BACK:
2261 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2262 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2263 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2264 sdata->vif.type != NL80211_IFTYPE_AP &&
2265 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2266 break;
2267
2268 /* verify action_code is present */
2269 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2270 break;
2271
2272 switch (mgmt->u.action.u.addba_req.action_code) {
2273 case WLAN_ACTION_ADDBA_REQ:
2274 if (len < (IEEE80211_MIN_ACTION_SIZE +
2275 sizeof(mgmt->u.action.u.addba_req)))
2276 goto invalid;
2277 break;
2278 case WLAN_ACTION_ADDBA_RESP:
2279 if (len < (IEEE80211_MIN_ACTION_SIZE +
2280 sizeof(mgmt->u.action.u.addba_resp)))
2281 goto invalid;
2282 break;
2283 case WLAN_ACTION_DELBA:
2284 if (len < (IEEE80211_MIN_ACTION_SIZE +
2285 sizeof(mgmt->u.action.u.delba)))
2286 goto invalid;
2287 break;
2288 default:
2289 goto invalid;
2290 }
2291
2292 goto queue;
2293 case WLAN_CATEGORY_SPECTRUM_MGMT:
2294 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2295 break;
2296
2297 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2298 break;
2299
2300 /* verify action_code is present */
2301 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2302 break;
2303
2304 switch (mgmt->u.action.u.measurement.action_code) {
2305 case WLAN_ACTION_SPCT_MSR_REQ:
2306 if (len < (IEEE80211_MIN_ACTION_SIZE +
2307 sizeof(mgmt->u.action.u.measurement)))
2308 break;
2309 ieee80211_process_measurement_req(sdata, mgmt, len);
2310 goto handled;
2311 case WLAN_ACTION_SPCT_CHL_SWITCH:
2312 if (len < (IEEE80211_MIN_ACTION_SIZE +
2313 sizeof(mgmt->u.action.u.chan_switch)))
2314 break;
2315
2316 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2317 break;
2318
2319 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
2320 break;
2321
2322 goto queue;
2323 }
2324 break;
2325 case WLAN_CATEGORY_SA_QUERY:
2326 if (len < (IEEE80211_MIN_ACTION_SIZE +
2327 sizeof(mgmt->u.action.u.sa_query)))
2328 break;
2329
2330 switch (mgmt->u.action.u.sa_query.action) {
2331 case WLAN_ACTION_SA_QUERY_REQUEST:
2332 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2333 break;
2334 ieee80211_process_sa_query_req(sdata, mgmt, len);
2335 goto handled;
2336 }
2337 break;
2338 case WLAN_CATEGORY_SELF_PROTECTED:
2339 switch (mgmt->u.action.u.self_prot.action_code) {
2340 case WLAN_SP_MESH_PEERING_OPEN:
2341 case WLAN_SP_MESH_PEERING_CLOSE:
2342 case WLAN_SP_MESH_PEERING_CONFIRM:
2343 if (!ieee80211_vif_is_mesh(&sdata->vif))
2344 goto invalid;
2345 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2346 /* userspace handles this frame */
2347 break;
2348 goto queue;
2349 case WLAN_SP_MGK_INFORM:
2350 case WLAN_SP_MGK_ACK:
2351 if (!ieee80211_vif_is_mesh(&sdata->vif))
2352 goto invalid;
2353 break;
2354 }
2355 break;
2356 case WLAN_CATEGORY_MESH_ACTION:
2357 if (!ieee80211_vif_is_mesh(&sdata->vif))
2358 break;
2359 if (mesh_action_is_path_sel(mgmt) &&
2360 (!mesh_path_sel_is_hwmp(sdata)))
2361 break;
2362 goto queue;
2363 }
2364
2365 return RX_CONTINUE;
2366
2367 invalid:
2368 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2369 /* will return in the next handlers */
2370 return RX_CONTINUE;
2371
2372 handled:
2373 if (rx->sta)
2374 rx->sta->rx_packets++;
2375 dev_kfree_skb(rx->skb);
2376 return RX_QUEUED;
2377
2378 queue:
2379 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2380 skb_queue_tail(&sdata->skb_queue, rx->skb);
2381 ieee80211_queue_work(&local->hw, &sdata->work);
2382 if (rx->sta)
2383 rx->sta->rx_packets++;
2384 return RX_QUEUED;
2385 }
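/*
 * The HT SMPS branch above converts the 2-bit SM power save mode from
 * the action frame into the SM PS field of the HT capability info,
 * which occupies bits 2-3 (IEEE80211_HT_CAP_SM_PS, shifted by
 * IEEE80211_HT_CAP_SM_PS_SHIFT).  A worked example for the "disabled"
 * case, illustrative only:
 */
#if 0
static void example_smps_to_ht_cap(void)
{
	u8 smps = WLAN_HT_CAP_SM_PS_DISABLED;	/* 3, from the frame */

	smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;	/* 3 << 2 == 0x0c */

	/* 0x0c fills the whole SM PS field, i.e. IEEE80211_HT_CAP_SM_PS */
	WARN_ON(smps != IEEE80211_HT_CAP_SM_PS);
}
#endif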
2386
2387 static ieee80211_rx_result debug_noinline
2388 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2389 {
2390 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2391 int sig = 0;
2392
2393 /* skip known-bad action frames and return them in the next handler */
2394 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2395 return RX_CONTINUE;
2396
2397 /*
2398 * Getting here means the kernel doesn't know how to handle
2399 * it, but maybe userspace does ... include returned frames
2400 * so userspace can register for those to know whether frames
2401 * it transmitted were processed or returned.
2402 */
2403
2404 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
2405 sig = status->signal;
2406
2407 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, sig,
2408 rx->skb->data, rx->skb->len,
2409 GFP_ATOMIC)) {
2410 if (rx->sta)
2411 rx->sta->rx_packets++;
2412 dev_kfree_skb(rx->skb);
2413 return RX_QUEUED;
2414 }
2415
2416
2417 return RX_CONTINUE;
2418 }
2419
2420 static ieee80211_rx_result debug_noinline
2421 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2422 {
2423 struct ieee80211_local *local = rx->local;
2424 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2425 struct sk_buff *nskb;
2426 struct ieee80211_sub_if_data *sdata = rx->sdata;
2427 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2428
2429 if (!ieee80211_is_action(mgmt->frame_control))
2430 return RX_CONTINUE;
2431
2432 /*
2433 * For AP mode, hostapd is responsible for handling any action
2434 * frames that we didn't handle, including returning unknown
2435 * ones. For all other modes we will return them to the sender,
2436 * setting the 0x80 bit in the action category, as required by
2437 * 802.11-2012 9.24.4.
2438 * Newer versions of hostapd shall also use the management frame
2439 * registration mechanisms, but older ones still use cooked
2440 * monitor interfaces so push all frames there.
2441 */
2442 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2443 (sdata->vif.type == NL80211_IFTYPE_AP ||
2444 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2445 return RX_DROP_MONITOR;
2446
2447 if (is_multicast_ether_addr(mgmt->da))
2448 return RX_DROP_MONITOR;
2449
2450 /* do not return rejected action frames */
2451 if (mgmt->u.action.category & 0x80)
2452 return RX_DROP_UNUSABLE;
2453
2454 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2455 GFP_ATOMIC);
2456 if (nskb) {
2457 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2458
2459 nmgmt->u.action.category |= 0x80;
2460 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2461 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2462
2463 memset(nskb->cb, 0, sizeof(nskb->cb));
2464
2465 ieee80211_tx_skb(rx->sdata, nskb);
2466 }
2467 dev_kfree_skb(rx->skb);
2468 return RX_QUEUED;
2469 }
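/*
 * As the comment above explains, an unhandled action frame is bounced
 * back with bit 7 of the category set, so e.g. WLAN_CATEGORY_BACK (3)
 * comes back as 0x83.  The "category & 0x80" test above keeps such
 * bounced frames from ping-ponging; a minimal sketch of that check
 * (helper name hypothetical):
 */
#if 0
static bool example_action_was_rejected(const struct ieee80211_mgmt *mgmt)
{
	/* set by the peer when it returns a frame it did not understand */
	return mgmt->u.action.category & 0x80;
}
#endif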
2470
2471 static ieee80211_rx_result debug_noinline
2472 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2473 {
2474 struct ieee80211_sub_if_data *sdata = rx->sdata;
2475 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2476 __le16 stype;
2477
2478 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2479
2480 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2481 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2482 sdata->vif.type != NL80211_IFTYPE_STATION)
2483 return RX_DROP_MONITOR;
2484
2485 switch (stype) {
2486 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2487 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2488 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2489 /* process for all: mesh, mlme, ibss */
2490 break;
2491 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
2492 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
2493 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2494 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2495 if (is_multicast_ether_addr(mgmt->da) &&
2496 !is_broadcast_ether_addr(mgmt->da))
2497 return RX_DROP_MONITOR;
2498
2499 /* process only for station */
2500 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2501 return RX_DROP_MONITOR;
2502 break;
2503 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2504 /* process only for ibss */
2505 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2506 return RX_DROP_MONITOR;
2507 break;
2508 default:
2509 return RX_DROP_MONITOR;
2510 }
2511
2512 /* queue up frame and kick off work to process it */
2513 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2514 skb_queue_tail(&sdata->skb_queue, rx->skb);
2515 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2516 if (rx->sta)
2517 rx->sta->rx_packets++;
2518
2519 return RX_QUEUED;
2520 }
2521
2522 /* TODO: use IEEE80211_RX_FRAGMENTED */
2523 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2524 struct ieee80211_rate *rate)
2525 {
2526 struct ieee80211_sub_if_data *sdata;
2527 struct ieee80211_local *local = rx->local;
2528 struct sk_buff *skb = rx->skb, *skb2;
2529 struct net_device *prev_dev = NULL;
2530 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2531 int needed_headroom;
2532
2533 /*
2534 * If cooked monitor has been processed already, then
2535 * don't do it again. If not, set the flag.
2536 */
2537 if (rx->flags & IEEE80211_RX_CMNTR)
2538 goto out_free_skb;
2539 rx->flags |= IEEE80211_RX_CMNTR;
2540
2541 /* If there are no cooked monitor interfaces, just free the SKB */
2542 if (!local->cooked_mntrs)
2543 goto out_free_skb;
2544
2545 /* room for the radiotap header based on driver features */
2546 needed_headroom = ieee80211_rx_radiotap_len(local, status);
2547
2548 if (skb_headroom(skb) < needed_headroom &&
2549 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
2550 goto out_free_skb;
2551
2552 /* prepend radiotap information */
2553 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
2554 false);
2555
2556 skb_set_mac_header(skb, 0);
2557 skb->ip_summed = CHECKSUM_UNNECESSARY;
2558 skb->pkt_type = PACKET_OTHERHOST;
2559 skb->protocol = htons(ETH_P_802_2);
2560
2561 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2562 if (!ieee80211_sdata_running(sdata))
2563 continue;
2564
2565 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2566 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2567 continue;
2568
2569 if (prev_dev) {
2570 skb2 = skb_clone(skb, GFP_ATOMIC);
2571 if (skb2) {
2572 skb2->dev = prev_dev;
2573 netif_receive_skb(skb2);
2574 }
2575 }
2576
2577 prev_dev = sdata->dev;
2578 sdata->dev->stats.rx_packets++;
2579 sdata->dev->stats.rx_bytes += skb->len;
2580 }
2581
2582 if (prev_dev) {
2583 skb->dev = prev_dev;
2584 netif_receive_skb(skb);
2585 return;
2586 }
2587
2588 out_free_skb:
2589 dev_kfree_skb(skb);
2590 }
2591
2592 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2593 ieee80211_rx_result res)
2594 {
2595 switch (res) {
2596 case RX_DROP_MONITOR:
2597 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2598 if (rx->sta)
2599 rx->sta->rx_dropped++;
2600 /* fall through */
2601 case RX_CONTINUE: {
2602 struct ieee80211_rate *rate = NULL;
2603 struct ieee80211_supported_band *sband;
2604 struct ieee80211_rx_status *status;
2605
2606 status = IEEE80211_SKB_RXCB((rx->skb));
2607
2608 sband = rx->local->hw.wiphy->bands[status->band];
2609 if (!(status->flag & RX_FLAG_HT))
2610 rate = &sband->bitrates[status->rate_idx];
2611
2612 ieee80211_rx_cooked_monitor(rx, rate);
2613 break;
2614 }
2615 case RX_DROP_UNUSABLE:
2616 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2617 if (rx->sta)
2618 rx->sta->rx_dropped++;
2619 dev_kfree_skb(rx->skb);
2620 break;
2621 case RX_QUEUED:
2622 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2623 break;
2624 }
2625 }
2626
2627 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2628 {
2629 ieee80211_rx_result res = RX_DROP_MONITOR;
2630 struct sk_buff *skb;
2631
2632 #define CALL_RXH(rxh) \
2633 do { \
2634 res = rxh(rx); \
2635 if (res != RX_CONTINUE) \
2636 goto rxh_next; \
2637 } while (0);
2638
2639 spin_lock(&rx->local->rx_skb_queue.lock);
2640 if (rx->local->running_rx_handler)
2641 goto unlock;
2642
2643 rx->local->running_rx_handler = true;
2644
2645 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2646 spin_unlock(&rx->local->rx_skb_queue.lock);
2647
2648 /*
2649 * all the other fields are valid across frames
2650 * that belong to an aMPDU since they are on the
2651 * same TID from the same station
2652 */
2653 rx->skb = skb;
2654
2655 CALL_RXH(ieee80211_rx_h_decrypt)
2656 CALL_RXH(ieee80211_rx_h_check_more_data)
2657 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2658 CALL_RXH(ieee80211_rx_h_sta_process)
2659 CALL_RXH(ieee80211_rx_h_defragment)
2660 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2661 /* must be after MMIC verify so header is counted in MPDU mic */
2662 #ifdef CONFIG_MAC80211_MESH
2663 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2664 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2665 #endif
2666 CALL_RXH(ieee80211_rx_h_amsdu)
2667 CALL_RXH(ieee80211_rx_h_data)
2668 CALL_RXH(ieee80211_rx_h_ctrl);
2669 CALL_RXH(ieee80211_rx_h_mgmt_check)
2670 CALL_RXH(ieee80211_rx_h_action)
2671 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2672 CALL_RXH(ieee80211_rx_h_action_return)
2673 CALL_RXH(ieee80211_rx_h_mgmt)
2674
2675 rxh_next:
2676 ieee80211_rx_handlers_result(rx, res);
2677 spin_lock(&rx->local->rx_skb_queue.lock);
2678 #undef CALL_RXH
2679 }
2680
2681 rx->local->running_rx_handler = false;
2682
2683 unlock:
2684 spin_unlock(&rx->local->rx_skb_queue.lock);
2685 }
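/*
 * CALL_RXH() above chains the RX handlers into a pipeline in which the
 * first handler returning anything other than RX_CONTINUE ends
 * processing of the frame.  Stripped of the locking and the skb queue,
 * the control flow reduces to the sketch below (the handler list is an
 * abbreviated, illustrative subset):
 */
#if 0
static void example_rx_pipeline(struct ieee80211_rx_data *rx)
{
	static ieee80211_rx_result (*const handlers[])(struct ieee80211_rx_data *) = {
		ieee80211_rx_h_decrypt,
		ieee80211_rx_h_defragment,
		ieee80211_rx_h_data,
	};
	ieee80211_rx_result res = RX_DROP_MONITOR;
	int i;

	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
		res = handlers[i](rx);
		if (res != RX_CONTINUE)
			break;	/* dropped, queued or otherwise consumed */
	}

	ieee80211_rx_handlers_result(rx, res);
}
#endif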
2686
2687 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2688 {
2689 ieee80211_rx_result res = RX_DROP_MONITOR;
2690
2691 #define CALL_RXH(rxh) \
2692 do { \
2693 res = rxh(rx); \
2694 if (res != RX_CONTINUE) \
2695 goto rxh_next; \
2696 } while (0);
2697
2698 CALL_RXH(ieee80211_rx_h_passive_scan)
2699 CALL_RXH(ieee80211_rx_h_check)
2700
2701 ieee80211_rx_reorder_ampdu(rx);
2702
2703 ieee80211_rx_handlers(rx);
2704 return;
2705
2706 rxh_next:
2707 ieee80211_rx_handlers_result(rx, res);
2708
2709 #undef CALL_RXH
2710 }
2711
2712 /*
2713 * This function makes calls into the RX path, therefore
2714 * it has to be invoked under RCU read lock.
2715 */
2716 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2717 {
2718 struct ieee80211_rx_data rx = {
2719 .sta = sta,
2720 .sdata = sta->sdata,
2721 .local = sta->local,
2722 /* This is OK -- must be QoS data frame */
2723 .security_idx = tid,
2724 .seqno_idx = tid,
2725 .flags = 0,
2726 };
2727 struct tid_ampdu_rx *tid_agg_rx;
2728
2729 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2730 if (!tid_agg_rx)
2731 return;
2732
2733 spin_lock(&tid_agg_rx->reorder_lock);
2734 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
2735 spin_unlock(&tid_agg_rx->reorder_lock);
2736
2737 ieee80211_rx_handlers(&rx);
2738 }
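/*
 * As noted above, ieee80211_release_reorder_timeout() re-enters the RX
 * path, so its caller (the reorder release timer in the aggregation
 * code) must hold the RCU read lock around the call.  A hypothetical
 * caller, for illustration:
 */
#if 0
static void example_reorder_release_timer(unsigned long data)
{
	struct sta_info *sta = (struct sta_info *)data;
	int tid = 0;	/* illustrative: a fixed TID */

	rcu_read_lock();
	ieee80211_release_reorder_timeout(sta, tid);
	rcu_read_unlock();
}
#endif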
2739
2740 /* main receive path */
2741
2742 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2743 struct ieee80211_hdr *hdr)
2744 {
2745 struct ieee80211_sub_if_data *sdata = rx->sdata;
2746 struct sk_buff *skb = rx->skb;
2747 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2748 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2749 int multicast = is_multicast_ether_addr(hdr->addr1);
2750
2751 switch (sdata->vif.type) {
2752 case NL80211_IFTYPE_STATION:
2753 if (!bssid && !sdata->u.mgd.use_4addr)
2754 return 0;
2755 if (!multicast &&
2756 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2757 if (!(sdata->dev->flags & IFF_PROMISC) ||
2758 sdata->u.mgd.use_4addr)
2759 return 0;
2760 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2761 }
2762 break;
2763 case NL80211_IFTYPE_ADHOC:
2764 if (!bssid)
2765 return 0;
2766 if (ieee80211_is_beacon(hdr->frame_control)) {
2767 return 1;
2768 }
2769 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2770 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2771 return 0;
2772 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2773 } else if (!multicast &&
2774 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2775 if (!(sdata->dev->flags & IFF_PROMISC))
2776 return 0;
2777 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2778 } else if (!rx->sta) {
2779 int rate_idx;
2780 if (status->flag & RX_FLAG_HT)
2781 rate_idx = 0; /* TODO: HT rates */
2782 else
2783 rate_idx = status->rate_idx;
2784 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
2785 BIT(rate_idx));
2786 }
2787 break;
2788 case NL80211_IFTYPE_MESH_POINT:
2789 if (!multicast &&
2790 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
2791 if (!(sdata->dev->flags & IFF_PROMISC))
2792 return 0;
2793
2794 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2795 }
2796 break;
2797 case NL80211_IFTYPE_AP_VLAN:
2798 case NL80211_IFTYPE_AP:
2799 if (!bssid) {
2800 if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
2801 return 0;
2802 } else if (!ieee80211_bssid_match(bssid,
2803 sdata->vif.addr)) {
2804 /*
2805 * Accept public action frames even when the
2806 * BSSID doesn't match, this is used for P2P
2807 * and location updates. Note that mac80211
2808 * itself never looks at these frames.
2809 */
2810 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2811 ieee80211_is_public_action(hdr, skb->len))
2812 return 1;
2813 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2814 !ieee80211_is_beacon(hdr->frame_control))
2815 return 0;
2816 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2817 }
2818 break;
2819 case NL80211_IFTYPE_WDS:
2820 if (bssid || !ieee80211_is_data(hdr->frame_control))
2821 return 0;
2822 if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
2823 return 0;
2824 break;
2825 default:
2826 /* should never get here */
2827 WARN_ON(1);
2828 break;
2829 }
2830
2831 return 1;
2832 }
2833
2834 /*
2835 * This function returns whether the SKB was
2836 * destined for RX processing, which, if consume
2837 * is true, is equivalent to whether or not the
2838 * skb was consumed.
2839 */
2840 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2841 struct sk_buff *skb, bool consume)
2842 {
2843 struct ieee80211_local *local = rx->local;
2844 struct ieee80211_sub_if_data *sdata = rx->sdata;
2845 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2846 struct ieee80211_hdr *hdr = (void *)skb->data;
2847 int prepares;
2848
2849 rx->skb = skb;
2850 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2851 prepares = prepare_for_handlers(rx, hdr);
2852
2853 if (!prepares)
2854 return false;
2855
2856 if (!consume) {
2857 skb = skb_copy(skb, GFP_ATOMIC);
2858 if (!skb) {
2859 if (net_ratelimit())
2860 wiphy_debug(local->hw.wiphy,
2861 "failed to copy skb for %s\n",
2862 sdata->name);
2863 return true;
2864 }
2865
2866 rx->skb = skb;
2867 }
2868
2869 ieee80211_invoke_rx_handlers(rx);
2870 return true;
2871 }
2872
2873 /*
2874 * This is the actual Rx frames handler. As it belongs to the Rx path it
2875 * must be called with rcu_read_lock protection.
2876 */
2877 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2878 struct sk_buff *skb)
2879 {
2880 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2881 struct ieee80211_local *local = hw_to_local(hw);
2882 struct ieee80211_sub_if_data *sdata;
2883 struct ieee80211_hdr *hdr;
2884 __le16 fc;
2885 struct ieee80211_rx_data rx;
2886 struct ieee80211_sub_if_data *prev;
2887 struct sta_info *sta, *tmp, *prev_sta;
2888 int err = 0;
2889
2890 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2891 memset(&rx, 0, sizeof(rx));
2892 rx.skb = skb;
2893 rx.local = local;
2894
2895 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2896 local->dot11ReceivedFragmentCount++;
2897
2898 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2899 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
2900 test_bit(SCAN_SW_SCANNING, &local->scanning)))
2901 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2902
2903 if (ieee80211_is_mgmt(fc))
2904 err = skb_linearize(skb);
2905 else
2906 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2907
2908 if (err) {
2909 dev_kfree_skb(skb);
2910 return;
2911 }
2912
2913 hdr = (struct ieee80211_hdr *)skb->data;
2914 ieee80211_parse_qos(&rx);
2915 ieee80211_verify_alignment(&rx);
2916
2917 if (ieee80211_is_data(fc)) {
2918 prev_sta = NULL;
2919
2920 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2921 if (!prev_sta) {
2922 prev_sta = sta;
2923 continue;
2924 }
2925
2926 rx.sta = prev_sta;
2927 rx.sdata = prev_sta->sdata;
2928 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2929
2930 prev_sta = sta;
2931 }
2932
2933 if (prev_sta) {
2934 rx.sta = prev_sta;
2935 rx.sdata = prev_sta->sdata;
2936
2937 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2938 return;
2939 goto out;
2940 }
2941 }
2942
2943 prev = NULL;
2944
2945 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2946 if (!ieee80211_sdata_running(sdata))
2947 continue;
2948
2949 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2950 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2951 continue;
2952
2953 /*
2954 * frame is destined for this interface; handle the previous
2955 * match now (with a copy) and remember this one so the last
2956 * match can consume the skb after the loop without an extra copy
2957 */
2958
2959 if (!prev) {
2960 prev = sdata;
2961 continue;
2962 }
2963
2964 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2965 rx.sdata = prev;
2966 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2967
2968 prev = sdata;
2969 }
2970
2971 if (prev) {
2972 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2973 rx.sdata = prev;
2974
2975 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2976 return;
2977 }
2978
2979 out:
2980 dev_kfree_skb(skb);
2981 }
2982
2983 /*
2984 * This is the receive path handler. It is called by a low level driver when an
2985 * 802.11 MPDU is received from the hardware.
2986 */
2987 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2988 {
2989 struct ieee80211_local *local = hw_to_local(hw);
2990 struct ieee80211_rate *rate = NULL;
2991 struct ieee80211_supported_band *sband;
2992 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2993
2994 WARN_ON_ONCE(softirq_count() == 0);
2995
2996 if (WARN_ON(status->band < 0 ||
2997 status->band >= IEEE80211_NUM_BANDS))
2998 goto drop;
2999
3000 sband = local->hw.wiphy->bands[status->band];
3001 if (WARN_ON(!sband))
3002 goto drop;
3003
3004 /*
3005 * If we're suspending, it is possible although not too likely
3006 * that we'd be receiving frames after having already partially
3007 * quiesced the stack. We can't process such frames then since
3008 * that might, for example, cause stations to be added or other
3009 * driver callbacks be invoked.
3010 */
3011 if (unlikely(local->quiescing || local->suspended))
3012 goto drop;
3013
3014 /* We might be during a HW reconfig, prevent Rx for the same reason */
3015 if (unlikely(local->in_reconfig))
3016 goto drop;
3017
3018 /*
3019 * The same happens when we're not even started,
3020 * but that's worth a warning.
3021 */
3022 if (WARN_ON(!local->started))
3023 goto drop;
3024
3025 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
3026 /*
3027 * Validate the rate, unless a PLCP error means that
3028 * we probably can't have a valid rate here anyway.
3029 */
3030
3031 if (status->flag & RX_FLAG_HT) {
3032 /*
3033 * rate_idx is MCS index, which can be [0-76]
3034 * as documented on:
3035 *
3036 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
3037 *
3038 * Anything else would be some sort of driver or
3039 * hardware error. The driver should catch hardware
3040 * errors.
3041 */
3042 if (WARN((status->rate_idx < 0 ||
3043 status->rate_idx > 76),
3044 "Rate marked as an HT rate but passed "
3045 "status->rate_idx is not "
3046 "an MCS index [0-76]: %d (0x%02x)\n",
3047 status->rate_idx,
3048 status->rate_idx))
3049 goto drop;
3050 } else {
3051 if (WARN_ON(status->rate_idx < 0 ||
3052 status->rate_idx >= sband->n_bitrates))
3053 goto drop;
3054 rate = &sband->bitrates[status->rate_idx];
3055 }
3056 }
3057
3058 status->rx_flags = 0;
3059
3060 /*
3061 * key references and virtual interfaces are protected using RCU
3062 * and this requires that we are in a read-side RCU section during
3063 * receive processing
3064 */
3065 rcu_read_lock();
3066
3067 /*
3068 * Frames with failed FCS/PLCP checksum are not returned,
3069 * all other frames are returned without radiotap header
3070 * if it was previously present.
3071 * Also, frames with less than 16 bytes are dropped.
3072 */
3073 skb = ieee80211_rx_monitor(local, skb, rate);
3074 if (!skb) {
3075 rcu_read_unlock();
3076 return;
3077 }
3078
3079 ieee80211_tpt_led_trig_rx(local,
3080 ((struct ieee80211_hdr *)skb->data)->frame_control,
3081 skb->len);
3082 __ieee80211_rx_handle_packet(hw, skb);
3083
3084 rcu_read_unlock();
3085
3086 return;
3087 drop:
3088 kfree_skb(skb);
3089 }
3090 EXPORT_SYMBOL(ieee80211_rx);
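/*
 * A rough sketch of how a low-level driver hands a received MPDU to
 * ieee80211_rx(): fill in the ieee80211_rx_status in the skb control
 * buffer first, then call the function with bottom halves disabled,
 * since it runs the whole RX path synchronously (hence the
 * softirq_count() warning above).  Driver structure and the values
 * below are made up for illustration.
 */
#if 0
static void example_driver_rx_done(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	memset(status, 0, sizeof(*status));
	status->band = IEEE80211_BAND_2GHZ;
	status->freq = 2437;		/* channel 6, in MHz */
	status->signal = -42;		/* in dBm, if the hardware reports it */
	status->rate_idx = 0;		/* index into the band's bitrate table */
	status->flag = 0;		/* e.g. RX_FLAG_DECRYPTED when applicable */

	local_bh_disable();		/* needed when not already in BH context */
	ieee80211_rx(hw, skb);
	local_bh_enable();
}
#endif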
3091
3092 /* This is a version of the rx handler that can be called from hard irq
3093 * context. Post the skb on the queue and schedule the tasklet */
3094 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
3095 {
3096 struct ieee80211_local *local = hw_to_local(hw);
3097
3098 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
3099
3100 skb->pkt_type = IEEE80211_RX_MSG;
3101 skb_queue_tail(&local->skb_queue, skb);
3102 tasklet_schedule(&local->tasklet);
3103 }
3104 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
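/*
 * ieee80211_rx_irqsafe(), by contrast, only queues the skb and schedules
 * the tasklet, so a driver may call it directly from its hard interrupt
 * handler.  A hypothetical ISR sketch (the frame-fetch helper does not
 * exist and stands in for driver-specific DMA handling):
 */
#if 0
static irqreturn_t example_driver_isr(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct sk_buff *skb = example_driver_fetch_frame(hw);	/* hypothetical */

	if (skb)
		ieee80211_rx_irqsafe(hw, skb);	/* safe in hard-IRQ context */

	return IRQ_HANDLED;
}
#endif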