1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <net/mac80211.h>
21 #include <net/ieee80211_radiotap.h>
22
23 #include "ieee80211_i.h"
24 #include "driver-ops.h"
25 #include "led.h"
26 #include "mesh.h"
27 #include "wep.h"
28 #include "wpa.h"
29 #include "tkip.h"
30 #include "wme.h"
31
32 /*
33 * monitor mode reception
34 *
35 * This function cleans up the SKB, i.e. it removes all the stuff
36 * only useful for monitoring.
37 */
38 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
39 struct sk_buff *skb)
40 {
41 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
42 if (likely(skb->len > FCS_LEN))
43 __pskb_trim(skb, skb->len - FCS_LEN);
44 else {
45 /* driver bug */
46 WARN_ON(1);
47 dev_kfree_skb(skb);
48 skb = NULL;
49 }
50 }
51
52 return skb;
53 }
54
55 static inline int should_drop_frame(struct sk_buff *skb,
56 int present_fcs_len)
57 {
58 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
59 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
60
61 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
62 return 1;
63 if (unlikely(skb->len < 16 + present_fcs_len))
64 return 1;
65 if (ieee80211_is_ctl(hdr->frame_control) &&
66 !ieee80211_is_pspoll(hdr->frame_control) &&
67 !ieee80211_is_back_req(hdr->frame_control))
68 return 1;
69 return 0;
70 }
71
72 static int
73 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
74 struct ieee80211_rx_status *status)
75 {
76 int len;
77
78 /* always present fields */
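/* 9 = flags (1) + rate/MCS padding (1) + channel (4) + antenna (1) + RX flags (2) */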
79 len = sizeof(struct ieee80211_radiotap_header) + 9;
80
81 if (status->flag & RX_FLAG_MACTIME_MPDU)
82 len += 8;
83 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
84 len += 1;
85
86 if (len & 1) /* padding for RX_FLAGS if necessary */
87 len++;
88
89 if (status->flag & RX_FLAG_HT) /* HT info */
90 len += 3;
91
92 return len;
93 }
94
95 /*
96 * ieee80211_add_rx_radiotap_header - add radiotap header
97 *
98 * add a radiotap header containing all the fields which the hardware provided.
99 */
100 static void
101 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
102 struct sk_buff *skb,
103 struct ieee80211_rate *rate,
104 int rtap_len)
105 {
106 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
107 struct ieee80211_radiotap_header *rthdr;
108 unsigned char *pos;
109 u16 rx_flags = 0;
110
111 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
112 memset(rthdr, 0, rtap_len);
113
114 /* radiotap header, set always present flags */
115 rthdr->it_present =
116 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
117 (1 << IEEE80211_RADIOTAP_CHANNEL) |
118 (1 << IEEE80211_RADIOTAP_ANTENNA) |
119 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
120 rthdr->it_len = cpu_to_le16(rtap_len);
121
122 pos = (unsigned char *)(rthdr+1);
123
124 /* the order of the following fields is important */
125
126 /* IEEE80211_RADIOTAP_TSFT */
127 if (status->flag & RX_FLAG_MACTIME_MPDU) {
128 put_unaligned_le64(status->mactime, pos);
129 rthdr->it_present |=
130 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
131 pos += 8;
132 }
133
134 /* IEEE80211_RADIOTAP_FLAGS */
135 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
136 *pos |= IEEE80211_RADIOTAP_F_FCS;
137 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
138 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
139 if (status->flag & RX_FLAG_SHORTPRE)
140 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
141 pos++;
142
143 /* IEEE80211_RADIOTAP_RATE */
144 if (status->flag & RX_FLAG_HT) {
145 /*
146 * MCS information is a separate field in radiotap,
147 * added below. The byte here is needed as padding
148 * for the channel though, so initialise it to 0.
149 */
150 *pos = 0;
151 } else {
152 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
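		/* rate->bitrate is in units of 100 kbps; the radiotap rate field uses 500 kbps units */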
153 *pos = rate->bitrate / 5;
154 }
155 pos++;
156
157 /* IEEE80211_RADIOTAP_CHANNEL */
158 put_unaligned_le16(status->freq, pos);
159 pos += 2;
160 if (status->band == IEEE80211_BAND_5GHZ)
161 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
162 pos);
163 else if (status->flag & RX_FLAG_HT)
164 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
165 pos);
166 else if (rate->flags & IEEE80211_RATE_ERP_G)
167 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
168 pos);
169 else
170 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
171 pos);
172 pos += 2;
173
174 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
175 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
176 *pos = status->signal;
177 rthdr->it_present |=
178 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
179 pos++;
180 }
181
182 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
183
184 /* IEEE80211_RADIOTAP_ANTENNA */
185 *pos = status->antenna;
186 pos++;
187
188 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
189
190 /* IEEE80211_RADIOTAP_RX_FLAGS */
191 /* ensure 2 byte alignment for the 2 byte field as required */
192 if ((pos - (u8 *)rthdr) & 1)
193 pos++;
194 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
195 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
196 put_unaligned_le16(rx_flags, pos);
197 pos += 2;
198
199 if (status->flag & RX_FLAG_HT) {
200 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
201 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
202 IEEE80211_RADIOTAP_MCS_HAVE_GI |
203 IEEE80211_RADIOTAP_MCS_HAVE_BW;
204 *pos = 0;
205 if (status->flag & RX_FLAG_SHORT_GI)
206 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
207 if (status->flag & RX_FLAG_40MHZ)
208 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
209 pos++;
210 *pos++ = status->rate_idx;
211 }
212 }
213
214 /*
215 * This function copies a received frame to all monitor interfaces and
216 * returns a cleaned-up SKB that no longer includes the FCS nor the
217 * radiotap header the driver might have added.
218 */
219 static struct sk_buff *
220 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
221 struct ieee80211_rate *rate)
222 {
223 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
224 struct ieee80211_sub_if_data *sdata;
225 int needed_headroom = 0;
226 struct sk_buff *skb, *skb2;
227 struct net_device *prev_dev = NULL;
228 int present_fcs_len = 0;
229
230 /*
231 * First, we may need to make a copy of the skb because
232 * (1) we need to modify it for radiotap (if not present), and
233 * (2) the other RX handlers will modify the skb we got.
234 *
235 * We don't need to, of course, if we aren't going to return
236 * the SKB because it has a bad FCS/PLCP checksum.
237 */
238
239 /* room for the radiotap header based on driver features */
240 needed_headroom = ieee80211_rx_radiotap_len(local, status);
241
242 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
243 present_fcs_len = FCS_LEN;
244
245 /* make sure hdr->frame_control is on the linear part */
246 if (!pskb_may_pull(origskb, 2)) {
247 dev_kfree_skb(origskb);
248 return NULL;
249 }
250
251 if (!local->monitors) {
252 if (should_drop_frame(origskb, present_fcs_len)) {
253 dev_kfree_skb(origskb);
254 return NULL;
255 }
256
257 return remove_monitor_info(local, origskb);
258 }
259
260 if (should_drop_frame(origskb, present_fcs_len)) {
261 		/* we can reuse the original skb; only expand headroom if necessary */
262 skb = origskb;
263 origskb = NULL;
264
265 /*
266 * This shouldn't trigger often because most devices have an
267 * RX header they pull before we get here, and that should
268 * be big enough for our radiotap information. We should
269 * probably export the length to drivers so that we can have
270 * them allocate enough headroom to start with.
271 */
272 if (skb_headroom(skb) < needed_headroom &&
273 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
274 dev_kfree_skb(skb);
275 return NULL;
276 }
277 } else {
278 /*
279 * Need to make a copy and possibly remove radiotap header
280 * and FCS from the original.
281 */
282 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
283
284 origskb = remove_monitor_info(local, origskb);
285
286 if (!skb)
287 return origskb;
288 }
289
290 /* prepend radiotap information */
291 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
292
293 skb_reset_mac_header(skb);
294 skb->ip_summed = CHECKSUM_UNNECESSARY;
295 skb->pkt_type = PACKET_OTHERHOST;
296 skb->protocol = htons(ETH_P_802_2);
297
298 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
299 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
300 continue;
301
302 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
303 continue;
304
305 if (!ieee80211_sdata_running(sdata))
306 continue;
307
308 if (prev_dev) {
309 skb2 = skb_clone(skb, GFP_ATOMIC);
310 if (skb2) {
311 skb2->dev = prev_dev;
312 netif_receive_skb(skb2);
313 }
314 }
315
316 prev_dev = sdata->dev;
317 sdata->dev->stats.rx_packets++;
318 sdata->dev->stats.rx_bytes += skb->len;
319 }
320
321 if (prev_dev) {
322 skb->dev = prev_dev;
323 netif_receive_skb(skb);
324 } else
325 dev_kfree_skb(skb);
326
327 return origskb;
328 }
329
330
331 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
332 {
333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
334 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
335 int tid, seqno_idx, security_idx;
336
337 /* does the frame have a qos control field? */
338 if (ieee80211_is_data_qos(hdr->frame_control)) {
339 u8 *qc = ieee80211_get_qos_ctl(hdr);
340 /* frame has qos control */
341 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
342 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
343 status->rx_flags |= IEEE80211_RX_AMSDU;
344
345 seqno_idx = tid;
346 security_idx = tid;
347 } else {
348 /*
349 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
350 *
351 * Sequence numbers for management frames, QoS data
352 * frames with a broadcast/multicast address in the
353 * Address 1 field, and all non-QoS data frames sent
354 * by QoS STAs are assigned using an additional single
355 * modulo-4096 counter, [...]
356 *
357 * We also use that counter for non-QoS STAs.
358 */
359 seqno_idx = NUM_RX_DATA_QUEUES;
360 security_idx = 0;
361 if (ieee80211_is_mgmt(hdr->frame_control))
362 security_idx = NUM_RX_DATA_QUEUES;
363 tid = 0;
364 }
365
366 rx->seqno_idx = seqno_idx;
367 rx->security_idx = security_idx;
368 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
369 * For now, set skb->priority to 0 for other cases. */
370 rx->skb->priority = (tid > 7) ? 0 : tid;
371 }
372
373 /**
374 * DOC: Packet alignment
375 *
376 * Drivers always need to pass packets that are aligned to two-byte boundaries
377 * to the stack.
378 *
379  * Additionally, drivers should, if possible, align the payload data in a way that
380 * guarantees that the contained IP header is aligned to a four-byte
381 * boundary. In the case of regular frames, this simply means aligning the
382 * payload to a four-byte boundary (because either the IP header is directly
383 * contained, or IV/RFC1042 headers that have a length divisible by four are
384 * in front of it). If the payload data is not properly aligned and the
385 * architecture doesn't support efficient unaligned operations, mac80211
386 * will align the data.
387 *
388 * With A-MSDU frames, however, the payload data address must yield two modulo
389 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
390 * push the IP header further back to a multiple of four again. Thankfully, the
391 * specs were sane enough this time around to require padding each A-MSDU
392 * subframe to a length that is a multiple of four.
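 *
 * As an illustration of the A-MSDU rule above: if the payload data starts
 * at an address where (addr % 4) == 2, then the 14-byte 802.3 subframe
 * header ends at an address where (addr % 4) == 0, so the encapsulated IP
 * header is four-byte aligned again.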
393 *
394  * Padding such as Atheros hardware adds between the 802.11 header and the
395  * payload is not supported; in that case the driver is required to move the
396  * 802.11 header so that it sits directly in front of the payload.
397 */
398 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
399 {
400 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
401 WARN_ONCE((unsigned long)rx->skb->data & 1,
402 "unaligned packet at 0x%p\n", rx->skb->data);
403 #endif
404 }
405
406
407 /* rx handlers */
408
409 static ieee80211_rx_result debug_noinline
410 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
411 {
412 struct ieee80211_local *local = rx->local;
413 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
414 struct sk_buff *skb = rx->skb;
415
416 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
417 !local->sched_scanning))
418 return RX_CONTINUE;
419
420 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
421 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
422 local->sched_scanning)
423 return ieee80211_scan_rx(rx->sdata, skb);
424
425 	/* scanning finished while the RX handlers were being invoked */
426 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
427 return RX_DROP_UNUSABLE;
428 }
429
430
431 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
432 {
433 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
434
435 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
436 return 0;
437
438 return ieee80211_is_robust_mgmt_frame(hdr);
439 }
440
441
442 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
443 {
444 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
445
446 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
447 return 0;
448
449 return ieee80211_is_robust_mgmt_frame(hdr);
450 }
451
452
453 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
454 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
455 {
456 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
457 struct ieee80211_mmie *mmie;
458
459 if (skb->len < 24 + sizeof(*mmie) ||
460 !is_multicast_ether_addr(hdr->da))
461 return -1;
462
463 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
464 return -1; /* not a robust management frame */
465
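	/* the MMIE, when present, is the last element in the frame body */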
466 mmie = (struct ieee80211_mmie *)
467 (skb->data + skb->len - sizeof(*mmie));
468 if (mmie->element_id != WLAN_EID_MMIE ||
469 mmie->length != sizeof(*mmie) - 2)
470 return -1;
471
472 return le16_to_cpu(mmie->key_id);
473 }
474
475
476 static ieee80211_rx_result
477 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
478 {
479 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
480 char *dev_addr = rx->sdata->vif.addr;
481
482 if (ieee80211_is_data(hdr->frame_control)) {
483 if (is_multicast_ether_addr(hdr->addr1)) {
484 if (ieee80211_has_tods(hdr->frame_control) ||
485 !ieee80211_has_fromds(hdr->frame_control))
486 return RX_DROP_MONITOR;
487 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
488 return RX_DROP_MONITOR;
489 } else {
490 if (!ieee80211_has_a4(hdr->frame_control))
491 return RX_DROP_MONITOR;
492 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
493 return RX_DROP_MONITOR;
494 }
495 }
496
497 /* If there is not an established peer link and this is not a peer link
498 	 * establishment frame, beacon or probe, drop the frame.
499 */
500
501 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
502 struct ieee80211_mgmt *mgmt;
503
504 if (!ieee80211_is_mgmt(hdr->frame_control))
505 return RX_DROP_MONITOR;
506
507 if (ieee80211_is_action(hdr->frame_control)) {
508 u8 category;
509 mgmt = (struct ieee80211_mgmt *)hdr;
510 category = mgmt->u.action.category;
511 if (category != WLAN_CATEGORY_MESH_ACTION &&
512 category != WLAN_CATEGORY_SELF_PROTECTED)
513 return RX_DROP_MONITOR;
514 return RX_CONTINUE;
515 }
516
517 if (ieee80211_is_probe_req(hdr->frame_control) ||
518 ieee80211_is_probe_resp(hdr->frame_control) ||
519 ieee80211_is_beacon(hdr->frame_control) ||
520 ieee80211_is_auth(hdr->frame_control))
521 return RX_CONTINUE;
522
523 return RX_DROP_MONITOR;
524
525 }
526
527 return RX_CONTINUE;
528 }
529
530 #define SEQ_MODULO 0x1000
531 #define SEQ_MASK 0xfff
532
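/*
 * 802.11 sequence numbers are modulo-4096; the helpers below compare and
 * advance them with wraparound. For example, seq_less(4095, 2) is true,
 * because (4095 - 2) & SEQ_MASK == 4093 > SEQ_MODULO >> 1, i.e. 4095 is
 * treated as older than 2 across the wrap.
 */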
533 static inline int seq_less(u16 sq1, u16 sq2)
534 {
535 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
536 }
537
538 static inline u16 seq_inc(u16 sq)
539 {
540 return (sq + 1) & SEQ_MASK;
541 }
542
543 static inline u16 seq_sub(u16 sq1, u16 sq2)
544 {
545 return (sq1 - sq2) & SEQ_MASK;
546 }
547
548
549 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
550 struct tid_ampdu_rx *tid_agg_rx,
551 int index)
552 {
553 struct ieee80211_local *local = hw_to_local(hw);
554 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
555 struct ieee80211_rx_status *status;
556
557 lockdep_assert_held(&tid_agg_rx->reorder_lock);
558
559 if (!skb)
560 goto no_frame;
561
562 /* release the frame from the reorder ring buffer */
563 tid_agg_rx->stored_mpdu_num--;
564 tid_agg_rx->reorder_buf[index] = NULL;
565 status = IEEE80211_SKB_RXCB(skb);
566 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
567 skb_queue_tail(&local->rx_skb_queue, skb);
568
569 no_frame:
570 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
571 }
572
573 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
574 struct tid_ampdu_rx *tid_agg_rx,
575 u16 head_seq_num)
576 {
577 int index;
578
579 lockdep_assert_held(&tid_agg_rx->reorder_lock);
580
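	/*
	 * The reorder buffer is a circular buffer indexed by a frame's
	 * sequence-number offset from the BA session's starting sequence
	 * number (ssn), modulo the buffer size.
	 */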
581 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
582 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
583 tid_agg_rx->buf_size;
584 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
585 }
586 }
587
588 /*
589 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
590 * the skb was added to the buffer longer than this time ago, the earlier
591 * frames that have not yet been received are assumed to be lost and the skb
592 * can be released for processing. This may also release other skb's from the
593 * reorder buffer if there are no additional gaps between the frames.
594 *
595 * Callers must hold tid_agg_rx->reorder_lock.
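 *
 * The timeout below is HZ / 10 jiffies, i.e. roughly 100 ms.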
596 */
597 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
598
599 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
600 struct tid_ampdu_rx *tid_agg_rx)
601 {
602 int index, j;
603
604 lockdep_assert_held(&tid_agg_rx->reorder_lock);
605
606 	/* release in-order frames from the buffer up to the next missing frame */
607 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
608 tid_agg_rx->buf_size;
609 if (!tid_agg_rx->reorder_buf[index] &&
610 tid_agg_rx->stored_mpdu_num > 1) {
611 /*
612 * No buffers ready to be released, but check whether any
613 * frames in the reorder buffer have timed out.
614 */
615 int skipped = 1;
616 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
617 j = (j + 1) % tid_agg_rx->buf_size) {
618 if (!tid_agg_rx->reorder_buf[j]) {
619 skipped++;
620 continue;
621 }
622 if (skipped &&
623 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
624 HT_RX_REORDER_BUF_TIMEOUT))
625 goto set_release_timer;
626
627 #ifdef CONFIG_MAC80211_HT_DEBUG
628 if (net_ratelimit())
629 wiphy_debug(hw->wiphy,
630 "release an RX reorder frame due to timeout on earlier frames\n");
631 #endif
632 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
633
634 /*
635 * Increment the head seq# also for the skipped slots.
636 */
637 tid_agg_rx->head_seq_num =
638 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
639 skipped = 0;
640 }
641 } else while (tid_agg_rx->reorder_buf[index]) {
642 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
643 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
644 tid_agg_rx->buf_size;
645 }
646
647 if (tid_agg_rx->stored_mpdu_num) {
648 j = index = seq_sub(tid_agg_rx->head_seq_num,
649 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
650
651 for (; j != (index - 1) % tid_agg_rx->buf_size;
652 j = (j + 1) % tid_agg_rx->buf_size) {
653 if (tid_agg_rx->reorder_buf[j])
654 break;
655 }
656
657 set_release_timer:
658
659 mod_timer(&tid_agg_rx->reorder_timer,
660 tid_agg_rx->reorder_time[j] + 1 +
661 HT_RX_REORDER_BUF_TIMEOUT);
662 } else {
663 del_timer(&tid_agg_rx->reorder_timer);
664 }
665 }
666
667 /*
668 * As this function belongs to the RX path it must be under
669 * rcu_read_lock protection. It returns false if the frame
670 * can be processed immediately, true if it was consumed.
671 */
672 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
673 struct tid_ampdu_rx *tid_agg_rx,
674 struct sk_buff *skb)
675 {
676 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
677 u16 sc = le16_to_cpu(hdr->seq_ctrl);
678 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
679 u16 head_seq_num, buf_size;
680 int index;
681 bool ret = true;
682
683 spin_lock(&tid_agg_rx->reorder_lock);
684
685 buf_size = tid_agg_rx->buf_size;
686 head_seq_num = tid_agg_rx->head_seq_num;
687
688 /* frame with out of date sequence number */
689 if (seq_less(mpdu_seq_num, head_seq_num)) {
690 dev_kfree_skb(skb);
691 goto out;
692 }
693
694 /*
695 	 * If the frame's sequence number exceeds our buffering window
696 	 * size, release some previous frames to make room for this one.
697 */
698 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
699 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
700 /* release stored frames up to new head to stack */
701 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
702 }
703
704 /* Now the new frame is always in the range of the reordering buffer */
705
706 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
707
708 /* check if we already stored this frame */
709 if (tid_agg_rx->reorder_buf[index]) {
710 dev_kfree_skb(skb);
711 goto out;
712 }
713
714 /*
715 * If the current MPDU is in the right order and nothing else
716 * is stored we can process it directly, no need to buffer it.
717 * If it is first but there's something stored, we may be able
718 * to release frames after this one.
719 */
720 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
721 tid_agg_rx->stored_mpdu_num == 0) {
722 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
723 ret = false;
724 goto out;
725 }
726
727 /* put the frame in the reordering buffer */
728 tid_agg_rx->reorder_buf[index] = skb;
729 tid_agg_rx->reorder_time[index] = jiffies;
730 tid_agg_rx->stored_mpdu_num++;
731 ieee80211_sta_reorder_release(hw, tid_agg_rx);
732
733 out:
734 spin_unlock(&tid_agg_rx->reorder_lock);
735 return ret;
736 }
737
738 /*
739  * Reorder MPDUs from A-MPDUs, keeping them in a buffer. The frame is
740  * either buffered for later release or queued for further processing.
741 */
742 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
743 {
744 struct sk_buff *skb = rx->skb;
745 struct ieee80211_local *local = rx->local;
746 struct ieee80211_hw *hw = &local->hw;
747 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
748 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
749 struct sta_info *sta = rx->sta;
750 struct tid_ampdu_rx *tid_agg_rx;
751 u16 sc;
752 u8 tid, ack_policy;
753
754 if (!ieee80211_is_data_qos(hdr->frame_control))
755 goto dont_reorder;
756
757 /*
758 * filter the QoS data rx stream according to
759 * STA/TID and check if this STA/TID is on aggregation
760 */
761
762 if (!sta)
763 goto dont_reorder;
764
765 ack_policy = *ieee80211_get_qos_ctl(hdr) &
766 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
767 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
768
769 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
770 if (!tid_agg_rx)
771 goto dont_reorder;
772
773 /* qos null data frames are excluded */
774 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
775 goto dont_reorder;
776
777 /* not part of a BA session */
778 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
779 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
780 goto dont_reorder;
781
782 /* not actually part of this BA session */
783 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
784 goto dont_reorder;
785
786 /* new, potentially un-ordered, ampdu frame - process it */
787
788 /* reset session timer */
789 if (tid_agg_rx->timeout)
790 mod_timer(&tid_agg_rx->session_timer,
791 TU_TO_EXP_TIME(tid_agg_rx->timeout));
792
793 /* if this mpdu is fragmented - terminate rx aggregation session */
794 sc = le16_to_cpu(hdr->seq_ctrl);
795 if (sc & IEEE80211_SCTL_FRAG) {
796 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
797 skb_queue_tail(&rx->sdata->skb_queue, skb);
798 ieee80211_queue_work(&local->hw, &rx->sdata->work);
799 return;
800 }
801
802 /*
803 * No locking needed -- we will only ever process one
804 * RX packet at a time, and thus own tid_agg_rx. All
805 * other code manipulating it needs to (and does) make
806 * sure that we cannot get to it any more before doing
807 * anything with it.
808 */
809 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
810 return;
811
812 dont_reorder:
813 skb_queue_tail(&local->rx_skb_queue, skb);
814 }
815
816 static ieee80211_rx_result debug_noinline
817 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
818 {
819 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
820 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
821
822 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
823 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
824 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
825 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
826 hdr->seq_ctrl)) {
827 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
828 rx->local->dot11FrameDuplicateCount++;
829 rx->sta->num_duplicates++;
830 }
831 return RX_DROP_UNUSABLE;
832 } else
833 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
834 }
835
836 if (unlikely(rx->skb->len < 16)) {
837 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
838 return RX_DROP_MONITOR;
839 }
840
841 /* Drop disallowed frame classes based on STA auth/assoc state;
842 * IEEE 802.11, Chap 5.5.
843 *
844 * mac80211 filters only based on association state, i.e. it drops
845 	 * Class 3 frames from stations that are not associated. hostapd sends
846 * deauth/disassoc frames when needed. In addition, hostapd is
847 * responsible for filtering on both auth and assoc states.
848 */
849
850 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
851 return ieee80211_rx_mesh_check(rx);
852
853 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
854 ieee80211_is_pspoll(hdr->frame_control)) &&
855 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
856 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
857 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
858 if (rx->sta && rx->sta->dummy &&
859 ieee80211_is_data_present(hdr->frame_control)) {
860 u16 ethertype;
861 u8 *payload;
862
863 payload = rx->skb->data +
864 ieee80211_hdrlen(hdr->frame_control);
865 ethertype = (payload[6] << 8) | payload[7];
866 if (cpu_to_be16(ethertype) ==
867 rx->sdata->control_port_protocol)
868 return RX_CONTINUE;
869 }
870
871 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
872 cfg80211_rx_spurious_frame(rx->sdata->dev,
873 hdr->addr2,
874 GFP_ATOMIC))
875 return RX_DROP_UNUSABLE;
876
877 return RX_DROP_MONITOR;
878 }
879
880 return RX_CONTINUE;
881 }
882
883
884 static ieee80211_rx_result debug_noinline
885 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
886 {
887 struct sk_buff *skb = rx->skb;
888 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
889 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
890 int keyidx;
891 int hdrlen;
892 ieee80211_rx_result result = RX_DROP_UNUSABLE;
893 struct ieee80211_key *sta_ptk = NULL;
894 int mmie_keyidx = -1;
895 __le16 fc;
896
897 /*
898 * Key selection 101
899 *
900 * There are four types of keys:
901 * - GTK (group keys)
902 * - IGTK (group keys for management frames)
903 * - PTK (pairwise keys)
904 * - STK (station-to-station pairwise keys)
905 *
906 * When selecting a key, we have to distinguish between multicast
907 	 * (including broadcast) and unicast frames; the latter can only
908 * use PTKs and STKs while the former always use GTKs and IGTKs.
909 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
910 * unicast frames can also use key indices like GTKs. Hence, if we
911 * don't have a PTK/STK we check the key index for a WEP key.
912 *
913 * Note that in a regular BSS, multicast frames are sent by the
914 * AP only, associated stations unicast the frame to the AP first
915 * which then multicasts it on their behalf.
916 *
917 * There is also a slight problem in IBSS mode: GTKs are negotiated
918 	 * with each station, which is something we don't currently handle.
919 * The spec seems to expect that one negotiates the same key with
920 * every station but there's no such requirement; VLANs could be
921 * possible.
922 */
923
924 /*
925 * No point in finding a key and decrypting if the frame is neither
926 * addressed to us nor a multicast frame.
927 */
928 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
929 return RX_CONTINUE;
930
931 /* start without a key */
932 rx->key = NULL;
933
934 if (rx->sta)
935 sta_ptk = rcu_dereference(rx->sta->ptk);
936
937 fc = hdr->frame_control;
938
939 if (!ieee80211_has_protected(fc))
940 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
941
942 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
943 rx->key = sta_ptk;
944 if ((status->flag & RX_FLAG_DECRYPTED) &&
945 (status->flag & RX_FLAG_IV_STRIPPED))
946 return RX_CONTINUE;
947 /* Skip decryption if the frame is not protected. */
948 if (!ieee80211_has_protected(fc))
949 return RX_CONTINUE;
950 } else if (mmie_keyidx >= 0) {
951 /* Broadcast/multicast robust management frame / BIP */
952 if ((status->flag & RX_FLAG_DECRYPTED) &&
953 (status->flag & RX_FLAG_IV_STRIPPED))
954 return RX_CONTINUE;
955
956 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
957 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
958 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
959 if (rx->sta)
960 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
961 if (!rx->key)
962 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
963 } else if (!ieee80211_has_protected(fc)) {
964 /*
965 * The frame was not protected, so skip decryption. However, we
966 * need to set rx->key if there is a key that could have been
967 * used so that the frame may be dropped if encryption would
968 * have been expected.
969 */
970 struct ieee80211_key *key = NULL;
971 struct ieee80211_sub_if_data *sdata = rx->sdata;
972 int i;
973
974 if (ieee80211_is_mgmt(fc) &&
975 is_multicast_ether_addr(hdr->addr1) &&
976 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
977 rx->key = key;
978 else {
979 if (rx->sta) {
980 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
981 key = rcu_dereference(rx->sta->gtk[i]);
982 if (key)
983 break;
984 }
985 }
986 if (!key) {
987 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
988 key = rcu_dereference(sdata->keys[i]);
989 if (key)
990 break;
991 }
992 }
993 if (key)
994 rx->key = key;
995 }
996 return RX_CONTINUE;
997 } else {
998 u8 keyid;
999 /*
1000 * The device doesn't give us the IV so we won't be
1001 * able to look up the key. That's ok though, we
1002 * don't need to decrypt the frame, we just won't
1003 * be able to keep statistics accurate.
1004 * Except for key threshold notifications, should
1005 * we somehow allow the driver to tell us which key
1006 * the hardware used if this flag is set?
1007 */
1008 if ((status->flag & RX_FLAG_DECRYPTED) &&
1009 (status->flag & RX_FLAG_IV_STRIPPED))
1010 return RX_CONTINUE;
1011
1012 hdrlen = ieee80211_hdrlen(fc);
1013
1014 if (rx->skb->len < 8 + hdrlen)
1015 return RX_DROP_UNUSABLE; /* TODO: count this? */
1016
1017 /*
1018 * no need to call ieee80211_wep_get_keyidx,
1019 * it verifies a bunch of things we've done already
1020 */
1021 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
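		/* the key index occupies the two most significant bits of the fourth IV octet */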
1022 keyidx = keyid >> 6;
1023
1024 /* check per-station GTK first, if multicast packet */
1025 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1026 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1027
1028 /* if not found, try default key */
1029 if (!rx->key) {
1030 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1031
1032 /*
1033 * RSNA-protected unicast frames should always be
1034 * sent with pairwise or station-to-station keys,
1035 * but for WEP we allow using a key index as well.
1036 */
1037 if (rx->key &&
1038 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1039 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1040 !is_multicast_ether_addr(hdr->addr1))
1041 rx->key = NULL;
1042 }
1043 }
1044
1045 if (rx->key) {
1046 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1047 return RX_DROP_MONITOR;
1048
1049 rx->key->tx_rx_count++;
1050 /* TODO: add threshold stuff again */
1051 } else {
1052 return RX_DROP_MONITOR;
1053 }
1054
1055 if (skb_linearize(rx->skb))
1056 return RX_DROP_UNUSABLE;
1057 /* the hdr variable is invalid now! */
1058
1059 switch (rx->key->conf.cipher) {
1060 case WLAN_CIPHER_SUITE_WEP40:
1061 case WLAN_CIPHER_SUITE_WEP104:
1062 /* Check for weak IVs if possible */
1063 if (rx->sta && ieee80211_is_data(fc) &&
1064 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1065 !(status->flag & RX_FLAG_DECRYPTED)) &&
1066 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1067 rx->sta->wep_weak_iv_count++;
1068
1069 result = ieee80211_crypto_wep_decrypt(rx);
1070 break;
1071 case WLAN_CIPHER_SUITE_TKIP:
1072 result = ieee80211_crypto_tkip_decrypt(rx);
1073 break;
1074 case WLAN_CIPHER_SUITE_CCMP:
1075 result = ieee80211_crypto_ccmp_decrypt(rx);
1076 break;
1077 case WLAN_CIPHER_SUITE_AES_CMAC:
1078 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1079 break;
1080 default:
1081 /*
1082 * We can reach here only with HW-only algorithms
1083 * but why didn't it decrypt the frame?!
1084 */
1085 return RX_DROP_UNUSABLE;
1086 }
1087
1088 /* either the frame has been decrypted or will be dropped */
1089 status->flag |= RX_FLAG_DECRYPTED;
1090
1091 return result;
1092 }
1093
1094 static ieee80211_rx_result debug_noinline
1095 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1096 {
1097 struct ieee80211_local *local;
1098 struct ieee80211_hdr *hdr;
1099 struct sk_buff *skb;
1100
1101 local = rx->local;
1102 skb = rx->skb;
1103 hdr = (struct ieee80211_hdr *) skb->data;
1104
1105 if (!local->pspolling)
1106 return RX_CONTINUE;
1107
1108 if (!ieee80211_has_fromds(hdr->frame_control))
1109 /* this is not from AP */
1110 return RX_CONTINUE;
1111
1112 if (!ieee80211_is_data(hdr->frame_control))
1113 return RX_CONTINUE;
1114
1115 if (!ieee80211_has_moredata(hdr->frame_control)) {
1116 /* AP has no more frames buffered for us */
1117 local->pspolling = false;
1118 return RX_CONTINUE;
1119 }
1120
1121 /* more data bit is set, let's request a new frame from the AP */
1122 ieee80211_send_pspoll(local, rx->sdata);
1123
1124 return RX_CONTINUE;
1125 }
1126
1127 static void ap_sta_ps_start(struct sta_info *sta)
1128 {
1129 struct ieee80211_sub_if_data *sdata = sta->sdata;
1130 struct ieee80211_local *local = sdata->local;
1131
1132 atomic_inc(&sdata->bss->num_sta_ps);
1133 set_sta_flag(sta, WLAN_STA_PS_STA);
1134 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1135 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1136 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1137 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1138 sdata->name, sta->sta.addr, sta->sta.aid);
1139 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1140 }
1141
1142 static void ap_sta_ps_end(struct sta_info *sta)
1143 {
1144 struct ieee80211_sub_if_data *sdata = sta->sdata;
1145
1146 atomic_dec(&sdata->bss->num_sta_ps);
1147
1148 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1149 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1150 sdata->name, sta->sta.addr, sta->sta.aid);
1151 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1152
1153 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1154 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1155 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1156 sdata->name, sta->sta.addr, sta->sta.aid);
1157 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1158 return;
1159 }
1160
1161 ieee80211_sta_ps_deliver_wakeup(sta);
1162 }
1163
1164 int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1165 {
1166 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1167 bool in_ps;
1168
1169 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1170
1171 /* Don't let the same PS state be set twice */
1172 in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1173 if ((start && in_ps) || (!start && !in_ps))
1174 return -EINVAL;
1175
1176 if (start)
1177 ap_sta_ps_start(sta_inf);
1178 else
1179 ap_sta_ps_end(sta_inf);
1180
1181 return 0;
1182 }
1183 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1184
1185 static ieee80211_rx_result debug_noinline
1186 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1187 {
1188 struct ieee80211_sub_if_data *sdata = rx->sdata;
1189 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1190 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1191 int tid, ac;
1192
1193 if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1194 return RX_CONTINUE;
1195
1196 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1197 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1198 return RX_CONTINUE;
1199
1200 /*
1201 * The device handles station powersave, so don't do anything about
1202 	 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1203 	 * to mac80211 since the device handles them.)
1204 */
1205 if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1206 return RX_CONTINUE;
1207
1208 /*
1209 * Don't do anything if the station isn't already asleep. In
1210 * the uAPSD case, the station will probably be marked asleep,
1211 * in the PS-Poll case the station must be confused ...
1212 */
1213 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1214 return RX_CONTINUE;
1215
1216 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1217 if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1218 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1219 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1220 else
1221 set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1222 }
1223
1224 /* Free PS Poll skb here instead of returning RX_DROP that would
1225 		 * count as a dropped frame. */
1226 dev_kfree_skb(rx->skb);
1227
1228 return RX_QUEUED;
1229 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1230 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1231 ieee80211_has_pm(hdr->frame_control) &&
1232 (ieee80211_is_data_qos(hdr->frame_control) ||
1233 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1234 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1235 ac = ieee802_1d_to_ac[tid & 7];
1236
1237 /*
1238 * If this AC is not trigger-enabled do nothing.
1239 *
1240 * NB: This could/should check a separate bitmap of trigger-
1241 * enabled queues, but for now we only implement uAPSD w/o
1242 * TSPEC changes to the ACs, so they're always the same.
1243 */
1244 if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1245 return RX_CONTINUE;
1246
1247 /* if we are in a service period, do nothing */
1248 if (test_sta_flag(rx->sta, WLAN_STA_SP))
1249 return RX_CONTINUE;
1250
1251 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1252 ieee80211_sta_ps_deliver_uapsd(rx->sta);
1253 else
1254 set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1255 }
1256
1257 return RX_CONTINUE;
1258 }
1259
1260 static ieee80211_rx_result debug_noinline
1261 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1262 {
1263 struct sta_info *sta = rx->sta;
1264 struct sk_buff *skb = rx->skb;
1265 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1266 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1267
1268 if (!sta)
1269 return RX_CONTINUE;
1270
1271 /*
1272 * Update last_rx only for IBSS packets which are for the current
1273 * BSSID to avoid keeping the current IBSS network alive in cases
1274 	 * where other STAs start using a different BSSID.
1275 */
1276 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1277 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1278 NL80211_IFTYPE_ADHOC);
1279 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
1280 sta->last_rx = jiffies;
1281 if (ieee80211_is_data(hdr->frame_control)) {
1282 sta->last_rx_rate_idx = status->rate_idx;
1283 sta->last_rx_rate_flag = status->flag;
1284 }
1285 }
1286 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1287 /*
1288 		 * Mesh beacons will update last_rx if they are found to
1289 * match the current local configuration when processed.
1290 */
1291 sta->last_rx = jiffies;
1292 if (ieee80211_is_data(hdr->frame_control)) {
1293 sta->last_rx_rate_idx = status->rate_idx;
1294 sta->last_rx_rate_flag = status->flag;
1295 }
1296 }
1297
1298 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1299 return RX_CONTINUE;
1300
1301 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1302 ieee80211_sta_rx_notify(rx->sdata, hdr);
1303
1304 sta->rx_fragments++;
1305 sta->rx_bytes += rx->skb->len;
1306 sta->last_signal = status->signal;
1307 ewma_add(&sta->avg_signal, -status->signal);
1308
1309 /*
1310 * Change STA power saving mode only at the end of a frame
1311 * exchange sequence.
1312 */
1313 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1314 !ieee80211_has_morefrags(hdr->frame_control) &&
1315 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1316 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1317 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1318 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1319 /*
1320 * Ignore doze->wake transitions that are
1321 * indicated by non-data frames, the standard
1322 * is unclear here, but for example going to
1323 * PS mode and then scanning would cause a
1324 * doze->wake transition for the probe request,
1325 * and that is clearly undesirable.
1326 */
1327 if (ieee80211_is_data(hdr->frame_control) &&
1328 !ieee80211_has_pm(hdr->frame_control))
1329 ap_sta_ps_end(sta);
1330 } else {
1331 if (ieee80211_has_pm(hdr->frame_control))
1332 ap_sta_ps_start(sta);
1333 }
1334 }
1335
1336 /*
1337 * Drop (qos-)data::nullfunc frames silently, since they
1338 * are used only to control station power saving mode.
1339 */
1340 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1341 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1342 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1343
1344 /*
1345 * If we receive a 4-addr nullfunc frame from a STA
1346 		 * that was not moved to a 4-addr STA VLAN yet, send
1347 		 * the event to userspace and, for older hostapd, drop
1348 * the frame to the monitor interface.
1349 */
1350 if (ieee80211_has_a4(hdr->frame_control) &&
1351 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1352 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1353 !rx->sdata->u.vlan.sta))) {
1354 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1355 cfg80211_rx_unexpected_4addr_frame(
1356 rx->sdata->dev, sta->sta.addr,
1357 GFP_ATOMIC);
1358 return RX_DROP_MONITOR;
1359 }
1360 /*
1361 * Update counter and free packet here to avoid
1362 		 * counting this as a dropped packet.
1363 */
1364 sta->rx_packets++;
1365 dev_kfree_skb(rx->skb);
1366 return RX_QUEUED;
1367 }
1368
1369 return RX_CONTINUE;
1370 } /* ieee80211_rx_h_sta_process */
1371
1372 static inline struct ieee80211_fragment_entry *
1373 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1374 unsigned int frag, unsigned int seq, int rx_queue,
1375 struct sk_buff **skb)
1376 {
1377 struct ieee80211_fragment_entry *entry;
1378 int idx;
1379
1380 idx = sdata->fragment_next;
1381 entry = &sdata->fragments[sdata->fragment_next++];
1382 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1383 sdata->fragment_next = 0;
1384
1385 if (!skb_queue_empty(&entry->skb_list)) {
1386 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1387 struct ieee80211_hdr *hdr =
1388 (struct ieee80211_hdr *) entry->skb_list.next->data;
1389 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1390 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1391 "addr1=%pM addr2=%pM\n",
1392 sdata->name, idx,
1393 jiffies - entry->first_frag_time, entry->seq,
1394 entry->last_frag, hdr->addr1, hdr->addr2);
1395 #endif
1396 __skb_queue_purge(&entry->skb_list);
1397 }
1398
1399 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1400 *skb = NULL;
1401 entry->first_frag_time = jiffies;
1402 entry->seq = seq;
1403 entry->rx_queue = rx_queue;
1404 entry->last_frag = frag;
1405 entry->ccmp = 0;
1406 entry->extra_len = 0;
1407
1408 return entry;
1409 }
1410
1411 static inline struct ieee80211_fragment_entry *
1412 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1413 unsigned int frag, unsigned int seq,
1414 int rx_queue, struct ieee80211_hdr *hdr)
1415 {
1416 struct ieee80211_fragment_entry *entry;
1417 int i, idx;
1418
1419 idx = sdata->fragment_next;
1420 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1421 struct ieee80211_hdr *f_hdr;
1422
1423 idx--;
1424 if (idx < 0)
1425 idx = IEEE80211_FRAGMENT_MAX - 1;
1426
1427 entry = &sdata->fragments[idx];
1428 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1429 entry->rx_queue != rx_queue ||
1430 entry->last_frag + 1 != frag)
1431 continue;
1432
1433 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1434
1435 /*
1436 * Check ftype and addresses are equal, else check next fragment
1437 */
1438 if (((hdr->frame_control ^ f_hdr->frame_control) &
1439 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1440 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1441 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1442 continue;
1443
1444 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1445 __skb_queue_purge(&entry->skb_list);
1446 continue;
1447 }
1448 return entry;
1449 }
1450
1451 return NULL;
1452 }
1453
1454 static ieee80211_rx_result debug_noinline
1455 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1456 {
1457 struct ieee80211_hdr *hdr;
1458 u16 sc;
1459 __le16 fc;
1460 unsigned int frag, seq;
1461 struct ieee80211_fragment_entry *entry;
1462 struct sk_buff *skb;
1463 struct ieee80211_rx_status *status;
1464
1465 hdr = (struct ieee80211_hdr *)rx->skb->data;
1466 fc = hdr->frame_control;
1467 sc = le16_to_cpu(hdr->seq_ctrl);
1468 frag = sc & IEEE80211_SCTL_FRAG;
1469
1470 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1471 (rx->skb)->len < 24 ||
1472 is_multicast_ether_addr(hdr->addr1))) {
1473 /* not fragmented */
1474 goto out;
1475 }
1476 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1477
1478 if (skb_linearize(rx->skb))
1479 return RX_DROP_UNUSABLE;
1480
1481 /*
1482 * skb_linearize() might change the skb->data and
1483 * previously cached variables (in this case, hdr) need to
1484 * be refreshed with the new data.
1485 */
1486 hdr = (struct ieee80211_hdr *)rx->skb->data;
1487 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1488
1489 if (frag == 0) {
1490 /* This is the first fragment of a new frame. */
1491 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1492 rx->seqno_idx, &(rx->skb));
1493 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1494 ieee80211_has_protected(fc)) {
1495 int queue = rx->security_idx;
1496 /* Store CCMP PN so that we can verify that the next
1497 * fragment has a sequential PN value. */
1498 entry->ccmp = 1;
1499 memcpy(entry->last_pn,
1500 rx->key->u.ccmp.rx_pn[queue],
1501 CCMP_PN_LEN);
1502 }
1503 return RX_QUEUED;
1504 }
1505
1506 /* This is a fragment for a frame that should already be pending in
1507 	 * the fragment cache. Add this fragment to the end of the pending entry.
1508 */
1509 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1510 rx->seqno_idx, hdr);
1511 if (!entry) {
1512 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1513 return RX_DROP_MONITOR;
1514 }
1515
1516 /* Verify that MPDUs within one MSDU have sequential PN values.
1517 * (IEEE 802.11i, 8.3.3.4.5) */
1518 if (entry->ccmp) {
1519 int i;
1520 u8 pn[CCMP_PN_LEN], *rpn;
1521 int queue;
1522 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1523 return RX_DROP_UNUSABLE;
1524 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
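		/* increment the packet number; pn[] is stored most-significant octet first */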
1525 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1526 pn[i]++;
1527 if (pn[i])
1528 break;
1529 }
1530 queue = rx->security_idx;
1531 rpn = rx->key->u.ccmp.rx_pn[queue];
1532 if (memcmp(pn, rpn, CCMP_PN_LEN))
1533 return RX_DROP_UNUSABLE;
1534 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1535 }
1536
1537 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1538 __skb_queue_tail(&entry->skb_list, rx->skb);
1539 entry->last_frag = frag;
1540 entry->extra_len += rx->skb->len;
1541 if (ieee80211_has_morefrags(fc)) {
1542 rx->skb = NULL;
1543 return RX_QUEUED;
1544 }
1545
1546 rx->skb = __skb_dequeue(&entry->skb_list);
1547 if (skb_tailroom(rx->skb) < entry->extra_len) {
1548 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1549 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1550 GFP_ATOMIC))) {
1551 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1552 __skb_queue_purge(&entry->skb_list);
1553 return RX_DROP_UNUSABLE;
1554 }
1555 }
1556 while ((skb = __skb_dequeue(&entry->skb_list))) {
1557 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1558 dev_kfree_skb(skb);
1559 }
1560
1561 /* Complete frame has been reassembled - process it now */
1562 status = IEEE80211_SKB_RXCB(rx->skb);
1563 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1564
1565 out:
1566 if (rx->sta)
1567 rx->sta->rx_packets++;
1568 if (is_multicast_ether_addr(hdr->addr1))
1569 rx->local->dot11MulticastReceivedFrameCount++;
1570 else
1571 ieee80211_led_rx(rx->local);
1572 return RX_CONTINUE;
1573 }
1574
1575 static ieee80211_rx_result debug_noinline
1576 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1577 {
1578 u8 *data = rx->skb->data;
1579 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1580
1581 if (!ieee80211_is_data_qos(hdr->frame_control))
1582 return RX_CONTINUE;
1583
1584 /* remove the qos control field, update frame type and meta-data */
1585 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1586 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1587 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1588 /* change frame type to non QOS */
1589 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1590
1591 return RX_CONTINUE;
1592 }
1593
1594 static int
1595 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1596 {
1597 if (unlikely(!rx->sta ||
1598 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1599 return -EACCES;
1600
1601 return 0;
1602 }
1603
1604 static int
1605 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1606 {
1607 struct sk_buff *skb = rx->skb;
1608 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1609
1610 /*
1611 * Pass through unencrypted frames if the hardware has
1612 * decrypted them already.
1613 */
1614 if (status->flag & RX_FLAG_DECRYPTED)
1615 return 0;
1616
1617 /* Drop unencrypted frames if key is set. */
1618 if (unlikely(!ieee80211_has_protected(fc) &&
1619 !ieee80211_is_nullfunc(fc) &&
1620 ieee80211_is_data(fc) &&
1621 (rx->key || rx->sdata->drop_unencrypted)))
1622 return -EACCES;
1623
1624 return 0;
1625 }
1626
1627 static int
1628 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1629 {
1630 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1631 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1632 __le16 fc = hdr->frame_control;
1633
1634 /*
1635 * Pass through unencrypted frames if the hardware has
1636 * decrypted them already.
1637 */
1638 if (status->flag & RX_FLAG_DECRYPTED)
1639 return 0;
1640
1641 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1642 if (unlikely(!ieee80211_has_protected(fc) &&
1643 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1644 rx->key)) {
1645 if (ieee80211_is_deauth(fc))
1646 cfg80211_send_unprot_deauth(rx->sdata->dev,
1647 rx->skb->data,
1648 rx->skb->len);
1649 else if (ieee80211_is_disassoc(fc))
1650 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1651 rx->skb->data,
1652 rx->skb->len);
1653 return -EACCES;
1654 }
1655 /* BIP does not use Protected field, so need to check MMIE */
1656 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1657 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1658 if (ieee80211_is_deauth(fc))
1659 cfg80211_send_unprot_deauth(rx->sdata->dev,
1660 rx->skb->data,
1661 rx->skb->len);
1662 else if (ieee80211_is_disassoc(fc))
1663 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1664 rx->skb->data,
1665 rx->skb->len);
1666 return -EACCES;
1667 }
1668 /*
1669 * When using MFP, Action frames are not allowed prior to
1670 * having configured keys.
1671 */
1672 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1673 ieee80211_is_robust_mgmt_frame(
1674 (struct ieee80211_hdr *) rx->skb->data)))
1675 return -EACCES;
1676 }
1677
1678 return 0;
1679 }
1680
1681 static int
1682 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1683 {
1684 struct ieee80211_sub_if_data *sdata = rx->sdata;
1685 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1686 bool check_port_control = false;
1687 struct ethhdr *ehdr;
1688 int ret;
1689
1690 *port_control = false;
1691 if (ieee80211_has_a4(hdr->frame_control) &&
1692 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1693 return -1;
1694
1695 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1696 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1697
1698 if (!sdata->u.mgd.use_4addr)
1699 return -1;
1700 else
1701 check_port_control = true;
1702 }
1703
1704 if (is_multicast_ether_addr(hdr->addr1) &&
1705 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1706 return -1;
1707
1708 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1709 if (ret < 0)
1710 return ret;
1711
1712 ehdr = (struct ethhdr *) rx->skb->data;
1713 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1714 *port_control = true;
1715 else if (check_port_control)
1716 return -1;
1717
1718 return 0;
1719 }
1720
1721 /*
1722 * requires that rx->skb is a frame with ethernet header
1723 */
1724 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1725 {
1726 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1727 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1728 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1729
1730 /*
1731 * Allow EAPOL frames to us/the PAE group address regardless
1732 * of whether the frame was encrypted or not.
1733 */
1734 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1735 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1736 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1737 return true;
1738
1739 if (ieee80211_802_1x_port_control(rx) ||
1740 ieee80211_drop_unencrypted(rx, fc))
1741 return false;
1742
1743 return true;
1744 }
1745
1746 /*
1747 * requires that rx->skb is a frame with ethernet header
1748 */
1749 static void
1750 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1751 {
1752 struct ieee80211_sub_if_data *sdata = rx->sdata;
1753 struct net_device *dev = sdata->dev;
1754 struct sk_buff *skb, *xmit_skb;
1755 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1756 struct sta_info *dsta;
1757 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1758
1759 skb = rx->skb;
1760 xmit_skb = NULL;
1761
1762 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1763 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1764 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1765 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1766 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1767 if (is_multicast_ether_addr(ehdr->h_dest)) {
1768 /*
1769 * send multicast frames both to higher layers in
1770 * local net stack and back to the wireless medium
1771 */
1772 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1773 if (!xmit_skb && net_ratelimit())
1774 printk(KERN_DEBUG "%s: failed to clone "
1775 "multicast frame\n", dev->name);
1776 } else {
1777 dsta = sta_info_get(sdata, skb->data);
1778 if (dsta) {
1779 /*
1780 * The destination station is associated to
1781 * this AP (in this VLAN), so send the frame
1782 * directly to it and do not pass it to local
1783 * net stack.
1784 */
1785 xmit_skb = skb;
1786 skb = NULL;
1787 }
1788 }
1789 }
1790
1791 if (skb) {
1792 int align __maybe_unused;
1793
1794 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1795 /*
1796 * 'align' will only take the values 0 or 2 here
1797 * since all frames are required to be aligned
1798 * to 2-byte boundaries when being passed to
1799 * mac80211. That also explains the memmove()
1800 * and headroom adjustment below.
1801 */
1802 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1803 if (align) {
1804 if (WARN_ON(skb_headroom(skb) < 3)) {
1805 dev_kfree_skb(skb);
1806 skb = NULL;
1807 } else {
1808 u8 *data = skb->data;
1809 size_t len = skb_headlen(skb);
1810 skb->data -= align;
1811 memmove(skb->data, data, len);
1812 skb_set_tail_pointer(skb, len);
1813 }
1814 }
1815 #endif
1816
1817 if (skb) {
1818 /* deliver to local stack */
1819 skb->protocol = eth_type_trans(skb, dev);
1820 memset(skb->cb, 0, sizeof(skb->cb));
1821 netif_receive_skb(skb);
1822 }
1823 }
1824
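/*
 * Frames selected above for bridging back onto the wireless medium
 * re-enter the normal TX path of this (virtual) interface via
 * dev_queue_xmit().
 */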
1825 if (xmit_skb) {
1826 /* send to wireless media */
1827 xmit_skb->protocol = htons(ETH_P_802_3);
1828 skb_reset_network_header(xmit_skb);
1829 skb_reset_mac_header(xmit_skb);
1830 dev_queue_xmit(xmit_skb);
1831 }
1832 }
1833
1834 static ieee80211_rx_result debug_noinline
1835 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1836 {
1837 struct net_device *dev = rx->sdata->dev;
1838 struct sk_buff *skb = rx->skb;
1839 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1840 __le16 fc = hdr->frame_control;
1841 struct sk_buff_head frame_list;
1842 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1843
1844 if (unlikely(!ieee80211_is_data(fc)))
1845 return RX_CONTINUE;
1846
1847 if (unlikely(!ieee80211_is_data_present(fc)))
1848 return RX_DROP_MONITOR;
1849
1850 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1851 return RX_CONTINUE;
1852
1853 if (ieee80211_has_a4(hdr->frame_control) &&
1854 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1855 !rx->sdata->u.vlan.sta)
1856 return RX_DROP_UNUSABLE;
1857
1858 if (is_multicast_ether_addr(hdr->addr1) &&
1859 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1860 rx->sdata->u.vlan.sta) ||
1861 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1862 rx->sdata->u.mgd.use_4addr)))
1863 return RX_DROP_UNUSABLE;
1864
1865 skb->dev = dev;
1866 __skb_queue_head_init(&frame_list);
1867
1868 if (skb_linearize(skb))
1869 return RX_DROP_UNUSABLE;
1870
1871 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1872 rx->sdata->vif.type,
1873 rx->local->hw.extra_tx_headroom, true);
1874
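/*
 * The A-MSDU has been split into individual 802.3 subframes on
 * frame_list; run each one through the usual permission check and
 * deliver it separately.
 */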
1875 while (!skb_queue_empty(&frame_list)) {
1876 rx->skb = __skb_dequeue(&frame_list);
1877
1878 if (!ieee80211_frame_allowed(rx, fc)) {
1879 dev_kfree_skb(rx->skb);
1880 continue;
1881 }
1882 dev->stats.rx_packets++;
1883 dev->stats.rx_bytes += rx->skb->len;
1884
1885 ieee80211_deliver_skb(rx);
1886 }
1887
1888 return RX_QUEUED;
1889 }
1890
1891 #ifdef CONFIG_MAC80211_MESH
1892 static ieee80211_rx_result
1893 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1894 {
1895 struct ieee80211_hdr *hdr;
1896 struct ieee80211s_hdr *mesh_hdr;
1897 unsigned int hdrlen;
1898 struct sk_buff *skb = rx->skb, *fwd_skb;
1899 struct ieee80211_local *local = rx->local;
1900 struct ieee80211_sub_if_data *sdata = rx->sdata;
1901 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1902
1903 hdr = (struct ieee80211_hdr *) skb->data;
1904 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1905 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1906
1907 /* frame is in RMC, don't forward */
1908 if (ieee80211_is_data(hdr->frame_control) &&
1909 is_multicast_ether_addr(hdr->addr1) &&
1910 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1911 return RX_DROP_MONITOR;
1912
1913 if (!ieee80211_is_data(hdr->frame_control))
1914 return RX_CONTINUE;
1915
1916 if (!mesh_hdr->ttl)
1917 /* illegal frame */
1918 return RX_DROP_MONITOR;
1919
1920 if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
1921 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1922 dropped_frames_congestion);
1923 return RX_DROP_MONITOR;
1924 }
1925
1926 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1927 struct mesh_path *mppath;
1928 char *proxied_addr;
1929 char *mpp_addr;
1930
1931 if (is_multicast_ether_addr(hdr->addr1)) {
1932 mpp_addr = hdr->addr3;
1933 proxied_addr = mesh_hdr->eaddr1;
1934 } else {
1935 mpp_addr = hdr->addr4;
1936 proxied_addr = mesh_hdr->eaddr2;
1937 }
1938
1939 rcu_read_lock();
1940 mppath = mpp_path_lookup(proxied_addr, sdata);
1941 if (!mppath) {
1942 mpp_path_add(proxied_addr, mpp_addr, sdata);
1943 } else {
1944 spin_lock_bh(&mppath->state_lock);
1945 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1946 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1947 spin_unlock_bh(&mppath->state_lock);
1948 }
1949 rcu_read_unlock();
1950 }
1951
1952 /* Frame has reached destination. Don't forward */
1953 if (!is_multicast_ether_addr(hdr->addr1) &&
1954 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1955 return RX_CONTINUE;
1956
1957 skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
1958 mesh_hdr->ttl--;
1959
1960 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1961 if (!mesh_hdr->ttl)
1962 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1963 dropped_frames_ttl);
1964 else {
1965 struct ieee80211_hdr *fwd_hdr;
1966 struct ieee80211_tx_info *info;
1967
1968 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1969
1970 if (!fwd_skb && net_ratelimit())
1971 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1972 sdata->name);
1973 if (!fwd_skb)
1974 goto out;
1975
1976 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1977 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1978 info = IEEE80211_SKB_CB(fwd_skb);
1979 memset(info, 0, sizeof(*info));
1980 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1981 info->control.vif = &rx->sdata->vif;
1982 info->control.jiffies = jiffies;
1983 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1984 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1985 fwded_mcast);
1986 } else {
1987 int err;
1988 /*
1989 * Save TA to addr1 to send TA a path error if a
1990 * suitable next hop is not found
1991 */
1992 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1993 ETH_ALEN);
1994 err = mesh_nexthop_lookup(fwd_skb, sdata);
1995 /* Failed to immediately resolve next hop:
1996 * fwded frame was dropped or will be added
1997 * later to the pending skb queue. */
1998 if (err)
1999 return RX_DROP_MONITOR;
2000
2001 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
2002 fwded_unicast);
2003 }
2004 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
2005 fwded_frames);
2006 ieee80211_add_pending_skb(local, fwd_skb);
2007 }
2008 }
2009
2010 out:
2011 if (is_multicast_ether_addr(hdr->addr1) ||
2012 sdata->dev->flags & IFF_PROMISC)
2013 return RX_CONTINUE;
2014 else
2015 return RX_DROP_MONITOR;
2016 }
2017 #endif
2018
2019 static ieee80211_rx_result debug_noinline
2020 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2021 {
2022 struct ieee80211_sub_if_data *sdata = rx->sdata;
2023 struct ieee80211_local *local = rx->local;
2024 struct net_device *dev = sdata->dev;
2025 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2026 __le16 fc = hdr->frame_control;
2027 bool port_control;
2028 int err;
2029
2030 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2031 return RX_CONTINUE;
2032
2033 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2034 return RX_DROP_MONITOR;
2035
2036 /*
2037 * Send unexpected-4addr-frame event to hostapd. For older versions,
2038 * also drop the frame to cooked monitor interfaces.
2039 */
2040 if (ieee80211_has_a4(hdr->frame_control) &&
2041 sdata->vif.type == NL80211_IFTYPE_AP) {
2042 if (rx->sta &&
2043 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2044 cfg80211_rx_unexpected_4addr_frame(
2045 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2046 return RX_DROP_MONITOR;
2047 }
2048
2049 err = __ieee80211_data_to_8023(rx, &port_control);
2050 if (unlikely(err))
2051 return RX_DROP_UNUSABLE;
2052
2053 if (!ieee80211_frame_allowed(rx, fc))
2054 return RX_DROP_MONITOR;
2055
2056 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2057 unlikely(port_control) && sdata->bss) {
2058 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2059 u.ap);
2060 dev = sdata->dev;
2061 rx->sdata = sdata;
2062 }
2063
2064 rx->skb->dev = dev;
2065
2066 dev->stats.rx_packets++;
2067 dev->stats.rx_bytes += rx->skb->len;
2068
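/*
 * Unicast data was received while dynamic powersave is configured
 * and we are neither scanning nor off-channel, so push back the
 * point at which powersave would be re-entered.
 */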
2069 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2070 !is_multicast_ether_addr(
2071 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2072 (!local->scanning &&
2073 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
2074 mod_timer(&local->dynamic_ps_timer, jiffies +
2075 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2076 }
2077
2078 ieee80211_deliver_skb(rx);
2079
2080 return RX_QUEUED;
2081 }
2082
2083 static ieee80211_rx_result debug_noinline
2084 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2085 {
2086 struct ieee80211_local *local = rx->local;
2087 struct ieee80211_hw *hw = &local->hw;
2088 struct sk_buff *skb = rx->skb;
2089 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2090 struct tid_ampdu_rx *tid_agg_rx;
2091 u16 start_seq_num;
2092 u16 tid;
2093
2094 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2095 return RX_CONTINUE;
2096
2097 if (ieee80211_is_back_req(bar->frame_control)) {
2098 struct {
2099 __le16 control, start_seq_num;
2100 } __packed bar_data;
2101
2102 if (!rx->sta)
2103 return RX_DROP_MONITOR;
2104
2105 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2106 &bar_data, sizeof(bar_data)))
2107 return RX_DROP_MONITOR;
2108
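/*
 * The BAR Control field carries the TID in bits 12-15 (hence the
 * shift by 12) and the Starting Sequence Control carries the
 * sequence number in bits 4-15 (hence the shift by 4 below).
 */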
2109 tid = le16_to_cpu(bar_data.control) >> 12;
2110
2111 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2112 if (!tid_agg_rx)
2113 return RX_DROP_MONITOR;
2114
2115 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2116
2117 /* reset session timer */
2118 if (tid_agg_rx->timeout)
2119 mod_timer(&tid_agg_rx->session_timer,
2120 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2121
2122 spin_lock(&tid_agg_rx->reorder_lock);
2123 /* release stored frames up to start of BAR */
2124 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
2125 spin_unlock(&tid_agg_rx->reorder_lock);
2126
2127 kfree_skb(skb);
2128 return RX_QUEUED;
2129 }
2130
2131 /*
2132 * After this point, we only want management frames,
2133 * so we can drop all remaining control frames to
2134 * cooked monitor interfaces.
2135 */
2136 return RX_DROP_MONITOR;
2137 }
2138
2139 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2140 struct ieee80211_mgmt *mgmt,
2141 size_t len)
2142 {
2143 struct ieee80211_local *local = sdata->local;
2144 struct sk_buff *skb;
2145 struct ieee80211_mgmt *resp;
2146
2147 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
2148 /* Not to own unicast address */
2149 return;
2150 }
2151
2152 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
2153 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
2154 /* Not from the current AP or not associated yet. */
2155 return;
2156 }
2157
2158 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2159 /* Too short SA Query request frame */
2160 return;
2161 }
2162
2163 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2164 if (skb == NULL)
2165 return;
2166
2167 skb_reserve(skb, local->hw.extra_tx_headroom);
2168 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2169 memset(resp, 0, 24);
2170 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2171 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2172 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2173 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2174 IEEE80211_STYPE_ACTION);
2175 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2176 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2177 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2178 memcpy(resp->u.action.u.sa_query.trans_id,
2179 mgmt->u.action.u.sa_query.trans_id,
2180 WLAN_SA_QUERY_TR_ID_LEN);
2181
2182 ieee80211_tx_skb(sdata, skb);
2183 }
2184
2185 static ieee80211_rx_result debug_noinline
2186 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2187 {
2188 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2189 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2190
2191 /*
2192 * From here on, look only at management frames.
2193 * Data and control frames are already handled,
2194 * and unknown (reserved) frames are useless.
2195 */
2196 if (rx->skb->len < 24)
2197 return RX_DROP_MONITOR;
2198
2199 if (!ieee80211_is_mgmt(mgmt->frame_control))
2200 return RX_DROP_MONITOR;
2201
2202 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2203 ieee80211_is_beacon(mgmt->frame_control) &&
2204 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2205 struct ieee80211_rx_status *status;
2206
2207 status = IEEE80211_SKB_RXCB(rx->skb);
2208 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2209 rx->skb->data, rx->skb->len,
2210 status->freq, GFP_ATOMIC);
2211 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2212 }
2213
2214 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2215 return RX_DROP_MONITOR;
2216
2217 if (ieee80211_drop_unencrypted_mgmt(rx))
2218 return RX_DROP_UNUSABLE;
2219
2220 return RX_CONTINUE;
2221 }
2222
2223 static ieee80211_rx_result debug_noinline
2224 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2225 {
2226 struct ieee80211_local *local = rx->local;
2227 struct ieee80211_sub_if_data *sdata = rx->sdata;
2228 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2230 int len = rx->skb->len;
2231
2232 if (!ieee80211_is_action(mgmt->frame_control))
2233 return RX_CONTINUE;
2234
2235 /* drop too small frames */
2236 if (len < IEEE80211_MIN_ACTION_SIZE)
2237 return RX_DROP_UNUSABLE;
2238
2239 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2240 return RX_DROP_UNUSABLE;
2241
2242 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2243 return RX_DROP_UNUSABLE;
2244
2245 switch (mgmt->u.action.category) {
2246 case WLAN_CATEGORY_BACK:
2247 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2248 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2249 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2250 sdata->vif.type != NL80211_IFTYPE_AP)
2251 break;
2252
2253 /* verify action_code is present */
2254 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2255 break;
2256
2257 switch (mgmt->u.action.u.addba_req.action_code) {
2258 case WLAN_ACTION_ADDBA_REQ:
2259 if (len < (IEEE80211_MIN_ACTION_SIZE +
2260 sizeof(mgmt->u.action.u.addba_req)))
2261 goto invalid;
2262 break;
2263 case WLAN_ACTION_ADDBA_RESP:
2264 if (len < (IEEE80211_MIN_ACTION_SIZE +
2265 sizeof(mgmt->u.action.u.addba_resp)))
2266 goto invalid;
2267 break;
2268 case WLAN_ACTION_DELBA:
2269 if (len < (IEEE80211_MIN_ACTION_SIZE +
2270 sizeof(mgmt->u.action.u.delba)))
2271 goto invalid;
2272 break;
2273 default:
2274 goto invalid;
2275 }
2276
2277 goto queue;
2278 case WLAN_CATEGORY_SPECTRUM_MGMT:
2279 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2280 break;
2281
2282 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2283 break;
2284
2285 /* verify action_code is present */
2286 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2287 break;
2288
2289 switch (mgmt->u.action.u.measurement.action_code) {
2290 case WLAN_ACTION_SPCT_MSR_REQ:
2291 if (len < (IEEE80211_MIN_ACTION_SIZE +
2292 sizeof(mgmt->u.action.u.measurement)))
2293 break;
2294 ieee80211_process_measurement_req(sdata, mgmt, len);
2295 goto handled;
2296 case WLAN_ACTION_SPCT_CHL_SWITCH:
2297 if (len < (IEEE80211_MIN_ACTION_SIZE +
2298 sizeof(mgmt->u.action.u.chan_switch)))
2299 break;
2300
2301 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2302 break;
2303
2304 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2305 break;
2306
2307 goto queue;
2308 }
2309 break;
2310 case WLAN_CATEGORY_SA_QUERY:
2311 if (len < (IEEE80211_MIN_ACTION_SIZE +
2312 sizeof(mgmt->u.action.u.sa_query)))
2313 break;
2314
2315 switch (mgmt->u.action.u.sa_query.action) {
2316 case WLAN_ACTION_SA_QUERY_REQUEST:
2317 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2318 break;
2319 ieee80211_process_sa_query_req(sdata, mgmt, len);
2320 goto handled;
2321 }
2322 break;
2323 case WLAN_CATEGORY_SELF_PROTECTED:
2324 switch (mgmt->u.action.u.self_prot.action_code) {
2325 case WLAN_SP_MESH_PEERING_OPEN:
2326 case WLAN_SP_MESH_PEERING_CLOSE:
2327 case WLAN_SP_MESH_PEERING_CONFIRM:
2328 if (!ieee80211_vif_is_mesh(&sdata->vif))
2329 goto invalid;
2330 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2331 /* userspace handles this frame */
2332 break;
2333 goto queue;
2334 case WLAN_SP_MGK_INFORM:
2335 case WLAN_SP_MGK_ACK:
2336 if (!ieee80211_vif_is_mesh(&sdata->vif))
2337 goto invalid;
2338 break;
2339 }
2340 break;
2341 case WLAN_CATEGORY_MESH_ACTION:
2342 if (!ieee80211_vif_is_mesh(&sdata->vif))
2343 break;
2344 if (mesh_action_is_path_sel(mgmt) &&
2345 (!mesh_path_sel_is_hwmp(sdata)))
2346 break;
2347 goto queue;
2348 }
2349
2350 return RX_CONTINUE;
2351
2352 invalid:
2353 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2354 /* will be returned to the sender by a later handler */
2355 return RX_CONTINUE;
2356
2357 handled:
2358 if (rx->sta)
2359 rx->sta->rx_packets++;
2360 dev_kfree_skb(rx->skb);
2361 return RX_QUEUED;
2362
2363 queue:
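/*
 * Defer further processing: the frame is tagged and queued on
 * sdata->skb_queue, to be handled from the interface work item
 * (sdata->work) outside the RX path.
 */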
2364 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2365 skb_queue_tail(&sdata->skb_queue, rx->skb);
2366 ieee80211_queue_work(&local->hw, &sdata->work);
2367 if (rx->sta)
2368 rx->sta->rx_packets++;
2369 return RX_QUEUED;
2370 }
2371
2372 static ieee80211_rx_result debug_noinline
2373 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2374 {
2375 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2376
2377 /* skip known-bad action frames and return them in the next handler */
2378 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2379 return RX_CONTINUE;
2380
2381 /*
2382 * Getting here means the kernel doesn't know how to handle
2383 * it, but maybe userspace does ... include returned frames
2384 * so userspace can register for those to know whether frames
2385 * it transmitted were processed or returned.
2386 */
2387
2388 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2389 rx->skb->data, rx->skb->len,
2390 GFP_ATOMIC)) {
2391 if (rx->sta)
2392 rx->sta->rx_packets++;
2393 dev_kfree_skb(rx->skb);
2394 return RX_QUEUED;
2395 }
2396
2397
2398 return RX_CONTINUE;
2399 }
2400
2401 static ieee80211_rx_result debug_noinline
2402 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2403 {
2404 struct ieee80211_local *local = rx->local;
2405 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2406 struct sk_buff *nskb;
2407 struct ieee80211_sub_if_data *sdata = rx->sdata;
2408 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2409
2410 if (!ieee80211_is_action(mgmt->frame_control))
2411 return RX_CONTINUE;
2412
2413 /*
2414 * For AP mode, hostapd is responsible for handling any action
2415 * frames that we didn't handle, including returning unknown
2416 * ones. For all other modes we will return them to the sender,
2417 * setting the 0x80 bit in the action category, as required by
2418 * 802.11-2007 7.3.1.11.
2419 * Newer versions of hostapd shall also use the management frame
2420 * registration mechanisms, but older ones still use cooked
2421 * monitor interfaces so push all frames there.
2422 */
2423 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2424 (sdata->vif.type == NL80211_IFTYPE_AP ||
2425 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2426 return RX_DROP_MONITOR;
2427
2428 /* do not return rejected action frames */
2429 if (mgmt->u.action.category & 0x80)
2430 return RX_DROP_UNUSABLE;
2431
2432 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2433 GFP_ATOMIC);
2434 if (nskb) {
2435 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2436
2437 nmgmt->u.action.category |= 0x80;
2438 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2439 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2440
2441 memset(nskb->cb, 0, sizeof(nskb->cb));
2442
2443 ieee80211_tx_skb(rx->sdata, nskb);
2444 }
2445 dev_kfree_skb(rx->skb);
2446 return RX_QUEUED;
2447 }
2448
2449 static ieee80211_rx_result debug_noinline
2450 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2451 {
2452 struct ieee80211_sub_if_data *sdata = rx->sdata;
2453 ieee80211_rx_result rxs;
2454 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2455 __le16 stype;
2456
2457 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2458 if (rxs != RX_CONTINUE)
2459 return rxs;
2460
2461 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2462
2463 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2464 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2465 sdata->vif.type != NL80211_IFTYPE_STATION)
2466 return RX_DROP_MONITOR;
2467
2468 switch (stype) {
2469 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2470 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2471 /* process for all: mesh, mlme, ibss */
2472 break;
2473 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2474 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2475 if (is_multicast_ether_addr(mgmt->da) &&
2476 !is_broadcast_ether_addr(mgmt->da))
2477 return RX_DROP_MONITOR;
2478
2479 /* process only for station */
2480 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2481 return RX_DROP_MONITOR;
2482 break;
2483 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2484 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2485 /* process only for ibss */
2486 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2487 return RX_DROP_MONITOR;
2488 break;
2489 default:
2490 return RX_DROP_MONITOR;
2491 }
2492
2493 /* queue up frame and kick off work to process it */
2494 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2495 skb_queue_tail(&sdata->skb_queue, rx->skb);
2496 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2497 if (rx->sta)
2498 rx->sta->rx_packets++;
2499
2500 return RX_QUEUED;
2501 }
2502
2503 /* TODO: use IEEE80211_RX_FRAGMENTED */
2504 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2505 struct ieee80211_rate *rate)
2506 {
2507 struct ieee80211_sub_if_data *sdata;
2508 struct ieee80211_local *local = rx->local;
2509 struct ieee80211_rtap_hdr {
2510 struct ieee80211_radiotap_header hdr;
2511 u8 flags;
2512 u8 rate_or_pad;
2513 __le16 chan_freq;
2514 __le16 chan_flags;
2515 } __packed *rthdr;
2516 struct sk_buff *skb = rx->skb, *skb2;
2517 struct net_device *prev_dev = NULL;
2518 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2519
2520 /*
2521 * If cooked monitor has been processed already, then
2522 * don't do it again. If not, set the flag.
2523 */
2524 if (rx->flags & IEEE80211_RX_CMNTR)
2525 goto out_free_skb;
2526 rx->flags |= IEEE80211_RX_CMNTR;
2527
2528 /* If there are no cooked monitor interfaces, just free the SKB */
2529 if (!local->cooked_mntrs)
2530 goto out_free_skb;
2531
2532 if (skb_headroom(skb) < sizeof(*rthdr) &&
2533 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2534 goto out_free_skb;
2535
2536 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2537 memset(rthdr, 0, sizeof(*rthdr));
2538 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2539 rthdr->hdr.it_present =
2540 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2541 (1 << IEEE80211_RADIOTAP_CHANNEL));
2542
2543 if (rate) {
2544 rthdr->rate_or_pad = rate->bitrate / 5;
2545 rthdr->hdr.it_present |=
2546 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2547 }
2548 rthdr->chan_freq = cpu_to_le16(status->freq);
2549
2550 if (status->band == IEEE80211_BAND_5GHZ)
2551 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2552 IEEE80211_CHAN_5GHZ);
2553 else
2554 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2555 IEEE80211_CHAN_2GHZ);
2556
2557 skb_set_mac_header(skb, 0);
2558 skb->ip_summed = CHECKSUM_UNNECESSARY;
2559 skb->pkt_type = PACKET_OTHERHOST;
2560 skb->protocol = htons(ETH_P_802_2);
2561
2562 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2563 if (!ieee80211_sdata_running(sdata))
2564 continue;
2565
2566 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2567 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2568 continue;
2569
2570 if (prev_dev) {
2571 skb2 = skb_clone(skb, GFP_ATOMIC);
2572 if (skb2) {
2573 skb2->dev = prev_dev;
2574 netif_receive_skb(skb2);
2575 }
2576 }
2577
2578 prev_dev = sdata->dev;
2579 sdata->dev->stats.rx_packets++;
2580 sdata->dev->stats.rx_bytes += skb->len;
2581 }
2582
2583 if (prev_dev) {
2584 skb->dev = prev_dev;
2585 netif_receive_skb(skb);
2586 return;
2587 }
2588
2589 out_free_skb:
2590 dev_kfree_skb(skb);
2591 }
2592
2593 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2594 ieee80211_rx_result res)
2595 {
2596 switch (res) {
2597 case RX_DROP_MONITOR:
2598 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2599 if (rx->sta)
2600 rx->sta->rx_dropped++;
2601 /* fall through */
2602 case RX_CONTINUE: {
2603 struct ieee80211_rate *rate = NULL;
2604 struct ieee80211_supported_band *sband;
2605 struct ieee80211_rx_status *status;
2606
2607 status = IEEE80211_SKB_RXCB((rx->skb));
2608
2609 sband = rx->local->hw.wiphy->bands[status->band];
2610 if (!(status->flag & RX_FLAG_HT))
2611 rate = &sband->bitrates[status->rate_idx];
2612
2613 ieee80211_rx_cooked_monitor(rx, rate);
2614 break;
2615 }
2616 case RX_DROP_UNUSABLE:
2617 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2618 if (rx->sta)
2619 rx->sta->rx_dropped++;
2620 dev_kfree_skb(rx->skb);
2621 break;
2622 case RX_QUEUED:
2623 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2624 break;
2625 }
2626 }
2627
2628 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2629 {
2630 ieee80211_rx_result res = RX_DROP_MONITOR;
2631 struct sk_buff *skb;
2632
2633 #define CALL_RXH(rxh) \
2634 do { \
2635 res = rxh(rx); \
2636 if (res != RX_CONTINUE) \
2637 goto rxh_next; \
2638 } while (0);
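/*
 * Each handler returns RX_CONTINUE to pass the frame on to the next
 * handler in the chain; any other result (RX_QUEUED, RX_DROP_*)
 * short-circuits to rxh_next, where ieee80211_rx_handlers_result()
 * acts on it.
 */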
2639
2640 spin_lock(&rx->local->rx_skb_queue.lock);
2641 if (rx->local->running_rx_handler)
2642 goto unlock;
2643
2644 rx->local->running_rx_handler = true;
2645
2646 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2647 spin_unlock(&rx->local->rx_skb_queue.lock);
2648
2649 /*
2650 * all the other fields are valid across frames
2651 * that belong to an A-MPDU since they are on the
2652 * same TID from the same station
2653 */
2654 rx->skb = skb;
2655
2656 CALL_RXH(ieee80211_rx_h_decrypt)
2657 CALL_RXH(ieee80211_rx_h_check_more_data)
2658 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2659 CALL_RXH(ieee80211_rx_h_sta_process)
2660 CALL_RXH(ieee80211_rx_h_defragment)
2661 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2662 /* must be after MMIC verify so header is counted in MPDU mic */
2663 #ifdef CONFIG_MAC80211_MESH
2664 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2665 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2666 #endif
2667 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2668 CALL_RXH(ieee80211_rx_h_amsdu)
2669 CALL_RXH(ieee80211_rx_h_data)
2670 CALL_RXH(ieee80211_rx_h_ctrl);
2671 CALL_RXH(ieee80211_rx_h_mgmt_check)
2672 CALL_RXH(ieee80211_rx_h_action)
2673 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2674 CALL_RXH(ieee80211_rx_h_action_return)
2675 CALL_RXH(ieee80211_rx_h_mgmt)
2676
2677 rxh_next:
2678 ieee80211_rx_handlers_result(rx, res);
2679 spin_lock(&rx->local->rx_skb_queue.lock);
2680 #undef CALL_RXH
2681 }
2682
2683 rx->local->running_rx_handler = false;
2684
2685 unlock:
2686 spin_unlock(&rx->local->rx_skb_queue.lock);
2687 }
2688
2689 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2690 {
2691 ieee80211_rx_result res = RX_DROP_MONITOR;
2692
2693 #define CALL_RXH(rxh) \
2694 do { \
2695 res = rxh(rx); \
2696 if (res != RX_CONTINUE) \
2697 goto rxh_next; \
2698 } while (0);
2699
2700 CALL_RXH(ieee80211_rx_h_passive_scan)
2701 CALL_RXH(ieee80211_rx_h_check)
2702
2703 ieee80211_rx_reorder_ampdu(rx);
2704
2705 ieee80211_rx_handlers(rx);
2706 return;
2707
2708 rxh_next:
2709 ieee80211_rx_handlers_result(rx, res);
2710
2711 #undef CALL_RXH
2712 }
2713
2714 /*
2715 * This function makes calls into the RX path, therefore
2716 * it has to be invoked under RCU read lock.
2717 */
2718 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2719 {
2720 struct ieee80211_rx_data rx = {
2721 .sta = sta,
2722 .sdata = sta->sdata,
2723 .local = sta->local,
2724 /* This is OK -- must be QoS data frame */
2725 .security_idx = tid,
2726 .seqno_idx = tid,
2727 .flags = 0,
2728 };
2729 struct tid_ampdu_rx *tid_agg_rx;
2730
2731 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2732 if (!tid_agg_rx)
2733 return;
2734
2735 spin_lock(&tid_agg_rx->reorder_lock);
2736 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
2737 spin_unlock(&tid_agg_rx->reorder_lock);
2738
2739 ieee80211_rx_handlers(&rx);
2740 }
2741
2742 /* main receive path */
2743
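/*
 * Decide whether the frame is of interest to this interface: return
 * 0 to ignore it for this interface, 1 to process it further.
 * IEEE80211_RX_RA_MATCH is cleared for frames that are only accepted
 * e.g. because of promiscuous mode or scanning, so that later
 * handlers which require an exact receiver-address match skip them.
 */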
2744 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2745 struct ieee80211_hdr *hdr)
2746 {
2747 struct ieee80211_sub_if_data *sdata = rx->sdata;
2748 struct sk_buff *skb = rx->skb;
2749 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2750 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2751 int multicast = is_multicast_ether_addr(hdr->addr1);
2752
2753 switch (sdata->vif.type) {
2754 case NL80211_IFTYPE_STATION:
2755 if (!bssid && !sdata->u.mgd.use_4addr)
2756 return 0;
2757 if (!multicast &&
2758 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2759 if (!(sdata->dev->flags & IFF_PROMISC) ||
2760 sdata->u.mgd.use_4addr)
2761 return 0;
2762 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2763 }
2764 break;
2765 case NL80211_IFTYPE_ADHOC:
2766 if (!bssid)
2767 return 0;
2768 if (ieee80211_is_beacon(hdr->frame_control)) {
2769 return 1;
2770 }
2771 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2772 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2773 return 0;
2774 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2775 } else if (!multicast &&
2776 compare_ether_addr(sdata->vif.addr,
2777 hdr->addr1) != 0) {
2778 if (!(sdata->dev->flags & IFF_PROMISC))
2779 return 0;
2780 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2781 } else if (!rx->sta) {
2782 int rate_idx;
2783 if (status->flag & RX_FLAG_HT)
2784 rate_idx = 0; /* TODO: HT rates */
2785 else
2786 rate_idx = status->rate_idx;
2787 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2788 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2789 }
2790 break;
2791 case NL80211_IFTYPE_MESH_POINT:
2792 if (!multicast &&
2793 compare_ether_addr(sdata->vif.addr,
2794 hdr->addr1) != 0) {
2795 if (!(sdata->dev->flags & IFF_PROMISC))
2796 return 0;
2797
2798 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2799 }
2800 break;
2801 case NL80211_IFTYPE_AP_VLAN:
2802 case NL80211_IFTYPE_AP:
2803 if (!bssid) {
2804 if (compare_ether_addr(sdata->vif.addr,
2805 hdr->addr1))
2806 return 0;
2807 } else if (!ieee80211_bssid_match(bssid,
2808 sdata->vif.addr)) {
2809 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2810 !ieee80211_is_beacon(hdr->frame_control) &&
2811 !(ieee80211_is_action(hdr->frame_control) &&
2812 sdata->vif.p2p))
2813 return 0;
2814 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2815 }
2816 break;
2817 case NL80211_IFTYPE_WDS:
2818 if (bssid || !ieee80211_is_data(hdr->frame_control))
2819 return 0;
2820 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2821 return 0;
2822 break;
2823 default:
2824 /* should never get here */
2825 WARN_ON(1);
2826 break;
2827 }
2828
2829 return 1;
2830 }
2831
2832 /*
2833 * This function returns whether or not the SKB
2834 * was destined for RX processing, which, if
2835 * consume is true, is equivalent to whether or
2836 * not the skb was consumed.
2837 */
2838 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2839 struct sk_buff *skb, bool consume)
2840 {
2841 struct ieee80211_local *local = rx->local;
2842 struct ieee80211_sub_if_data *sdata = rx->sdata;
2843 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2844 struct ieee80211_hdr *hdr = (void *)skb->data;
2845 int prepares;
2846
2847 rx->skb = skb;
2848 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2849 prepares = prepare_for_handlers(rx, hdr);
2850
2851 if (!prepares)
2852 return false;
2853
2854 if (!consume) {
2855 skb = skb_copy(skb, GFP_ATOMIC);
2856 if (!skb) {
2857 if (net_ratelimit())
2858 wiphy_debug(local->hw.wiphy,
2859 "failed to copy skb for %s\n",
2860 sdata->name);
2861 return true;
2862 }
2863
2864 rx->skb = skb;
2865 }
2866
2867 ieee80211_invoke_rx_handlers(rx);
2868 return true;
2869 }
2870
2871 /*
2872 * This is the actual Rx frame handler. As it belongs to the Rx path it must
2873 * be called with rcu_read_lock protection.
2874 */
2875 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2876 struct sk_buff *skb)
2877 {
2878 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2879 struct ieee80211_local *local = hw_to_local(hw);
2880 struct ieee80211_sub_if_data *sdata;
2881 struct ieee80211_hdr *hdr;
2882 __le16 fc;
2883 struct ieee80211_rx_data rx;
2884 struct ieee80211_sub_if_data *prev;
2885 struct sta_info *sta, *tmp, *prev_sta;
2886 int err = 0;
2887
2888 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2889 memset(&rx, 0, sizeof(rx));
2890 rx.skb = skb;
2891 rx.local = local;
2892
2893 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2894 local->dot11ReceivedFragmentCount++;
2895
2896 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2897 test_bit(SCAN_SW_SCANNING, &local->scanning)))
2898 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2899
2900 if (ieee80211_is_mgmt(fc))
2901 err = skb_linearize(skb);
2902 else
2903 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2904
2905 if (err) {
2906 dev_kfree_skb(skb);
2907 return;
2908 }
2909
2910 hdr = (struct ieee80211_hdr *)skb->data;
2911 ieee80211_parse_qos(&rx);
2912 ieee80211_verify_alignment(&rx);
2913
2914 if (ieee80211_is_data(fc)) {
2915 prev_sta = NULL;
2916
2917 for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
2918 if (!prev_sta) {
2919 prev_sta = sta;
2920 continue;
2921 }
2922
2923 rx.sta = prev_sta;
2924 rx.sdata = prev_sta->sdata;
2925 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2926
2927 prev_sta = sta;
2928 }
2929
2930 if (prev_sta) {
2931 rx.sta = prev_sta;
2932 rx.sdata = prev_sta->sdata;
2933
2934 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2935 return;
2936 goto out;
2937 }
2938 }
2939
2940 prev = NULL;
2941
2942 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2943 if (!ieee80211_sdata_running(sdata))
2944 continue;
2945
2946 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2947 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2948 continue;
2949
2950 /*
2951 * The frame is destined for this interface; handle the
2952 * previously matched interface now (with a copy) and keep
2953 * this one, so the last match after the loop avoids a copy.
2954 */
2955
2956 if (!prev) {
2957 prev = sdata;
2958 continue;
2959 }
2960
2961 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2962 rx.sdata = prev;
2963 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2964
2965 prev = sdata;
2966 }
2967
2968 if (prev) {
2969 rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2970 rx.sdata = prev;
2971
2972 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2973 return;
2974 }
2975
2976 out:
2977 dev_kfree_skb(skb);
2978 }
2979
2980 /*
2981 * This is the receive path handler. It is called by a low level driver when an
2982 * 802.11 MPDU is received from the hardware.
2983 */
2984 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2985 {
2986 struct ieee80211_local *local = hw_to_local(hw);
2987 struct ieee80211_rate *rate = NULL;
2988 struct ieee80211_supported_band *sband;
2989 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2990
2991 WARN_ON_ONCE(softirq_count() == 0);
2992
2993 if (WARN_ON(status->band < 0 ||
2994 status->band >= IEEE80211_NUM_BANDS))
2995 goto drop;
2996
2997 sband = local->hw.wiphy->bands[status->band];
2998 if (WARN_ON(!sband))
2999 goto drop;
3000
3001 /*
3002 * If we're suspending, it is possible although not too likely
3003 * that we'd be receiving frames after having already partially
3004 * quiesced the stack. We can't process such frames then since
3005 * that might, for example, cause stations to be added or other
3006 * driver callbacks be invoked.
3007 */
3008 if (unlikely(local->quiescing || local->suspended))
3009 goto drop;
3010
3011 /*
3012 * The same happens when we're not even started,
3013 * but that's worth a warning.
3014 */
3015 if (WARN_ON(!local->started))
3016 goto drop;
3017
3018 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
3019 /*
3020 * Validate the rate, unless a PLCP error means that
3021 * we probably can't have a valid rate here anyway.
3022 */
3023
3024 if (status->flag & RX_FLAG_HT) {
3025 /*
3026 * rate_idx is MCS index, which can be [0-76]
3027 * as documented on:
3028 *
3029 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
3030 *
3031 * Anything else would be some sort of driver or
3032 * hardware error. The driver should catch hardware
3033 * errors.
3034 */
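/*
 * (MCS 0-31 are the equal-modulation rates for one to four spatial
 * streams, 32 is the 40 MHz duplicate format and 33-76 are the
 * unequal-modulation combinations.)
 */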
3035 if (WARN((status->rate_idx < 0 ||
3036 status->rate_idx > 76),
3037 "Rate marked as an HT rate but passed "
3038 "status->rate_idx is not "
3039 "an MCS index [0-76]: %d (0x%02x)\n",
3040 status->rate_idx,
3041 status->rate_idx))
3042 goto drop;
3043 } else {
3044 if (WARN_ON(status->rate_idx < 0 ||
3045 status->rate_idx >= sband->n_bitrates))
3046 goto drop;
3047 rate = &sband->bitrates[status->rate_idx];
3048 }
3049 }
3050
3051 status->rx_flags = 0;
3052
3053 /*
3054 * key references and virtual interfaces are protected using RCU
3055 * and this requires that we are in a read-side RCU section during
3056 * receive processing
3057 */
3058 rcu_read_lock();
3059
3060 /*
3061 * Frames with a failed FCS/PLCP checksum are not returned;
3062 * all other frames are returned without the radiotap header
3063 * if one was previously present.
3064 * Also, frames shorter than 16 bytes are dropped.
3065 */
3066 skb = ieee80211_rx_monitor(local, skb, rate);
3067 if (!skb) {
3068 rcu_read_unlock();
3069 return;
3070 }
3071
3072 ieee80211_tpt_led_trig_rx(local,
3073 ((struct ieee80211_hdr *)skb->data)->frame_control,
3074 skb->len);
3075 __ieee80211_rx_handle_packet(hw, skb);
3076
3077 rcu_read_unlock();
3078
3079 return;
3080 drop:
3081 kfree_skb(skb);
3082 }
3083 EXPORT_SYMBOL(ieee80211_rx);
3084
3085 /* This is a version of the rx handler that can be called from hard irq
3086 * context. Post the skb on the queue and schedule the tasklet */
3087 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
3088 {
3089 struct ieee80211_local *local = hw_to_local(hw);
3090
3091 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
3092
3093 skb->pkt_type = IEEE80211_RX_MSG;
3094 skb_queue_tail(&local->skb_queue, skb);
3095 tasklet_schedule(&local->tasklet);
3096 }
3097 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
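
/*
 * Illustrative sketch (editorial, not part of mac80211): before calling
 * either receive entry point, a low level driver fills in the RX status
 * that lives in skb->cb via IEEE80211_SKB_RXCB(), for example:
 *
 *	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(rxs, 0, sizeof(*rxs));
 *	rxs->band = IEEE80211_BAND_2GHZ;
 *	rxs->freq = 2412;			// example values only
 *	rxs->rate_idx = 0;
 *	rxs->signal = -50;			// if IEEE80211_HW_SIGNAL_DBM
 *	ieee80211_rx_irqsafe(hw, skb);		// from hard irq context
 *
 * ieee80211_rx() may be used instead when the driver already runs in
 * tasklet/softirq context, as the WARN_ON_ONCE(softirq_count() == 0)
 * check above expects.
 */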