net/mac80211/rx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <net/mac80211.h>
20 #include <net/ieee80211_radiotap.h>
21
22 #include "ieee80211_i.h"
23 #include "driver-ops.h"
24 #include "led.h"
25 #include "mesh.h"
26 #include "wep.h"
27 #include "wpa.h"
28 #include "tkip.h"
29 #include "wme.h"
30
31 /*
32 * monitor mode reception
33 *
34 * This function cleans up the SKB, i.e. it removes all the stuff
35 * only useful for monitoring.
36 */
37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
38 struct sk_buff *skb)
39 {
40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
41 if (likely(skb->len > FCS_LEN))
42 __pskb_trim(skb, skb->len - FCS_LEN);
43 else {
44 /* driver bug */
45 WARN_ON(1);
46 dev_kfree_skb(skb);
47 skb = NULL;
48 }
49 }
50
51 return skb;
52 }
53
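/*
 * Decide whether a received frame is useless even for monitor interfaces:
 * frames with a failed FCS/PLCP check, frames shorter than a minimal
 * 16-byte header (plus FCS, if present), and control frames other than
 * PS-Poll and BlockAckReq are dropped.
 */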
54 static inline int should_drop_frame(struct sk_buff *skb,
55 int present_fcs_len)
56 {
57 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
58 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
59
60 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
61 return 1;
62 if (unlikely(skb->len < 16 + present_fcs_len))
63 return 1;
64 if (ieee80211_is_ctl(hdr->frame_control) &&
65 !ieee80211_is_pspoll(hdr->frame_control) &&
66 !ieee80211_is_back_req(hdr->frame_control))
67 return 1;
68 return 0;
69 }
70
71 static int
72 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
73 struct ieee80211_rx_status *status)
74 {
75 int len;
76
77 /* always present fields */
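	/* flags(1) + rate(1) + channel freq/flags(2+2) + antenna(1) + RX flags(2) = 9 */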
78 len = sizeof(struct ieee80211_radiotap_header) + 9;
79
80 if (status->flag & RX_FLAG_TSFT)
81 len += 8;
82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
83 len += 1;
84
85 if (len & 1) /* padding for RX_FLAGS if necessary */
86 len++;
87
88 return len;
89 }
90
91 /*
92 * ieee80211_add_rx_radiotap_header - add radiotap header
93 *
94 * add a radiotap header containing all the fields which the hardware provided.
95 */
96 static void
97 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
98 struct sk_buff *skb,
99 struct ieee80211_rate *rate,
100 int rtap_len)
101 {
102 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
103 struct ieee80211_radiotap_header *rthdr;
104 unsigned char *pos;
105 u16 rx_flags = 0;
106
107 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
108 memset(rthdr, 0, rtap_len);
109
110 /* radiotap header, set always present flags */
111 rthdr->it_present =
112 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
113 (1 << IEEE80211_RADIOTAP_CHANNEL) |
114 (1 << IEEE80211_RADIOTAP_ANTENNA) |
115 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
116 rthdr->it_len = cpu_to_le16(rtap_len);
117
118 pos = (unsigned char *)(rthdr+1);
119
120 	/* the order of the following fields is important: radiotap requires them in increasing it_present bit order */
121
122 /* IEEE80211_RADIOTAP_TSFT */
123 if (status->flag & RX_FLAG_TSFT) {
124 put_unaligned_le64(status->mactime, pos);
125 rthdr->it_present |=
126 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
127 pos += 8;
128 }
129
130 /* IEEE80211_RADIOTAP_FLAGS */
131 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
132 *pos |= IEEE80211_RADIOTAP_F_FCS;
133 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
134 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
135 if (status->flag & RX_FLAG_SHORTPRE)
136 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
137 pos++;
138
139 /* IEEE80211_RADIOTAP_RATE */
140 if (status->flag & RX_FLAG_HT) {
141 /*
142 * TODO: add following information into radiotap header once
143 * suitable fields are defined for it:
144 * - MCS index (status->rate_idx)
145 * - HT40 (status->flag & RX_FLAG_40MHZ)
146 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
147 */
148 *pos = 0;
149 } else {
150 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
151 *pos = rate->bitrate / 5;
152 }
153 pos++;
154
155 /* IEEE80211_RADIOTAP_CHANNEL */
156 put_unaligned_le16(status->freq, pos);
157 pos += 2;
158 if (status->band == IEEE80211_BAND_5GHZ)
159 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
160 pos);
161 else if (status->flag & RX_FLAG_HT)
162 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
163 pos);
164 else if (rate->flags & IEEE80211_RATE_ERP_G)
165 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
166 pos);
167 else
168 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
169 pos);
170 pos += 2;
171
172 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
173 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
174 *pos = status->signal;
175 rthdr->it_present |=
176 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
177 pos++;
178 }
179
180 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
181
182 /* IEEE80211_RADIOTAP_ANTENNA */
183 *pos = status->antenna;
184 pos++;
185
186 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
187
188 /* IEEE80211_RADIOTAP_RX_FLAGS */
189 /* ensure 2 byte alignment for the 2 byte field as required */
190 if ((pos - (u8 *)rthdr) & 1)
191 pos++;
192 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
193 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
194 put_unaligned_le16(rx_flags, pos);
195 pos += 2;
196 }
197
198 /*
199 * This function copies a received frame to all monitor interfaces and
200 * returns a cleaned-up SKB that no longer includes the FCS nor the
201 * radiotap header the driver might have added.
202 */
203 static struct sk_buff *
204 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
205 struct ieee80211_rate *rate)
206 {
207 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
208 struct ieee80211_sub_if_data *sdata;
209 int needed_headroom = 0;
210 struct sk_buff *skb, *skb2;
211 struct net_device *prev_dev = NULL;
212 int present_fcs_len = 0;
213
214 /*
215 * First, we may need to make a copy of the skb because
216 * (1) we need to modify it for radiotap (if not present), and
217 * (2) the other RX handlers will modify the skb we got.
218 *
219 * We don't need to, of course, if we aren't going to return
220 * the SKB because it has a bad FCS/PLCP checksum.
221 */
222
223 /* room for the radiotap header based on driver features */
224 needed_headroom = ieee80211_rx_radiotap_len(local, status);
225
226 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
227 present_fcs_len = FCS_LEN;
228
229 /* make sure hdr->frame_control is on the linear part */
230 if (!pskb_may_pull(origskb, 2)) {
231 dev_kfree_skb(origskb);
232 return NULL;
233 }
234
235 if (!local->monitors) {
236 if (should_drop_frame(origskb, present_fcs_len)) {
237 dev_kfree_skb(origskb);
238 return NULL;
239 }
240
241 return remove_monitor_info(local, origskb);
242 }
243
244 if (should_drop_frame(origskb, present_fcs_len)) {
245 /* only need to expand headroom if necessary */
246 skb = origskb;
247 origskb = NULL;
248
249 /*
250 * This shouldn't trigger often because most devices have an
251 * RX header they pull before we get here, and that should
252 * be big enough for our radiotap information. We should
253 * probably export the length to drivers so that we can have
254 * them allocate enough headroom to start with.
255 */
256 if (skb_headroom(skb) < needed_headroom &&
257 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
258 dev_kfree_skb(skb);
259 return NULL;
260 }
261 } else {
262 /*
263 * Need to make a copy and possibly remove radiotap header
264 * and FCS from the original.
265 */
266 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
267
268 origskb = remove_monitor_info(local, origskb);
269
270 if (!skb)
271 return origskb;
272 }
273
274 /* prepend radiotap information */
275 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
276
277 skb_reset_mac_header(skb);
278 skb->ip_summed = CHECKSUM_UNNECESSARY;
279 skb->pkt_type = PACKET_OTHERHOST;
280 skb->protocol = htons(ETH_P_802_2);
281
282 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
283 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
284 continue;
285
286 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
287 continue;
288
289 if (!ieee80211_sdata_running(sdata))
290 continue;
291
292 if (prev_dev) {
293 skb2 = skb_clone(skb, GFP_ATOMIC);
294 if (skb2) {
295 skb2->dev = prev_dev;
296 netif_receive_skb(skb2);
297 }
298 }
299
300 prev_dev = sdata->dev;
301 sdata->dev->stats.rx_packets++;
302 sdata->dev->stats.rx_bytes += skb->len;
303 }
304
305 if (prev_dev) {
306 skb->dev = prev_dev;
307 netif_receive_skb(skb);
308 } else
309 dev_kfree_skb(skb);
310
311 return origskb;
312 }
313
314
315 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
316 {
317 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
318 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
319 int tid;
320
321 /* does the frame have a qos control field? */
322 if (ieee80211_is_data_qos(hdr->frame_control)) {
323 u8 *qc = ieee80211_get_qos_ctl(hdr);
324 /* frame has qos control */
325 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
326 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
327 status->rx_flags |= IEEE80211_RX_AMSDU;
328 } else {
329 /*
330 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
331 *
332 * Sequence numbers for management frames, QoS data
333 * frames with a broadcast/multicast address in the
334 * Address 1 field, and all non-QoS data frames sent
335 * by QoS STAs are assigned using an additional single
336 * modulo-4096 counter, [...]
337 *
338 * We also use that counter for non-QoS STAs.
339 */
340 tid = NUM_RX_DATA_QUEUES - 1;
341 }
342
343 rx->queue = tid;
344 	/* Set skb->priority to the 802.1d tag if the highest order bit of the TID
345 	 * is not set. For now, set skb->priority to 0 for other cases. */
346 rx->skb->priority = (tid > 7) ? 0 : tid;
347 }
348
349 /**
350 * DOC: Packet alignment
351 *
352 * Drivers always need to pass packets that are aligned to two-byte boundaries
353 * to the stack.
354 *
355  * Additionally, drivers should, if possible, align the payload data in a way that
356 * guarantees that the contained IP header is aligned to a four-byte
357 * boundary. In the case of regular frames, this simply means aligning the
358 * payload to a four-byte boundary (because either the IP header is directly
359 * contained, or IV/RFC1042 headers that have a length divisible by four are
360 * in front of it). If the payload data is not properly aligned and the
361 * architecture doesn't support efficient unaligned operations, mac80211
362 * will align the data.
363 *
364 * With A-MSDU frames, however, the payload data address must yield two modulo
365 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
366 * push the IP header further back to a multiple of four again. Thankfully, the
367 * specs were sane enough this time around to require padding each A-MSDU
368 * subframe to a length that is a multiple of four.
369 *
370  * Padding like that added by Atheros hardware in between the 802.11 header and
371  * the payload is not supported; the driver is required to move the 802.11
372  * header so that it sits directly in front of the payload in that case.
373 */
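/*
 * Worked example: a QoS data frame has a 26-byte 802.11 header, and the
 * 8-byte CCMP IV plus the 8-byte RFC 1042 (LLC/SNAP) header in front of
 * the IP header are both multiples of four, so aligning the payload (the
 * first byte after the 802.11 header) to a four-byte boundary puts the
 * IP header at payload + 16, which is four-byte aligned as well.
 */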
374 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
375 {
376 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
377 WARN_ONCE((unsigned long)rx->skb->data & 1,
378 "unaligned packet at 0x%p\n", rx->skb->data);
379 #endif
380 }
381
382
383 /* rx handlers */
384
385 static ieee80211_rx_result debug_noinline
386 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
387 {
388 struct ieee80211_local *local = rx->local;
389 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
390 struct sk_buff *skb = rx->skb;
391
392 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
393 return RX_CONTINUE;
394
395 if (test_bit(SCAN_HW_SCANNING, &local->scanning))
396 return ieee80211_scan_rx(rx->sdata, skb);
397
398 if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
399 /* drop all the other packets during a software scan anyway */
400 if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
401 dev_kfree_skb(skb);
402 return RX_QUEUED;
403 }
404
405 	/* scanning finished while the RX handlers were running */
406 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
407 return RX_DROP_UNUSABLE;
408 }
409
410
411 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
412 {
413 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
414
415 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
416 return 0;
417
418 return ieee80211_is_robust_mgmt_frame(hdr);
419 }
420
421
422 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
423 {
424 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
425
426 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
427 return 0;
428
429 return ieee80211_is_robust_mgmt_frame(hdr);
430 }
431
432
433 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
434 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
435 {
436 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
437 struct ieee80211_mmie *mmie;
438
439 if (skb->len < 24 + sizeof(*mmie) ||
440 !is_multicast_ether_addr(hdr->da))
441 return -1;
442
443 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
444 return -1; /* not a robust management frame */
445
446 mmie = (struct ieee80211_mmie *)
447 (skb->data + skb->len - sizeof(*mmie));
448 if (mmie->element_id != WLAN_EID_MMIE ||
449 mmie->length != sizeof(*mmie) - 2)
450 return -1;
451
452 return le16_to_cpu(mmie->key_id);
453 }
454
455
456 static ieee80211_rx_result
457 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
458 {
459 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
460 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
461 char *dev_addr = rx->sdata->vif.addr;
462
463 if (ieee80211_is_data(hdr->frame_control)) {
464 if (is_multicast_ether_addr(hdr->addr1)) {
465 if (ieee80211_has_tods(hdr->frame_control) ||
466 !ieee80211_has_fromds(hdr->frame_control))
467 return RX_DROP_MONITOR;
468 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
469 return RX_DROP_MONITOR;
470 } else {
471 if (!ieee80211_has_a4(hdr->frame_control))
472 return RX_DROP_MONITOR;
473 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
474 return RX_DROP_MONITOR;
475 }
476 }
477
478 /* If there is not an established peer link and this is not a peer link
479 	 * establishment frame, beacon or probe, drop the frame.
480 */
481
482 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
483 struct ieee80211_mgmt *mgmt;
484
485 if (!ieee80211_is_mgmt(hdr->frame_control))
486 return RX_DROP_MONITOR;
487
488 if (ieee80211_is_action(hdr->frame_control)) {
489 mgmt = (struct ieee80211_mgmt *)hdr;
490 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
491 return RX_DROP_MONITOR;
492 return RX_CONTINUE;
493 }
494
495 if (ieee80211_is_probe_req(hdr->frame_control) ||
496 ieee80211_is_probe_resp(hdr->frame_control) ||
497 ieee80211_is_beacon(hdr->frame_control))
498 return RX_CONTINUE;
499
500 return RX_DROP_MONITOR;
501
502 }
503
504 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
505
506 if (ieee80211_is_data(hdr->frame_control) &&
507 is_multicast_ether_addr(hdr->addr1) &&
508 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
509 return RX_DROP_MONITOR;
510 #undef msh_h_get
511
512 return RX_CONTINUE;
513 }
514
515 #define SEQ_MODULO 0x1000
516 #define SEQ_MASK 0xfff
517
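/*
 * 802.11 sequence numbers are 12 bits wide, so all comparisons are done
 * modulo 4096.  seq_less(a, b) treats a as "before" b when (a - b) mod 4096
 * exceeds half the sequence space, e.g. seq_less(4090, 5) is true because
 * 4090 precedes 5 across the wrap-around.
 */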
518 static inline int seq_less(u16 sq1, u16 sq2)
519 {
520 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
521 }
522
523 static inline u16 seq_inc(u16 sq)
524 {
525 return (sq + 1) & SEQ_MASK;
526 }
527
528 static inline u16 seq_sub(u16 sq1, u16 sq2)
529 {
530 return (sq1 - sq2) & SEQ_MASK;
531 }
532
533
534 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
535 struct tid_ampdu_rx *tid_agg_rx,
536 int index,
537 struct sk_buff_head *frames)
538 {
539 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
540
541 if (!skb)
542 goto no_frame;
543
544 /* release the frame from the reorder ring buffer */
545 tid_agg_rx->stored_mpdu_num--;
546 tid_agg_rx->reorder_buf[index] = NULL;
547 __skb_queue_tail(frames, skb);
548
549 no_frame:
550 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
551 }
552
553 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
554 struct tid_ampdu_rx *tid_agg_rx,
555 u16 head_seq_num,
556 struct sk_buff_head *frames)
557 {
558 int index;
559
560 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
561 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
562 tid_agg_rx->buf_size;
563 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
564 }
565 }
566
567 /*
568 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
569 * the skb was added to the buffer longer than this time ago, the earlier
570 * frames that have not yet been received are assumed to be lost and the skb
571 * can be released for processing. This may also release other skb's from the
572 * reorder buffer if there are no additional gaps between the frames.
573 *
574 * Callers must hold tid_agg_rx->reorder_lock.
575 */
576 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
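/* HZ / 10 jiffies is roughly 100 ms regardless of the kernel's HZ setting */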
577
578 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
579 struct tid_ampdu_rx *tid_agg_rx,
580 struct sk_buff_head *frames)
581 {
582 int index, j;
583
584 /* release the buffer until next missing frame */
585 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
586 tid_agg_rx->buf_size;
587 if (!tid_agg_rx->reorder_buf[index] &&
588 tid_agg_rx->stored_mpdu_num > 1) {
589 /*
590 * No buffers ready to be released, but check whether any
591 * frames in the reorder buffer have timed out.
592 */
593 int skipped = 1;
594 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
595 j = (j + 1) % tid_agg_rx->buf_size) {
596 if (!tid_agg_rx->reorder_buf[j]) {
597 skipped++;
598 continue;
599 }
600 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
601 HT_RX_REORDER_BUF_TIMEOUT))
602 goto set_release_timer;
603
604 #ifdef CONFIG_MAC80211_HT_DEBUG
605 if (net_ratelimit())
606 wiphy_debug(hw->wiphy,
607 "release an RX reorder frame due to timeout on earlier frames\n");
608 #endif
609 ieee80211_release_reorder_frame(hw, tid_agg_rx,
610 j, frames);
611
612 /*
613 * Increment the head seq# also for the skipped slots.
614 */
615 tid_agg_rx->head_seq_num =
616 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
617 skipped = 0;
618 }
619 } else while (tid_agg_rx->reorder_buf[index]) {
620 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
621 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
622 tid_agg_rx->buf_size;
623 }
624
625 /*
626 * Disable the reorder release timer for now.
627 *
628 * The current implementation lacks a proper locking scheme
629 * which would protect vital statistic and debug counters
630 * from being updated by two different but concurrent BHs.
631 *
632 * More information about the topic is available from:
633 * - thread: http://marc.info/?t=128635927000001
634 *
635 * What was wrong:
636 * => http://marc.info/?l=linux-wireless&m=128636170811964
637 * "Basically the thing is that until your patch, the data
638 * in the struct didn't actually need locking because it
639 * was accessed by the RX path only which is not concurrent."
640 *
641 * List of what needs to be fixed:
642 * => http://marc.info/?l=linux-wireless&m=128656352920957
643 *
644
645 if (tid_agg_rx->stored_mpdu_num) {
646 j = index = seq_sub(tid_agg_rx->head_seq_num,
647 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
648
649 for (; j != (index - 1) % tid_agg_rx->buf_size;
650 j = (j + 1) % tid_agg_rx->buf_size) {
651 if (tid_agg_rx->reorder_buf[j])
652 break;
653 }
654
655 set_release_timer:
656
657 mod_timer(&tid_agg_rx->reorder_timer,
658 tid_agg_rx->reorder_time[j] +
659 HT_RX_REORDER_BUF_TIMEOUT);
660 } else {
661 del_timer(&tid_agg_rx->reorder_timer);
662 }
663 */
664
665 set_release_timer:
666 return;
667 }
668
669 /*
670 * As this function belongs to the RX path it must be under
671 * rcu_read_lock protection. It returns false if the frame
672 * can be processed immediately, true if it was consumed.
673 */
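/*
 * Five cases are handled below: frames with an out-of-date sequence number
 * are dropped, frames beyond the window first force the window forward,
 * duplicates of already-buffered frames are dropped, an in-order frame with
 * an empty buffer is passed through directly, and anything else is parked
 * in the reorder buffer.
 */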
674 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
675 struct tid_ampdu_rx *tid_agg_rx,
676 struct sk_buff *skb,
677 struct sk_buff_head *frames)
678 {
679 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
680 u16 sc = le16_to_cpu(hdr->seq_ctrl);
681 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
682 u16 head_seq_num, buf_size;
683 int index;
684 bool ret = true;
685
686 buf_size = tid_agg_rx->buf_size;
687 head_seq_num = tid_agg_rx->head_seq_num;
688
689 spin_lock(&tid_agg_rx->reorder_lock);
690 /* frame with out of date sequence number */
691 if (seq_less(mpdu_seq_num, head_seq_num)) {
692 dev_kfree_skb(skb);
693 goto out;
694 }
695
696 /*
697 	 * If the frame's sequence number exceeds our buffering window
698 	 * size, release some previous frames to make room for this one.
699 */
700 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
701 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
702 /* release stored frames up to new head to stack */
703 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
704 frames);
705 }
706
707 /* Now the new frame is always in the range of the reordering buffer */
708
709 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
710
711 /* check if we already stored this frame */
712 if (tid_agg_rx->reorder_buf[index]) {
713 dev_kfree_skb(skb);
714 goto out;
715 }
716
717 /*
718 * If the current MPDU is in the right order and nothing else
719 * is stored we can process it directly, no need to buffer it.
720 */
721 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
722 tid_agg_rx->stored_mpdu_num == 0) {
723 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
724 ret = false;
725 goto out;
726 }
727
728 /* put the frame in the reordering buffer */
729 tid_agg_rx->reorder_buf[index] = skb;
730 tid_agg_rx->reorder_time[index] = jiffies;
731 tid_agg_rx->stored_mpdu_num++;
732 ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
733
734 out:
735 spin_unlock(&tid_agg_rx->reorder_lock);
736 return ret;
737 }
738
739 /*
740  * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that can be
741  * processed right away are appended to the frames list; the rest are buffered.
742 */
743 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
744 struct sk_buff_head *frames)
745 {
746 struct sk_buff *skb = rx->skb;
747 struct ieee80211_local *local = rx->local;
748 struct ieee80211_hw *hw = &local->hw;
749 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
750 struct sta_info *sta = rx->sta;
751 struct tid_ampdu_rx *tid_agg_rx;
752 u16 sc;
753 int tid;
754
755 if (!ieee80211_is_data_qos(hdr->frame_control))
756 goto dont_reorder;
757
758 /*
759 * filter the QoS data rx stream according to
760 * STA/TID and check if this STA/TID is on aggregation
761 */
762
763 if (!sta)
764 goto dont_reorder;
765
766 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
767
768 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
769 if (!tid_agg_rx)
770 goto dont_reorder;
771
772 /* qos null data frames are excluded */
773 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
774 goto dont_reorder;
775
776 /* new, potentially un-ordered, ampdu frame - process it */
777
778 /* reset session timer */
779 if (tid_agg_rx->timeout)
780 mod_timer(&tid_agg_rx->session_timer,
781 TU_TO_EXP_TIME(tid_agg_rx->timeout));
782
783 /* if this mpdu is fragmented - terminate rx aggregation session */
784 sc = le16_to_cpu(hdr->seq_ctrl);
785 if (sc & IEEE80211_SCTL_FRAG) {
786 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
787 skb_queue_tail(&rx->sdata->skb_queue, skb);
788 ieee80211_queue_work(&local->hw, &rx->sdata->work);
789 return;
790 }
791
792 /*
793 * No locking needed -- we will only ever process one
794 * RX packet at a time, and thus own tid_agg_rx. All
795 * other code manipulating it needs to (and does) make
796 * sure that we cannot get to it any more before doing
797 * anything with it.
798 */
799 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
800 return;
801
802 dont_reorder:
803 __skb_queue_tail(frames, skb);
804 }
805
806 static ieee80211_rx_result debug_noinline
807 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
808 {
809 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
810 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
811
812 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
813 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
814 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
815 rx->sta->last_seq_ctrl[rx->queue] ==
816 hdr->seq_ctrl)) {
817 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
818 rx->local->dot11FrameDuplicateCount++;
819 rx->sta->num_duplicates++;
820 }
821 return RX_DROP_MONITOR;
822 } else
823 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
824 }
825
826 if (unlikely(rx->skb->len < 16)) {
827 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
828 return RX_DROP_MONITOR;
829 }
830
831 /* Drop disallowed frame classes based on STA auth/assoc state;
832 * IEEE 802.11, Chap 5.5.
833 *
834 * mac80211 filters only based on association state, i.e. it drops
835 	 * Class 3 frames from stations that are not associated. hostapd sends
836 * deauth/disassoc frames when needed. In addition, hostapd is
837 * responsible for filtering on both auth and assoc states.
838 */
839
840 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
841 return ieee80211_rx_mesh_check(rx);
842
843 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
844 ieee80211_is_pspoll(hdr->frame_control)) &&
845 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
846 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
847 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
848 if ((!ieee80211_has_fromds(hdr->frame_control) &&
849 !ieee80211_has_tods(hdr->frame_control) &&
850 ieee80211_is_data(hdr->frame_control)) ||
851 !(status->rx_flags & IEEE80211_RX_RA_MATCH)) {
852 /* Drop IBSS frames and frames for other hosts
853 * silently. */
854 return RX_DROP_MONITOR;
855 }
856
857 return RX_DROP_MONITOR;
858 }
859
860 return RX_CONTINUE;
861 }
862
863
864 static ieee80211_rx_result debug_noinline
865 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
866 {
867 struct sk_buff *skb = rx->skb;
868 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
869 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
870 int keyidx;
871 int hdrlen;
872 ieee80211_rx_result result = RX_DROP_UNUSABLE;
873 struct ieee80211_key *sta_ptk = NULL;
874 int mmie_keyidx = -1;
875 __le16 fc;
876
877 /*
878 * Key selection 101
879 *
880 * There are four types of keys:
881 * - GTK (group keys)
882 * - IGTK (group keys for management frames)
883 * - PTK (pairwise keys)
884 * - STK (station-to-station pairwise keys)
885 *
886 * When selecting a key, we have to distinguish between multicast
887 * (including broadcast) and unicast frames, the latter can only
888 * use PTKs and STKs while the former always use GTKs and IGTKs.
889 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
890 * unicast frames can also use key indices like GTKs. Hence, if we
891 * don't have a PTK/STK we check the key index for a WEP key.
892 *
893 * Note that in a regular BSS, multicast frames are sent by the
894 * AP only, associated stations unicast the frame to the AP first
895 * which then multicasts it on their behalf.
896 *
897 * There is also a slight problem in IBSS mode: GTKs are negotiated
898 * with each station, that is something we don't currently handle.
899 * The spec seems to expect that one negotiates the same key with
900 * every station but there's no such requirement; VLANs could be
901 * possible.
902 */
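/*
 * The selection below therefore goes: pairwise key for unicast frames,
 * IGTK (by MMIE key index) for group addressed robust management frames,
 * the default (management) key when the frame is unprotected (noted only
 * so the frame can be dropped if protection was expected), and otherwise
 * the GTK/WEP key named by the key index found in the IV.
 */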
903
904 /*
905 * No point in finding a key and decrypting if the frame is neither
906 * addressed to us nor a multicast frame.
907 */
908 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
909 return RX_CONTINUE;
910
911 /* start without a key */
912 rx->key = NULL;
913
914 if (rx->sta)
915 sta_ptk = rcu_dereference(rx->sta->ptk);
916
917 fc = hdr->frame_control;
918
919 if (!ieee80211_has_protected(fc))
920 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
921
922 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
923 rx->key = sta_ptk;
924 if ((status->flag & RX_FLAG_DECRYPTED) &&
925 (status->flag & RX_FLAG_IV_STRIPPED))
926 return RX_CONTINUE;
927 /* Skip decryption if the frame is not protected. */
928 if (!ieee80211_has_protected(fc))
929 return RX_CONTINUE;
930 } else if (mmie_keyidx >= 0) {
931 /* Broadcast/multicast robust management frame / BIP */
932 if ((status->flag & RX_FLAG_DECRYPTED) &&
933 (status->flag & RX_FLAG_IV_STRIPPED))
934 return RX_CONTINUE;
935
936 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
937 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
938 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
939 if (rx->sta)
940 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
941 if (!rx->key)
942 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
943 } else if (!ieee80211_has_protected(fc)) {
944 /*
945 * The frame was not protected, so skip decryption. However, we
946 * need to set rx->key if there is a key that could have been
947 * used so that the frame may be dropped if encryption would
948 * have been expected.
949 */
950 struct ieee80211_key *key = NULL;
951 if (ieee80211_is_mgmt(fc) &&
952 is_multicast_ether_addr(hdr->addr1) &&
953 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
954 rx->key = key;
955 else if ((key = rcu_dereference(rx->sdata->default_key)))
956 rx->key = key;
957 return RX_CONTINUE;
958 } else {
959 u8 keyid;
960 /*
961 * The device doesn't give us the IV so we won't be
962 * able to look up the key. That's ok though, we
963 * don't need to decrypt the frame, we just won't
964 * be able to keep statistics accurate.
965 * Except for key threshold notifications, should
966 * we somehow allow the driver to tell us which key
967 * the hardware used if this flag is set?
968 */
969 if ((status->flag & RX_FLAG_DECRYPTED) &&
970 (status->flag & RX_FLAG_IV_STRIPPED))
971 return RX_CONTINUE;
972
973 hdrlen = ieee80211_hdrlen(fc);
974
975 if (rx->skb->len < 8 + hdrlen)
976 return RX_DROP_UNUSABLE; /* TODO: count this? */
977
978 /*
979 * no need to call ieee80211_wep_get_keyidx,
980 * it verifies a bunch of things we've done already
981 */
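		/*
		 * The key index sits in the top two bits of the fourth IV
		 * octet, which follows directly after the 802.11 header for
		 * WEP, TKIP and CCMP alike.
		 */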
982 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
983 keyidx = keyid >> 6;
984
985 /* check per-station GTK first, if multicast packet */
986 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
987 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
988
989 /* if not found, try default key */
990 if (!rx->key) {
991 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
992
993 /*
994 * RSNA-protected unicast frames should always be
995 * sent with pairwise or station-to-station keys,
996 * but for WEP we allow using a key index as well.
997 */
998 if (rx->key &&
999 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1000 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1001 !is_multicast_ether_addr(hdr->addr1))
1002 rx->key = NULL;
1003 }
1004 }
1005
1006 if (rx->key) {
1007 rx->key->tx_rx_count++;
1008 /* TODO: add threshold stuff again */
1009 } else {
1010 return RX_DROP_MONITOR;
1011 }
1012
1013 if (skb_linearize(rx->skb))
1014 return RX_DROP_UNUSABLE;
1015 /* the hdr variable is invalid now! */
1016
1017 switch (rx->key->conf.cipher) {
1018 case WLAN_CIPHER_SUITE_WEP40:
1019 case WLAN_CIPHER_SUITE_WEP104:
1020 /* Check for weak IVs if possible */
1021 if (rx->sta && ieee80211_is_data(fc) &&
1022 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1023 !(status->flag & RX_FLAG_DECRYPTED)) &&
1024 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1025 rx->sta->wep_weak_iv_count++;
1026
1027 result = ieee80211_crypto_wep_decrypt(rx);
1028 break;
1029 case WLAN_CIPHER_SUITE_TKIP:
1030 result = ieee80211_crypto_tkip_decrypt(rx);
1031 break;
1032 case WLAN_CIPHER_SUITE_CCMP:
1033 result = ieee80211_crypto_ccmp_decrypt(rx);
1034 break;
1035 case WLAN_CIPHER_SUITE_AES_CMAC:
1036 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1037 break;
1038 default:
1039 /*
1040 * We can reach here only with HW-only algorithms
1041 * but why didn't it decrypt the frame?!
1042 */
1043 return RX_DROP_UNUSABLE;
1044 }
1045
1046 /* either the frame has been decrypted or will be dropped */
1047 status->flag |= RX_FLAG_DECRYPTED;
1048
1049 return result;
1050 }
1051
1052 static ieee80211_rx_result debug_noinline
1053 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1054 {
1055 struct ieee80211_local *local;
1056 struct ieee80211_hdr *hdr;
1057 struct sk_buff *skb;
1058
1059 local = rx->local;
1060 skb = rx->skb;
1061 hdr = (struct ieee80211_hdr *) skb->data;
1062
1063 if (!local->pspolling)
1064 return RX_CONTINUE;
1065
1066 if (!ieee80211_has_fromds(hdr->frame_control))
1067 /* this is not from AP */
1068 return RX_CONTINUE;
1069
1070 if (!ieee80211_is_data(hdr->frame_control))
1071 return RX_CONTINUE;
1072
1073 if (!ieee80211_has_moredata(hdr->frame_control)) {
1074 /* AP has no more frames buffered for us */
1075 local->pspolling = false;
1076 return RX_CONTINUE;
1077 }
1078
1079 /* more data bit is set, let's request a new frame from the AP */
1080 ieee80211_send_pspoll(local, rx->sdata);
1081
1082 return RX_CONTINUE;
1083 }
1084
1085 static void ap_sta_ps_start(struct sta_info *sta)
1086 {
1087 struct ieee80211_sub_if_data *sdata = sta->sdata;
1088 struct ieee80211_local *local = sdata->local;
1089
1090 atomic_inc(&sdata->bss->num_sta_ps);
1091 set_sta_flags(sta, WLAN_STA_PS_STA);
1092 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1093 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1094 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1095 sdata->name, sta->sta.addr, sta->sta.aid);
1096 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1097 }
1098
1099 static void ap_sta_ps_end(struct sta_info *sta)
1100 {
1101 struct ieee80211_sub_if_data *sdata = sta->sdata;
1102
1103 atomic_dec(&sdata->bss->num_sta_ps);
1104
1105 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1106 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1107 sdata->name, sta->sta.addr, sta->sta.aid);
1108 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1109
1110 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1111 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1112 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1113 sdata->name, sta->sta.addr, sta->sta.aid);
1114 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1115 return;
1116 }
1117
1118 ieee80211_sta_ps_deliver_wakeup(sta);
1119 }
1120
1121 static ieee80211_rx_result debug_noinline
1122 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1123 {
1124 struct sta_info *sta = rx->sta;
1125 struct sk_buff *skb = rx->skb;
1126 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1127 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1128
1129 if (!sta)
1130 return RX_CONTINUE;
1131
1132 /*
1133 * Update last_rx only for IBSS packets which are for the current
1134 * BSSID to avoid keeping the current IBSS network alive in cases
1135 * where other STAs start using different BSSID.
1136 */
1137 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1138 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1139 NL80211_IFTYPE_ADHOC);
1140 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
1141 sta->last_rx = jiffies;
1142 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1143 /*
1144 		 * Mesh beacons will update last_rx if they are found to
1145 * match the current local configuration when processed.
1146 */
1147 sta->last_rx = jiffies;
1148 }
1149
1150 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1151 return RX_CONTINUE;
1152
1153 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1154 ieee80211_sta_rx_notify(rx->sdata, hdr);
1155
1156 sta->rx_fragments++;
1157 sta->rx_bytes += rx->skb->len;
1158 sta->last_signal = status->signal;
1159
1160 /*
1161 * Change STA power saving mode only at the end of a frame
1162 * exchange sequence.
1163 */
1164 if (!ieee80211_has_morefrags(hdr->frame_control) &&
1165 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1166 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1167 if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
1168 /*
1169 * Ignore doze->wake transitions that are
1170 * indicated by non-data frames, the standard
1171 * is unclear here, but for example going to
1172 * PS mode and then scanning would cause a
1173 * doze->wake transition for the probe request,
1174 * and that is clearly undesirable.
1175 */
1176 if (ieee80211_is_data(hdr->frame_control) &&
1177 !ieee80211_has_pm(hdr->frame_control))
1178 ap_sta_ps_end(sta);
1179 } else {
1180 if (ieee80211_has_pm(hdr->frame_control))
1181 ap_sta_ps_start(sta);
1182 }
1183 }
1184
1185 /*
1186 * Drop (qos-)data::nullfunc frames silently, since they
1187 * are used only to control station power saving mode.
1188 */
1189 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1190 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1191 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1192
1193 /*
1194 * If we receive a 4-addr nullfunc frame from a STA
1195 * that was not moved to a 4-addr STA vlan yet, drop
1196 * the frame to the monitor interface, to make sure
1197 * that hostapd sees it
1198 */
1199 if (ieee80211_has_a4(hdr->frame_control) &&
1200 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1201 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1202 !rx->sdata->u.vlan.sta)))
1203 return RX_DROP_MONITOR;
1204 /*
1205 * Update counter and free packet here to avoid
1206 		 * counting this as a dropped packet.
1207 */
1208 sta->rx_packets++;
1209 dev_kfree_skb(rx->skb);
1210 return RX_QUEUED;
1211 }
1212
1213 return RX_CONTINUE;
1214 } /* ieee80211_rx_h_sta_process */
1215
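/*
 * Defragmentation cache: each interface keeps IEEE80211_FRAGMENT_MAX
 * entries in a small ring.  An entry is matched on sequence number, RX
 * queue and the addr1/addr2 pair, and is purged once its first fragment
 * is more than two seconds old.
 */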
1216 static inline struct ieee80211_fragment_entry *
1217 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1218 unsigned int frag, unsigned int seq, int rx_queue,
1219 struct sk_buff **skb)
1220 {
1221 struct ieee80211_fragment_entry *entry;
1222 int idx;
1223
1224 idx = sdata->fragment_next;
1225 entry = &sdata->fragments[sdata->fragment_next++];
1226 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1227 sdata->fragment_next = 0;
1228
1229 if (!skb_queue_empty(&entry->skb_list)) {
1230 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1231 struct ieee80211_hdr *hdr =
1232 (struct ieee80211_hdr *) entry->skb_list.next->data;
1233 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1234 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1235 "addr1=%pM addr2=%pM\n",
1236 sdata->name, idx,
1237 jiffies - entry->first_frag_time, entry->seq,
1238 entry->last_frag, hdr->addr1, hdr->addr2);
1239 #endif
1240 __skb_queue_purge(&entry->skb_list);
1241 }
1242
1243 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1244 *skb = NULL;
1245 entry->first_frag_time = jiffies;
1246 entry->seq = seq;
1247 entry->rx_queue = rx_queue;
1248 entry->last_frag = frag;
1249 entry->ccmp = 0;
1250 entry->extra_len = 0;
1251
1252 return entry;
1253 }
1254
1255 static inline struct ieee80211_fragment_entry *
1256 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1257 unsigned int frag, unsigned int seq,
1258 int rx_queue, struct ieee80211_hdr *hdr)
1259 {
1260 struct ieee80211_fragment_entry *entry;
1261 int i, idx;
1262
1263 idx = sdata->fragment_next;
1264 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1265 struct ieee80211_hdr *f_hdr;
1266
1267 idx--;
1268 if (idx < 0)
1269 idx = IEEE80211_FRAGMENT_MAX - 1;
1270
1271 entry = &sdata->fragments[idx];
1272 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1273 entry->rx_queue != rx_queue ||
1274 entry->last_frag + 1 != frag)
1275 continue;
1276
1277 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1278
1279 /*
1280 * Check ftype and addresses are equal, else check next fragment
1281 */
1282 if (((hdr->frame_control ^ f_hdr->frame_control) &
1283 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1284 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1285 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1286 continue;
1287
1288 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1289 __skb_queue_purge(&entry->skb_list);
1290 continue;
1291 }
1292 return entry;
1293 }
1294
1295 return NULL;
1296 }
1297
1298 static ieee80211_rx_result debug_noinline
1299 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1300 {
1301 struct ieee80211_hdr *hdr;
1302 u16 sc;
1303 __le16 fc;
1304 unsigned int frag, seq;
1305 struct ieee80211_fragment_entry *entry;
1306 struct sk_buff *skb;
1307 struct ieee80211_rx_status *status;
1308
1309 hdr = (struct ieee80211_hdr *)rx->skb->data;
1310 fc = hdr->frame_control;
1311 sc = le16_to_cpu(hdr->seq_ctrl);
1312 frag = sc & IEEE80211_SCTL_FRAG;
1313
1314 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1315 (rx->skb)->len < 24 ||
1316 is_multicast_ether_addr(hdr->addr1))) {
1317 /* not fragmented */
1318 goto out;
1319 }
1320 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1321
1322 if (skb_linearize(rx->skb))
1323 return RX_DROP_UNUSABLE;
1324
1325 /*
1326 * skb_linearize() might change the skb->data and
1327 * previously cached variables (in this case, hdr) need to
1328 * be refreshed with the new data.
1329 */
1330 hdr = (struct ieee80211_hdr *)rx->skb->data;
1331 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1332
1333 if (frag == 0) {
1334 /* This is the first fragment of a new frame. */
1335 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1336 rx->queue, &(rx->skb));
1337 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1338 ieee80211_has_protected(fc)) {
1339 int queue = ieee80211_is_mgmt(fc) ?
1340 NUM_RX_DATA_QUEUES : rx->queue;
1341 /* Store CCMP PN so that we can verify that the next
1342 * fragment has a sequential PN value. */
1343 entry->ccmp = 1;
1344 memcpy(entry->last_pn,
1345 rx->key->u.ccmp.rx_pn[queue],
1346 CCMP_PN_LEN);
1347 }
1348 return RX_QUEUED;
1349 }
1350
1351 /* This is a fragment for a frame that should already be pending in
1352 * fragment cache. Add this fragment to the end of the pending entry.
1353 */
1354 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1355 if (!entry) {
1356 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1357 return RX_DROP_MONITOR;
1358 }
1359
1360 /* Verify that MPDUs within one MSDU have sequential PN values.
1361 * (IEEE 802.11i, 8.3.3.4.5) */
1362 if (entry->ccmp) {
1363 int i;
1364 u8 pn[CCMP_PN_LEN], *rpn;
1365 int queue;
1366 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1367 return RX_DROP_UNUSABLE;
1368 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1369 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1370 pn[i]++;
1371 if (pn[i])
1372 break;
1373 }
1374 queue = ieee80211_is_mgmt(fc) ?
1375 NUM_RX_DATA_QUEUES : rx->queue;
1376 rpn = rx->key->u.ccmp.rx_pn[queue];
1377 if (memcmp(pn, rpn, CCMP_PN_LEN))
1378 return RX_DROP_UNUSABLE;
1379 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1380 }
1381
1382 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1383 __skb_queue_tail(&entry->skb_list, rx->skb);
1384 entry->last_frag = frag;
1385 entry->extra_len += rx->skb->len;
1386 if (ieee80211_has_morefrags(fc)) {
1387 rx->skb = NULL;
1388 return RX_QUEUED;
1389 }
1390
1391 rx->skb = __skb_dequeue(&entry->skb_list);
1392 if (skb_tailroom(rx->skb) < entry->extra_len) {
1393 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1394 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1395 GFP_ATOMIC))) {
1396 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1397 __skb_queue_purge(&entry->skb_list);
1398 return RX_DROP_UNUSABLE;
1399 }
1400 }
1401 while ((skb = __skb_dequeue(&entry->skb_list))) {
1402 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1403 dev_kfree_skb(skb);
1404 }
1405
1406 /* Complete frame has been reassembled - process it now */
1407 status = IEEE80211_SKB_RXCB(rx->skb);
1408 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1409
1410 out:
1411 if (rx->sta)
1412 rx->sta->rx_packets++;
1413 if (is_multicast_ether_addr(hdr->addr1))
1414 rx->local->dot11MulticastReceivedFrameCount++;
1415 else
1416 ieee80211_led_rx(rx->local);
1417 return RX_CONTINUE;
1418 }
1419
1420 static ieee80211_rx_result debug_noinline
1421 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1422 {
1423 struct ieee80211_sub_if_data *sdata = rx->sdata;
1424 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1425 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1426
1427 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1428 !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
1429 return RX_CONTINUE;
1430
1431 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1432 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1433 return RX_DROP_UNUSABLE;
1434
1435 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1436 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1437 else
1438 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1439
1440 /* Free PS Poll skb here instead of returning RX_DROP that would
1441 	 * count as a dropped frame. */
1442 dev_kfree_skb(rx->skb);
1443
1444 return RX_QUEUED;
1445 }
1446
1447 static ieee80211_rx_result debug_noinline
1448 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1449 {
1450 u8 *data = rx->skb->data;
1451 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1452
1453 if (!ieee80211_is_data_qos(hdr->frame_control))
1454 return RX_CONTINUE;
1455
1456 /* remove the qos control field, update frame type and meta-data */
1457 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1458 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1459 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1460 /* change frame type to non QOS */
1461 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1462
1463 return RX_CONTINUE;
1464 }
1465
1466 static int
1467 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1468 {
1469 if (unlikely(!rx->sta ||
1470 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1471 return -EACCES;
1472
1473 return 0;
1474 }
1475
1476 static int
1477 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1478 {
1479 struct sk_buff *skb = rx->skb;
1480 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1481
1482 /*
1483 * Pass through unencrypted frames if the hardware has
1484 * decrypted them already.
1485 */
1486 if (status->flag & RX_FLAG_DECRYPTED)
1487 return 0;
1488
1489 /* Drop unencrypted frames if key is set. */
1490 if (unlikely(!ieee80211_has_protected(fc) &&
1491 !ieee80211_is_nullfunc(fc) &&
1492 ieee80211_is_data(fc) &&
1493 (rx->key || rx->sdata->drop_unencrypted)))
1494 return -EACCES;
1495
1496 return 0;
1497 }
1498
1499 static int
1500 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1501 {
1502 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1503 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1504 __le16 fc = hdr->frame_control;
1505
1506 /*
1507 * Pass through unencrypted frames if the hardware has
1508 * decrypted them already.
1509 */
1510 if (status->flag & RX_FLAG_DECRYPTED)
1511 return 0;
1512
1513 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1514 if (unlikely(!ieee80211_has_protected(fc) &&
1515 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1516 rx->key))
1517 return -EACCES;
1518 /* BIP does not use Protected field, so need to check MMIE */
1519 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1520 ieee80211_get_mmie_keyidx(rx->skb) < 0))
1521 return -EACCES;
1522 /*
1523 * When using MFP, Action frames are not allowed prior to
1524 * having configured keys.
1525 */
1526 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1527 ieee80211_is_robust_mgmt_frame(
1528 (struct ieee80211_hdr *) rx->skb->data)))
1529 return -EACCES;
1530 }
1531
1532 return 0;
1533 }
1534
1535 static int
1536 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1537 {
1538 struct ieee80211_sub_if_data *sdata = rx->sdata;
1539 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1540
1541 if (ieee80211_has_a4(hdr->frame_control) &&
1542 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1543 return -1;
1544
1545 if (is_multicast_ether_addr(hdr->addr1) &&
1546 ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
1547 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1548 return -1;
1549
1550 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1551 }
1552
1553 /*
1554 * requires that rx->skb is a frame with ethernet header
1555 */
1556 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1557 {
1558 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1559 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1560 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1561
1562 /*
1563 * Allow EAPOL frames to us/the PAE group address regardless
1564 * of whether the frame was encrypted or not.
1565 */
1566 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1567 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1568 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1569 return true;
1570
1571 if (ieee80211_802_1x_port_control(rx) ||
1572 ieee80211_drop_unencrypted(rx, fc))
1573 return false;
1574
1575 return true;
1576 }
1577
1578 /*
1579 * requires that rx->skb is a frame with ethernet header
1580 */
1581 static void
1582 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1583 {
1584 struct ieee80211_sub_if_data *sdata = rx->sdata;
1585 struct net_device *dev = sdata->dev;
1586 struct sk_buff *skb, *xmit_skb;
1587 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1588 struct sta_info *dsta;
1589 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1590
1591 skb = rx->skb;
1592 xmit_skb = NULL;
1593
1594 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1595 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1596 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1597 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1598 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1599 if (is_multicast_ether_addr(ehdr->h_dest)) {
1600 /*
1601 * send multicast frames both to higher layers in
1602 * local net stack and back to the wireless medium
1603 */
1604 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1605 if (!xmit_skb && net_ratelimit())
1606 printk(KERN_DEBUG "%s: failed to clone "
1607 "multicast frame\n", dev->name);
1608 } else {
1609 dsta = sta_info_get(sdata, skb->data);
1610 if (dsta) {
1611 /*
1612 * The destination station is associated to
1613 * this AP (in this VLAN), so send the frame
1614 * directly to it and do not pass it to local
1615 * net stack.
1616 */
1617 xmit_skb = skb;
1618 skb = NULL;
1619 }
1620 }
1621 }
1622
1623 if (skb) {
1624 int align __maybe_unused;
1625
1626 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1627 /*
1628 * 'align' will only take the values 0 or 2 here
1629 * since all frames are required to be aligned
1630 * to 2-byte boundaries when being passed to
1631 * mac80211. That also explains the __skb_push()
1632 * below.
1633 */
1634 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1635 if (align) {
1636 if (WARN_ON(skb_headroom(skb) < 3)) {
1637 dev_kfree_skb(skb);
1638 skb = NULL;
1639 } else {
1640 u8 *data = skb->data;
1641 size_t len = skb_headlen(skb);
1642 skb->data -= align;
1643 memmove(skb->data, data, len);
1644 skb_set_tail_pointer(skb, len);
1645 }
1646 }
1647 #endif
1648
1649 if (skb) {
1650 /* deliver to local stack */
1651 skb->protocol = eth_type_trans(skb, dev);
1652 memset(skb->cb, 0, sizeof(skb->cb));
1653 netif_receive_skb(skb);
1654 }
1655 }
1656
1657 if (xmit_skb) {
1658 /* send to wireless media */
1659 xmit_skb->protocol = htons(ETH_P_802_3);
1660 skb_reset_network_header(xmit_skb);
1661 skb_reset_mac_header(xmit_skb);
1662 dev_queue_xmit(xmit_skb);
1663 }
1664 }
1665
1666 static ieee80211_rx_result debug_noinline
1667 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1668 {
1669 struct net_device *dev = rx->sdata->dev;
1670 struct sk_buff *skb = rx->skb;
1671 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1672 __le16 fc = hdr->frame_control;
1673 struct sk_buff_head frame_list;
1674 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1675
1676 if (unlikely(!ieee80211_is_data(fc)))
1677 return RX_CONTINUE;
1678
1679 if (unlikely(!ieee80211_is_data_present(fc)))
1680 return RX_DROP_MONITOR;
1681
1682 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1683 return RX_CONTINUE;
1684
1685 if (ieee80211_has_a4(hdr->frame_control) &&
1686 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1687 !rx->sdata->u.vlan.sta)
1688 return RX_DROP_UNUSABLE;
1689
1690 if (is_multicast_ether_addr(hdr->addr1) &&
1691 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1692 rx->sdata->u.vlan.sta) ||
1693 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1694 rx->sdata->u.mgd.use_4addr)))
1695 return RX_DROP_UNUSABLE;
1696
1697 skb->dev = dev;
1698 __skb_queue_head_init(&frame_list);
1699
1700 if (skb_linearize(skb))
1701 return RX_DROP_UNUSABLE;
1702
1703 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1704 rx->sdata->vif.type,
1705 rx->local->hw.extra_tx_headroom);
1706
1707 while (!skb_queue_empty(&frame_list)) {
1708 rx->skb = __skb_dequeue(&frame_list);
1709
1710 if (!ieee80211_frame_allowed(rx, fc)) {
1711 dev_kfree_skb(rx->skb);
1712 continue;
1713 }
1714 dev->stats.rx_packets++;
1715 dev->stats.rx_bytes += rx->skb->len;
1716
1717 ieee80211_deliver_skb(rx);
1718 }
1719
1720 return RX_QUEUED;
1721 }
1722
1723 #ifdef CONFIG_MAC80211_MESH
1724 static ieee80211_rx_result
1725 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1726 {
1727 struct ieee80211_hdr *hdr;
1728 struct ieee80211s_hdr *mesh_hdr;
1729 unsigned int hdrlen;
1730 struct sk_buff *skb = rx->skb, *fwd_skb;
1731 struct ieee80211_local *local = rx->local;
1732 struct ieee80211_sub_if_data *sdata = rx->sdata;
1733 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1734
1735 hdr = (struct ieee80211_hdr *) skb->data;
1736 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1737 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1738
1739 if (!ieee80211_is_data(hdr->frame_control))
1740 return RX_CONTINUE;
1741
1742 if (!mesh_hdr->ttl)
1743 /* illegal frame */
1744 return RX_DROP_MONITOR;
1745
1746 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1747 struct mesh_path *mppath;
1748 char *proxied_addr;
1749 char *mpp_addr;
1750
1751 if (is_multicast_ether_addr(hdr->addr1)) {
1752 mpp_addr = hdr->addr3;
1753 proxied_addr = mesh_hdr->eaddr1;
1754 } else {
1755 mpp_addr = hdr->addr4;
1756 proxied_addr = mesh_hdr->eaddr2;
1757 }
1758
1759 rcu_read_lock();
1760 mppath = mpp_path_lookup(proxied_addr, sdata);
1761 if (!mppath) {
1762 mpp_path_add(proxied_addr, mpp_addr, sdata);
1763 } else {
1764 spin_lock_bh(&mppath->state_lock);
1765 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1766 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1767 spin_unlock_bh(&mppath->state_lock);
1768 }
1769 rcu_read_unlock();
1770 }
1771
1772 /* Frame has reached destination. Don't forward */
1773 if (!is_multicast_ether_addr(hdr->addr1) &&
1774 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1775 return RX_CONTINUE;
1776
1777 mesh_hdr->ttl--;
1778
1779 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1780 if (!mesh_hdr->ttl)
1781 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1782 dropped_frames_ttl);
1783 else {
1784 struct ieee80211_hdr *fwd_hdr;
1785 struct ieee80211_tx_info *info;
1786
1787 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1788
			if (!fwd_skb) {
				if (net_ratelimit())
					printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
					       sdata->name);
				return RX_DROP_MONITOR;
			}

			fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1794 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1795 info = IEEE80211_SKB_CB(fwd_skb);
1796 memset(info, 0, sizeof(*info));
1797 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1798 info->control.vif = &rx->sdata->vif;
1799 skb_set_queue_mapping(fwd_skb,
1800 ieee80211_select_queue(rx->sdata, fwd_skb));
1801 ieee80211_set_qos_hdr(local, fwd_skb);
1802 if (is_multicast_ether_addr(fwd_hdr->addr1))
1803 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1804 fwded_mcast);
1805 else {
1806 int err;
1807 /*
1808 * Save TA to addr1 to send TA a path error if a
1809 * suitable next hop is not found
1810 */
1811 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1812 ETH_ALEN);
1813 err = mesh_nexthop_lookup(fwd_skb, sdata);
1814 /* Failed to immediately resolve next hop:
1815 * fwded frame was dropped or will be added
1816 * later to the pending skb queue. */
1817 if (err)
1818 return RX_DROP_MONITOR;
1819
1820 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1821 fwded_unicast);
1822 }
1823 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1824 fwded_frames);
1825 ieee80211_add_pending_skb(local, fwd_skb);
1826 }
1827 }
1828
 out:
1829 if (is_multicast_ether_addr(hdr->addr1) ||
1830 sdata->dev->flags & IFF_PROMISC)
1831 return RX_CONTINUE;
1832 else
1833 return RX_DROP_MONITOR;
1834 }
1835 #endif
1836
1837 static ieee80211_rx_result debug_noinline
1838 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1839 {
1840 struct ieee80211_sub_if_data *sdata = rx->sdata;
1841 struct ieee80211_local *local = rx->local;
1842 struct net_device *dev = sdata->dev;
1843 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1844 __le16 fc = hdr->frame_control;
1845 int err;
1846
1847 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1848 return RX_CONTINUE;
1849
1850 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1851 return RX_DROP_MONITOR;
1852
1853 /*
1854 * Allow the cooked monitor interface of an AP to see 4-addr frames so
1855 * that a 4-addr station can be detected and moved into a separate VLAN
1856 */
1857 if (ieee80211_has_a4(hdr->frame_control) &&
1858 sdata->vif.type == NL80211_IFTYPE_AP)
1859 return RX_DROP_MONITOR;
1860
1861 err = __ieee80211_data_to_8023(rx);
1862 if (unlikely(err))
1863 return RX_DROP_UNUSABLE;
1864
1865 if (!ieee80211_frame_allowed(rx, fc))
1866 return RX_DROP_MONITOR;
1867
1868 rx->skb->dev = dev;
1869
1870 dev->stats.rx_packets++;
1871 dev->stats.rx_bytes += rx->skb->len;
1872
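	/*
	 * Unicast data traffic indicates an active link; push the
	 * dynamic power save timeout further into the future.
	 */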
1873 if (ieee80211_is_data(hdr->frame_control) &&
1874 !is_multicast_ether_addr(hdr->addr1) &&
1875 local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
1876 mod_timer(&local->dynamic_ps_timer, jiffies +
1877 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1878 }
1879
1880 ieee80211_deliver_skb(rx);
1881
1882 return RX_QUEUED;
1883 }
1884
1885 static ieee80211_rx_result debug_noinline
1886 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1887 {
1888 struct ieee80211_local *local = rx->local;
1889 struct ieee80211_hw *hw = &local->hw;
1890 struct sk_buff *skb = rx->skb;
1891 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1892 struct tid_ampdu_rx *tid_agg_rx;
1893 u16 start_seq_num;
1894 u16 tid;
1895
1896 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1897 return RX_CONTINUE;
1898
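	/*
	 * A Block Ack Request flushes the reorder buffer of the
	 * corresponding TID up to its starting sequence number.
	 */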
1899 if (ieee80211_is_back_req(bar->frame_control)) {
1900 struct {
1901 __le16 control, start_seq_num;
1902 } __packed bar_data;
1903
1904 if (!rx->sta)
1905 return RX_DROP_MONITOR;
1906
1907 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
1908 &bar_data, sizeof(bar_data)))
1909 return RX_DROP_MONITOR;
1910
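	/* the TID is carried in bits 12-15 of the BAR control field */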
1911 tid = le16_to_cpu(bar_data.control) >> 12;
1912
1913 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
1914 if (!tid_agg_rx)
1915 return RX_DROP_MONITOR;
1916
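	/* bits 4-15 hold the starting sequence number, bits 0-3 the fragment number */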
1917 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
1918
1919 /* reset session timer */
1920 if (tid_agg_rx->timeout)
1921 mod_timer(&tid_agg_rx->session_timer,
1922 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1923
1924 /* release stored frames up to start of BAR */
1925 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
1926 frames);
1927 kfree_skb(skb);
1928 return RX_QUEUED;
1929 }
1930
1931 /*
1932 * After this point, we only want management frames,
1933 * so we can drop all remaining control frames to
1934 * cooked monitor interfaces.
1935 */
1936 return RX_DROP_MONITOR;
1937 }
1938
1939 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1940 struct ieee80211_mgmt *mgmt,
1941 size_t len)
1942 {
1943 struct ieee80211_local *local = sdata->local;
1944 struct sk_buff *skb;
1945 struct ieee80211_mgmt *resp;
1946
1947 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
1948 /* Not to own unicast address */
1949 return;
1950 }
1951
1952 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1953 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1954 /* Not from the current AP or not associated yet. */
1955 return;
1956 }
1957
1958 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1959 /* Too short SA Query request frame */
1960 return;
1961 }
1962
1963 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1964 if (skb == NULL)
1965 return;
1966
1967 skb_reserve(skb, local->hw.extra_tx_headroom);
1968 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1969 memset(resp, 0, 24);
1970 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1971 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
1972 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1973 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1974 IEEE80211_STYPE_ACTION);
1975 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
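	/*
	 * Fill in the SA Query Response action fields, echoing the
	 * transaction identifier from the request.
	 */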
1976 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1977 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1978 memcpy(resp->u.action.u.sa_query.trans_id,
1979 mgmt->u.action.u.sa_query.trans_id,
1980 WLAN_SA_QUERY_TR_ID_LEN);
1981
1982 ieee80211_tx_skb(sdata, skb);
1983 }
1984
1985 static ieee80211_rx_result debug_noinline
1986 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
1987 {
1988 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1989 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1990
1991 /*
1992 * From here on, look only at management frames.
1993 * Data and control frames are already handled,
1994 * and unknown (reserved) frames are useless.
1995 */
1996 if (rx->skb->len < 24)
1997 return RX_DROP_MONITOR;
1998
1999 if (!ieee80211_is_mgmt(mgmt->frame_control))
2000 return RX_DROP_MONITOR;
2001
2002 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2003 return RX_DROP_MONITOR;
2004
2005 if (ieee80211_drop_unencrypted_mgmt(rx))
2006 return RX_DROP_UNUSABLE;
2007
2008 return RX_CONTINUE;
2009 }
2010
2011 static ieee80211_rx_result debug_noinline
2012 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2013 {
2014 struct ieee80211_local *local = rx->local;
2015 struct ieee80211_sub_if_data *sdata = rx->sdata;
2016 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2017 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2018 int len = rx->skb->len;
2019
2020 if (!ieee80211_is_action(mgmt->frame_control))
2021 return RX_CONTINUE;
2022
2023 /* drop too small frames */
2024 if (len < IEEE80211_MIN_ACTION_SIZE)
2025 return RX_DROP_UNUSABLE;
2026
2027 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2028 return RX_DROP_UNUSABLE;
2029
2030 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2031 return RX_DROP_UNUSABLE;
2032
2033 switch (mgmt->u.action.category) {
2034 case WLAN_CATEGORY_BACK:
2035 /*
2036 * The aggregation code is not prepared to handle
2037 * anything but STA/AP due to the BSSID handling;
2038 * IBSS could work in the code but isn't supported
2039 * by drivers or the standard.
2040 */
2041 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2042 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2043 sdata->vif.type != NL80211_IFTYPE_AP)
2044 break;
2045
2046 /* verify action_code is present */
2047 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2048 break;
2049
2050 switch (mgmt->u.action.u.addba_req.action_code) {
2051 case WLAN_ACTION_ADDBA_REQ:
2052 if (len < (IEEE80211_MIN_ACTION_SIZE +
2053 sizeof(mgmt->u.action.u.addba_req)))
2054 goto invalid;
2055 break;
2056 case WLAN_ACTION_ADDBA_RESP:
2057 if (len < (IEEE80211_MIN_ACTION_SIZE +
2058 sizeof(mgmt->u.action.u.addba_resp)))
2059 goto invalid;
2060 break;
2061 case WLAN_ACTION_DELBA:
2062 if (len < (IEEE80211_MIN_ACTION_SIZE +
2063 sizeof(mgmt->u.action.u.delba)))
2064 goto invalid;
2065 break;
2066 default:
2067 goto invalid;
2068 }
2069
2070 goto queue;
2071 case WLAN_CATEGORY_SPECTRUM_MGMT:
2072 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2073 break;
2074
2075 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2076 break;
2077
2078 /* verify action_code is present */
2079 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2080 break;
2081
2082 switch (mgmt->u.action.u.measurement.action_code) {
2083 case WLAN_ACTION_SPCT_MSR_REQ:
2084 if (len < (IEEE80211_MIN_ACTION_SIZE +
2085 sizeof(mgmt->u.action.u.measurement)))
2086 break;
2087 ieee80211_process_measurement_req(sdata, mgmt, len);
2088 goto handled;
2089 case WLAN_ACTION_SPCT_CHL_SWITCH:
2090 if (len < (IEEE80211_MIN_ACTION_SIZE +
2091 sizeof(mgmt->u.action.u.chan_switch)))
2092 break;
2093
2094 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2095 break;
2096
2097 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2098 break;
2099
2100 goto queue;
2101 }
2102 break;
2103 case WLAN_CATEGORY_SA_QUERY:
2104 if (len < (IEEE80211_MIN_ACTION_SIZE +
2105 sizeof(mgmt->u.action.u.sa_query)))
2106 break;
2107
2108 switch (mgmt->u.action.u.sa_query.action) {
2109 case WLAN_ACTION_SA_QUERY_REQUEST:
2110 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2111 break;
2112 ieee80211_process_sa_query_req(sdata, mgmt, len);
2113 goto handled;
2114 }
2115 break;
2116 case WLAN_CATEGORY_MESH_PLINK:
2117 case WLAN_CATEGORY_MESH_PATH_SEL:
2118 if (!ieee80211_vif_is_mesh(&sdata->vif))
2119 break;
2120 goto queue;
2121 }
2122
2123 return RX_CONTINUE;
2124
2125 invalid:
2126 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2127 /* will return in the next handlers */
2128 return RX_CONTINUE;
2129
2130 handled:
2131 if (rx->sta)
2132 rx->sta->rx_packets++;
2133 dev_kfree_skb(rx->skb);
2134 return RX_QUEUED;
2135
2136 queue:
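	/*
	 * Defer the frame to the interface's work queue so it is
	 * handled in process context by the sdata work function.
	 */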
2137 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2138 skb_queue_tail(&sdata->skb_queue, rx->skb);
2139 ieee80211_queue_work(&local->hw, &sdata->work);
2140 if (rx->sta)
2141 rx->sta->rx_packets++;
2142 return RX_QUEUED;
2143 }
2144
2145 static ieee80211_rx_result debug_noinline
2146 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2147 {
2148 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2149
2150 /* skip known-bad action frames and return them in the next handler */
2151 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2152 return RX_CONTINUE;
2153
2154 /*
2155 * Getting here means the kernel doesn't know how to handle
2156 * it, but maybe userspace does ... include returned frames
2157 * so userspace can register for those to know whether the ones
2158 * it transmitted were processed or returned.
2159 */
2160
2161 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2162 rx->skb->data, rx->skb->len,
2163 GFP_ATOMIC)) {
2164 if (rx->sta)
2165 rx->sta->rx_packets++;
2166 dev_kfree_skb(rx->skb);
2167 return RX_QUEUED;
2168 }
2169
2171 return RX_CONTINUE;
2172 }
2173
2174 static ieee80211_rx_result debug_noinline
2175 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2176 {
2177 struct ieee80211_local *local = rx->local;
2178 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2179 struct sk_buff *nskb;
2180 struct ieee80211_sub_if_data *sdata = rx->sdata;
2181 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2182
2183 if (!ieee80211_is_action(mgmt->frame_control))
2184 return RX_CONTINUE;
2185
2186 /*
2187 * For AP mode, hostapd is responsible for handling any action
2188 * frames that we didn't handle, including returning unknown
2189 * ones. For all other modes we will return them to the sender,
2190 * setting the 0x80 bit in the action category, as required by
2191 * 802.11-2007 7.3.1.11.
2192 * Newer versions of hostapd use the management frame
2193 * registration mechanisms, but older ones still use cooked
2194 * monitor interfaces, so push all frames there.
2195 */
2196 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2197 (sdata->vif.type == NL80211_IFTYPE_AP ||
2198 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2199 return RX_DROP_MONITOR;
2200
2201 /* do not return rejected action frames */
2202 if (mgmt->u.action.category & 0x80)
2203 return RX_DROP_UNUSABLE;
2204
2205 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2206 GFP_ATOMIC);
2207 if (nskb) {
2208 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2209
2210 nmgmt->u.action.category |= 0x80;
2211 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2212 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2213
2214 memset(nskb->cb, 0, sizeof(nskb->cb));
2215
2216 ieee80211_tx_skb(rx->sdata, nskb);
2217 }
2218 dev_kfree_skb(rx->skb);
2219 return RX_QUEUED;
2220 }
2221
2222 static ieee80211_rx_result debug_noinline
2223 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2224 {
2225 struct ieee80211_sub_if_data *sdata = rx->sdata;
2226 ieee80211_rx_result rxs;
2227 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2228 __le16 stype;
2229
2230 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2231 if (rxs != RX_CONTINUE)
2232 return rxs;
2233
2234 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2235
2236 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2237 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2238 sdata->vif.type != NL80211_IFTYPE_STATION)
2239 return RX_DROP_MONITOR;
2240
2241 switch (stype) {
2242 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2243 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2244 /* process for all: mesh, mlme, ibss */
2245 break;
2246 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2247 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2248 if (is_multicast_ether_addr(mgmt->da) &&
2249 !is_broadcast_ether_addr(mgmt->da))
2250 return RX_DROP_MONITOR;
2251
2252 /* process only for station */
2253 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2254 return RX_DROP_MONITOR;
2255 break;
2256 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2257 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2258 /* process only for ibss */
2259 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2260 return RX_DROP_MONITOR;
2261 break;
2262 default:
2263 return RX_DROP_MONITOR;
2264 }
2265
2266 /* queue up frame and kick off work to process it */
2267 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2268 skb_queue_tail(&sdata->skb_queue, rx->skb);
2269 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2270 if (rx->sta)
2271 rx->sta->rx_packets++;
2272
2273 return RX_QUEUED;
2274 }
2275
2276 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2277 struct ieee80211_rx_data *rx)
2278 {
2279 int keyidx;
2280 unsigned int hdrlen;
2281
2282 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2283 if (rx->skb->len >= hdrlen + 4)
2284 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2285 else
2286 keyidx = -1;
2287
2288 if (!rx->sta) {
2289 /*
2290 * Some hardware seems to generate incorrect Michael MIC
2291 * reports; ignore them to avoid triggering countermeasures.
2292 */
2293 return;
2294 }
2295
2296 if (!ieee80211_has_protected(hdr->frame_control))
2297 return;
2298
2299 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2300 /*
2301 * APs with pairwise keys should never receive Michael MIC
2302 * errors for non-zero keyidx because these are reserved for
2303 * group keys and only the AP is sending real multicast
2304 * frames in the BSS.
2305 */
2306 return;
2307 }
2308
2309 if (!ieee80211_is_data(hdr->frame_control) &&
2310 !ieee80211_is_auth(hdr->frame_control))
2311 return;
2312
2313 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2314 GFP_ATOMIC);
2315 }
2316
2317 /* TODO: use IEEE80211_RX_FRAGMENTED */
2318 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2319 struct ieee80211_rate *rate)
2320 {
2321 struct ieee80211_sub_if_data *sdata;
2322 struct ieee80211_local *local = rx->local;
2323 struct ieee80211_rtap_hdr {
2324 struct ieee80211_radiotap_header hdr;
2325 u8 flags;
2326 u8 rate_or_pad;
2327 __le16 chan_freq;
2328 __le16 chan_flags;
2329 } __packed *rthdr;
2330 struct sk_buff *skb = rx->skb, *skb2;
2331 struct net_device *prev_dev = NULL;
2332 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2333
2334 /*
2335 * If cooked monitor has been processed already, then
2336 * don't do it again. If not, set the flag.
2337 */
2338 if (rx->flags & IEEE80211_RX_CMNTR)
2339 goto out_free_skb;
2340 rx->flags |= IEEE80211_RX_CMNTR;
2341
2342 if (skb_headroom(skb) < sizeof(*rthdr) &&
2343 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2344 goto out_free_skb;
2345
2346 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2347 memset(rthdr, 0, sizeof(*rthdr));
2348 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2349 rthdr->hdr.it_present =
2350 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2351 (1 << IEEE80211_RADIOTAP_CHANNEL));
2352
2353 if (rate) {
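		/* radiotap rates are in units of 500 kbps, bitrate is in 100 kbps */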
2354 rthdr->rate_or_pad = rate->bitrate / 5;
2355 rthdr->hdr.it_present |=
2356 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2357 }
2358 rthdr->chan_freq = cpu_to_le16(status->freq);
2359
2360 if (status->band == IEEE80211_BAND_5GHZ)
2361 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2362 IEEE80211_CHAN_5GHZ);
2363 else
2364 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2365 IEEE80211_CHAN_2GHZ);
2366
2367 skb_set_mac_header(skb, 0);
2368 skb->ip_summed = CHECKSUM_UNNECESSARY;
2369 skb->pkt_type = PACKET_OTHERHOST;
2370 skb->protocol = htons(ETH_P_802_2);
2371
2372 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2373 if (!ieee80211_sdata_running(sdata))
2374 continue;
2375
2376 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2377 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2378 continue;
2379
2380 if (prev_dev) {
2381 skb2 = skb_clone(skb, GFP_ATOMIC);
2382 if (skb2) {
2383 skb2->dev = prev_dev;
2384 netif_receive_skb(skb2);
2385 }
2386 }
2387
2388 prev_dev = sdata->dev;
2389 sdata->dev->stats.rx_packets++;
2390 sdata->dev->stats.rx_bytes += skb->len;
2391 }
2392
2393 if (prev_dev) {
2394 skb->dev = prev_dev;
2395 netif_receive_skb(skb);
2396 return;
2397 }
2398
2399 out_free_skb:
2400 dev_kfree_skb(skb);
2401 }
2402
2403 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2404 ieee80211_rx_result res)
2405 {
2406 switch (res) {
2407 case RX_DROP_MONITOR:
2408 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2409 if (rx->sta)
2410 rx->sta->rx_dropped++;
2411 /* fall through */
2412 case RX_CONTINUE: {
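		/*
		 * Both RX_DROP_MONITOR and frames that fell through all
		 * handlers (RX_CONTINUE) end up on cooked monitor interfaces.
		 */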
2413 struct ieee80211_rate *rate = NULL;
2414 struct ieee80211_supported_band *sband;
2415 struct ieee80211_rx_status *status;
2416
2417 status = IEEE80211_SKB_RXCB(rx->skb);
2418
2419 sband = rx->local->hw.wiphy->bands[status->band];
2420 if (!(status->flag & RX_FLAG_HT))
2421 rate = &sband->bitrates[status->rate_idx];
2422
2423 ieee80211_rx_cooked_monitor(rx, rate);
2424 break;
2425 }
2426 case RX_DROP_UNUSABLE:
2427 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2428 if (rx->sta)
2429 rx->sta->rx_dropped++;
2430 dev_kfree_skb(rx->skb);
2431 break;
2432 case RX_QUEUED:
2433 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2434 break;
2435 }
2436 }
2437
2438 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
2439 struct sk_buff_head *frames)
2440 {
2441 ieee80211_rx_result res = RX_DROP_MONITOR;
2442 struct sk_buff *skb;
2443
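	/*
	 * Run a handler and stop processing this frame as soon as one
	 * returns anything other than RX_CONTINUE.
	 */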
2444 #define CALL_RXH(rxh) \
2445 do { \
2446 res = rxh(rx); \
2447 if (res != RX_CONTINUE) \
2448 goto rxh_next; \
2449 } while (0);
2450
2451 while ((skb = __skb_dequeue(frames))) {
2452 /*
2453 * All the other fields are valid across frames
2454 * that belong to an A-MPDU since they are on the
2455 * same TID from the same station.
2456 */
2457 rx->skb = skb;
2458 rx->flags = 0;
2459
2460 CALL_RXH(ieee80211_rx_h_decrypt)
2461 CALL_RXH(ieee80211_rx_h_check_more_data)
2462 CALL_RXH(ieee80211_rx_h_sta_process)
2463 CALL_RXH(ieee80211_rx_h_defragment)
2464 CALL_RXH(ieee80211_rx_h_ps_poll)
2465 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2466 /* must be after MMIC verify so header is counted in MPDU mic */
2467 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2468 CALL_RXH(ieee80211_rx_h_amsdu)
2469 #ifdef CONFIG_MAC80211_MESH
2470 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2471 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2472 #endif
2473 CALL_RXH(ieee80211_rx_h_data)
2474
2475 /* special treatment -- needs the queue */
2476 res = ieee80211_rx_h_ctrl(rx, frames);
2477 if (res != RX_CONTINUE)
2478 goto rxh_next;
2479
2480 CALL_RXH(ieee80211_rx_h_mgmt_check)
2481 CALL_RXH(ieee80211_rx_h_action)
2482 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2483 CALL_RXH(ieee80211_rx_h_action_return)
2484 CALL_RXH(ieee80211_rx_h_mgmt)
2485
2486 rxh_next:
2487 ieee80211_rx_handlers_result(rx, res);
2488
2489 #undef CALL_RXH
2490 }
2491 }
2492
2493 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2494 {
2495 struct sk_buff_head reorder_release;
2496 ieee80211_rx_result res = RX_DROP_MONITOR;
2497
2498 __skb_queue_head_init(&reorder_release);
2499
2500 #define CALL_RXH(rxh) \
2501 do { \
2502 res = rxh(rx); \
2503 if (res != RX_CONTINUE) \
2504 goto rxh_next; \
2505 } while (0);
2506
2507 CALL_RXH(ieee80211_rx_h_passive_scan)
2508 CALL_RXH(ieee80211_rx_h_check)
2509
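	/*
	 * A-MPDU reordering may buffer the frame or release previously
	 * buffered frames; everything released is processed below.
	 */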
2510 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2511
2512 ieee80211_rx_handlers(rx, &reorder_release);
2513 return;
2514
2515 rxh_next:
2516 ieee80211_rx_handlers_result(rx, res);
2517
2518 #undef CALL_RXH
2519 }
2520
2521 /*
2522 * This function makes calls into the RX path. Therefore the
2523 * caller must hold the sta_info->lock and everything has to
2524 * be under rcu_read_lock protection as well.
2525 */
2526 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2527 {
2528 struct sk_buff_head frames;
2529 struct ieee80211_rx_data rx = {
2530 .sta = sta,
2531 .sdata = sta->sdata,
2532 .local = sta->local,
2533 .queue = tid,
2534 };
2535 struct tid_ampdu_rx *tid_agg_rx;
2536
2537 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2538 if (!tid_agg_rx)
2539 return;
2540
2541 __skb_queue_head_init(&frames);
2542
2543 spin_lock(&tid_agg_rx->reorder_lock);
2544 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
2545 spin_unlock(&tid_agg_rx->reorder_lock);
2546
2547 ieee80211_rx_handlers(&rx, &frames);
2548 }
2549
2550 /* main receive path */
2551
2552 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2553 struct ieee80211_hdr *hdr)
2554 {
2555 struct ieee80211_sub_if_data *sdata = rx->sdata;
2556 struct sk_buff *skb = rx->skb;
2557 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2558 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2559 int multicast = is_multicast_ether_addr(hdr->addr1);
2560
2561 switch (sdata->vif.type) {
2562 case NL80211_IFTYPE_STATION:
2563 if (!bssid && !sdata->u.mgd.use_4addr)
2564 return 0;
2565 if (!multicast &&
2566 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2567 if (!(sdata->dev->flags & IFF_PROMISC))
2568 return 0;
2569 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2570 }
2571 break;
2572 case NL80211_IFTYPE_ADHOC:
2573 if (!bssid)
2574 return 0;
2575 if (ieee80211_is_beacon(hdr->frame_control)) {
2576 return 1;
2577 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2579 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2580 return 0;
2581 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2582 } else if (!multicast &&
2583 compare_ether_addr(sdata->vif.addr,
2584 hdr->addr1) != 0) {
2585 if (!(sdata->dev->flags & IFF_PROMISC))
2586 return 0;
2587 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2588 } else if (!rx->sta) {
2589 int rate_idx;
2590 if (status->flag & RX_FLAG_HT)
2591 rate_idx = 0; /* TODO: HT rates */
2592 else
2593 rate_idx = status->rate_idx;
2594 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2595 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2596 }
2597 break;
2598 case NL80211_IFTYPE_MESH_POINT:
2599 if (!multicast &&
2600 compare_ether_addr(sdata->vif.addr,
2601 hdr->addr1) != 0) {
2602 if (!(sdata->dev->flags & IFF_PROMISC))
2603 return 0;
2604
2605 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2606 }
2607 break;
2608 case NL80211_IFTYPE_AP_VLAN:
2609 case NL80211_IFTYPE_AP:
2610 if (!bssid) {
2611 if (compare_ether_addr(sdata->vif.addr,
2612 hdr->addr1))
2613 return 0;
2614 } else if (!ieee80211_bssid_match(bssid,
2615 sdata->vif.addr)) {
2616 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2617 return 0;
2618 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2619 }
2620 break;
2621 case NL80211_IFTYPE_WDS:
2622 if (bssid || !ieee80211_is_data(hdr->frame_control))
2623 return 0;
2624 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2625 return 0;
2626 break;
2627 default:
2628 /* should never get here */
2629 WARN_ON(1);
2630 break;
2631 }
2632
2633 return 1;
2634 }
2635
2636 /*
2637 * This function returns whether the SKB was destined
2638 * for RX processing, which, if consume is true,
2639 * is equivalent to whether the skb was consumed.
2641 */
2642 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2643 struct sk_buff *skb, bool consume)
2644 {
2645 struct ieee80211_local *local = rx->local;
2646 struct ieee80211_sub_if_data *sdata = rx->sdata;
2647 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2648 struct ieee80211_hdr *hdr = (void *)skb->data;
2649 int prepares;
2650
2651 rx->skb = skb;
2652 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2653 prepares = prepare_for_handlers(rx, hdr);
2654
2655 if (!prepares)
2656 return false;
2657
2658 if (status->flag & RX_FLAG_MMIC_ERROR) {
2659 if (status->rx_flags & IEEE80211_RX_RA_MATCH)
2660 ieee80211_rx_michael_mic_report(hdr, rx);
2661 return false;
2662 }
2663
2664 if (!consume) {
2665 skb = skb_copy(skb, GFP_ATOMIC);
2666 if (!skb) {
2667 if (net_ratelimit())
2668 wiphy_debug(local->hw.wiphy,
2669 "failed to copy multicast frame for %s\n",
2670 sdata->name);
2671 return true;
2672 }
2673
2674 rx->skb = skb;
2675 }
2676
2677 ieee80211_invoke_rx_handlers(rx);
2678 return true;
2679 }
2680
2681 /*
2682 * This is the actual Rx frame handler. As it belongs to the Rx path it must
2683 * be called with rcu_read_lock protection.
2684 */
2685 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2686 struct sk_buff *skb)
2687 {
2688 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2689 struct ieee80211_local *local = hw_to_local(hw);
2690 struct ieee80211_sub_if_data *sdata;
2691 struct ieee80211_hdr *hdr;
2692 __le16 fc;
2693 struct ieee80211_rx_data rx;
2694 struct ieee80211_sub_if_data *prev;
2695 struct sta_info *sta, *tmp, *prev_sta;
2696 int err = 0;
2697
2698 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2699 memset(&rx, 0, sizeof(rx));
2700 rx.skb = skb;
2701 rx.local = local;
2702
2703 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2704 local->dot11ReceivedFragmentCount++;
2705
2706 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2707 test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
2708 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2709
2710 if (ieee80211_is_mgmt(fc))
2711 err = skb_linearize(skb);
2712 else
2713 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2714
2715 if (err) {
2716 dev_kfree_skb(skb);
2717 return;
2718 }
2719
2720 hdr = (struct ieee80211_hdr *)skb->data;
2721 ieee80211_parse_qos(&rx);
2722 ieee80211_verify_alignment(&rx);
2723
2724 if (ieee80211_is_data(fc)) {
2725 prev_sta = NULL;
2726
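		/*
		 * Hand the frame to every station entry matching the
		 * transmitter address; all but the last match receive a
		 * copy, the last one consumes the original SKB.
		 */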
2727 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2728 if (!prev_sta) {
2729 prev_sta = sta;
2730 continue;
2731 }
2732
2733 rx.sta = prev_sta;
2734 rx.sdata = prev_sta->sdata;
2735 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2736
2737 prev_sta = sta;
2738 }
2739
2740 if (prev_sta) {
2741 rx.sta = prev_sta;
2742 rx.sdata = prev_sta->sdata;
2743
2744 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2745 return;
2746 goto out;
2747 }
2748 }
2749
2750 prev = NULL;
2751
2752 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2753 if (!ieee80211_sdata_running(sdata))
2754 continue;
2755
2756 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2757 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2758 continue;
2759
2760 /*
2761 * The frame is destined for this interface; handle the
2762 * previous match now (with a copy) and defer this one, so
2763 * that the last match can consume the SKB instead of copying it.
2764 */
2765
2766 if (!prev) {
2767 prev = sdata;
2768 continue;
2769 }
2770
2771 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2772 rx.sdata = prev;
2773 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2774
2775 prev = sdata;
2776 }
2777
2778 if (prev) {
2779 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2780 rx.sdata = prev;
2781
2782 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2783 return;
2784 }
2785
2786 out:
2787 dev_kfree_skb(skb);
2788 }
2789
2790 /*
2791 * This is the receive path handler. It is called by a low level driver when an
2792 * 802.11 MPDU is received from the hardware.
2793 */
2794 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2795 {
2796 struct ieee80211_local *local = hw_to_local(hw);
2797 struct ieee80211_rate *rate = NULL;
2798 struct ieee80211_supported_band *sband;
2799 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2800
2801 WARN_ON_ONCE(softirq_count() == 0);
2802
2803 if (WARN_ON(status->band < 0 ||
2804 status->band >= IEEE80211_NUM_BANDS))
2805 goto drop;
2806
2807 sband = local->hw.wiphy->bands[status->band];
2808 if (WARN_ON(!sband))
2809 goto drop;
2810
2811 /*
2812 * If we're suspending, it is possible although not too likely
2813 * that we'd be receiving frames after having already partially
2814 * quiesced the stack. We can't process such frames then since
2815 * that might, for example, cause stations to be added or other
2816 * driver callbacks to be invoked.
2817 */
2818 if (unlikely(local->quiescing || local->suspended))
2819 goto drop;
2820
2821 /*
2822 * The same happens when we're not even started,
2823 * but that's worth a warning.
2824 */
2825 if (WARN_ON(!local->started))
2826 goto drop;
2827
2828 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
2829 /*
2830 * Validate the rate, unless a PLCP error means that
2831 * we probably can't have a valid rate here anyway.
2832 */
2833
2834 if (status->flag & RX_FLAG_HT) {
2835 /*
2836 * rate_idx is MCS index, which can be [0-76]
2837 * as documented on:
2838 *
2839 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2840 *
2841 * Anything else would be some sort of driver or
2842 * hardware error. The driver should catch hardware
2843 * errors.
2844 */
2845 if (WARN((status->rate_idx < 0 ||
2846 status->rate_idx > 76),
2847 "Rate marked as an HT rate but passed "
2848 "status->rate_idx is not "
2849 "an MCS index [0-76]: %d (0x%02x)\n",
2850 status->rate_idx,
2851 status->rate_idx))
2852 goto drop;
2853 } else {
2854 if (WARN_ON(status->rate_idx < 0 ||
2855 status->rate_idx >= sband->n_bitrates))
2856 goto drop;
2857 rate = &sband->bitrates[status->rate_idx];
2858 }
2859 }
2860
2861 status->rx_flags = 0;
2862
2863 /*
2864 * key references and virtual interfaces are protected using RCU
2865 * and this requires that we are in a read-side RCU section during
2866 * receive processing
2867 */
2868 rcu_read_lock();
2869
2870 /*
2871 * Frames with a failed FCS/PLCP checksum are not returned;
2872 * all other frames are returned with the radiotap header
2873 * removed if one was previously present.
2874 * Also, frames shorter than 16 bytes are dropped.
2875 */
2876 skb = ieee80211_rx_monitor(local, skb, rate);
2877 if (!skb) {
2878 rcu_read_unlock();
2879 return;
2880 }
2881
2882 __ieee80211_rx_handle_packet(hw, skb);
2883
2884 rcu_read_unlock();
2885
2886 return;
2887 drop:
2888 kfree_skb(skb);
2889 }
2890 EXPORT_SYMBOL(ieee80211_rx);
2891
2892 /* This is a version of the rx handler that can be called from hard irq
2893 * context. Post the skb on the queue and schedule the tasklet */
2894 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
2895 {
2896 struct ieee80211_local *local = hw_to_local(hw);
2897
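	/* the RX status is stored in skb->cb and must fit there */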
2898 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2899
2900 skb->pkt_type = IEEE80211_RX_MSG;
2901 skb_queue_tail(&local->skb_queue, skb);
2902 tasklet_schedule(&local->tasklet);
2903 }
2904 EXPORT_SYMBOL(ieee80211_rx_irqsafe);