net/mac80211/rx.c (mirror_ubuntu-jammy-kernel.git, git.proxmox.com) - blob at commit "mac80211: fix too early reorder release timer"
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <net/mac80211.h>
20 #include <net/ieee80211_radiotap.h>
21
22 #include "ieee80211_i.h"
23 #include "driver-ops.h"
24 #include "led.h"
25 #include "mesh.h"
26 #include "wep.h"
27 #include "wpa.h"
28 #include "tkip.h"
29 #include "wme.h"
30
31 /*
32 * monitor mode reception
33 *
34 * This function cleans up the SKB, i.e. it removes all the stuff
35 * only useful for monitoring.
36 */
37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
38 struct sk_buff *skb)
39 {
40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
41 if (likely(skb->len > FCS_LEN))
42 __pskb_trim(skb, skb->len - FCS_LEN);
43 else {
44 /* driver bug */
45 WARN_ON(1);
46 dev_kfree_skb(skb);
47 skb = NULL;
48 }
49 }
50
51 return skb;
52 }
53
54 static inline int should_drop_frame(struct sk_buff *skb,
55 int present_fcs_len)
56 {
57 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
58 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
59
60 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
61 return 1;
62 if (unlikely(skb->len < 16 + present_fcs_len))
63 return 1;
64 if (ieee80211_is_ctl(hdr->frame_control) &&
65 !ieee80211_is_pspoll(hdr->frame_control) &&
66 !ieee80211_is_back_req(hdr->frame_control))
67 return 1;
68 return 0;
69 }
70
71 static int
72 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
73 struct ieee80211_rx_status *status)
74 {
75 int len;
76
77 /* always present fields */
78 len = sizeof(struct ieee80211_radiotap_header) + 9;
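/* 9 = flags (1) + rate (1) + channel (2 + 2) + antenna (1) + RX flags (2) */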
79
80 if (status->flag & RX_FLAG_MACTIME_MPDU)
81 len += 8;
82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
83 len += 1;
84
85 if (len & 1) /* padding for RX_FLAGS if necessary */
86 len++;
87
88 if (status->flag & RX_FLAG_HT) /* HT info */
89 len += 3;
90
91 return len;
92 }
93
94 /*
95 * ieee80211_add_rx_radiotap_header - add radiotap header
96 *
97 * add a radiotap header containing all the fields which the hardware provided.
98 */
99 static void
100 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
101 struct sk_buff *skb,
102 struct ieee80211_rate *rate,
103 int rtap_len)
104 {
105 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
106 struct ieee80211_radiotap_header *rthdr;
107 unsigned char *pos;
108 u16 rx_flags = 0;
109
110 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
111 memset(rthdr, 0, rtap_len);
112
113 /* radiotap header, set always present flags */
114 rthdr->it_present =
115 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
116 (1 << IEEE80211_RADIOTAP_CHANNEL) |
117 (1 << IEEE80211_RADIOTAP_ANTENNA) |
118 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
119 rthdr->it_len = cpu_to_le16(rtap_len);
120
121 pos = (unsigned char *)(rthdr+1);
122
123 /* the order of the following fields is important */
124
125 /* IEEE80211_RADIOTAP_TSFT */
126 if (status->flag & RX_FLAG_MACTIME_MPDU) {
127 put_unaligned_le64(status->mactime, pos);
128 rthdr->it_present |=
129 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
130 pos += 8;
131 }
132
133 /* IEEE80211_RADIOTAP_FLAGS */
134 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
135 *pos |= IEEE80211_RADIOTAP_F_FCS;
136 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
137 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
138 if (status->flag & RX_FLAG_SHORTPRE)
139 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
140 pos++;
141
142 /* IEEE80211_RADIOTAP_RATE */
143 if (status->flag & RX_FLAG_HT) {
144 /*
145 * MCS information is a separate field in radiotap,
146 * added below. The byte here is needed as padding
147 * for the channel though, so initialise it to 0.
148 */
149 *pos = 0;
150 } else {
151 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
152 *pos = rate->bitrate / 5;
153 }
154 pos++;
155
156 /* IEEE80211_RADIOTAP_CHANNEL */
157 put_unaligned_le16(status->freq, pos);
158 pos += 2;
159 if (status->band == IEEE80211_BAND_5GHZ)
160 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
161 pos);
162 else if (status->flag & RX_FLAG_HT)
163 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
164 pos);
165 else if (rate->flags & IEEE80211_RATE_ERP_G)
166 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
167 pos);
168 else
169 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
170 pos);
171 pos += 2;
172
173 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
174 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
175 *pos = status->signal;
176 rthdr->it_present |=
177 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
178 pos++;
179 }
180
181 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
182
183 /* IEEE80211_RADIOTAP_ANTENNA */
184 *pos = status->antenna;
185 pos++;
186
187 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
188
189 /* IEEE80211_RADIOTAP_RX_FLAGS */
190 /* ensure 2 byte alignment for the 2 byte field as required */
191 if ((pos - (u8 *)rthdr) & 1)
192 pos++;
193 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
194 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
195 put_unaligned_le16(rx_flags, pos);
196 pos += 2;
197
198 if (status->flag & RX_FLAG_HT) {
199 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
200 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
201 IEEE80211_RADIOTAP_MCS_HAVE_GI |
202 IEEE80211_RADIOTAP_MCS_HAVE_BW;
203 *pos = 0;
204 if (status->flag & RX_FLAG_SHORT_GI)
205 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
206 if (status->flag & RX_FLAG_40MHZ)
207 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
208 pos++;
209 *pos++ = status->rate_idx;
210 }
211 }
212
213 /*
214 * This function copies a received frame to all monitor interfaces and
215 * returns a cleaned-up SKB that no longer includes the FCS nor the
216 * radiotap header the driver might have added.
217 */
218 static struct sk_buff *
219 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
220 struct ieee80211_rate *rate)
221 {
222 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
223 struct ieee80211_sub_if_data *sdata;
224 int needed_headroom = 0;
225 struct sk_buff *skb, *skb2;
226 struct net_device *prev_dev = NULL;
227 int present_fcs_len = 0;
228
229 /*
230 * First, we may need to make a copy of the skb because
231 * (1) we need to modify it for radiotap (if not present), and
232 * (2) the other RX handlers will modify the skb we got.
233 *
234 * We don't need to, of course, if we aren't going to return
235 * the SKB because it has a bad FCS/PLCP checksum.
236 */
237
238 /* room for the radiotap header based on driver features */
239 needed_headroom = ieee80211_rx_radiotap_len(local, status);
240
241 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
242 present_fcs_len = FCS_LEN;
243
244 /* make sure hdr->frame_control is on the linear part */
245 if (!pskb_may_pull(origskb, 2)) {
246 dev_kfree_skb(origskb);
247 return NULL;
248 }
249
250 if (!local->monitors) {
251 if (should_drop_frame(origskb, present_fcs_len)) {
252 dev_kfree_skb(origskb);
253 return NULL;
254 }
255
256 return remove_monitor_info(local, origskb);
257 }
258
259 if (should_drop_frame(origskb, present_fcs_len)) {
260 /* only need to expand headroom if necessary */
261 skb = origskb;
262 origskb = NULL;
263
264 /*
265 * This shouldn't trigger often because most devices have an
266 * RX header they pull before we get here, and that should
267 * be big enough for our radiotap information. We should
268 * probably export the length to drivers so that we can have
269 * them allocate enough headroom to start with.
270 */
271 if (skb_headroom(skb) < needed_headroom &&
272 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
273 dev_kfree_skb(skb);
274 return NULL;
275 }
276 } else {
277 /*
278 * Need to make a copy and possibly remove radiotap header
279 * and FCS from the original.
280 */
281 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
282
283 origskb = remove_monitor_info(local, origskb);
284
285 if (!skb)
286 return origskb;
287 }
288
289 /* prepend radiotap information */
290 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
291
292 skb_reset_mac_header(skb);
293 skb->ip_summed = CHECKSUM_UNNECESSARY;
294 skb->pkt_type = PACKET_OTHERHOST;
295 skb->protocol = htons(ETH_P_802_2);
296
297 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
298 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
299 continue;
300
301 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
302 continue;
303
304 if (!ieee80211_sdata_running(sdata))
305 continue;
306
307 if (prev_dev) {
308 skb2 = skb_clone(skb, GFP_ATOMIC);
309 if (skb2) {
310 skb2->dev = prev_dev;
311 netif_receive_skb(skb2);
312 }
313 }
314
315 prev_dev = sdata->dev;
316 sdata->dev->stats.rx_packets++;
317 sdata->dev->stats.rx_bytes += skb->len;
318 }
319
320 if (prev_dev) {
321 skb->dev = prev_dev;
322 netif_receive_skb(skb);
323 } else
324 dev_kfree_skb(skb);
325
326 return origskb;
327 }
328
329
330 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
331 {
332 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
333 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
334 int tid;
335
336 /* does the frame have a qos control field? */
337 if (ieee80211_is_data_qos(hdr->frame_control)) {
338 u8 *qc = ieee80211_get_qos_ctl(hdr);
339 /* frame has qos control */
340 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
341 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
342 status->rx_flags |= IEEE80211_RX_AMSDU;
343 } else {
344 /*
345 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
346 *
347 * Sequence numbers for management frames, QoS data
348 * frames with a broadcast/multicast address in the
349 * Address 1 field, and all non-QoS data frames sent
350 * by QoS STAs are assigned using an additional single
351 * modulo-4096 counter, [...]
352 *
353 * We also use that counter for non-QoS STAs.
354 */
355 tid = NUM_RX_DATA_QUEUES - 1;
356 }
357
358 rx->queue = tid;
359 /* Set skb->priority to the 802.1d tag if the highest order bit of TID is not set.
360 * For now, set skb->priority to 0 for other cases. */
361 rx->skb->priority = (tid > 7) ? 0 : tid;
362 }
363
364 /**
365 * DOC: Packet alignment
366 *
367 * Drivers always need to pass packets that are aligned to two-byte boundaries
368 * to the stack.
369 *
370 * Additionally, drivers should, if possible, align the payload data in a way that
371 * guarantees that the contained IP header is aligned to a four-byte
372 * boundary. In the case of regular frames, this simply means aligning the
373 * payload to a four-byte boundary (because either the IP header is directly
374 * contained, or IV/RFC1042 headers that have a length divisible by four are
375 * in front of it). If the payload data is not properly aligned and the
376 * architecture doesn't support efficient unaligned operations, mac80211
377 * will align the data.
378 *
379 * With A-MSDU frames, however, the payload data address must yield two modulo
380 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
381 * push the IP header further back to a multiple of four again. Thankfully, the
382 * specs were sane enough this time around to require padding each A-MSDU
383 * subframe to a length that is a multiple of four.
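 * For example, if the A-MSDU payload starts at an address with addr % 4 == 2,
 * the 14-byte 802.3 subframe header places the following IP header at
 * (2 + 14) % 4 == 0, i.e. back on a four-byte boundary.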
384 *
385 * Padding such as that added by Atheros hardware between the 802.11 header and
386 * the payload is not supported; the driver is required to move the 802.11
387 * header to be directly in front of the payload in that case.
388 */
389 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
390 {
391 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
392 WARN_ONCE((unsigned long)rx->skb->data & 1,
393 "unaligned packet at 0x%p\n", rx->skb->data);
394 #endif
395 }
396
397
398 /* rx handlers */
399
400 static ieee80211_rx_result debug_noinline
401 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
402 {
403 struct ieee80211_local *local = rx->local;
404 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
405 struct sk_buff *skb = rx->skb;
406
407 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
408 return RX_CONTINUE;
409
410 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
411 test_bit(SCAN_SW_SCANNING, &local->scanning))
412 return ieee80211_scan_rx(rx->sdata, skb);
413
414 /* scanning finished while the RX handlers were running */
415 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
416 return RX_DROP_UNUSABLE;
417 }
418
419
420 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
421 {
422 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
423
424 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
425 return 0;
426
427 return ieee80211_is_robust_mgmt_frame(hdr);
428 }
429
430
431 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
432 {
433 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
434
435 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
436 return 0;
437
438 return ieee80211_is_robust_mgmt_frame(hdr);
439 }
440
441
442 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
443 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
444 {
445 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
446 struct ieee80211_mmie *mmie;
447
448 if (skb->len < 24 + sizeof(*mmie) ||
449 !is_multicast_ether_addr(hdr->da))
450 return -1;
451
452 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
453 return -1; /* not a robust management frame */
454
455 mmie = (struct ieee80211_mmie *)
456 (skb->data + skb->len - sizeof(*mmie));
457 if (mmie->element_id != WLAN_EID_MMIE ||
458 mmie->length != sizeof(*mmie) - 2)
459 return -1;
460
461 return le16_to_cpu(mmie->key_id);
462 }
463
464
465 static ieee80211_rx_result
466 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
467 {
468 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
469 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
470 char *dev_addr = rx->sdata->vif.addr;
471
472 if (ieee80211_is_data(hdr->frame_control)) {
473 if (is_multicast_ether_addr(hdr->addr1)) {
474 if (ieee80211_has_tods(hdr->frame_control) ||
475 !ieee80211_has_fromds(hdr->frame_control))
476 return RX_DROP_MONITOR;
477 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
478 return RX_DROP_MONITOR;
479 } else {
480 if (!ieee80211_has_a4(hdr->frame_control))
481 return RX_DROP_MONITOR;
482 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
483 return RX_DROP_MONITOR;
484 }
485 }
486
487 /* If there is not an established peer link and this is not a peer link
488 * establishment frame, beacon or probe, drop the frame.
489 */
490
491 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
492 struct ieee80211_mgmt *mgmt;
493
494 if (!ieee80211_is_mgmt(hdr->frame_control))
495 return RX_DROP_MONITOR;
496
497 if (ieee80211_is_action(hdr->frame_control)) {
498 mgmt = (struct ieee80211_mgmt *)hdr;
499 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
500 return RX_DROP_MONITOR;
501 return RX_CONTINUE;
502 }
503
504 if (ieee80211_is_probe_req(hdr->frame_control) ||
505 ieee80211_is_probe_resp(hdr->frame_control) ||
506 ieee80211_is_beacon(hdr->frame_control) ||
507 ieee80211_is_auth(hdr->frame_control))
508 return RX_CONTINUE;
509
510 return RX_DROP_MONITOR;
511
512 }
513
514 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
515
516 if (ieee80211_is_data(hdr->frame_control) &&
517 is_multicast_ether_addr(hdr->addr1) &&
518 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
519 return RX_DROP_MONITOR;
520 #undef msh_h_get
521
522 return RX_CONTINUE;
523 }
524
525 #define SEQ_MODULO 0x1000
526 #define SEQ_MASK 0xfff
527
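/*
 * 802.11 sequence numbers are modulo-4096. seq_less(sq1, sq2) is true when
 * sq1 precedes sq2 within half the sequence space, e.g. seq_less(4090, 5)
 * holds because 5 is only 11 steps ahead of 4090 after wrap-around.
 */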
528 static inline int seq_less(u16 sq1, u16 sq2)
529 {
530 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
531 }
532
533 static inline u16 seq_inc(u16 sq)
534 {
535 return (sq + 1) & SEQ_MASK;
536 }
537
538 static inline u16 seq_sub(u16 sq1, u16 sq2)
539 {
540 return (sq1 - sq2) & SEQ_MASK;
541 }
542
543
544 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
545 struct tid_ampdu_rx *tid_agg_rx,
546 int index)
547 {
548 struct ieee80211_local *local = hw_to_local(hw);
549 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
550 struct ieee80211_rx_status *status;
551
552 lockdep_assert_held(&tid_agg_rx->reorder_lock);
553
554 if (!skb)
555 goto no_frame;
556
557 /* release the frame from the reorder ring buffer */
558 tid_agg_rx->stored_mpdu_num--;
559 tid_agg_rx->reorder_buf[index] = NULL;
560 status = IEEE80211_SKB_RXCB(skb);
561 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
562 skb_queue_tail(&local->rx_skb_queue, skb);
563
564 no_frame:
565 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
566 }
567
568 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
569 struct tid_ampdu_rx *tid_agg_rx,
570 u16 head_seq_num)
571 {
572 int index;
573
574 lockdep_assert_held(&tid_agg_rx->reorder_lock);
575
576 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
577 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
578 tid_agg_rx->buf_size;
579 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
580 }
581 }
582
583 /*
584 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
585 * the skb was added to the buffer longer than this time ago, the earlier
586 * frames that have not yet been received are assumed to be lost and the skb
587 * can be released for processing. This may also release other skb's from the
588 * reorder buffer if there are no additional gaps between the frames.
589 *
590 * Callers must hold tid_agg_rx->reorder_lock.
591 */
592 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
593
594 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
595 struct tid_ampdu_rx *tid_agg_rx)
596 {
597 int index, j;
598
599 lockdep_assert_held(&tid_agg_rx->reorder_lock);
600
601 /* release buffered frames up to the next missing frame */
602 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
603 tid_agg_rx->buf_size;
604 if (!tid_agg_rx->reorder_buf[index] &&
605 tid_agg_rx->stored_mpdu_num > 1) {
606 /*
607 * No buffers ready to be released, but check whether any
608 * frames in the reorder buffer have timed out.
609 */
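/* count the empty (missing-frame) slots in front of the next buffered
 * frame; the head slot itself is already known to be empty */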
610 int skipped = 1;
611 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
612 j = (j + 1) % tid_agg_rx->buf_size) {
613 if (!tid_agg_rx->reorder_buf[j]) {
614 skipped++;
615 continue;
616 }
617 if (skipped &&
618 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
619 HT_RX_REORDER_BUF_TIMEOUT))
620 goto set_release_timer;
621
622 #ifdef CONFIG_MAC80211_HT_DEBUG
623 if (net_ratelimit())
624 wiphy_debug(hw->wiphy,
625 "release an RX reorder frame due to timeout on earlier frames\n");
626 #endif
627 ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
628
629 /*
630 * Increment the head seq# also for the skipped slots.
631 */
632 tid_agg_rx->head_seq_num =
633 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
634 skipped = 0;
635 }
636 } else while (tid_agg_rx->reorder_buf[index]) {
637 ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
638 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
639 tid_agg_rx->buf_size;
640 }
641
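/*
 * If frames are still buffered, (re)arm the release timer based on the
 * reorder_time of the first frame still waiting (in sequence order from
 * the head), so that it cannot fire before that frame has spent at least
 * HT_RX_REORDER_BUF_TIMEOUT in the buffer; otherwise the timer is no
 * longer needed.
 */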
642 if (tid_agg_rx->stored_mpdu_num) {
643 j = index = seq_sub(tid_agg_rx->head_seq_num,
644 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
645
646 for (; j != (index - 1) % tid_agg_rx->buf_size;
647 j = (j + 1) % tid_agg_rx->buf_size) {
648 if (tid_agg_rx->reorder_buf[j])
649 break;
650 }
651
652 set_release_timer:
653
654 mod_timer(&tid_agg_rx->reorder_timer,
655 tid_agg_rx->reorder_time[j] + 1 +
656 HT_RX_REORDER_BUF_TIMEOUT);
657 } else {
658 del_timer(&tid_agg_rx->reorder_timer);
659 }
660 }
661
662 /*
663 * As this function belongs to the RX path it must be under
664 * rcu_read_lock protection. It returns false if the frame
665 * can be processed immediately, true if it was consumed.
666 */
667 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
668 struct tid_ampdu_rx *tid_agg_rx,
669 struct sk_buff *skb)
670 {
671 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
672 u16 sc = le16_to_cpu(hdr->seq_ctrl);
673 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
674 u16 head_seq_num, buf_size;
675 int index;
676 bool ret = true;
677
678 spin_lock(&tid_agg_rx->reorder_lock);
679
680 buf_size = tid_agg_rx->buf_size;
681 head_seq_num = tid_agg_rx->head_seq_num;
682
683 /* frame with out of date sequence number */
684 if (seq_less(mpdu_seq_num, head_seq_num)) {
685 dev_kfree_skb(skb);
686 goto out;
687 }
688
689 /*
690 * If the frame's sequence number is beyond our buffering window
691 * size, release some previous frames to make room for this one.
692 */
693 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
694 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
695 /* release stored frames up to new head to stack */
696 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
697 }
698
699 /* Now the new frame is always in the range of the reordering buffer */
700
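/* slot in the circular reorder buffer, relative to the session's
 * starting sequence number (ssn) */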
701 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
702
703 /* check if we already stored this frame */
704 if (tid_agg_rx->reorder_buf[index]) {
705 dev_kfree_skb(skb);
706 goto out;
707 }
708
709 /*
710 * If the current MPDU is in the right order and nothing else
711 * is stored we can process it directly, no need to buffer it.
712 * If it is first but there's something stored, we may be able
713 * to release frames after this one.
714 */
715 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
716 tid_agg_rx->stored_mpdu_num == 0) {
717 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
718 ret = false;
719 goto out;
720 }
721
722 /* put the frame in the reordering buffer */
723 tid_agg_rx->reorder_buf[index] = skb;
724 tid_agg_rx->reorder_time[index] = jiffies;
725 tid_agg_rx->stored_mpdu_num++;
726 ieee80211_sta_reorder_release(hw, tid_agg_rx);
727
728 out:
729 spin_unlock(&tid_agg_rx->reorder_lock);
730 return ret;
731 }
732
733 /*
734 * Reorder MPDUs from A-MPDUs, keeping them in a buffer; frames that do
735 * not need reordering are queued for immediate processing.
736 */
737 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
738 {
739 struct sk_buff *skb = rx->skb;
740 struct ieee80211_local *local = rx->local;
741 struct ieee80211_hw *hw = &local->hw;
742 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
743 struct sta_info *sta = rx->sta;
744 struct tid_ampdu_rx *tid_agg_rx;
745 u16 sc;
746 int tid;
747
748 if (!ieee80211_is_data_qos(hdr->frame_control))
749 goto dont_reorder;
750
751 /*
752 * filter the QoS data rx stream according to
753 * STA/TID and check if this STA/TID is on aggregation
754 */
755
756 if (!sta)
757 goto dont_reorder;
758
759 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
760
761 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
762 if (!tid_agg_rx)
763 goto dont_reorder;
764
765 /* qos null data frames are excluded */
766 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
767 goto dont_reorder;
768
769 /* new, potentially un-ordered, ampdu frame - process it */
770
771 /* reset session timer */
772 if (tid_agg_rx->timeout)
773 mod_timer(&tid_agg_rx->session_timer,
774 TU_TO_EXP_TIME(tid_agg_rx->timeout));
775
776 /* if this mpdu is fragmented - terminate rx aggregation session */
777 sc = le16_to_cpu(hdr->seq_ctrl);
778 if (sc & IEEE80211_SCTL_FRAG) {
779 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
780 skb_queue_tail(&rx->sdata->skb_queue, skb);
781 ieee80211_queue_work(&local->hw, &rx->sdata->work);
782 return;
783 }
784
785 /*
786 * No locking needed -- we will only ever process one
787 * RX packet at a time, and thus own tid_agg_rx. All
788 * other code manipulating it needs to (and does) make
789 * sure that we cannot get to it any more before doing
790 * anything with it.
791 */
792 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
793 return;
794
795 dont_reorder:
796 skb_queue_tail(&local->rx_skb_queue, skb);
797 }
798
799 static ieee80211_rx_result debug_noinline
800 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
801 {
802 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
803 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
804
805 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
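/* retries are matched per station and per TID/queue by comparing the
 * last seen sequence control field (last_seq_ctrl[rx->queue]) */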
806 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
807 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
808 rx->sta->last_seq_ctrl[rx->queue] ==
809 hdr->seq_ctrl)) {
810 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
811 rx->local->dot11FrameDuplicateCount++;
812 rx->sta->num_duplicates++;
813 }
814 return RX_DROP_UNUSABLE;
815 } else
816 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
817 }
818
819 if (unlikely(rx->skb->len < 16)) {
820 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
821 return RX_DROP_MONITOR;
822 }
823
824 /* Drop disallowed frame classes based on STA auth/assoc state;
825 * IEEE 802.11, Chap 5.5.
826 *
827 * mac80211 filters only based on association state, i.e. it drops
828 * Class 3 frames from not associated stations. hostapd sends
829 * deauth/disassoc frames when needed. In addition, hostapd is
830 * responsible for filtering on both auth and assoc states.
831 */
832
833 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
834 return ieee80211_rx_mesh_check(rx);
835
836 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
837 ieee80211_is_pspoll(hdr->frame_control)) &&
838 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
839 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
840 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC))))
841 return RX_DROP_MONITOR;
842
843 return RX_CONTINUE;
844 }
845
846
847 static ieee80211_rx_result debug_noinline
848 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
849 {
850 struct sk_buff *skb = rx->skb;
851 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
852 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
853 int keyidx;
854 int hdrlen;
855 ieee80211_rx_result result = RX_DROP_UNUSABLE;
856 struct ieee80211_key *sta_ptk = NULL;
857 int mmie_keyidx = -1;
858 __le16 fc;
859
860 /*
861 * Key selection 101
862 *
863 * There are four types of keys:
864 * - GTK (group keys)
865 * - IGTK (group keys for management frames)
866 * - PTK (pairwise keys)
867 * - STK (station-to-station pairwise keys)
868 *
869 * When selecting a key, we have to distinguish between multicast
870 * (including broadcast) and unicast frames, the latter can only
871 * use PTKs and STKs while the former always use GTKs and IGTKs.
872 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
873 * unicast frames can also use key indices like GTKs. Hence, if we
874 * don't have a PTK/STK we check the key index for a WEP key.
875 *
876 * Note that in a regular BSS, multicast frames are sent by the
877 * AP only, associated stations unicast the frame to the AP first
878 * which then multicasts it on their behalf.
879 *
880 * There is also a slight problem in IBSS mode: GTKs are negotiated
881 * with each station; that is something we don't currently handle.
882 * The spec seems to expect that one negotiates the same key with
883 * every station but there's no such requirement; VLANs could be
884 * possible.
885 */
886
887 /*
888 * No point in finding a key and decrypting if the frame is neither
889 * addressed to us nor a multicast frame.
890 */
891 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
892 return RX_CONTINUE;
893
894 /* start without a key */
895 rx->key = NULL;
896
897 if (rx->sta)
898 sta_ptk = rcu_dereference(rx->sta->ptk);
899
900 fc = hdr->frame_control;
901
902 if (!ieee80211_has_protected(fc))
903 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
904
905 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
906 rx->key = sta_ptk;
907 if ((status->flag & RX_FLAG_DECRYPTED) &&
908 (status->flag & RX_FLAG_IV_STRIPPED))
909 return RX_CONTINUE;
910 /* Skip decryption if the frame is not protected. */
911 if (!ieee80211_has_protected(fc))
912 return RX_CONTINUE;
913 } else if (mmie_keyidx >= 0) {
914 /* Broadcast/multicast robust management frame / BIP */
915 if ((status->flag & RX_FLAG_DECRYPTED) &&
916 (status->flag & RX_FLAG_IV_STRIPPED))
917 return RX_CONTINUE;
918
919 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
920 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
921 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
922 if (rx->sta)
923 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
924 if (!rx->key)
925 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
926 } else if (!ieee80211_has_protected(fc)) {
927 /*
928 * The frame was not protected, so skip decryption. However, we
929 * need to set rx->key if there is a key that could have been
930 * used so that the frame may be dropped if encryption would
931 * have been expected.
932 */
933 struct ieee80211_key *key = NULL;
934 struct ieee80211_sub_if_data *sdata = rx->sdata;
935 int i;
936
937 if (ieee80211_is_mgmt(fc) &&
938 is_multicast_ether_addr(hdr->addr1) &&
939 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
940 rx->key = key;
941 else {
942 if (rx->sta) {
943 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
944 key = rcu_dereference(rx->sta->gtk[i]);
945 if (key)
946 break;
947 }
948 }
949 if (!key) {
950 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
951 key = rcu_dereference(sdata->keys[i]);
952 if (key)
953 break;
954 }
955 }
956 if (key)
957 rx->key = key;
958 }
959 return RX_CONTINUE;
960 } else {
961 u8 keyid;
962 /*
963 * The device doesn't give us the IV so we won't be
964 * able to look up the key. That's ok though, we
965 * don't need to decrypt the frame, we just won't
966 * be able to keep statistics accurate.
967 * Except for key threshold notifications, should
968 * we somehow allow the driver to tell us which key
969 * the hardware used if this flag is set?
970 */
971 if ((status->flag & RX_FLAG_DECRYPTED) &&
972 (status->flag & RX_FLAG_IV_STRIPPED))
973 return RX_CONTINUE;
974
975 hdrlen = ieee80211_hdrlen(fc);
976
977 if (rx->skb->len < 8 + hdrlen)
978 return RX_DROP_UNUSABLE; /* TODO: count this? */
979
980 /*
981 * no need to call ieee80211_wep_get_keyidx,
982 * it verifies a bunch of things we've done already
983 */
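/* the key index occupies bits 6-7 of the key ID octet at offset hdrlen + 3 */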
984 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
985 keyidx = keyid >> 6;
986
987 /* check per-station GTK first, if multicast packet */
988 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
989 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
990
991 /* if not found, try default key */
992 if (!rx->key) {
993 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
994
995 /*
996 * RSNA-protected unicast frames should always be
997 * sent with pairwise or station-to-station keys,
998 * but for WEP we allow using a key index as well.
999 */
1000 if (rx->key &&
1001 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1002 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1003 !is_multicast_ether_addr(hdr->addr1))
1004 rx->key = NULL;
1005 }
1006 }
1007
1008 if (rx->key) {
1009 rx->key->tx_rx_count++;
1010 /* TODO: add threshold stuff again */
1011 } else {
1012 return RX_DROP_MONITOR;
1013 }
1014
1015 if (skb_linearize(rx->skb))
1016 return RX_DROP_UNUSABLE;
1017 /* the hdr variable is invalid now! */
1018
1019 switch (rx->key->conf.cipher) {
1020 case WLAN_CIPHER_SUITE_WEP40:
1021 case WLAN_CIPHER_SUITE_WEP104:
1022 /* Check for weak IVs if possible */
1023 if (rx->sta && ieee80211_is_data(fc) &&
1024 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1025 !(status->flag & RX_FLAG_DECRYPTED)) &&
1026 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1027 rx->sta->wep_weak_iv_count++;
1028
1029 result = ieee80211_crypto_wep_decrypt(rx);
1030 break;
1031 case WLAN_CIPHER_SUITE_TKIP:
1032 result = ieee80211_crypto_tkip_decrypt(rx);
1033 break;
1034 case WLAN_CIPHER_SUITE_CCMP:
1035 result = ieee80211_crypto_ccmp_decrypt(rx);
1036 break;
1037 case WLAN_CIPHER_SUITE_AES_CMAC:
1038 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1039 break;
1040 default:
1041 /*
1042 * We can reach here only with HW-only algorithms
1043 * but why didn't it decrypt the frame?!
1044 */
1045 return RX_DROP_UNUSABLE;
1046 }
1047
1048 /* either the frame has been decrypted or will be dropped */
1049 status->flag |= RX_FLAG_DECRYPTED;
1050
1051 return result;
1052 }
1053
1054 static ieee80211_rx_result debug_noinline
1055 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1056 {
1057 struct ieee80211_local *local;
1058 struct ieee80211_hdr *hdr;
1059 struct sk_buff *skb;
1060
1061 local = rx->local;
1062 skb = rx->skb;
1063 hdr = (struct ieee80211_hdr *) skb->data;
1064
1065 if (!local->pspolling)
1066 return RX_CONTINUE;
1067
1068 if (!ieee80211_has_fromds(hdr->frame_control))
1069 /* this is not from AP */
1070 return RX_CONTINUE;
1071
1072 if (!ieee80211_is_data(hdr->frame_control))
1073 return RX_CONTINUE;
1074
1075 if (!ieee80211_has_moredata(hdr->frame_control)) {
1076 /* AP has no more frames buffered for us */
1077 local->pspolling = false;
1078 return RX_CONTINUE;
1079 }
1080
1081 /* more data bit is set, let's request a new frame from the AP */
1082 ieee80211_send_pspoll(local, rx->sdata);
1083
1084 return RX_CONTINUE;
1085 }
1086
1087 static void ap_sta_ps_start(struct sta_info *sta)
1088 {
1089 struct ieee80211_sub_if_data *sdata = sta->sdata;
1090 struct ieee80211_local *local = sdata->local;
1091
1092 atomic_inc(&sdata->bss->num_sta_ps);
1093 set_sta_flags(sta, WLAN_STA_PS_STA);
1094 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1095 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1096 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1097 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1098 sdata->name, sta->sta.addr, sta->sta.aid);
1099 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1100 }
1101
1102 static void ap_sta_ps_end(struct sta_info *sta)
1103 {
1104 struct ieee80211_sub_if_data *sdata = sta->sdata;
1105
1106 atomic_dec(&sdata->bss->num_sta_ps);
1107
1108 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1109 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1110 sdata->name, sta->sta.addr, sta->sta.aid);
1111 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1112
1113 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1114 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1115 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1116 sdata->name, sta->sta.addr, sta->sta.aid);
1117 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1118 return;
1119 }
1120
1121 ieee80211_sta_ps_deliver_wakeup(sta);
1122 }
1123
1124 int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1125 {
1126 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1127 bool in_ps;
1128
1129 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1130
1131 /* Don't let the same PS state be set twice */
1132 in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
1133 if ((start && in_ps) || (!start && !in_ps))
1134 return -EINVAL;
1135
1136 if (start)
1137 ap_sta_ps_start(sta_inf);
1138 else
1139 ap_sta_ps_end(sta_inf);
1140
1141 return 0;
1142 }
1143 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1144
1145 static ieee80211_rx_result debug_noinline
1146 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1147 {
1148 struct sta_info *sta = rx->sta;
1149 struct sk_buff *skb = rx->skb;
1150 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1151 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1152
1153 if (!sta)
1154 return RX_CONTINUE;
1155
1156 /*
1157 * Update last_rx only for IBSS packets which are for the current
1158 * BSSID to avoid keeping the current IBSS network alive in cases
1159 * where other STAs start using different BSSID.
1160 */
1161 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1162 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1163 NL80211_IFTYPE_ADHOC);
1164 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
1165 sta->last_rx = jiffies;
1166 if (ieee80211_is_data(hdr->frame_control)) {
1167 sta->last_rx_rate_idx = status->rate_idx;
1168 sta->last_rx_rate_flag = status->flag;
1169 }
1170 }
1171 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1172 /*
1173 * Mesh beacons will update last_rx if they are found to
1174 * match the current local configuration when processed.
1175 */
1176 sta->last_rx = jiffies;
1177 if (ieee80211_is_data(hdr->frame_control)) {
1178 sta->last_rx_rate_idx = status->rate_idx;
1179 sta->last_rx_rate_flag = status->flag;
1180 }
1181 }
1182
1183 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1184 return RX_CONTINUE;
1185
1186 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1187 ieee80211_sta_rx_notify(rx->sdata, hdr);
1188
1189 sta->rx_fragments++;
1190 sta->rx_bytes += rx->skb->len;
1191 sta->last_signal = status->signal;
1192 ewma_add(&sta->avg_signal, -status->signal);
1193
1194 /*
1195 * Change STA power saving mode only at the end of a frame
1196 * exchange sequence.
1197 */
1198 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1199 !ieee80211_has_morefrags(hdr->frame_control) &&
1200 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1201 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1202 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1203 if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
1204 /*
1205 * Ignore doze->wake transitions that are
1206 * indicated by non-data frames; the standard
1207 * is unclear here, but for example going to
1208 * PS mode and then scanning would cause a
1209 * doze->wake transition for the probe request,
1210 * and that is clearly undesirable.
1211 */
1212 if (ieee80211_is_data(hdr->frame_control) &&
1213 !ieee80211_has_pm(hdr->frame_control))
1214 ap_sta_ps_end(sta);
1215 } else {
1216 if (ieee80211_has_pm(hdr->frame_control))
1217 ap_sta_ps_start(sta);
1218 }
1219 }
1220
1221 /*
1222 * Drop (qos-)data::nullfunc frames silently, since they
1223 * are used only to control station power saving mode.
1224 */
1225 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1226 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1227 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1228
1229 /*
1230 * If we receive a 4-addr nullfunc frame from a STA
1231 * that was not moved to a 4-addr STA vlan yet, drop
1232 * the frame to the monitor interface, to make sure
1233 * that hostapd sees it
1234 */
1235 if (ieee80211_has_a4(hdr->frame_control) &&
1236 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1237 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1238 !rx->sdata->u.vlan.sta)))
1239 return RX_DROP_MONITOR;
1240 /*
1241 * Update counter and free packet here to avoid
1242 * counting this as a dropped packet.
1243 */
1244 sta->rx_packets++;
1245 dev_kfree_skb(rx->skb);
1246 return RX_QUEUED;
1247 }
1248
1249 return RX_CONTINUE;
1250 } /* ieee80211_rx_h_sta_process */
1251
1252 static inline struct ieee80211_fragment_entry *
1253 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1254 unsigned int frag, unsigned int seq, int rx_queue,
1255 struct sk_buff **skb)
1256 {
1257 struct ieee80211_fragment_entry *entry;
1258 int idx;
1259
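/* slots in the small fragment cache are reused round-robin, so the
 * oldest entry is overwritten when the cache is full */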
1260 idx = sdata->fragment_next;
1261 entry = &sdata->fragments[sdata->fragment_next++];
1262 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1263 sdata->fragment_next = 0;
1264
1265 if (!skb_queue_empty(&entry->skb_list)) {
1266 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1267 struct ieee80211_hdr *hdr =
1268 (struct ieee80211_hdr *) entry->skb_list.next->data;
1269 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1270 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1271 "addr1=%pM addr2=%pM\n",
1272 sdata->name, idx,
1273 jiffies - entry->first_frag_time, entry->seq,
1274 entry->last_frag, hdr->addr1, hdr->addr2);
1275 #endif
1276 __skb_queue_purge(&entry->skb_list);
1277 }
1278
1279 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1280 *skb = NULL;
1281 entry->first_frag_time = jiffies;
1282 entry->seq = seq;
1283 entry->rx_queue = rx_queue;
1284 entry->last_frag = frag;
1285 entry->ccmp = 0;
1286 entry->extra_len = 0;
1287
1288 return entry;
1289 }
1290
1291 static inline struct ieee80211_fragment_entry *
1292 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1293 unsigned int frag, unsigned int seq,
1294 int rx_queue, struct ieee80211_hdr *hdr)
1295 {
1296 struct ieee80211_fragment_entry *entry;
1297 int i, idx;
1298
1299 idx = sdata->fragment_next;
1300 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1301 struct ieee80211_hdr *f_hdr;
1302
1303 idx--;
1304 if (idx < 0)
1305 idx = IEEE80211_FRAGMENT_MAX - 1;
1306
1307 entry = &sdata->fragments[idx];
1308 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1309 entry->rx_queue != rx_queue ||
1310 entry->last_frag + 1 != frag)
1311 continue;
1312
1313 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1314
1315 /*
1316 * Check ftype and addresses are equal, else check next fragment
1317 */
1318 if (((hdr->frame_control ^ f_hdr->frame_control) &
1319 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1320 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1321 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1322 continue;
1323
1324 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1325 __skb_queue_purge(&entry->skb_list);
1326 continue;
1327 }
1328 return entry;
1329 }
1330
1331 return NULL;
1332 }
1333
1334 static ieee80211_rx_result debug_noinline
1335 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1336 {
1337 struct ieee80211_hdr *hdr;
1338 u16 sc;
1339 __le16 fc;
1340 unsigned int frag, seq;
1341 struct ieee80211_fragment_entry *entry;
1342 struct sk_buff *skb;
1343 struct ieee80211_rx_status *status;
1344
1345 hdr = (struct ieee80211_hdr *)rx->skb->data;
1346 fc = hdr->frame_control;
1347 sc = le16_to_cpu(hdr->seq_ctrl);
1348 frag = sc & IEEE80211_SCTL_FRAG;
1349
1350 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1351 (rx->skb)->len < 24 ||
1352 is_multicast_ether_addr(hdr->addr1))) {
1353 /* not fragmented */
1354 goto out;
1355 }
1356 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1357
1358 if (skb_linearize(rx->skb))
1359 return RX_DROP_UNUSABLE;
1360
1361 /*
1362 * skb_linearize() might change the skb->data and
1363 * previously cached variables (in this case, hdr) need to
1364 * be refreshed with the new data.
1365 */
1366 hdr = (struct ieee80211_hdr *)rx->skb->data;
1367 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1368
1369 if (frag == 0) {
1370 /* This is the first fragment of a new frame. */
1371 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1372 rx->queue, &(rx->skb));
1373 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1374 ieee80211_has_protected(fc)) {
1375 int queue = ieee80211_is_mgmt(fc) ?
1376 NUM_RX_DATA_QUEUES : rx->queue;
1377 /* Store CCMP PN so that we can verify that the next
1378 * fragment has a sequential PN value. */
1379 entry->ccmp = 1;
1380 memcpy(entry->last_pn,
1381 rx->key->u.ccmp.rx_pn[queue],
1382 CCMP_PN_LEN);
1383 }
1384 return RX_QUEUED;
1385 }
1386
1387 /* This is a fragment for a frame that should already be pending in
1388 * fragment cache. Add this fragment to the end of the pending entry.
1389 */
1390 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1391 if (!entry) {
1392 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1393 return RX_DROP_MONITOR;
1394 }
1395
1396 /* Verify that MPDUs within one MSDU have sequential PN values.
1397 * (IEEE 802.11i, 8.3.3.4.5) */
1398 if (entry->ccmp) {
1399 int i;
1400 u8 pn[CCMP_PN_LEN], *rpn;
1401 int queue;
1402 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1403 return RX_DROP_UNUSABLE;
1404 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
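/* the expected PN for this fragment is the previous PN + 1; PN bytes
 * are stored most-significant first, so carry from the last byte upwards */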
1405 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1406 pn[i]++;
1407 if (pn[i])
1408 break;
1409 }
1410 queue = ieee80211_is_mgmt(fc) ?
1411 NUM_RX_DATA_QUEUES : rx->queue;
1412 rpn = rx->key->u.ccmp.rx_pn[queue];
1413 if (memcmp(pn, rpn, CCMP_PN_LEN))
1414 return RX_DROP_UNUSABLE;
1415 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1416 }
1417
1418 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1419 __skb_queue_tail(&entry->skb_list, rx->skb);
1420 entry->last_frag = frag;
1421 entry->extra_len += rx->skb->len;
1422 if (ieee80211_has_morefrags(fc)) {
1423 rx->skb = NULL;
1424 return RX_QUEUED;
1425 }
1426
1427 rx->skb = __skb_dequeue(&entry->skb_list);
1428 if (skb_tailroom(rx->skb) < entry->extra_len) {
1429 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1430 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1431 GFP_ATOMIC))) {
1432 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1433 __skb_queue_purge(&entry->skb_list);
1434 return RX_DROP_UNUSABLE;
1435 }
1436 }
1437 while ((skb = __skb_dequeue(&entry->skb_list))) {
1438 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1439 dev_kfree_skb(skb);
1440 }
1441
1442 /* Complete frame has been reassembled - process it now */
1443 status = IEEE80211_SKB_RXCB(rx->skb);
1444 status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1445
1446 out:
1447 if (rx->sta)
1448 rx->sta->rx_packets++;
1449 if (is_multicast_ether_addr(hdr->addr1))
1450 rx->local->dot11MulticastReceivedFrameCount++;
1451 else
1452 ieee80211_led_rx(rx->local);
1453 return RX_CONTINUE;
1454 }
1455
1456 static ieee80211_rx_result debug_noinline
1457 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1458 {
1459 struct ieee80211_sub_if_data *sdata = rx->sdata;
1460 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1461 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1462
1463 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1464 !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
1465 return RX_CONTINUE;
1466
1467 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1468 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1469 return RX_DROP_UNUSABLE;
1470
1471 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1472 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1473 else
1474 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1475
1476 /* Free PS Poll skb here instead of returning RX_DROP that would
1477 * count as a dropped frame. */
1478 dev_kfree_skb(rx->skb);
1479
1480 return RX_QUEUED;
1481 }
1482
1483 static ieee80211_rx_result debug_noinline
1484 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1485 {
1486 u8 *data = rx->skb->data;
1487 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1488
1489 if (!ieee80211_is_data_qos(hdr->frame_control))
1490 return RX_CONTINUE;
1491
1492 /* remove the qos control field, update frame type and meta-data */
1493 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1494 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1495 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1496 /* change frame type to non QOS */
1497 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1498
1499 return RX_CONTINUE;
1500 }
1501
1502 static int
1503 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1504 {
1505 if (unlikely(!rx->sta ||
1506 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1507 return -EACCES;
1508
1509 return 0;
1510 }
1511
1512 static int
1513 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1514 {
1515 struct sk_buff *skb = rx->skb;
1516 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1517
1518 /*
1519 * Pass through unencrypted frames if the hardware has
1520 * decrypted them already.
1521 */
1522 if (status->flag & RX_FLAG_DECRYPTED)
1523 return 0;
1524
1525 /* Drop unencrypted frames if key is set. */
1526 if (unlikely(!ieee80211_has_protected(fc) &&
1527 !ieee80211_is_nullfunc(fc) &&
1528 ieee80211_is_data(fc) &&
1529 (rx->key || rx->sdata->drop_unencrypted)))
1530 return -EACCES;
1531
1532 return 0;
1533 }
1534
1535 static int
1536 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1537 {
1538 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1539 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1540 __le16 fc = hdr->frame_control;
1541
1542 /*
1543 * Pass through unencrypted frames if the hardware has
1544 * decrypted them already.
1545 */
1546 if (status->flag & RX_FLAG_DECRYPTED)
1547 return 0;
1548
1549 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1550 if (unlikely(!ieee80211_has_protected(fc) &&
1551 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1552 rx->key)) {
1553 if (ieee80211_is_deauth(fc))
1554 cfg80211_send_unprot_deauth(rx->sdata->dev,
1555 rx->skb->data,
1556 rx->skb->len);
1557 else if (ieee80211_is_disassoc(fc))
1558 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1559 rx->skb->data,
1560 rx->skb->len);
1561 return -EACCES;
1562 }
1563 /* BIP does not use Protected field, so need to check MMIE */
1564 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1565 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1566 if (ieee80211_is_deauth(fc))
1567 cfg80211_send_unprot_deauth(rx->sdata->dev,
1568 rx->skb->data,
1569 rx->skb->len);
1570 else if (ieee80211_is_disassoc(fc))
1571 cfg80211_send_unprot_disassoc(rx->sdata->dev,
1572 rx->skb->data,
1573 rx->skb->len);
1574 return -EACCES;
1575 }
1576 /*
1577 * When using MFP, Action frames are not allowed prior to
1578 * having configured keys.
1579 */
1580 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1581 ieee80211_is_robust_mgmt_frame(
1582 (struct ieee80211_hdr *) rx->skb->data)))
1583 return -EACCES;
1584 }
1585
1586 return 0;
1587 }
1588
1589 static int
1590 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1591 {
1592 struct ieee80211_sub_if_data *sdata = rx->sdata;
1593 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1594 bool check_port_control = false;
1595 struct ethhdr *ehdr;
1596 int ret;
1597
1598 *port_control = false;
1599 if (ieee80211_has_a4(hdr->frame_control) &&
1600 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1601 return -1;
1602
1603 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1604 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1605
1606 if (!sdata->u.mgd.use_4addr)
1607 return -1;
1608 else
1609 check_port_control = true;
1610 }
1611
1612 if (is_multicast_ether_addr(hdr->addr1) &&
1613 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1614 return -1;
1615
1616 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1617 if (ret < 0)
1618 return ret;
1619
1620 ehdr = (struct ethhdr *) rx->skb->data;
1621 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1622 *port_control = true;
1623 else if (check_port_control)
1624 return -1;
1625
1626 return 0;
1627 }
1628
1629 /*
1630 * requires that rx->skb is a frame with ethernet header
1631 */
1632 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1633 {
1634 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1635 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1636 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1637
1638 /*
1639 * Allow EAPOL frames to us/the PAE group address regardless
1640 * of whether the frame was encrypted or not.
1641 */
1642 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1643 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1644 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1645 return true;
1646
1647 if (ieee80211_802_1x_port_control(rx) ||
1648 ieee80211_drop_unencrypted(rx, fc))
1649 return false;
1650
1651 return true;
1652 }
1653
1654 /*
1655 * requires that rx->skb is a frame with ethernet header
1656 */
1657 static void
1658 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1659 {
1660 struct ieee80211_sub_if_data *sdata = rx->sdata;
1661 struct net_device *dev = sdata->dev;
1662 struct sk_buff *skb, *xmit_skb;
1663 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1664 struct sta_info *dsta;
1665 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1666
1667 skb = rx->skb;
1668 xmit_skb = NULL;
1669
1670 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1671 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1672 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1673 (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1674 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1675 if (is_multicast_ether_addr(ehdr->h_dest)) {
1676 /*
1677 * send multicast frames both to higher layers in
1678 * local net stack and back to the wireless medium
1679 */
1680 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1681 if (!xmit_skb && net_ratelimit())
1682 printk(KERN_DEBUG "%s: failed to clone "
1683 "multicast frame\n", dev->name);
1684 } else {
1685 dsta = sta_info_get(sdata, skb->data);
1686 if (dsta) {
1687 /*
1688 * The destination station is associated to
1689 * this AP (in this VLAN), so send the frame
1690 * directly to it and do not pass it to local
1691 * net stack.
1692 */
1693 xmit_skb = skb;
1694 skb = NULL;
1695 }
1696 }
1697 }
1698
1699 if (skb) {
1700 int align __maybe_unused;
1701
1702 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1703 /*
1704 * 'align' will only take the values 0 or 2 here
1705 * since all frames are required to be aligned
1706 * to 2-byte boundaries when being passed to
1707 * mac80211. That also explains the __skb_push()
1708 * below.
1709 */
1710 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1711 if (align) {
1712 if (WARN_ON(skb_headroom(skb) < 3)) {
1713 dev_kfree_skb(skb);
1714 skb = NULL;
1715 } else {
1716 u8 *data = skb->data;
1717 size_t len = skb_headlen(skb);
1718 skb->data -= align;
1719 memmove(skb->data, data, len);
1720 skb_set_tail_pointer(skb, len);
1721 }
1722 }
1723 #endif
1724
1725 if (skb) {
1726 /* deliver to local stack */
1727 skb->protocol = eth_type_trans(skb, dev);
1728 memset(skb->cb, 0, sizeof(skb->cb));
1729 netif_receive_skb(skb);
1730 }
1731 }
1732
1733 if (xmit_skb) {
1734 /* send to wireless media */
1735 xmit_skb->protocol = htons(ETH_P_802_3);
1736 skb_reset_network_header(xmit_skb);
1737 skb_reset_mac_header(xmit_skb);
1738 dev_queue_xmit(xmit_skb);
1739 }
1740 }
1741
1742 static ieee80211_rx_result debug_noinline
1743 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1744 {
1745 struct net_device *dev = rx->sdata->dev;
1746 struct sk_buff *skb = rx->skb;
1747 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1748 __le16 fc = hdr->frame_control;
1749 struct sk_buff_head frame_list;
1750 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1751
1752 if (unlikely(!ieee80211_is_data(fc)))
1753 return RX_CONTINUE;
1754
1755 if (unlikely(!ieee80211_is_data_present(fc)))
1756 return RX_DROP_MONITOR;
1757
1758 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1759 return RX_CONTINUE;
1760
1761 if (ieee80211_has_a4(hdr->frame_control) &&
1762 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1763 !rx->sdata->u.vlan.sta)
1764 return RX_DROP_UNUSABLE;
1765
1766 if (is_multicast_ether_addr(hdr->addr1) &&
1767 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1768 rx->sdata->u.vlan.sta) ||
1769 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1770 rx->sdata->u.mgd.use_4addr)))
1771 return RX_DROP_UNUSABLE;
1772
1773 skb->dev = dev;
1774 __skb_queue_head_init(&frame_list);
1775
1776 if (skb_linearize(skb))
1777 return RX_DROP_UNUSABLE;
1778
1779 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1780 rx->sdata->vif.type,
1781 rx->local->hw.extra_tx_headroom);
1782
1783 while (!skb_queue_empty(&frame_list)) {
1784 rx->skb = __skb_dequeue(&frame_list);
1785
1786 if (!ieee80211_frame_allowed(rx, fc)) {
1787 dev_kfree_skb(rx->skb);
1788 continue;
1789 }
1790 dev->stats.rx_packets++;
1791 dev->stats.rx_bytes += rx->skb->len;
1792
1793 ieee80211_deliver_skb(rx);
1794 }
1795
1796 return RX_QUEUED;
1797 }
1798
1799 #ifdef CONFIG_MAC80211_MESH
1800 static ieee80211_rx_result
1801 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1802 {
1803 struct ieee80211_hdr *hdr;
1804 struct ieee80211s_hdr *mesh_hdr;
1805 unsigned int hdrlen;
1806 struct sk_buff *skb = rx->skb, *fwd_skb;
1807 struct ieee80211_local *local = rx->local;
1808 struct ieee80211_sub_if_data *sdata = rx->sdata;
1809 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1810
1811 hdr = (struct ieee80211_hdr *) skb->data;
1812 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1813 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1814
1815 if (!ieee80211_is_data(hdr->frame_control))
1816 return RX_CONTINUE;
1817
1818 if (!mesh_hdr->ttl)
1819 /* illegal frame */
1820 return RX_DROP_MONITOR;
1821
1822 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1823 struct mesh_path *mppath;
1824 char *proxied_addr;
1825 char *mpp_addr;
1826
1827 if (is_multicast_ether_addr(hdr->addr1)) {
1828 mpp_addr = hdr->addr3;
1829 proxied_addr = mesh_hdr->eaddr1;
1830 } else {
1831 mpp_addr = hdr->addr4;
1832 proxied_addr = mesh_hdr->eaddr2;
1833 }
1834
1835 rcu_read_lock();
1836 mppath = mpp_path_lookup(proxied_addr, sdata);
1837 if (!mppath) {
1838 mpp_path_add(proxied_addr, mpp_addr, sdata);
1839 } else {
1840 spin_lock_bh(&mppath->state_lock);
1841 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1842 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1843 spin_unlock_bh(&mppath->state_lock);
1844 }
1845 rcu_read_unlock();
1846 }
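/*
 * The lookup/add above maintains the mesh proxy path (MPP) table: it
 * records that the external address carried in the address extension
 * (proxied_addr) is reachable via the mesh STA mpp_addr, so frames
 * later destined to proxied_addr can be sent towards that proxy.
 */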
1847
1848 /* Frame has reached destination. Don't forward */
1849 if (!is_multicast_ether_addr(hdr->addr1) &&
1850 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1851 return RX_CONTINUE;
1852
1853 mesh_hdr->ttl--;
1854
1855 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1856 if (!mesh_hdr->ttl)
1857 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1858 dropped_frames_ttl);
1859 else {
1860 struct ieee80211_hdr *fwd_hdr;
1861 struct ieee80211_tx_info *info;
1862
1863 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1864
1865 if (!fwd_skb && net_ratelimit())
1866 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1867 sdata->name);
1868 if (!fwd_skb)
1869 goto out;
1870
1871 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1872 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1873 info = IEEE80211_SKB_CB(fwd_skb);
1874 memset(info, 0, sizeof(*info));
1875 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1876 info->control.vif = &rx->sdata->vif;
1877 skb_set_queue_mapping(fwd_skb,
1878 ieee80211_select_queue(rx->sdata, fwd_skb));
1879 ieee80211_set_qos_hdr(local, fwd_skb);
1880 if (is_multicast_ether_addr(fwd_hdr->addr1))
1881 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1882 fwded_mcast);
1883 else {
1884 int err;
1885 /*
1886 * Save TA to addr1 to send TA a path error if a
1887 * suitable next hop is not found
1888 */
1889 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1890 ETH_ALEN);
1891 err = mesh_nexthop_lookup(fwd_skb, sdata);
1892 /* Failed to immediately resolve next hop:
1893 * fwded frame was dropped or will be added
1894 * later to the pending skb queue. */
1895 if (err)
1896 return RX_DROP_MONITOR;
1897
1898 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1899 fwded_unicast);
1900 }
1901 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1902 fwded_frames);
1903 ieee80211_add_pending_skb(local, fwd_skb);
1904 }
1905 }
1906
1907 out:
1908 if (is_multicast_ether_addr(hdr->addr1) ||
1909 sdata->dev->flags & IFF_PROMISC)
1910 return RX_CONTINUE;
1911 else
1912 return RX_DROP_MONITOR;
1913 }
1914 #endif
1915
1916 static ieee80211_rx_result debug_noinline
1917 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1918 {
1919 struct ieee80211_sub_if_data *sdata = rx->sdata;
1920 struct ieee80211_local *local = rx->local;
1921 struct net_device *dev = sdata->dev;
1922 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1923 __le16 fc = hdr->frame_control;
1924 bool port_control;
1925 int err;
1926
1927 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1928 return RX_CONTINUE;
1929
1930 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1931 return RX_DROP_MONITOR;
1932
1933 /*
1934 * Allow the cooked monitor interface of an AP to see 4-addr frames so
1935 * that a 4-addr station can be detected and moved into a separate VLAN
1936 */
1937 if (ieee80211_has_a4(hdr->frame_control) &&
1938 sdata->vif.type == NL80211_IFTYPE_AP)
1939 return RX_DROP_MONITOR;
1940
1941 err = __ieee80211_data_to_8023(rx, &port_control);
1942 if (unlikely(err))
1943 return RX_DROP_UNUSABLE;
1944
1945 if (!ieee80211_frame_allowed(rx, fc))
1946 return RX_DROP_MONITOR;
1947
1948 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1949 unlikely(port_control) && sdata->bss) {
1950 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1951 u.ap);
1952 dev = sdata->dev;
1953 rx->sdata = sdata;
1954 }
1955
1956 rx->skb->dev = dev;
1957
1958 dev->stats.rx_packets++;
1959 dev->stats.rx_bytes += rx->skb->len;
1960
1961 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
1962 !is_multicast_ether_addr(
1963 ((struct ethhdr *)rx->skb->data)->h_dest) &&
1964 (!local->scanning &&
1965 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
1966 mod_timer(&local->dynamic_ps_timer, jiffies +
1967 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1968 }
1969
1970 ieee80211_deliver_skb(rx);
1971
1972 return RX_QUEUED;
1973 }
1974
1975 static ieee80211_rx_result debug_noinline
1976 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1977 {
1978 struct ieee80211_local *local = rx->local;
1979 struct ieee80211_hw *hw = &local->hw;
1980 struct sk_buff *skb = rx->skb;
1981 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1982 struct tid_ampdu_rx *tid_agg_rx;
1983 u16 start_seq_num;
1984 u16 tid;
1985
1986 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1987 return RX_CONTINUE;
1988
1989 if (ieee80211_is_back_req(bar->frame_control)) {
1990 struct {
1991 __le16 control, start_seq_num;
1992 } __packed bar_data;
1993
1994 if (!rx->sta)
1995 return RX_DROP_MONITOR;
1996
1997 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
1998 &bar_data, sizeof(bar_data)))
1999 return RX_DROP_MONITOR;
2000
2001 tid = le16_to_cpu(bar_data.control) >> 12;
2002
2003 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2004 if (!tid_agg_rx)
2005 return RX_DROP_MONITOR;
2006
2007 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
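/*
 * Worked example of the two shifts above: the TID occupies bits 12-15
 * of the BAR control field, so a control value of 0x5000 yields TID 5,
 * while the low 4 bits of the starting sequence control carry the
 * fragment number, so 0x01a0 >> 4 gives start_seq_num 0x1a.
 */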
2008
2009 /* reset session timer */
2010 if (tid_agg_rx->timeout)
2011 mod_timer(&tid_agg_rx->session_timer,
2012 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2013
2014 spin_lock(&tid_agg_rx->reorder_lock);
2015 /* release stored frames up to start of BAR */
2016 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
2017 spin_unlock(&tid_agg_rx->reorder_lock);
2018
2019 kfree_skb(skb);
2020 return RX_QUEUED;
2021 }
2022
2023 /*
2024 * After this point, we only want management frames,
2025 * so we can drop all remaining control frames to
2026 * cooked monitor interfaces.
2027 */
2028 return RX_DROP_MONITOR;
2029 }
2030
2031 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2032 struct ieee80211_mgmt *mgmt,
2033 size_t len)
2034 {
2035 struct ieee80211_local *local = sdata->local;
2036 struct sk_buff *skb;
2037 struct ieee80211_mgmt *resp;
2038
2039 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
2040 /* Not to own unicast address */
2041 return;
2042 }
2043
2044 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
2045 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
2046 /* Not from the current AP or not associated yet. */
2047 return;
2048 }
2049
2050 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2051 /* Too short SA Query request frame */
2052 return;
2053 }
2054
2055 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2056 if (skb == NULL)
2057 return;
2058
2059 skb_reserve(skb, local->hw.extra_tx_headroom);
2060 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2061 memset(resp, 0, 24);
2062 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2063 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2064 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2065 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2066 IEEE80211_STYPE_ACTION);
2067 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2068 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2069 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2070 memcpy(resp->u.action.u.sa_query.trans_id,
2071 mgmt->u.action.u.sa_query.trans_id,
2072 WLAN_SA_QUERY_TR_ID_LEN);
2073
2074 ieee80211_tx_skb(sdata, skb);
2075 }
2076
2077 static ieee80211_rx_result debug_noinline
2078 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2079 {
2080 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2081 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2082
2083 /*
2084 * From here on, look only at management frames.
2085 * Data and control frames are already handled,
2086 * and unknown (reserved) frames are useless.
2087 */
2088 if (rx->skb->len < 24)
2089 return RX_DROP_MONITOR;
2090
2091 if (!ieee80211_is_mgmt(mgmt->frame_control))
2092 return RX_DROP_MONITOR;
2093
2094 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2095 return RX_DROP_MONITOR;
2096
2097 if (ieee80211_drop_unencrypted_mgmt(rx))
2098 return RX_DROP_UNUSABLE;
2099
2100 return RX_CONTINUE;
2101 }
2102
2103 static ieee80211_rx_result debug_noinline
2104 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2105 {
2106 struct ieee80211_local *local = rx->local;
2107 struct ieee80211_sub_if_data *sdata = rx->sdata;
2108 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2109 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2110 int len = rx->skb->len;
2111
2112 if (!ieee80211_is_action(mgmt->frame_control))
2113 return RX_CONTINUE;
2114
2115 /* drop too small frames */
2116 if (len < IEEE80211_MIN_ACTION_SIZE)
2117 return RX_DROP_UNUSABLE;
2118
2119 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2120 return RX_DROP_UNUSABLE;
2121
2122 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2123 return RX_DROP_UNUSABLE;
2124
2125 switch (mgmt->u.action.category) {
2126 case WLAN_CATEGORY_BACK:
2127 /*
2128 * The aggregation code is not prepared to handle
2129 * anything but STA/AP due to the BSSID handling;
2130 * IBSS could work in the code but isn't supported
2131 * by drivers or the standard.
2132 */
2133 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2134 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2135 sdata->vif.type != NL80211_IFTYPE_AP)
2136 break;
2137
2138 /* verify action_code is present */
2139 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2140 break;
2141
2142 switch (mgmt->u.action.u.addba_req.action_code) {
2143 case WLAN_ACTION_ADDBA_REQ:
2144 if (len < (IEEE80211_MIN_ACTION_SIZE +
2145 sizeof(mgmt->u.action.u.addba_req)))
2146 goto invalid;
2147 break;
2148 case WLAN_ACTION_ADDBA_RESP:
2149 if (len < (IEEE80211_MIN_ACTION_SIZE +
2150 sizeof(mgmt->u.action.u.addba_resp)))
2151 goto invalid;
2152 break;
2153 case WLAN_ACTION_DELBA:
2154 if (len < (IEEE80211_MIN_ACTION_SIZE +
2155 sizeof(mgmt->u.action.u.delba)))
2156 goto invalid;
2157 break;
2158 default:
2159 goto invalid;
2160 }
2161
2162 goto queue;
2163 case WLAN_CATEGORY_SPECTRUM_MGMT:
2164 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2165 break;
2166
2167 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2168 break;
2169
2170 /* verify action_code is present */
2171 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2172 break;
2173
2174 switch (mgmt->u.action.u.measurement.action_code) {
2175 case WLAN_ACTION_SPCT_MSR_REQ:
2176 if (len < (IEEE80211_MIN_ACTION_SIZE +
2177 sizeof(mgmt->u.action.u.measurement)))
2178 break;
2179 ieee80211_process_measurement_req(sdata, mgmt, len);
2180 goto handled;
2181 case WLAN_ACTION_SPCT_CHL_SWITCH:
2182 if (len < (IEEE80211_MIN_ACTION_SIZE +
2183 sizeof(mgmt->u.action.u.chan_switch)))
2184 break;
2185
2186 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2187 break;
2188
2189 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2190 break;
2191
2192 goto queue;
2193 }
2194 break;
2195 case WLAN_CATEGORY_SA_QUERY:
2196 if (len < (IEEE80211_MIN_ACTION_SIZE +
2197 sizeof(mgmt->u.action.u.sa_query)))
2198 break;
2199
2200 switch (mgmt->u.action.u.sa_query.action) {
2201 case WLAN_ACTION_SA_QUERY_REQUEST:
2202 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2203 break;
2204 ieee80211_process_sa_query_req(sdata, mgmt, len);
2205 goto handled;
2206 }
2207 break;
2208 case WLAN_CATEGORY_MESH_PLINK:
2209 if (!ieee80211_vif_is_mesh(&sdata->vif))
2210 break;
2211 goto queue;
2212 case WLAN_CATEGORY_MESH_PATH_SEL:
2213 if (!mesh_path_sel_is_hwmp(sdata))
2214 break;
2215 goto queue;
2216 }
2217
2218 return RX_CONTINUE;
2219
2220 invalid:
2221 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2222 /* will return in the next handlers */
2223 return RX_CONTINUE;
2224
2225 handled:
2226 if (rx->sta)
2227 rx->sta->rx_packets++;
2228 dev_kfree_skb(rx->skb);
2229 return RX_QUEUED;
2230
2231 queue:
2232 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2233 skb_queue_tail(&sdata->skb_queue, rx->skb);
2234 ieee80211_queue_work(&local->hw, &sdata->work);
2235 if (rx->sta)
2236 rx->sta->rx_packets++;
2237 return RX_QUEUED;
2238 }
2239
2240 static ieee80211_rx_result debug_noinline
2241 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2242 {
2243 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2244
2245 /* skip known-bad action frames and return them in the next handler */
2246 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2247 return RX_CONTINUE;
2248
2249 /*
2250 * Getting here means the kernel doesn't know how to handle
2251 * it, but maybe userspace does ... include returned frames
2252 * so userspace can register for those to know whether ones
2253 * it transmitted were processed or returned.
2254 */
2255
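/*
 * cfg80211_rx_mgmt() hands the frame to any userspace process (for
 * example wpa_supplicant or hostapd) that registered for this frame
 * type via nl80211 management frame registration, and returns true
 * when at least one such registration matched.
 */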
2256 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2257 rx->skb->data, rx->skb->len,
2258 GFP_ATOMIC)) {
2259 if (rx->sta)
2260 rx->sta->rx_packets++;
2261 dev_kfree_skb(rx->skb);
2262 return RX_QUEUED;
2263 }
2264
2265
2266 return RX_CONTINUE;
2267 }
2268
2269 static ieee80211_rx_result debug_noinline
2270 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2271 {
2272 struct ieee80211_local *local = rx->local;
2273 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2274 struct sk_buff *nskb;
2275 struct ieee80211_sub_if_data *sdata = rx->sdata;
2276 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2277
2278 if (!ieee80211_is_action(mgmt->frame_control))
2279 return RX_CONTINUE;
2280
2281 /*
2282 * For AP mode, hostapd is responsible for handling any action
2283 * frames that we didn't handle, including returning unknown
2284 * ones. For all other modes we will return them to the sender,
2285 * setting the 0x80 bit in the action category, as required by
2286 * 802.11-2007 7.3.1.11.
2287 * Newer versions of hostapd shall also use the management frame
2288 * registration mechanisms, but older ones still use cooked
2289 * monitor interfaces so push all frames there.
2290 */
2291 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2292 (sdata->vif.type == NL80211_IFTYPE_AP ||
2293 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2294 return RX_DROP_MONITOR;
2295
2296 /* do not return rejected action frames */
2297 if (mgmt->u.action.category & 0x80)
2298 return RX_DROP_UNUSABLE;
2299
2300 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2301 GFP_ATOMIC);
2302 if (nskb) {
2303 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2304
2305 nmgmt->u.action.category |= 0x80;
2306 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2307 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2308
2309 memset(nskb->cb, 0, sizeof(nskb->cb));
2310
2311 ieee80211_tx_skb(rx->sdata, nskb);
2312 }
2313 dev_kfree_skb(rx->skb);
2314 return RX_QUEUED;
2315 }
2316
2317 static ieee80211_rx_result debug_noinline
2318 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2319 {
2320 struct ieee80211_sub_if_data *sdata = rx->sdata;
2321 ieee80211_rx_result rxs;
2322 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2323 __le16 stype;
2324
2325 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2326 if (rxs != RX_CONTINUE)
2327 return rxs;
2328
2329 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2330
2331 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2332 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2333 sdata->vif.type != NL80211_IFTYPE_STATION)
2334 return RX_DROP_MONITOR;
2335
2336 switch (stype) {
2337 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2338 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2339 /* process for all: mesh, mlme, ibss */
2340 break;
2341 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2342 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2343 if (is_multicast_ether_addr(mgmt->da) &&
2344 !is_broadcast_ether_addr(mgmt->da))
2345 return RX_DROP_MONITOR;
2346
2347 /* process only for station */
2348 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2349 return RX_DROP_MONITOR;
2350 break;
2351 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2352 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2353 /* process only for ibss */
2354 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2355 return RX_DROP_MONITOR;
2356 break;
2357 default:
2358 return RX_DROP_MONITOR;
2359 }
2360
2361 /* queue up frame and kick off work to process it */
2362 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2363 skb_queue_tail(&sdata->skb_queue, rx->skb);
2364 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2365 if (rx->sta)
2366 rx->sta->rx_packets++;
2367
2368 return RX_QUEUED;
2369 }
2370
2371 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2372 struct ieee80211_rx_data *rx)
2373 {
2374 int keyidx;
2375 unsigned int hdrlen;
2376
2377 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2378 if (rx->skb->len >= hdrlen + 4)
2379 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2380 else
2381 keyidx = -1;
2382
2383 if (!rx->sta) {
2384 /*
2385 * Some hardware seem to generate incorrect Michael MIC
2386 * reports; ignore them to avoid triggering countermeasures.
2387 */
2388 return;
2389 }
2390
2391 if (!ieee80211_has_protected(hdr->frame_control))
2392 return;
2393
2394 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2395 /*
2396 * APs with pairwise keys should never receive Michael MIC
2397 * errors for non-zero keyidx because these are reserved for
2398 * group keys and only the AP is sending real multicast
2399 * frames in the BSS.
2400 */
2401 return;
2402 }
2403
2404 if (!ieee80211_is_data(hdr->frame_control) &&
2405 !ieee80211_is_auth(hdr->frame_control))
2406 return;
2407
2408 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2409 GFP_ATOMIC);
2410 }
2411
2412 /* TODO: use IEEE80211_RX_FRAGMENTED */
2413 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2414 struct ieee80211_rate *rate)
2415 {
2416 struct ieee80211_sub_if_data *sdata;
2417 struct ieee80211_local *local = rx->local;
2418 struct ieee80211_rtap_hdr {
2419 struct ieee80211_radiotap_header hdr;
2420 u8 flags;
2421 u8 rate_or_pad;
2422 __le16 chan_freq;
2423 __le16 chan_flags;
2424 } __packed *rthdr;
2425 struct sk_buff *skb = rx->skb, *skb2;
2426 struct net_device *prev_dev = NULL;
2427 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2428
2429 /*
2430 * If cooked monitor has been processed already, then
2431 * don't do it again. If not, set the flag.
2432 */
2433 if (rx->flags & IEEE80211_RX_CMNTR)
2434 goto out_free_skb;
2435 rx->flags |= IEEE80211_RX_CMNTR;
2436
2437 if (skb_headroom(skb) < sizeof(*rthdr) &&
2438 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2439 goto out_free_skb;
2440
2441 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2442 memset(rthdr, 0, sizeof(*rthdr));
2443 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2444 rthdr->hdr.it_present =
2445 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2446 (1 << IEEE80211_RADIOTAP_CHANNEL));
2447
2448 if (rate) {
2449 rthdr->rate_or_pad = rate->bitrate / 5;
2450 rthdr->hdr.it_present |=
2451 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2452 }
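/*
 * Unit conversion note for the division above: mac80211 stores bitrates
 * in units of 100 kbps while the radiotap RATE field uses units of
 * 500 kbps, hence bitrate / 5; e.g. 54 Mbps is stored as 540 and
 * reported as 108 here.
 */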
2453 rthdr->chan_freq = cpu_to_le16(status->freq);
2454
2455 if (status->band == IEEE80211_BAND_5GHZ)
2456 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2457 IEEE80211_CHAN_5GHZ);
2458 else
2459 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2460 IEEE80211_CHAN_2GHZ);
2461
2462 skb_set_mac_header(skb, 0);
2463 skb->ip_summed = CHECKSUM_UNNECESSARY;
2464 skb->pkt_type = PACKET_OTHERHOST;
2465 skb->protocol = htons(ETH_P_802_2);
2466
2467 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2468 if (!ieee80211_sdata_running(sdata))
2469 continue;
2470
2471 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2472 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2473 continue;
2474
2475 if (prev_dev) {
2476 skb2 = skb_clone(skb, GFP_ATOMIC);
2477 if (skb2) {
2478 skb2->dev = prev_dev;
2479 netif_receive_skb(skb2);
2480 }
2481 }
2482
2483 prev_dev = sdata->dev;
2484 sdata->dev->stats.rx_packets++;
2485 sdata->dev->stats.rx_bytes += skb->len;
2486 }
2487
2488 if (prev_dev) {
2489 skb->dev = prev_dev;
2490 netif_receive_skb(skb);
2491 return;
2492 }
2493
2494 out_free_skb:
2495 dev_kfree_skb(skb);
2496 }
2497
2498 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2499 ieee80211_rx_result res)
2500 {
2501 switch (res) {
2502 case RX_DROP_MONITOR:
2503 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2504 if (rx->sta)
2505 rx->sta->rx_dropped++;
2506 /* fall through */
2507 case RX_CONTINUE: {
2508 struct ieee80211_rate *rate = NULL;
2509 struct ieee80211_supported_band *sband;
2510 struct ieee80211_rx_status *status;
2511
2512 status = IEEE80211_SKB_RXCB((rx->skb));
2513
2514 sband = rx->local->hw.wiphy->bands[status->band];
2515 if (!(status->flag & RX_FLAG_HT))
2516 rate = &sband->bitrates[status->rate_idx];
2517
2518 ieee80211_rx_cooked_monitor(rx, rate);
2519 break;
2520 }
2521 case RX_DROP_UNUSABLE:
2522 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2523 if (rx->sta)
2524 rx->sta->rx_dropped++;
2525 dev_kfree_skb(rx->skb);
2526 break;
2527 case RX_QUEUED:
2528 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2529 break;
2530 }
2531 }
2532
2533 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2534 {
2535 ieee80211_rx_result res = RX_DROP_MONITOR;
2536 struct sk_buff *skb;
2537
2538 #define CALL_RXH(rxh) \
2539 do { \
2540 res = rxh(rx); \
2541 if (res != RX_CONTINUE) \
2542 goto rxh_next; \
2543 } while (0);
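/*
 * For example, CALL_RXH(ieee80211_rx_h_decrypt) below expands to:
 *
 *	do {
 *		res = ieee80211_rx_h_decrypt(rx);
 *		if (res != RX_CONTINUE)
 *			goto rxh_next;
 *	} while (0);
 *
 * so the first handler that does not return RX_CONTINUE short-circuits
 * the chain and its result is reported via ieee80211_rx_handlers_result().
 */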
2544
2545 spin_lock(&rx->local->rx_skb_queue.lock);
2546 if (rx->local->running_rx_handler)
2547 goto unlock;
2548
2549 rx->local->running_rx_handler = true;
2550
2551 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2552 spin_unlock(&rx->local->rx_skb_queue.lock);
2553
2554 /*
2555 * all the other fields are valid across frames
2556 * that belong to an aMPDU since they are on the
2557 * same TID from the same station
2558 */
2559 rx->skb = skb;
2560
2561 CALL_RXH(ieee80211_rx_h_decrypt)
2562 CALL_RXH(ieee80211_rx_h_check_more_data)
2563 CALL_RXH(ieee80211_rx_h_sta_process)
2564 CALL_RXH(ieee80211_rx_h_defragment)
2565 CALL_RXH(ieee80211_rx_h_ps_poll)
2566 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2567 /* must be after MMIC verify so header is counted in MPDU mic */
2568 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2569 CALL_RXH(ieee80211_rx_h_amsdu)
2570 #ifdef CONFIG_MAC80211_MESH
2571 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2572 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2573 #endif
2574 CALL_RXH(ieee80211_rx_h_data)
2575 CALL_RXH(ieee80211_rx_h_ctrl);
2576 CALL_RXH(ieee80211_rx_h_mgmt_check)
2577 CALL_RXH(ieee80211_rx_h_action)
2578 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2579 CALL_RXH(ieee80211_rx_h_action_return)
2580 CALL_RXH(ieee80211_rx_h_mgmt)
2581
2582 rxh_next:
2583 ieee80211_rx_handlers_result(rx, res);
2584 spin_lock(&rx->local->rx_skb_queue.lock);
2585 #undef CALL_RXH
2586 }
2587
2588 rx->local->running_rx_handler = false;
2589
2590 unlock:
2591 spin_unlock(&rx->local->rx_skb_queue.lock);
2592 }
2593
2594 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2595 {
2596 ieee80211_rx_result res = RX_DROP_MONITOR;
2597
2598 #define CALL_RXH(rxh) \
2599 do { \
2600 res = rxh(rx); \
2601 if (res != RX_CONTINUE) \
2602 goto rxh_next; \
2603 } while (0);
2604
2605 CALL_RXH(ieee80211_rx_h_passive_scan)
2606 CALL_RXH(ieee80211_rx_h_check)
2607
2608 ieee80211_rx_reorder_ampdu(rx);
2609
2610 ieee80211_rx_handlers(rx);
2611 return;
2612
2613 rxh_next:
2614 ieee80211_rx_handlers_result(rx, res);
2615
2616 #undef CALL_RXH
2617 }
2618
2619 /*
2620 * This function makes calls into the RX path, therefore
2621 * it has to be invoked under RCU read lock.
2622 */
2623 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2624 {
2625 struct ieee80211_rx_data rx = {
2626 .sta = sta,
2627 .sdata = sta->sdata,
2628 .local = sta->local,
2629 .queue = tid,
2630 .flags = 0,
2631 };
2632 struct tid_ampdu_rx *tid_agg_rx;
2633
2634 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2635 if (!tid_agg_rx)
2636 return;
2637
2638 spin_lock(&tid_agg_rx->reorder_lock);
2639 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
2640 spin_unlock(&tid_agg_rx->reorder_lock);
2641
2642 ieee80211_rx_handlers(&rx);
2643 }
2644
2645 /* main receive path */
2646
2647 static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2648 struct ieee80211_hdr *hdr)
2649 {
2650 struct ieee80211_sub_if_data *sdata = rx->sdata;
2651 struct sk_buff *skb = rx->skb;
2652 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2653 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2654 int multicast = is_multicast_ether_addr(hdr->addr1);
2655
2656 switch (sdata->vif.type) {
2657 case NL80211_IFTYPE_STATION:
2658 if (!bssid && !sdata->u.mgd.use_4addr)
2659 return 0;
2660 if (!multicast &&
2661 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2662 if (!(sdata->dev->flags & IFF_PROMISC) ||
2663 sdata->u.mgd.use_4addr)
2664 return 0;
2665 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2666 }
2667 break;
2668 case NL80211_IFTYPE_ADHOC:
2669 if (!bssid)
2670 return 0;
2671 if (ieee80211_is_beacon(hdr->frame_control)) {
2672 return 1;
2673 }
2674 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2675 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2676 return 0;
2677 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2678 } else if (!multicast &&
2679 compare_ether_addr(sdata->vif.addr,
2680 hdr->addr1) != 0) {
2681 if (!(sdata->dev->flags & IFF_PROMISC))
2682 return 0;
2683 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2684 } else if (!rx->sta) {
2685 int rate_idx;
2686 if (status->flag & RX_FLAG_HT)
2687 rate_idx = 0; /* TODO: HT rates */
2688 else
2689 rate_idx = status->rate_idx;
2690 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2691 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2692 }
2693 break;
2694 case NL80211_IFTYPE_MESH_POINT:
2695 if (!multicast &&
2696 compare_ether_addr(sdata->vif.addr,
2697 hdr->addr1) != 0) {
2698 if (!(sdata->dev->flags & IFF_PROMISC))
2699 return 0;
2700
2701 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2702 }
2703 break;
2704 case NL80211_IFTYPE_AP_VLAN:
2705 case NL80211_IFTYPE_AP:
2706 if (!bssid) {
2707 if (compare_ether_addr(sdata->vif.addr,
2708 hdr->addr1))
2709 return 0;
2710 } else if (!ieee80211_bssid_match(bssid,
2711 sdata->vif.addr)) {
2712 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2713 !ieee80211_is_beacon(hdr->frame_control))
2714 return 0;
2715 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2716 }
2717 break;
2718 case NL80211_IFTYPE_WDS:
2719 if (bssid || !ieee80211_is_data(hdr->frame_control))
2720 return 0;
2721 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2722 return 0;
2723 break;
2724 default:
2725 /* should never get here */
2726 WARN_ON(1);
2727 break;
2728 }
2729
2730 return 1;
2731 }
2732
2733 /*
2734 * This function returns whether the SKB was
2735 * destined for RX processing, which, if
2736 * consume is true, is equivalent to whether
2737 * the skb was consumed.
2738 */
2739 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2740 struct sk_buff *skb, bool consume)
2741 {
2742 struct ieee80211_local *local = rx->local;
2743 struct ieee80211_sub_if_data *sdata = rx->sdata;
2744 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2745 struct ieee80211_hdr *hdr = (void *)skb->data;
2746 int prepares;
2747
2748 rx->skb = skb;
2749 status->rx_flags |= IEEE80211_RX_RA_MATCH;
2750 prepares = prepare_for_handlers(rx, hdr);
2751
2752 if (!prepares)
2753 return false;
2754
2755 if (status->flag & RX_FLAG_MMIC_ERROR) {
2756 if (status->rx_flags & IEEE80211_RX_RA_MATCH)
2757 ieee80211_rx_michael_mic_report(hdr, rx);
2758 return false;
2759 }
2760
2761 if (!consume) {
2762 skb = skb_copy(skb, GFP_ATOMIC);
2763 if (!skb) {
2764 if (net_ratelimit())
2765 wiphy_debug(local->hw.wiphy,
2766 "failed to copy skb for %s\n",
2767 sdata->name);
2768 return true;
2769 }
2770
2771 rx->skb = skb;
2772 }
2773
2774 ieee80211_invoke_rx_handlers(rx);
2775 return true;
2776 }
2777
2778 /*
2779 * This is the actual Rx frames handler. As it belongs to the Rx path it
2780 * must be called with rcu_read_lock protection.
2781 */
2782 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2783 struct sk_buff *skb)
2784 {
2785 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2786 struct ieee80211_local *local = hw_to_local(hw);
2787 struct ieee80211_sub_if_data *sdata;
2788 struct ieee80211_hdr *hdr;
2789 __le16 fc;
2790 struct ieee80211_rx_data rx;
2791 struct ieee80211_sub_if_data *prev;
2792 struct sta_info *sta, *tmp, *prev_sta;
2793 int err = 0;
2794
2795 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2796 memset(&rx, 0, sizeof(rx));
2797 rx.skb = skb;
2798 rx.local = local;
2799
2800 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2801 local->dot11ReceivedFragmentCount++;
2802
2803 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2804 test_bit(SCAN_SW_SCANNING, &local->scanning)))
2805 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2806
2807 if (ieee80211_is_mgmt(fc))
2808 err = skb_linearize(skb);
2809 else
2810 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2811
2812 if (err) {
2813 dev_kfree_skb(skb);
2814 return;
2815 }
2816
2817 hdr = (struct ieee80211_hdr *)skb->data;
2818 ieee80211_parse_qos(&rx);
2819 ieee80211_verify_alignment(&rx);
2820
2821 if (ieee80211_is_data(fc)) {
2822 prev_sta = NULL;
2823
2824 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2825 if (!prev_sta) {
2826 prev_sta = sta;
2827 continue;
2828 }
2829
2830 rx.sta = prev_sta;
2831 rx.sdata = prev_sta->sdata;
2832 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2833
2834 prev_sta = sta;
2835 }
2836
2837 if (prev_sta) {
2838 rx.sta = prev_sta;
2839 rx.sdata = prev_sta->sdata;
2840
2841 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2842 return;
2843 goto out;
2844 }
2845 }
2846
2847 prev = NULL;
2848
2849 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2850 if (!ieee80211_sdata_running(sdata))
2851 continue;
2852
2853 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2854 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2855 continue;
2856
2857 /*
2858 * frame is destined for this interface, but if it's
2859 * not also for the previous one we handle that after
2860 * the loop to avoid copying the SKB once too much
2861 */
2862
2863 if (!prev) {
2864 prev = sdata;
2865 continue;
2866 }
2867
2868 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2869 rx.sdata = prev;
2870 ieee80211_prepare_and_rx_handle(&rx, skb, false);
2871
2872 prev = sdata;
2873 }
2874
2875 if (prev) {
2876 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2877 rx.sdata = prev;
2878
2879 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2880 return;
2881 }
2882
2883 out:
2884 dev_kfree_skb(skb);
2885 }
2886
2887 /*
2888 * This is the receive path handler. It is called by a low level driver when an
2889 * 802.11 MPDU is received from the hardware.
2890 */
2891 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2892 {
2893 struct ieee80211_local *local = hw_to_local(hw);
2894 struct ieee80211_rate *rate = NULL;
2895 struct ieee80211_supported_band *sband;
2896 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2897
2898 WARN_ON_ONCE(softirq_count() == 0);
2899
2900 if (WARN_ON(status->band < 0 ||
2901 status->band >= IEEE80211_NUM_BANDS))
2902 goto drop;
2903
2904 sband = local->hw.wiphy->bands[status->band];
2905 if (WARN_ON(!sband))
2906 goto drop;
2907
2908 /*
2909 * If we're suspending, it is possible although not too likely
2910 * that we'd be receiving frames after having already partially
2911 * quiesced the stack. We can't process such frames then since
2912 * that might, for example, cause stations to be added or other
2913 * driver callbacks be invoked.
2914 */
2915 if (unlikely(local->quiescing || local->suspended))
2916 goto drop;
2917
2918 /*
2919 * The same happens when we're not even started,
2920 * but that's worth a warning.
2921 */
2922 if (WARN_ON(!local->started))
2923 goto drop;
2924
2925 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
2926 /*
2927 * Validate the rate, unless a PLCP error means that
2928 * we probably can't have a valid rate here anyway.
2929 */
2930
2931 if (status->flag & RX_FLAG_HT) {
2932 /*
2933 * rate_idx is MCS index, which can be [0-76]
2934 * as documented on:
2935 *
2936 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2937 *
2938 * Anything else would be some sort of driver or
2939 * hardware error. The driver should catch hardware
2940 * errors.
2941 */
2942 if (WARN((status->rate_idx < 0 ||
2943 status->rate_idx > 76),
2944 "Rate marked as an HT rate but passed "
2945 "status->rate_idx is not "
2946 "an MCS index [0-76]: %d (0x%02x)\n",
2947 status->rate_idx,
2948 status->rate_idx))
2949 goto drop;
2950 } else {
2951 if (WARN_ON(status->rate_idx < 0 ||
2952 status->rate_idx >= sband->n_bitrates))
2953 goto drop;
2954 rate = &sband->bitrates[status->rate_idx];
2955 }
2956 }
2957
2958 status->rx_flags = 0;
2959
2960 /*
2961 * key references and virtual interfaces are protected using RCU
2962 * and this requires that we are in a read-side RCU section during
2963 * receive processing
2964 */
2965 rcu_read_lock();
2966
2967 /*
2968 * Frames with failed FCS/PLCP checksum are not returned,
2969 * all other frames are returned without radiotap header
2970 * if it was previously present.
2971 * Also, frames with less than 16 bytes are dropped.
2972 */
2973 skb = ieee80211_rx_monitor(local, skb, rate);
2974 if (!skb) {
2975 rcu_read_unlock();
2976 return;
2977 }
2978
2979 ieee80211_tpt_led_trig_rx(local,
2980 ((struct ieee80211_hdr *)skb->data)->frame_control,
2981 skb->len);
2982 __ieee80211_rx_handle_packet(hw, skb);
2983
2984 rcu_read_unlock();
2985
2986 return;
2987 drop:
2988 kfree_skb(skb);
2989 }
2990 EXPORT_SYMBOL(ieee80211_rx);
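/*
 * Minimal driver-side sketch (hypothetical driver, illustrative only):
 * a low level driver fills in the per-skb ieee80211_rx_status and then
 * hands the MPDU to mac80211 with ieee80211_rx() from softirq context,
 * or with ieee80211_rx_irqsafe() below from hard-irq context.
 *
 *	static void mydrv_rx_frame(struct ieee80211_hw *hw,
 *				   struct sk_buff *skb,
 *				   enum ieee80211_band band, int freq,
 *				   int rate_idx, int signal)
 *	{
 *		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *		memset(status, 0, sizeof(*status));
 *		status->band = band;
 *		status->freq = freq;
 *		status->rate_idx = rate_idx;
 *		status->signal = signal;
 *
 *		ieee80211_rx(hw, skb);
 *	}
 */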
2991
2992 /* This is a version of the rx handler that can be called from hard irq
2993 * context. Post the skb on the queue and schedule the tasklet */
2994 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
2995 {
2996 struct ieee80211_local *local = hw_to_local(hw);
2997
2998 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2999
3000 skb->pkt_type = IEEE80211_RX_MSG;
3001 skb_queue_tail(&local->skb_queue, skb);
3002 tasklet_schedule(&local->tasklet);
3003 }
3004 EXPORT_SYMBOL(ieee80211_rx_irqsafe);