mac80211: check defrag PN against current frame
net/mac80211/rx.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2005-2006, Devicescape Software, Inc.
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9 * Copyright (C) 2018-2020 Intel Corporation
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <linux/bitops.h>
21 #include <net/mac80211.h>
22 #include <net/ieee80211_radiotap.h>
23 #include <asm/unaligned.h>
24
25 #include "ieee80211_i.h"
26 #include "driver-ops.h"
27 #include "led.h"
28 #include "mesh.h"
29 #include "wep.h"
30 #include "wpa.h"
31 #include "tkip.h"
32 #include "wme.h"
33 #include "rate.h"
34
35 /*
36 * monitor mode reception
37 *
38 * This function cleans up the SKB, i.e. it removes all the stuff
39 * only useful for monitoring.
40 */
41 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
42 unsigned int present_fcs_len,
43 unsigned int rtap_space)
44 {
45 struct ieee80211_hdr *hdr;
46 unsigned int hdrlen;
47 __le16 fc;
48
49 if (present_fcs_len)
50 __pskb_trim(skb, skb->len - present_fcs_len);
51 __pskb_pull(skb, rtap_space);
52
53 hdr = (void *)skb->data;
54 fc = hdr->frame_control;
55
56 /*
57 * Remove the HT-Control field (if present) on management
58 * frames after we've sent the frame to monitoring. We
59 * (currently) don't need it, and don't properly parse
60 * frames with it present, due to the assumption of a
61 * fixed management header length.
62 */
63 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
64 return skb;
65
66 hdrlen = ieee80211_hdrlen(fc);
67 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
68
69 if (!pskb_may_pull(skb, hdrlen)) {
70 dev_kfree_skb(skb);
71 return NULL;
72 }
73
74 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
75 hdrlen - IEEE80211_HT_CTL_LEN);
76 __pskb_pull(skb, IEEE80211_HT_CTL_LEN);
77
78 return skb;
79 }
80
81 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
82 unsigned int rtap_space)
83 {
84 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
85 struct ieee80211_hdr *hdr;
86
87 hdr = (void *)(skb->data + rtap_space);
88
89 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
90 RX_FLAG_FAILED_PLCP_CRC |
91 RX_FLAG_ONLY_MONITOR |
92 RX_FLAG_NO_PSDU))
93 return true;
94
95 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
96 return true;
97
98 if (ieee80211_is_ctl(hdr->frame_control) &&
99 !ieee80211_is_pspoll(hdr->frame_control) &&
100 !ieee80211_is_back_req(hdr->frame_control))
101 return true;
102
103 return false;
104 }
105
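/*
 * NOTE: the length computed here must stay in sync with the fields
 * actually emitted by ieee80211_add_rx_radiotap_header() below.
 */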
106 static int
107 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
108 struct ieee80211_rx_status *status,
109 struct sk_buff *skb)
110 {
111 int len;
112
113 /* always present fields */
114 len = sizeof(struct ieee80211_radiotap_header) + 8;
115
116 /* allocate extra bitmaps */
117 if (status->chains)
118 len += 4 * hweight8(status->chains);
119 /* vendor presence bitmap */
120 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
121 len += 4;
122
123 if (ieee80211_have_rx_timestamp(status)) {
124 len = ALIGN(len, 8);
125 len += 8;
126 }
127 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
128 len += 1;
129
130 /* antenna field, if we don't have per-chain info */
131 if (!status->chains)
132 len += 1;
133
134 /* padding for RX_FLAGS if necessary */
135 len = ALIGN(len, 2);
136
137 if (status->encoding == RX_ENC_HT) /* HT info */
138 len += 3;
139
140 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
141 len = ALIGN(len, 4);
142 len += 8;
143 }
144
145 if (status->encoding == RX_ENC_VHT) {
146 len = ALIGN(len, 2);
147 len += 12;
148 }
149
150 if (local->hw.radiotap_timestamp.units_pos >= 0) {
151 len = ALIGN(len, 8);
152 len += 12;
153 }
154
155 if (status->encoding == RX_ENC_HE &&
156 status->flag & RX_FLAG_RADIOTAP_HE) {
157 len = ALIGN(len, 2);
158 len += 12;
159 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
160 }
161
162 if (status->encoding == RX_ENC_HE &&
163 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
164 len = ALIGN(len, 2);
165 len += 12;
166 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
167 }
168
169 if (status->flag & RX_FLAG_NO_PSDU)
170 len += 1;
171
172 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
173 len = ALIGN(len, 2);
174 len += 4;
175 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
176 }
177
178 if (status->chains) {
179 /* antenna and antenna signal fields */
180 len += 2 * hweight8(status->chains);
181 }
182
183 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
184 struct ieee80211_vendor_radiotap *rtap;
185 int vendor_data_offset = 0;
186
187 /*
188 * The position to look at depends on the existence (or non-
189 * existence) of other elements, so take that into account...
190 */
191 if (status->flag & RX_FLAG_RADIOTAP_HE)
192 vendor_data_offset +=
193 sizeof(struct ieee80211_radiotap_he);
194 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
195 vendor_data_offset +=
196 sizeof(struct ieee80211_radiotap_he_mu);
197 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
198 vendor_data_offset +=
199 sizeof(struct ieee80211_radiotap_lsig);
200
201 rtap = (void *)&skb->data[vendor_data_offset];
202
203 /* alignment for fixed 6-byte vendor data header */
204 len = ALIGN(len, 2);
205 /* vendor data header */
206 len += 6;
207 if (WARN_ON(rtap->align == 0))
208 rtap->align = 1;
209 len = ALIGN(len, rtap->align);
210 len += rtap->len + rtap->pad;
211 }
212
213 return len;
214 }
215
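/*
 * In monitor mode, copy VHT group ID management action frames addressed
 * to the configured MU-MIMO follow address and queue them to the
 * interface's work queue for further processing.
 */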
216 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
217 struct sk_buff *skb,
218 int rtap_space)
219 {
220 struct {
221 struct ieee80211_hdr_3addr hdr;
222 u8 category;
223 u8 action_code;
224 } __packed __aligned(2) action;
225
226 if (!sdata)
227 return;
228
229 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
230
231 if (skb->len < rtap_space + sizeof(action) +
232 VHT_MUMIMO_GROUPS_DATA_LEN)
233 return;
234
235 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
236 return;
237
238 skb_copy_bits(skb, rtap_space, &action, sizeof(action));
239
240 if (!ieee80211_is_action(action.hdr.frame_control))
241 return;
242
243 if (action.category != WLAN_CATEGORY_VHT)
244 return;
245
246 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
247 return;
248
249 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
250 return;
251
252 skb = skb_copy(skb, GFP_ATOMIC);
253 if (!skb)
254 return;
255
256 skb_queue_tail(&sdata->skb_queue, skb);
257 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
258 }
259
260 /*
261 * ieee80211_add_rx_radiotap_header - add radiotap header
262 *
263 * add a radiotap header containing all the fields which the hardware provided.
264 */
265 static void
266 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
267 struct sk_buff *skb,
268 struct ieee80211_rate *rate,
269 int rtap_len, bool has_fcs)
270 {
271 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
272 struct ieee80211_radiotap_header *rthdr;
273 unsigned char *pos;
274 __le32 *it_present;
275 u32 it_present_val;
276 u16 rx_flags = 0;
277 u16 channel_flags = 0;
278 int mpdulen, chain;
279 unsigned long chains = status->chains;
280 struct ieee80211_vendor_radiotap rtap = {};
281 struct ieee80211_radiotap_he he = {};
282 struct ieee80211_radiotap_he_mu he_mu = {};
283 struct ieee80211_radiotap_lsig lsig = {};
284
285 if (status->flag & RX_FLAG_RADIOTAP_HE) {
286 he = *(struct ieee80211_radiotap_he *)skb->data;
287 skb_pull(skb, sizeof(he));
288 WARN_ON_ONCE(status->encoding != RX_ENC_HE);
289 }
290
291 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
292 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
293 skb_pull(skb, sizeof(he_mu));
294 }
295
296 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
297 lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
298 skb_pull(skb, sizeof(lsig));
299 }
300
301 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
302 rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
303 /* rtap.len and rtap.pad are undone immediately */
304 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
305 }
306
307 mpdulen = skb->len;
308 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
309 mpdulen += FCS_LEN;
310
311 rthdr = skb_push(skb, rtap_len);
312 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
313 it_present = &rthdr->it_present;
314
315 /* radiotap header, set always present flags */
316 rthdr->it_len = cpu_to_le16(rtap_len);
317 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
318 BIT(IEEE80211_RADIOTAP_CHANNEL) |
319 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
320
321 if (!status->chains)
322 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
323
324 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
325 it_present_val |=
326 BIT(IEEE80211_RADIOTAP_EXT) |
327 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
328 put_unaligned_le32(it_present_val, it_present);
329 it_present++;
330 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
331 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
332 }
333
334 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
335 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
336 BIT(IEEE80211_RADIOTAP_EXT);
337 put_unaligned_le32(it_present_val, it_present);
338 it_present++;
339 it_present_val = rtap.present;
340 }
341
342 put_unaligned_le32(it_present_val, it_present);
343
344 pos = (void *)(it_present + 1);
345
346 /* the order of the following fields is important */
347
348 /* IEEE80211_RADIOTAP_TSFT */
349 if (ieee80211_have_rx_timestamp(status)) {
350 /* padding */
351 while ((pos - (u8 *)rthdr) & 7)
352 *pos++ = 0;
353 put_unaligned_le64(
354 ieee80211_calculate_rx_timestamp(local, status,
355 mpdulen, 0),
356 pos);
357 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
358 pos += 8;
359 }
360
361 /* IEEE80211_RADIOTAP_FLAGS */
362 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
363 *pos |= IEEE80211_RADIOTAP_F_FCS;
364 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
365 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
366 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
367 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
368 pos++;
369
370 /* IEEE80211_RADIOTAP_RATE */
371 if (!rate || status->encoding != RX_ENC_LEGACY) {
372 /*
 373                  * Without legacy rate information don't add it; for HT/VHT/HE,
 374                  * the MCS/rate information is a separate field in radiotap,
375 * added below. The byte here is needed as padding
376 * for the channel though, so initialise it to 0.
377 */
378 *pos = 0;
379 } else {
380 int shift = 0;
381 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
382 if (status->bw == RATE_INFO_BW_10)
383 shift = 1;
384 else if (status->bw == RATE_INFO_BW_5)
385 shift = 2;
386 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
387 }
388 pos++;
389
390 /* IEEE80211_RADIOTAP_CHANNEL */
391 /* TODO: frequency offset in KHz */
392 put_unaligned_le16(status->freq, pos);
393 pos += 2;
394 if (status->bw == RATE_INFO_BW_10)
395 channel_flags |= IEEE80211_CHAN_HALF;
396 else if (status->bw == RATE_INFO_BW_5)
397 channel_flags |= IEEE80211_CHAN_QUARTER;
398
399 if (status->band == NL80211_BAND_5GHZ ||
400 status->band == NL80211_BAND_6GHZ)
401 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
402 else if (status->encoding != RX_ENC_LEGACY)
403 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
404 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
405 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
406 else if (rate)
407 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
408 else
409 channel_flags |= IEEE80211_CHAN_2GHZ;
410 put_unaligned_le16(channel_flags, pos);
411 pos += 2;
412
413 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
414 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
415 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
416 *pos = status->signal;
417 rthdr->it_present |=
418 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
419 pos++;
420 }
421
422 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
423
424 if (!status->chains) {
425 /* IEEE80211_RADIOTAP_ANTENNA */
426 *pos = status->antenna;
427 pos++;
428 }
429
430 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
431
432 /* IEEE80211_RADIOTAP_RX_FLAGS */
433 /* ensure 2 byte alignment for the 2 byte field as required */
434 if ((pos - (u8 *)rthdr) & 1)
435 *pos++ = 0;
436 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
437 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
438 put_unaligned_le16(rx_flags, pos);
439 pos += 2;
440
441 if (status->encoding == RX_ENC_HT) {
442 unsigned int stbc;
443
444 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
445 *pos++ = local->hw.radiotap_mcs_details;
446 *pos = 0;
447 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
448 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
449 if (status->bw == RATE_INFO_BW_40)
450 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
451 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
452 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
453 if (status->enc_flags & RX_ENC_FLAG_LDPC)
454 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
455 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
456 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
457 pos++;
458 *pos++ = status->rate_idx;
459 }
460
461 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
462 u16 flags = 0;
463
464 /* ensure 4 byte alignment */
465 while ((pos - (u8 *)rthdr) & 3)
466 pos++;
467 rthdr->it_present |=
468 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
469 put_unaligned_le32(status->ampdu_reference, pos);
470 pos += 4;
471 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
472 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
473 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
474 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
475 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
476 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
477 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
478 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
479 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
480 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
481 if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
482 flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
483 put_unaligned_le16(flags, pos);
484 pos += 2;
485 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
486 *pos++ = status->ampdu_delimiter_crc;
487 else
488 *pos++ = 0;
489 *pos++ = 0;
490 }
491
492 if (status->encoding == RX_ENC_VHT) {
493 u16 known = local->hw.radiotap_vht_details;
494
495 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
496 put_unaligned_le16(known, pos);
497 pos += 2;
498 /* flags */
499 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
500 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
501 /* in VHT, STBC is binary */
502 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
503 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
504 if (status->enc_flags & RX_ENC_FLAG_BF)
505 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
506 pos++;
507 /* bandwidth */
508 switch (status->bw) {
509 case RATE_INFO_BW_80:
510 *pos++ = 4;
511 break;
512 case RATE_INFO_BW_160:
513 *pos++ = 11;
514 break;
515 case RATE_INFO_BW_40:
516 *pos++ = 1;
517 break;
518 default:
519 *pos++ = 0;
520 }
521 /* MCS/NSS */
522 *pos = (status->rate_idx << 4) | status->nss;
523 pos += 4;
524 /* coding field */
525 if (status->enc_flags & RX_ENC_FLAG_LDPC)
526 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
527 pos++;
528 /* group ID */
529 pos++;
530 /* partial_aid */
531 pos += 2;
532 }
533
534 if (local->hw.radiotap_timestamp.units_pos >= 0) {
535 u16 accuracy = 0;
536 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
537
538 rthdr->it_present |=
539 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
540
541 /* ensure 8 byte alignment */
542 while ((pos - (u8 *)rthdr) & 7)
543 pos++;
544
545 put_unaligned_le64(status->device_timestamp, pos);
546 pos += sizeof(u64);
547
548 if (local->hw.radiotap_timestamp.accuracy >= 0) {
549 accuracy = local->hw.radiotap_timestamp.accuracy;
550 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
551 }
552 put_unaligned_le16(accuracy, pos);
553 pos += sizeof(u16);
554
555 *pos++ = local->hw.radiotap_timestamp.units_pos;
556 *pos++ = flags;
557 }
558
559 if (status->encoding == RX_ENC_HE &&
560 status->flag & RX_FLAG_RADIOTAP_HE) {
561 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
562
563 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
564 he.data6 |= HE_PREP(DATA6_NSTS,
565 FIELD_GET(RX_ENC_FLAG_STBC_MASK,
566 status->enc_flags));
567 he.data3 |= HE_PREP(DATA3_STBC, 1);
568 } else {
569 he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
570 }
571
572 #define CHECK_GI(s) \
573 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
574 (int)NL80211_RATE_INFO_HE_GI_##s)
575
576 CHECK_GI(0_8);
577 CHECK_GI(1_6);
578 CHECK_GI(3_2);
579
580 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
581 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
582 he.data3 |= HE_PREP(DATA3_CODING,
583 !!(status->enc_flags & RX_ENC_FLAG_LDPC));
584
585 he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
586
587 switch (status->bw) {
588 case RATE_INFO_BW_20:
589 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
590 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
591 break;
592 case RATE_INFO_BW_40:
593 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
594 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
595 break;
596 case RATE_INFO_BW_80:
597 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
598 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
599 break;
600 case RATE_INFO_BW_160:
601 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
602 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
603 break;
604 case RATE_INFO_BW_HE_RU:
605 #define CHECK_RU_ALLOC(s) \
606 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
607 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
608
609 CHECK_RU_ALLOC(26);
610 CHECK_RU_ALLOC(52);
611 CHECK_RU_ALLOC(106);
612 CHECK_RU_ALLOC(242);
613 CHECK_RU_ALLOC(484);
614 CHECK_RU_ALLOC(996);
615 CHECK_RU_ALLOC(2x996);
616
617 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
618 status->he_ru + 4);
619 break;
620 default:
621 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
622 }
623
624 /* ensure 2 byte alignment */
625 while ((pos - (u8 *)rthdr) & 1)
626 pos++;
627 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
628 memcpy(pos, &he, sizeof(he));
629 pos += sizeof(he);
630 }
631
632 if (status->encoding == RX_ENC_HE &&
633 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
634 /* ensure 2 byte alignment */
635 while ((pos - (u8 *)rthdr) & 1)
636 pos++;
637 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
638 memcpy(pos, &he_mu, sizeof(he_mu));
639 pos += sizeof(he_mu);
640 }
641
642 if (status->flag & RX_FLAG_NO_PSDU) {
643 rthdr->it_present |=
644 cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
645 *pos++ = status->zero_length_psdu_type;
646 }
647
648 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
649 /* ensure 2 byte alignment */
650 while ((pos - (u8 *)rthdr) & 1)
651 pos++;
652 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
653 memcpy(pos, &lsig, sizeof(lsig));
654 pos += sizeof(lsig);
655 }
656
657 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
658 *pos++ = status->chain_signal[chain];
659 *pos++ = chain;
660 }
661
662 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
663 /* ensure 2 byte alignment for the vendor field as required */
664 if ((pos - (u8 *)rthdr) & 1)
665 *pos++ = 0;
666 *pos++ = rtap.oui[0];
667 *pos++ = rtap.oui[1];
668 *pos++ = rtap.oui[2];
669 *pos++ = rtap.subns;
670 put_unaligned_le16(rtap.len, pos);
671 pos += 2;
672 /* align the actual payload as requested */
673 while ((pos - (u8 *)rthdr) & (rtap.align - 1))
674 *pos++ = 0;
675 /* data (and possible padding) already follows */
676 }
677 }
678
679 static struct sk_buff *
680 ieee80211_make_monitor_skb(struct ieee80211_local *local,
681 struct sk_buff **origskb,
682 struct ieee80211_rate *rate,
683 int rtap_space, bool use_origskb)
684 {
685 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
686 int rt_hdrlen, needed_headroom;
687 struct sk_buff *skb;
688
689 /* room for the radiotap header based on driver features */
690 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
691 needed_headroom = rt_hdrlen - rtap_space;
692
693 if (use_origskb) {
694 /* only need to expand headroom if necessary */
695 skb = *origskb;
696 *origskb = NULL;
697
698 /*
699 * This shouldn't trigger often because most devices have an
700 * RX header they pull before we get here, and that should
701 * be big enough for our radiotap information. We should
702 * probably export the length to drivers so that we can have
703 * them allocate enough headroom to start with.
704 */
705 if (skb_headroom(skb) < needed_headroom &&
706 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
707 dev_kfree_skb(skb);
708 return NULL;
709 }
710 } else {
711 /*
712 * Need to make a copy and possibly remove radiotap header
713 * and FCS from the original.
714 */
715 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
716
717 if (!skb)
718 return NULL;
719 }
720
721 /* prepend radiotap information */
722 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
723
724 skb_reset_mac_header(skb);
725 skb->ip_summed = CHECKSUM_UNNECESSARY;
726 skb->pkt_type = PACKET_OTHERHOST;
727 skb->protocol = htons(ETH_P_802_2);
728
729 return skb;
730 }
731
732 /*
733 * This function copies a received frame to all monitor interfaces and
734 * returns a cleaned-up SKB that no longer includes the FCS nor the
735 * radiotap header the driver might have added.
736 */
737 static struct sk_buff *
738 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
739 struct ieee80211_rate *rate)
740 {
741 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
742 struct ieee80211_sub_if_data *sdata;
743 struct sk_buff *monskb = NULL;
744 int present_fcs_len = 0;
745 unsigned int rtap_space = 0;
746 struct ieee80211_sub_if_data *monitor_sdata =
747 rcu_dereference(local->monitor_sdata);
748 bool only_monitor = false;
749 unsigned int min_head_len;
750
751 if (status->flag & RX_FLAG_RADIOTAP_HE)
752 rtap_space += sizeof(struct ieee80211_radiotap_he);
753
754 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
755 rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
756
757 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
758 rtap_space += sizeof(struct ieee80211_radiotap_lsig);
759
760 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
761 struct ieee80211_vendor_radiotap *rtap =
762 (void *)(origskb->data + rtap_space);
763
764 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
765 }
766
767 min_head_len = rtap_space;
768
769 /*
770 * First, we may need to make a copy of the skb because
771 * (1) we need to modify it for radiotap (if not present), and
772 * (2) the other RX handlers will modify the skb we got.
773 *
774 * We don't need to, of course, if we aren't going to return
775 * the SKB because it has a bad FCS/PLCP checksum.
776 */
777
778 if (!(status->flag & RX_FLAG_NO_PSDU)) {
779 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
780 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
781 /* driver bug */
782 WARN_ON(1);
783 dev_kfree_skb(origskb);
784 return NULL;
785 }
786 present_fcs_len = FCS_LEN;
787 }
788
789 /* also consider the hdr->frame_control */
790 min_head_len += 2;
791 }
792
793 /* ensure that the expected data elements are in skb head */
794 if (!pskb_may_pull(origskb, min_head_len)) {
795 dev_kfree_skb(origskb);
796 return NULL;
797 }
798
799 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
800
801 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
802 if (only_monitor) {
803 dev_kfree_skb(origskb);
804 return NULL;
805 }
806
807 return ieee80211_clean_skb(origskb, present_fcs_len,
808 rtap_space);
809 }
810
811 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
812
813 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
814 bool last_monitor = list_is_last(&sdata->u.mntr.list,
815 &local->mon_list);
816
817 if (!monskb)
818 monskb = ieee80211_make_monitor_skb(local, &origskb,
819 rate, rtap_space,
820 only_monitor &&
821 last_monitor);
822
823 if (monskb) {
824 struct sk_buff *skb;
825
826 if (last_monitor) {
827 skb = monskb;
828 monskb = NULL;
829 } else {
830 skb = skb_clone(monskb, GFP_ATOMIC);
831 }
832
833 if (skb) {
834 skb->dev = sdata->dev;
835 dev_sw_netstats_rx_add(skb->dev, skb->len);
836 netif_receive_skb(skb);
837 }
838 }
839
840 if (last_monitor)
841 break;
842 }
843
844 /* this happens if last_monitor was erroneously false */
845 dev_kfree_skb(monskb);
846
847 /* ditto */
848 if (!origskb)
849 return NULL;
850
851 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
852 }
853
854 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
855 {
856 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
857 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
858 int tid, seqno_idx, security_idx;
859
860 /* does the frame have a qos control field? */
861 if (ieee80211_is_data_qos(hdr->frame_control)) {
862 u8 *qc = ieee80211_get_qos_ctl(hdr);
863 /* frame has qos control */
864 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
865 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
866 status->rx_flags |= IEEE80211_RX_AMSDU;
867
868 seqno_idx = tid;
869 security_idx = tid;
870 } else {
871 /*
872 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
873 *
874 * Sequence numbers for management frames, QoS data
875 * frames with a broadcast/multicast address in the
876 * Address 1 field, and all non-QoS data frames sent
877 * by QoS STAs are assigned using an additional single
878 * modulo-4096 counter, [...]
879 *
880 * We also use that counter for non-QoS STAs.
881 */
882 seqno_idx = IEEE80211_NUM_TIDS;
883 security_idx = 0;
884 if (ieee80211_is_mgmt(hdr->frame_control))
885 security_idx = IEEE80211_NUM_TIDS;
886 tid = 0;
887 }
888
889 rx->seqno_idx = seqno_idx;
890 rx->security_idx = security_idx;
891 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
892 * For now, set skb->priority to 0 for other cases. */
893 rx->skb->priority = (tid > 7) ? 0 : tid;
894 }
895
896 /**
897 * DOC: Packet alignment
898 *
899 * Drivers always need to pass packets that are aligned to two-byte boundaries
900 * to the stack.
901 *
 902  * Additionally, drivers should, if possible, align the payload data in a way that
903 * guarantees that the contained IP header is aligned to a four-byte
904 * boundary. In the case of regular frames, this simply means aligning the
905 * payload to a four-byte boundary (because either the IP header is directly
906 * contained, or IV/RFC1042 headers that have a length divisible by four are
907 * in front of it). If the payload data is not properly aligned and the
908 * architecture doesn't support efficient unaligned operations, mac80211
909 * will align the data.
910 *
911 * With A-MSDU frames, however, the payload data address must yield two modulo
912 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
913 * push the IP header further back to a multiple of four again. Thankfully, the
914 * specs were sane enough this time around to require padding each A-MSDU
915 * subframe to a length that is a multiple of four.
916 *
 917  * Padding like Atheros hardware adds, which sits between the 802.11 header and
 918  * the payload, is not supported; the driver is required to move the 802.11
919 * header to be directly in front of the payload in that case.
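 *
 * For example, if an A-MSDU subframe starts at an address with
 * (addr & 3) == 2, its 14-byte 802.3 header ends at addr + 14, which is
 * a multiple of four again, so the contained IP header is 4-byte aligned.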
920 */
921 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
922 {
923 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
924 WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
925 #endif
926 }
927
928
929 /* rx handlers */
930
931 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
932 {
933 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
934
935 if (is_multicast_ether_addr(hdr->addr1))
936 return 0;
937
938 return ieee80211_is_robust_mgmt_frame(skb);
939 }
940
941
942 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
943 {
944 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
945
946 if (!is_multicast_ether_addr(hdr->addr1))
947 return 0;
948
949 return ieee80211_is_robust_mgmt_frame(skb);
950 }
951
952
953 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
954 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
955 {
956 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
957 struct ieee80211_mmie *mmie;
958 struct ieee80211_mmie_16 *mmie16;
959
960 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
961 return -1;
962
963 if (!ieee80211_is_robust_mgmt_frame(skb) &&
964 !ieee80211_is_beacon(hdr->frame_control))
965 return -1; /* not a robust management frame */
966
967 mmie = (struct ieee80211_mmie *)
968 (skb->data + skb->len - sizeof(*mmie));
969 if (mmie->element_id == WLAN_EID_MMIE &&
970 mmie->length == sizeof(*mmie) - 2)
971 return le16_to_cpu(mmie->key_id);
972
973 mmie16 = (struct ieee80211_mmie_16 *)
974 (skb->data + skb->len - sizeof(*mmie16));
975 if (skb->len >= 24 + sizeof(*mmie16) &&
976 mmie16->element_id == WLAN_EID_MMIE &&
977 mmie16->length == sizeof(*mmie16) - 2)
978 return le16_to_cpu(mmie16->key_id);
979
980 return -1;
981 }
982
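/*
 * Extract the key index from the frame's security header (IV); returns a
 * negative error if the frame is too short or the index is out of range.
 */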
983 static int ieee80211_get_keyid(struct sk_buff *skb,
984 const struct ieee80211_cipher_scheme *cs)
985 {
986 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
987 __le16 fc;
988 int hdrlen;
989 int minlen;
990 u8 key_idx_off;
991 u8 key_idx_shift;
992 u8 keyid;
993
994 fc = hdr->frame_control;
995 hdrlen = ieee80211_hdrlen(fc);
996
997 if (cs) {
998 minlen = hdrlen + cs->hdr_len;
999 key_idx_off = hdrlen + cs->key_idx_off;
1000 key_idx_shift = cs->key_idx_shift;
1001 } else {
1002 /* WEP, TKIP, CCMP and GCMP */
1003 minlen = hdrlen + IEEE80211_WEP_IV_LEN;
1004 key_idx_off = hdrlen + 3;
1005 key_idx_shift = 6;
1006 }
1007
1008 if (unlikely(skb->len < minlen))
1009 return -EINVAL;
1010
1011 skb_copy_bits(skb, key_idx_off, &keyid, 1);
1012
1013 if (cs)
1014 keyid &= cs->key_idx_mask;
1015 keyid >>= key_idx_shift;
1016
1017 /* cs could use more than the usual two bits for the keyid */
1018 if (unlikely(keyid >= NUM_DEFAULT_KEYS))
1019 return -EINVAL;
1020
1021 return keyid;
1022 }
1023
1024 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
1025 {
1026 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1027 char *dev_addr = rx->sdata->vif.addr;
1028
1029 if (ieee80211_is_data(hdr->frame_control)) {
1030 if (is_multicast_ether_addr(hdr->addr1)) {
1031 if (ieee80211_has_tods(hdr->frame_control) ||
1032 !ieee80211_has_fromds(hdr->frame_control))
1033 return RX_DROP_MONITOR;
1034 if (ether_addr_equal(hdr->addr3, dev_addr))
1035 return RX_DROP_MONITOR;
1036 } else {
1037 if (!ieee80211_has_a4(hdr->frame_control))
1038 return RX_DROP_MONITOR;
1039 if (ether_addr_equal(hdr->addr4, dev_addr))
1040 return RX_DROP_MONITOR;
1041 }
1042 }
1043
1044 /* If there is not an established peer link and this is not a peer link
1045          * establishment frame, beacon or probe, drop the frame.
1046 */
1047
1048 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
1049 struct ieee80211_mgmt *mgmt;
1050
1051 if (!ieee80211_is_mgmt(hdr->frame_control))
1052 return RX_DROP_MONITOR;
1053
1054 if (ieee80211_is_action(hdr->frame_control)) {
1055 u8 category;
1056
1057 /* make sure category field is present */
1058 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
1059 return RX_DROP_MONITOR;
1060
1061 mgmt = (struct ieee80211_mgmt *)hdr;
1062 category = mgmt->u.action.category;
1063 if (category != WLAN_CATEGORY_MESH_ACTION &&
1064 category != WLAN_CATEGORY_SELF_PROTECTED)
1065 return RX_DROP_MONITOR;
1066 return RX_CONTINUE;
1067 }
1068
1069 if (ieee80211_is_probe_req(hdr->frame_control) ||
1070 ieee80211_is_probe_resp(hdr->frame_control) ||
1071 ieee80211_is_beacon(hdr->frame_control) ||
1072 ieee80211_is_auth(hdr->frame_control))
1073 return RX_CONTINUE;
1074
1075 return RX_DROP_MONITOR;
1076 }
1077
1078 return RX_CONTINUE;
1079 }
1080
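/*
 * A reorder slot is ready for release when the driver has marked it as
 * filtered, or when it holds a complete MPDU (i.e. the last queued part
 * is not flagged as having more A-MSDU subframes to follow).
 */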
1081 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
1082 int index)
1083 {
1084 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
1085 struct sk_buff *tail = skb_peek_tail(frames);
1086 struct ieee80211_rx_status *status;
1087
1088 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
1089 return true;
1090
1091 if (!tail)
1092 return false;
1093
1094 status = IEEE80211_SKB_RXCB(tail);
1095 if (status->flag & RX_FLAG_AMSDU_MORE)
1096 return false;
1097
1098 return true;
1099 }
1100
1101 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
1102 struct tid_ampdu_rx *tid_agg_rx,
1103 int index,
1104 struct sk_buff_head *frames)
1105 {
1106 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
1107 struct sk_buff *skb;
1108 struct ieee80211_rx_status *status;
1109
1110 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1111
1112 if (skb_queue_empty(skb_list))
1113 goto no_frame;
1114
1115 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1116 __skb_queue_purge(skb_list);
1117 goto no_frame;
1118 }
1119
1120 /* release frames from the reorder ring buffer */
1121 tid_agg_rx->stored_mpdu_num--;
1122 while ((skb = __skb_dequeue(skb_list))) {
1123 status = IEEE80211_SKB_RXCB(skb);
1124 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
1125 __skb_queue_tail(frames, skb);
1126 }
1127
1128 no_frame:
1129 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
1130 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1131 }
1132
1133 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
1134 struct tid_ampdu_rx *tid_agg_rx,
1135 u16 head_seq_num,
1136 struct sk_buff_head *frames)
1137 {
1138 int index;
1139
1140 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1141
1142 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1143 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1144 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1145 frames);
1146 }
1147 }
1148
1149 /*
1150 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
1151 * the skb was added to the buffer longer than this time ago, the earlier
1152 * frames that have not yet been received are assumed to be lost and the skb
1153 * can be released for processing. This may also release other skb's from the
1154 * reorder buffer if there are no additional gaps between the frames.
1155 *
1156 * Callers must hold tid_agg_rx->reorder_lock.
1157 */
1158 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
1159
1160 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
1161 struct tid_ampdu_rx *tid_agg_rx,
1162 struct sk_buff_head *frames)
1163 {
1164 int index, i, j;
1165
1166 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1167
1168 /* release the buffer until next missing frame */
1169 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1170 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1171 tid_agg_rx->stored_mpdu_num) {
1172 /*
1173 * No buffers ready to be released, but check whether any
1174 * frames in the reorder buffer have timed out.
1175 */
1176 int skipped = 1;
1177 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1178 j = (j + 1) % tid_agg_rx->buf_size) {
1179 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1180 skipped++;
1181 continue;
1182 }
1183 if (skipped &&
1184 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1185 HT_RX_REORDER_BUF_TIMEOUT))
1186 goto set_release_timer;
1187
1188 /* don't leave incomplete A-MSDUs around */
1189 for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1190 i = (i + 1) % tid_agg_rx->buf_size)
1191 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1192
1193 ht_dbg_ratelimited(sdata,
1194 "release an RX reorder frame due to timeout on earlier frames\n");
1195 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1196 frames);
1197
1198 /*
1199 * Increment the head seq# also for the skipped slots.
1200 */
1201 tid_agg_rx->head_seq_num =
1202 (tid_agg_rx->head_seq_num +
1203 skipped) & IEEE80211_SN_MASK;
1204 skipped = 0;
1205 }
1206 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1207 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1208 frames);
1209 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1210 }
1211
1212 if (tid_agg_rx->stored_mpdu_num) {
1213 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1214
1215 for (; j != (index - 1) % tid_agg_rx->buf_size;
1216 j = (j + 1) % tid_agg_rx->buf_size) {
1217 if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1218 break;
1219 }
1220
1221 set_release_timer:
1222
1223 if (!tid_agg_rx->removed)
1224 mod_timer(&tid_agg_rx->reorder_timer,
1225 tid_agg_rx->reorder_time[j] + 1 +
1226 HT_RX_REORDER_BUF_TIMEOUT);
1227 } else {
1228 del_timer(&tid_agg_rx->reorder_timer);
1229 }
1230 }
1231
1232 /*
1233 * As this function belongs to the RX path it must be under
1234 * rcu_read_lock protection. It returns false if the frame
1235 * can be processed immediately, true if it was consumed.
1236 */
1237 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1238 struct tid_ampdu_rx *tid_agg_rx,
1239 struct sk_buff *skb,
1240 struct sk_buff_head *frames)
1241 {
1242 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1243 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1244 u16 sc = le16_to_cpu(hdr->seq_ctrl);
1245 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1246 u16 head_seq_num, buf_size;
1247 int index;
1248 bool ret = true;
1249
1250 spin_lock(&tid_agg_rx->reorder_lock);
1251
1252 /*
1253 * Offloaded BA sessions have no known starting sequence number so pick
1254 * one from first Rxed frame for this tid after BA was started.
1255 */
1256 if (unlikely(tid_agg_rx->auto_seq)) {
1257 tid_agg_rx->auto_seq = false;
1258 tid_agg_rx->ssn = mpdu_seq_num;
1259 tid_agg_rx->head_seq_num = mpdu_seq_num;
1260 }
1261
1262 buf_size = tid_agg_rx->buf_size;
1263 head_seq_num = tid_agg_rx->head_seq_num;
1264
1265 /*
1266 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1267 * be reordered.
1268 */
1269 if (unlikely(!tid_agg_rx->started)) {
1270 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1271 ret = false;
1272 goto out;
1273 }
1274 tid_agg_rx->started = true;
1275 }
1276
1277 /* frame with out of date sequence number */
1278 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1279 dev_kfree_skb(skb);
1280 goto out;
1281 }
1282
1283 /*
1284          * If the frame's sequence number exceeds our buffering window
1285          * size, release some previous frames to make room for this one.
1286 */
1287 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1288 head_seq_num = ieee80211_sn_inc(
1289 ieee80211_sn_sub(mpdu_seq_num, buf_size));
1290 /* release stored frames up to new head to stack */
1291 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1292 head_seq_num, frames);
1293 }
1294
1295 /* Now the new frame is always in the range of the reordering buffer */
1296
1297 index = mpdu_seq_num % tid_agg_rx->buf_size;
1298
1299 /* check if we already stored this frame */
1300 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1301 dev_kfree_skb(skb);
1302 goto out;
1303 }
1304
1305 /*
1306 * If the current MPDU is in the right order and nothing else
1307 * is stored we can process it directly, no need to buffer it.
1308 * If it is first but there's something stored, we may be able
1309 * to release frames after this one.
1310 */
1311 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1312 tid_agg_rx->stored_mpdu_num == 0) {
1313 if (!(status->flag & RX_FLAG_AMSDU_MORE))
1314 tid_agg_rx->head_seq_num =
1315 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1316 ret = false;
1317 goto out;
1318 }
1319
1320 /* put the frame in the reordering buffer */
1321 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1322 if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1323 tid_agg_rx->reorder_time[index] = jiffies;
1324 tid_agg_rx->stored_mpdu_num++;
1325 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1326 }
1327
1328 out:
1329 spin_unlock(&tid_agg_rx->reorder_lock);
1330 return ret;
1331 }
1332
1333 /*
1334  * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Frames that
1335  * can be processed right away are added to the 'frames' queue.
1336 */
1337 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1338 struct sk_buff_head *frames)
1339 {
1340 struct sk_buff *skb = rx->skb;
1341 struct ieee80211_local *local = rx->local;
1342 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1343 struct sta_info *sta = rx->sta;
1344 struct tid_ampdu_rx *tid_agg_rx;
1345 u16 sc;
1346 u8 tid, ack_policy;
1347
1348 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1349 is_multicast_ether_addr(hdr->addr1))
1350 goto dont_reorder;
1351
1352 /*
1353 * filter the QoS data rx stream according to
1354 * STA/TID and check if this STA/TID is on aggregation
1355 */
1356
1357 if (!sta)
1358 goto dont_reorder;
1359
1360 ack_policy = *ieee80211_get_qos_ctl(hdr) &
1361 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1362 tid = ieee80211_get_tid(hdr);
1363
1364 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1365 if (!tid_agg_rx) {
1366 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1367 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1368 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1369 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1370 WLAN_BACK_RECIPIENT,
1371 WLAN_REASON_QSTA_REQUIRE_SETUP);
1372 goto dont_reorder;
1373 }
1374
1375 /* qos null data frames are excluded */
1376 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1377 goto dont_reorder;
1378
1379 /* not part of a BA session */
1380 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1381 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
1382 goto dont_reorder;
1383
1384 /* new, potentially un-ordered, ampdu frame - process it */
1385
1386 /* reset session timer */
1387 if (tid_agg_rx->timeout)
1388 tid_agg_rx->last_rx = jiffies;
1389
1390 /* if this mpdu is fragmented - terminate rx aggregation session */
1391 sc = le16_to_cpu(hdr->seq_ctrl);
1392 if (sc & IEEE80211_SCTL_FRAG) {
1393 skb_queue_tail(&rx->sdata->skb_queue, skb);
1394 ieee80211_queue_work(&local->hw, &rx->sdata->work);
1395 return;
1396 }
1397
1398 /*
1399 * No locking needed -- we will only ever process one
1400 * RX packet at a time, and thus own tid_agg_rx. All
1401 * other code manipulating it needs to (and does) make
1402 * sure that we cannot get to it any more before doing
1403 * anything with it.
1404 */
1405 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1406 frames))
1407 return;
1408
1409 dont_reorder:
1410 __skb_queue_tail(frames, skb);
1411 }
1412
1413 static ieee80211_rx_result debug_noinline
1414 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1415 {
1416 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1417 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1418
1419 if (status->flag & RX_FLAG_DUP_VALIDATED)
1420 return RX_CONTINUE;
1421
1422 /*
1423 * Drop duplicate 802.11 retransmissions
1424 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1425 */
1426
1427 if (rx->skb->len < 24)
1428 return RX_CONTINUE;
1429
1430 if (ieee80211_is_ctl(hdr->frame_control) ||
1431 ieee80211_is_any_nullfunc(hdr->frame_control) ||
1432 is_multicast_ether_addr(hdr->addr1))
1433 return RX_CONTINUE;
1434
1435 if (!rx->sta)
1436 return RX_CONTINUE;
1437
1438 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1439 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1440 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1441 rx->sta->rx_stats.num_duplicates++;
1442 return RX_DROP_UNUSABLE;
1443 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1444 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1445 }
1446
1447 return RX_CONTINUE;
1448 }
1449
1450 static ieee80211_rx_result debug_noinline
1451 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1452 {
1453 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1454
1455 /* Drop disallowed frame classes based on STA auth/assoc state;
1456 * IEEE 802.11, Chap 5.5.
1457 *
1458 * mac80211 filters only based on association state, i.e. it drops
1459 * Class 3 frames from not associated stations. hostapd sends
1460 * deauth/disassoc frames when needed. In addition, hostapd is
1461 * responsible for filtering on both auth and assoc states.
1462 */
1463
1464 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1465 return ieee80211_rx_mesh_check(rx);
1466
1467 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1468 ieee80211_is_pspoll(hdr->frame_control)) &&
1469 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1470 rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1471 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1472 /*
1473 * accept port control frames from the AP even when it's not
1474 * yet marked ASSOC to prevent a race where we don't set the
1475 * assoc bit quickly enough before it sends the first frame
1476 */
1477 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1478 ieee80211_is_data_present(hdr->frame_control)) {
1479 unsigned int hdrlen;
1480 __be16 ethertype;
1481
1482 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1483
1484 if (rx->skb->len < hdrlen + 8)
1485 return RX_DROP_MONITOR;
1486
1487 skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1488 if (ethertype == rx->sdata->control_port_protocol)
1489 return RX_CONTINUE;
1490 }
1491
1492 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1493 cfg80211_rx_spurious_frame(rx->sdata->dev,
1494 hdr->addr2,
1495 GFP_ATOMIC))
1496 return RX_DROP_UNUSABLE;
1497
1498 return RX_DROP_MONITOR;
1499 }
1500
1501 return RX_CONTINUE;
1502 }
1503
1504
1505 static ieee80211_rx_result debug_noinline
1506 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1507 {
1508 struct ieee80211_local *local;
1509 struct ieee80211_hdr *hdr;
1510 struct sk_buff *skb;
1511
1512 local = rx->local;
1513 skb = rx->skb;
1514 hdr = (struct ieee80211_hdr *) skb->data;
1515
1516 if (!local->pspolling)
1517 return RX_CONTINUE;
1518
1519 if (!ieee80211_has_fromds(hdr->frame_control))
1520 /* this is not from AP */
1521 return RX_CONTINUE;
1522
1523 if (!ieee80211_is_data(hdr->frame_control))
1524 return RX_CONTINUE;
1525
1526 if (!ieee80211_has_moredata(hdr->frame_control)) {
1527 /* AP has no more frames buffered for us */
1528 local->pspolling = false;
1529 return RX_CONTINUE;
1530 }
1531
1532 /* more data bit is set, let's request a new frame from the AP */
1533 ieee80211_send_pspoll(local, rx->sdata);
1534
1535 return RX_CONTINUE;
1536 }
1537
1538 static void sta_ps_start(struct sta_info *sta)
1539 {
1540 struct ieee80211_sub_if_data *sdata = sta->sdata;
1541 struct ieee80211_local *local = sdata->local;
1542 struct ps_data *ps;
1543 int tid;
1544
1545 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1546 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1547 ps = &sdata->bss->ps;
1548 else
1549 return;
1550
1551 atomic_inc(&ps->num_sta_ps);
1552 set_sta_flag(sta, WLAN_STA_PS_STA);
1553 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1554 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1555 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1556 sta->sta.addr, sta->sta.aid);
1557
1558 ieee80211_clear_fast_xmit(sta);
1559
1560 if (!sta->sta.txq[0])
1561 return;
1562
1563 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
1564 struct ieee80211_txq *txq = sta->sta.txq[tid];
1565 struct txq_info *txqi = to_txq_info(txq);
1566
1567 spin_lock(&local->active_txq_lock[txq->ac]);
1568 if (!list_empty(&txqi->schedule_order))
1569 list_del_init(&txqi->schedule_order);
1570 spin_unlock(&local->active_txq_lock[txq->ac]);
1571
1572 if (txq_has_queue(txq))
1573 set_bit(tid, &sta->txq_buffered_tids);
1574 else
1575 clear_bit(tid, &sta->txq_buffered_tids);
1576 }
1577 }
1578
1579 static void sta_ps_end(struct sta_info *sta)
1580 {
1581 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1582 sta->sta.addr, sta->sta.aid);
1583
1584 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1585 /*
1586 * Clear the flag only if the other one is still set
1587 * so that the TX path won't start TX'ing new frames
1588 * directly ... In the case that the driver flag isn't
1589                  * set, ieee80211_sta_ps_deliver_wakeup() will clear it.
1590 */
1591 clear_sta_flag(sta, WLAN_STA_PS_STA);
1592 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1593 sta->sta.addr, sta->sta.aid);
1594 return;
1595 }
1596
1597 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1598 clear_sta_flag(sta, WLAN_STA_PS_STA);
1599 ieee80211_sta_ps_deliver_wakeup(sta);
1600 }
1601
1602 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1603 {
1604 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1605 bool in_ps;
1606
1607 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1608
1609 /* Don't let the same PS state be set twice */
1610 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1611 if ((start && in_ps) || (!start && !in_ps))
1612 return -EINVAL;
1613
1614 if (start)
1615 sta_ps_start(sta);
1616 else
1617 sta_ps_end(sta);
1618
1619 return 0;
1620 }
1621 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1622
1623 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1624 {
1625 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1626
1627 if (test_sta_flag(sta, WLAN_STA_SP))
1628 return;
1629
1630 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1631 ieee80211_sta_ps_deliver_poll_response(sta);
1632 else
1633 set_sta_flag(sta, WLAN_STA_PSPOLL);
1634 }
1635 EXPORT_SYMBOL(ieee80211_sta_pspoll);
1636
1637 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1638 {
1639 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1640 int ac = ieee80211_ac_from_tid(tid);
1641
1642 /*
1643 * If this AC is not trigger-enabled do nothing unless the
1644 * driver is calling us after it already checked.
1645 *
1646 * NB: This could/should check a separate bitmap of trigger-
1647 * enabled queues, but for now we only implement uAPSD w/o
1648 * TSPEC changes to the ACs, so they're always the same.
1649 */
1650 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1651 tid != IEEE80211_NUM_TIDS)
1652 return;
1653
1654 /* if we are in a service period, do nothing */
1655 if (test_sta_flag(sta, WLAN_STA_SP))
1656 return;
1657
1658 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1659 ieee80211_sta_ps_deliver_uapsd(sta);
1660 else
1661 set_sta_flag(sta, WLAN_STA_UAPSD);
1662 }
1663 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1664
1665 static ieee80211_rx_result debug_noinline
1666 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1667 {
1668 struct ieee80211_sub_if_data *sdata = rx->sdata;
1669 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1670 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1671
1672 if (!rx->sta)
1673 return RX_CONTINUE;
1674
1675 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1676 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1677 return RX_CONTINUE;
1678
1679 /*
1680 * The device handles station powersave, so don't do anything about
1681          * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1682          * to mac80211 since they're handled by the device.)
1683 */
1684 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1685 return RX_CONTINUE;
1686
1687 /*
1688 * Don't do anything if the station isn't already asleep. In
1689 * the uAPSD case, the station will probably be marked asleep,
1690 * in the PS-Poll case the station must be confused ...
1691 */
1692 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1693 return RX_CONTINUE;
1694
1695 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1696 ieee80211_sta_pspoll(&rx->sta->sta);
1697
1698 /* Free PS Poll skb here instead of returning RX_DROP that would
1699                  * count as a dropped frame. */
1700 dev_kfree_skb(rx->skb);
1701
1702 return RX_QUEUED;
1703 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1704 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1705 ieee80211_has_pm(hdr->frame_control) &&
1706 (ieee80211_is_data_qos(hdr->frame_control) ||
1707 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1708 u8 tid = ieee80211_get_tid(hdr);
1709
1710 ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1711 }
1712
1713 return RX_CONTINUE;
1714 }
1715
1716 static ieee80211_rx_result debug_noinline
1717 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1718 {
1719 struct sta_info *sta = rx->sta;
1720 struct sk_buff *skb = rx->skb;
1721 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1722 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1723 int i;
1724
1725 if (!sta)
1726 return RX_CONTINUE;
1727
1728 /*
1729 * Update last_rx only for IBSS packets which are for the current
1730 * BSSID and for station already AUTHORIZED to avoid keeping the
1731 * current IBSS network alive in cases where other STAs start
1732 * using different BSSID. This will also give the station another
1733 * chance to restart the authentication/authorization in case
1734 * something went wrong the first time.
1735 */
1736 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1737 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1738 NL80211_IFTYPE_ADHOC);
1739 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1740 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1741 sta->rx_stats.last_rx = jiffies;
1742 if (ieee80211_is_data(hdr->frame_control) &&
1743 !is_multicast_ether_addr(hdr->addr1))
1744 sta->rx_stats.last_rate =
1745 sta_stats_encode_rate(status);
1746 }
1747 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1748 sta->rx_stats.last_rx = jiffies;
1749 } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
1750 !is_multicast_ether_addr(hdr->addr1)) {
1751 /*
1752                  * Mesh beacons will update last_rx if they are found to
1753 * match the current local configuration when processed.
1754 */
1755 sta->rx_stats.last_rx = jiffies;
1756 if (ieee80211_is_data(hdr->frame_control))
1757 sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1758 }
1759
1760 sta->rx_stats.fragments++;
1761
1762 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
1763 sta->rx_stats.bytes += rx->skb->len;
1764 u64_stats_update_end(&rx->sta->rx_stats.syncp);
1765
1766 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1767 sta->rx_stats.last_signal = status->signal;
1768 ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
1769 }
1770
1771 if (status->chains) {
1772 sta->rx_stats.chains = status->chains;
1773 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1774 int signal = status->chain_signal[i];
1775
1776 if (!(status->chains & BIT(i)))
1777 continue;
1778
1779 sta->rx_stats.chain_signal_last[i] = signal;
1780 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
1781 -signal);
1782 }
1783 }
1784
1785 if (ieee80211_is_s1g_beacon(hdr->frame_control))
1786 return RX_CONTINUE;
1787
1788 /*
1789 * Change STA power saving mode only at the end of a frame
1790 * exchange sequence, and only for a data or management
1791 * frame as specified in IEEE 802.11-2016 11.2.3.2
1792 */
1793 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1794 !ieee80211_has_morefrags(hdr->frame_control) &&
1795 !is_multicast_ether_addr(hdr->addr1) &&
1796 (ieee80211_is_mgmt(hdr->frame_control) ||
1797 ieee80211_is_data(hdr->frame_control)) &&
1798 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1799 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1800 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1801 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1802 if (!ieee80211_has_pm(hdr->frame_control))
1803 sta_ps_end(sta);
1804 } else {
1805 if (ieee80211_has_pm(hdr->frame_control))
1806 sta_ps_start(sta);
1807 }
1808 }
1809
1810 /* mesh power save support */
1811 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1812 ieee80211_mps_rx_h_sta_process(sta, hdr);
1813
1814 /*
1815 * Drop (qos-)data::nullfunc frames silently, since they
1816 * are used only to control station power saving mode.
1817 */
1818 if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1819 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1820
1821 /*
1822 * If we receive a 4-addr nullfunc frame from a STA
1823 * that has not yet been moved to a 4-addr AP_VLAN interface,
1824 * send the event to userspace; for older hostapd, also drop
1825 * the frame to the monitor interface.
1826 */
1827 if (ieee80211_has_a4(hdr->frame_control) &&
1828 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1829 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1830 !rx->sdata->u.vlan.sta))) {
1831 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1832 cfg80211_rx_unexpected_4addr_frame(
1833 rx->sdata->dev, sta->sta.addr,
1834 GFP_ATOMIC);
1835 return RX_DROP_MONITOR;
1836 }
1837 /*
1838 * Update counter and free packet here to avoid
1839 * counting this as a dropped packet.
1840 */
1841 sta->rx_stats.packets++;
1842 dev_kfree_skb(rx->skb);
1843 return RX_QUEUED;
1844 }
1845
1846 return RX_CONTINUE;
1847 } /* ieee80211_rx_h_sta_process */
1848
1849 static struct ieee80211_key *
1850 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
1851 {
1852 struct ieee80211_key *key = NULL;
1853 struct ieee80211_sub_if_data *sdata = rx->sdata;
1854 int idx2;
1855
1856 /* Make sure key gets set if either BIGTK key index is set so that
1857 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
1858 * Beacon frames and Beacon frames that claim to use another BIGTK key
1859 * index (i.e., a key that we do not have).
1860 */
1861
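/* The two BIGTK slots follow the GTK and IGTK index ranges; try the
 * requested slot first (or the first slot if none was given) and then
 * fall back to the other candidate.
 */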
1862 if (idx < 0) {
1863 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1864 idx2 = idx + 1;
1865 } else {
1866 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1867 idx2 = idx + 1;
1868 else
1869 idx2 = idx - 1;
1870 }
1871
1872 if (rx->sta)
1873 key = rcu_dereference(rx->sta->gtk[idx]);
1874 if (!key)
1875 key = rcu_dereference(sdata->keys[idx]);
1876 if (!key && rx->sta)
1877 key = rcu_dereference(rx->sta->gtk[idx2]);
1878 if (!key)
1879 key = rcu_dereference(sdata->keys[idx2]);
1880
1881 return key;
1882 }
1883
1884 static ieee80211_rx_result debug_noinline
1885 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1886 {
1887 struct sk_buff *skb = rx->skb;
1888 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1889 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1890 int keyidx;
1891 ieee80211_rx_result result = RX_DROP_UNUSABLE;
1892 struct ieee80211_key *sta_ptk = NULL;
1893 struct ieee80211_key *ptk_idx = NULL;
1894 int mmie_keyidx = -1;
1895 __le16 fc;
1896 const struct ieee80211_cipher_scheme *cs = NULL;
1897
1898 if (ieee80211_is_ext(hdr->frame_control))
1899 return RX_CONTINUE;
1900
1901 /*
1902 * Key selection 101
1903 *
1904 * There are five types of keys:
1905 * - GTK (group keys)
1906 * - IGTK (group keys for management frames)
1907 * - BIGTK (group keys for Beacon frames)
1908 * - PTK (pairwise keys)
1909 * - STK (station-to-station pairwise keys)
1910 *
1911 * When selecting a key, we have to distinguish between multicast
1912 * (including broadcast) and unicast frames: the latter can only
1913 * use PTKs and STKs while the former always use GTKs, IGTKs, and
1914 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used;
1915 * then unicast frames can also use key indices like GTKs. Hence, if we
1916 * don't have a PTK/STK we check the key index for a WEP key.
1917 *
1918 * Note that in a regular BSS, multicast frames are sent by the
1919 * AP only, associated stations unicast the frame to the AP first
1920 * which then multicasts it on their behalf.
1921 *
1922 * There is also a slight problem in IBSS mode: GTKs are negotiated
1923 * with each station, which is something we don't currently handle.
1924 * The spec seems to expect that one negotiates the same key with
1925 * every station but there's no such requirement; VLANs could be
1926 * possible.
1927 */
1928
1929 /* start without a key */
1930 rx->key = NULL;
1931 fc = hdr->frame_control;
1932
1933 if (rx->sta) {
1934 int keyid = rx->sta->ptk_idx;
1935 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1936
1937 if (ieee80211_has_protected(fc)) {
1938 cs = rx->sta->cipher_scheme;
1939 keyid = ieee80211_get_keyid(rx->skb, cs);
1940
1941 if (unlikely(keyid < 0))
1942 return RX_DROP_UNUSABLE;
1943
1944 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
1945 }
1946 }
1947
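/* BIP-protected group-addressed management frames do not set the
 * Protected bit; detect them via the MMIE appended to the frame.
 */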
1948 if (!ieee80211_has_protected(fc))
1949 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1950
1951 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1952 rx->key = ptk_idx ? ptk_idx : sta_ptk;
1953 if ((status->flag & RX_FLAG_DECRYPTED) &&
1954 (status->flag & RX_FLAG_IV_STRIPPED))
1955 return RX_CONTINUE;
1956 /* Skip decryption if the frame is not protected. */
1957 if (!ieee80211_has_protected(fc))
1958 return RX_CONTINUE;
1959 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
1960 /* Broadcast/multicast robust management frame / BIP */
1961 if ((status->flag & RX_FLAG_DECRYPTED) &&
1962 (status->flag & RX_FLAG_IV_STRIPPED))
1963 return RX_CONTINUE;
1964
1965 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
1966 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
1967 NUM_DEFAULT_BEACON_KEYS) {
1968 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
1969 skb->data,
1970 skb->len);
1971 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1972 }
1973
1974 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
1975 if (!rx->key)
1976 return RX_CONTINUE; /* Beacon protection not in use */
1977 } else if (mmie_keyidx >= 0) {
1978 /* Broadcast/multicast robust management frame / BIP */
1979 if ((status->flag & RX_FLAG_DECRYPTED) &&
1980 (status->flag & RX_FLAG_IV_STRIPPED))
1981 return RX_CONTINUE;
1982
1983 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1984 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1985 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1986 if (rx->sta) {
1987 if (ieee80211_is_group_privacy_action(skb) &&
1988 test_sta_flag(rx->sta, WLAN_STA_MFP))
1989 return RX_DROP_MONITOR;
1990
1991 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1992 }
1993 if (!rx->key)
1994 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1995 } else if (!ieee80211_has_protected(fc)) {
1996 /*
1997 * The frame was not protected, so skip decryption. However, we
1998 * need to set rx->key if there is a key that could have been
1999 * used so that the frame may be dropped if encryption would
2000 * have been expected.
2001 */
2002 struct ieee80211_key *key = NULL;
2003 struct ieee80211_sub_if_data *sdata = rx->sdata;
2004 int i;
2005
2006 if (ieee80211_is_beacon(fc)) {
2007 key = ieee80211_rx_get_bigtk(rx, -1);
2008 } else if (ieee80211_is_mgmt(fc) &&
2009 is_multicast_ether_addr(hdr->addr1)) {
2010 key = rcu_dereference(rx->sdata->default_mgmt_key);
2011 } else {
2012 if (rx->sta) {
2013 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2014 key = rcu_dereference(rx->sta->gtk[i]);
2015 if (key)
2016 break;
2017 }
2018 }
2019 if (!key) {
2020 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2021 key = rcu_dereference(sdata->keys[i]);
2022 if (key)
2023 break;
2024 }
2025 }
2026 }
2027 if (key)
2028 rx->key = key;
2029 return RX_CONTINUE;
2030 } else {
2031 /*
2032 * The device doesn't give us the IV so we won't be
2033 * able to look up the key. That's ok though, we
2034 * don't need to decrypt the frame, we just won't
2035 * be able to keep statistics accurate.
2036 * Except for key threshold notifications, should
2037 * we somehow allow the driver to tell us which key
2038 * the hardware used if this flag is set?
2039 */
2040 if ((status->flag & RX_FLAG_DECRYPTED) &&
2041 (status->flag & RX_FLAG_IV_STRIPPED))
2042 return RX_CONTINUE;
2043
2044 keyidx = ieee80211_get_keyid(rx->skb, cs);
2045
2046 if (unlikely(keyidx < 0))
2047 return RX_DROP_UNUSABLE;
2048
2049 /* check per-station GTK first, if multicast packet */
2050 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
2051 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
2052
2053 /* if not found, try default key */
2054 if (!rx->key) {
2055 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
2056
2057 /*
2058 * RSNA-protected unicast frames should always be
2059 * sent with pairwise or station-to-station keys,
2060 * but for WEP we allow using a key index as well.
2061 */
2062 if (rx->key &&
2063 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
2064 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
2065 !is_multicast_ether_addr(hdr->addr1))
2066 rx->key = NULL;
2067 }
2068 }
2069
2070 if (rx->key) {
2071 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
2072 return RX_DROP_MONITOR;
2073
2074 /* TODO: add threshold stuff again */
2075 } else {
2076 return RX_DROP_MONITOR;
2077 }
2078
2079 switch (rx->key->conf.cipher) {
2080 case WLAN_CIPHER_SUITE_WEP40:
2081 case WLAN_CIPHER_SUITE_WEP104:
2082 result = ieee80211_crypto_wep_decrypt(rx);
2083 break;
2084 case WLAN_CIPHER_SUITE_TKIP:
2085 result = ieee80211_crypto_tkip_decrypt(rx);
2086 break;
2087 case WLAN_CIPHER_SUITE_CCMP:
2088 result = ieee80211_crypto_ccmp_decrypt(
2089 rx, IEEE80211_CCMP_MIC_LEN);
2090 break;
2091 case WLAN_CIPHER_SUITE_CCMP_256:
2092 result = ieee80211_crypto_ccmp_decrypt(
2093 rx, IEEE80211_CCMP_256_MIC_LEN);
2094 break;
2095 case WLAN_CIPHER_SUITE_AES_CMAC:
2096 result = ieee80211_crypto_aes_cmac_decrypt(rx);
2097 break;
2098 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
2099 result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
2100 break;
2101 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2102 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2103 result = ieee80211_crypto_aes_gmac_decrypt(rx);
2104 break;
2105 case WLAN_CIPHER_SUITE_GCMP:
2106 case WLAN_CIPHER_SUITE_GCMP_256:
2107 result = ieee80211_crypto_gcmp_decrypt(rx);
2108 break;
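/* other ciphers (driver cipher schemes) must have been decrypted by the hardware */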
2109 default:
2110 result = ieee80211_crypto_hw_decrypt(rx);
2111 }
2112
2113 /* the hdr variable is invalid after the decrypt handlers */
2114
2115 /* either the frame has been decrypted or will be dropped */
2116 status->flag |= RX_FLAG_DECRYPTED;
2117
2118 if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE))
2119 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2120 skb->data, skb->len);
2121
2122 return result;
2123 }
2124
2125 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
2126 {
2127 int i;
2128
2129 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2130 skb_queue_head_init(&cache->entries[i].skb_list);
2131 }
2132
2133 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
2134 {
2135 int i;
2136
2137 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2138 __skb_queue_purge(&cache->entries[i].skb_list);
2139 }
2140
2141 static inline struct ieee80211_fragment_entry *
2142 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
2143 unsigned int frag, unsigned int seq, int rx_queue,
2144 struct sk_buff **skb)
2145 {
2146 struct ieee80211_fragment_entry *entry;
2147
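/* reuse the next slot (the oldest entry) in round-robin order */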
2148 entry = &cache->entries[cache->next++];
2149 if (cache->next >= IEEE80211_FRAGMENT_MAX)
2150 cache->next = 0;
2151
2152 __skb_queue_purge(&entry->skb_list);
2153
2154 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
2155 *skb = NULL;
2156 entry->first_frag_time = jiffies;
2157 entry->seq = seq;
2158 entry->rx_queue = rx_queue;
2159 entry->last_frag = frag;
2160 entry->check_sequential_pn = false;
2161 entry->extra_len = 0;
2162
2163 return entry;
2164 }
2165
2166 static inline struct ieee80211_fragment_entry *
2167 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
2168 unsigned int frag, unsigned int seq,
2169 int rx_queue, struct ieee80211_hdr *hdr)
2170 {
2171 struct ieee80211_fragment_entry *entry;
2172 int i, idx;
2173
2174 idx = cache->next;
2175 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
2176 struct ieee80211_hdr *f_hdr;
2177 struct sk_buff *f_skb;
2178
2179 idx--;
2180 if (idx < 0)
2181 idx = IEEE80211_FRAGMENT_MAX - 1;
2182
2183 entry = &cache->entries[idx];
2184 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
2185 entry->rx_queue != rx_queue ||
2186 entry->last_frag + 1 != frag)
2187 continue;
2188
2189 f_skb = __skb_peek(&entry->skb_list);
2190 f_hdr = (struct ieee80211_hdr *) f_skb->data;
2191
2192 /*
2193 * Check that the frame type and addresses match; otherwise try the next cache entry
2194 */
2195 if (((hdr->frame_control ^ f_hdr->frame_control) &
2196 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
2197 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
2198 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
2199 continue;
2200
2201 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
2202 __skb_queue_purge(&entry->skb_list);
2203 continue;
2204 }
2205 return entry;
2206 }
2207
2208 return NULL;
2209 }
2210
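/* CCMP/GCMP protected fragments must carry PNs that increment by exactly one per fragment */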
2211 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
2212 {
2213 return rx->key &&
2214 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2215 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2216 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2217 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2218 ieee80211_has_protected(fc);
2219 }
2220
2221 static ieee80211_rx_result debug_noinline
2222 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2223 {
2224 struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
2225 struct ieee80211_hdr *hdr;
2226 u16 sc;
2227 __le16 fc;
2228 unsigned int frag, seq;
2229 struct ieee80211_fragment_entry *entry;
2230 struct sk_buff *skb;
2231
2232 hdr = (struct ieee80211_hdr *)rx->skb->data;
2233 fc = hdr->frame_control;
2234
2235 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
2236 return RX_CONTINUE;
2237
2238 sc = le16_to_cpu(hdr->seq_ctrl);
2239 frag = sc & IEEE80211_SCTL_FRAG;
2240
2241 if (is_multicast_ether_addr(hdr->addr1)) {
2242 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
2243 goto out_no_led;
2244 }
2245
2246 if (rx->sta)
2247 cache = &rx->sta->frags;
2248
2249 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2250 goto out;
2251
2252 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2253
2254 if (skb_linearize(rx->skb))
2255 return RX_DROP_UNUSABLE;
2256
2257 /*
2258 * skb_linearize() might change skb->data, so previously
2259 * cached variables (in this case, hdr) need to
2260 * be refreshed with the new data.
2261 */
2262 hdr = (struct ieee80211_hdr *)rx->skb->data;
2263 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2264
2265 if (frag == 0) {
2266 /* This is the first fragment of a new frame. */
2267 entry = ieee80211_reassemble_add(cache, frag, seq,
2268 rx->seqno_idx, &(rx->skb));
2269 if (requires_sequential_pn(rx, fc)) {
2270 int queue = rx->security_idx;
2271
2272 /* Store CCMP/GCMP PN so that we can verify that the
2273 * next fragment has a sequential PN value.
2274 */
2275 entry->check_sequential_pn = true;
2276 entry->key_color = rx->key->color;
2277 memcpy(entry->last_pn,
2278 rx->key->u.ccmp.rx_pn[queue],
2279 IEEE80211_CCMP_PN_LEN);
2280 BUILD_BUG_ON(offsetof(struct ieee80211_key,
2281 u.ccmp.rx_pn) !=
2282 offsetof(struct ieee80211_key,
2283 u.gcmp.rx_pn));
2284 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2285 sizeof(rx->key->u.gcmp.rx_pn[queue]));
2286 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2287 IEEE80211_GCMP_PN_LEN);
2288 }
2289 return RX_QUEUED;
2290 }
2291
2292 /* This is a fragment for a frame that should already be pending in
2293 * the fragment cache. Add this fragment to the end of the pending entry.
2294 */
2295 entry = ieee80211_reassemble_find(cache, frag, seq,
2296 rx->seqno_idx, hdr);
2297 if (!entry) {
2298 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2299 return RX_DROP_MONITOR;
2300 }
2301
2302 /* "The receiver shall discard MSDUs and MMPDUs whose constituent
2303 * MPDU PN values are not incrementing in steps of 1."
2304 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2305 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2306 */
2307 if (entry->check_sequential_pn) {
2308 int i;
2309 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2310
2311 if (!requires_sequential_pn(rx, fc))
2312 return RX_DROP_UNUSABLE;
2313
2314 /* Prevent mixed key and fragment cache attacks */
2315 if (entry->key_color != rx->key->color)
2316 return RX_DROP_UNUSABLE;
2317
2318 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
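		/* the expected PN is the previous fragment's PN incremented
		 * by one (carry propagates from the last byte)
		 */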
2319 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2320 pn[i]++;
2321 if (pn[i])
2322 break;
2323 }
2324
2325 rpn = rx->ccm_gcm.pn;
2326 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2327 return RX_DROP_UNUSABLE;
2328 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2329 }
2330
2331 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2332 __skb_queue_tail(&entry->skb_list, rx->skb);
2333 entry->last_frag = frag;
2334 entry->extra_len += rx->skb->len;
2335 if (ieee80211_has_morefrags(fc)) {
2336 rx->skb = NULL;
2337 return RX_QUEUED;
2338 }
2339
2340 rx->skb = __skb_dequeue(&entry->skb_list);
2341 if (skb_tailroom(rx->skb) < entry->extra_len) {
2342 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2343 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2344 GFP_ATOMIC))) {
2345 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2346 __skb_queue_purge(&entry->skb_list);
2347 return RX_DROP_UNUSABLE;
2348 }
2349 }
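	/* append each remaining fragment's payload to the first fragment */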
2350 while ((skb = __skb_dequeue(&entry->skb_list))) {
2351 skb_put_data(rx->skb, skb->data, skb->len);
2352 dev_kfree_skb(skb);
2353 }
2354
2355 out:
2356 ieee80211_led_rx(rx->local);
2357 out_no_led:
2358 if (rx->sta)
2359 rx->sta->rx_stats.packets++;
2360 return RX_CONTINUE;
2361 }
2362
2363 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2364 {
2365 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2366 return -EACCES;
2367
2368 return 0;
2369 }
2370
2371 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2372 {
2373 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
2374 struct sk_buff *skb = rx->skb;
2375 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2376
2377 /*
2378 * Pass through unencrypted frames if the hardware has
2379 * decrypted them already.
2380 */
2381 if (status->flag & RX_FLAG_DECRYPTED)
2382 return 0;
2383
2384 /* check mesh EAPOL frames first */
2385 if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
2386 ieee80211_is_data(fc))) {
2387 struct ieee80211s_hdr *mesh_hdr;
2388 u16 hdr_len = ieee80211_hdrlen(fc);
2389 u16 ethertype_offset;
2390 __be16 ethertype;
2391
2392 if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
2393 goto drop_check;
2394
2395 /* make sure fixed part of mesh header is there, also checks skb len */
2396 if (!pskb_may_pull(rx->skb, hdr_len + 6))
2397 goto drop_check;
2398
2399 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
2400 ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
2401 sizeof(rfc1042_header);
2402
2403 if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
2404 ethertype == rx->sdata->control_port_protocol)
2405 return 0;
2406 }
2407
2408 drop_check:
2409 /* Drop unencrypted frames if key is set. */
2410 if (unlikely(!ieee80211_has_protected(fc) &&
2411 !ieee80211_is_any_nullfunc(fc) &&
2412 ieee80211_is_data(fc) && rx->key))
2413 return -EACCES;
2414
2415 return 0;
2416 }
2417
2418 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2419 {
2420 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2421 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2422 __le16 fc = hdr->frame_control;
2423
2424 /*
2425 * Pass through unencrypted frames if the hardware has
2426 * decrypted them already.
2427 */
2428 if (status->flag & RX_FLAG_DECRYPTED)
2429 return 0;
2430
2431 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2432 if (unlikely(!ieee80211_has_protected(fc) &&
2433 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2434 rx->key)) {
2435 if (ieee80211_is_deauth(fc) ||
2436 ieee80211_is_disassoc(fc))
2437 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2438 rx->skb->data,
2439 rx->skb->len);
2440 return -EACCES;
2441 }
2442 /* BIP does not use the Protected field, so we need to check the MMIE */
2443 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2444 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2445 if (ieee80211_is_deauth(fc) ||
2446 ieee80211_is_disassoc(fc))
2447 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2448 rx->skb->data,
2449 rx->skb->len);
2450 return -EACCES;
2451 }
2452 if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
2453 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2454 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2455 rx->skb->data,
2456 rx->skb->len);
2457 return -EACCES;
2458 }
2459 /*
2460 * When using MFP, Action frames are not allowed prior to
2461 * having configured keys.
2462 */
2463 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2464 ieee80211_is_robust_mgmt_frame(rx->skb)))
2465 return -EACCES;
2466 }
2467
2468 return 0;
2469 }
2470
2471 static int
2472 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2473 {
2474 struct ieee80211_sub_if_data *sdata = rx->sdata;
2475 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2476 bool check_port_control = false;
2477 struct ethhdr *ehdr;
2478 int ret;
2479
2480 *port_control = false;
2481 if (ieee80211_has_a4(hdr->frame_control) &&
2482 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2483 return -1;
2484
2485 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2486 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2487
2488 if (!sdata->u.mgd.use_4addr)
2489 return -1;
2490 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
2491 check_port_control = true;
2492 }
2493
2494 if (is_multicast_ether_addr(hdr->addr1) &&
2495 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2496 return -1;
2497
2498 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2499 if (ret < 0)
2500 return ret;
2501
2502 ehdr = (struct ethhdr *) rx->skb->data;
2503 if (ehdr->h_proto == rx->sdata->control_port_protocol)
2504 *port_control = true;
2505 else if (check_port_control)
2506 return -1;
2507
2508 return 0;
2509 }
2510
2511 /*
2512 * requires that rx->skb is a frame with ethernet header
2513 */
2514 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2515 {
2516 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2517 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2518 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2519
2520 /*
2521 * Allow EAPOL frames to us/the PAE group address regardless
2522 * of whether the frame was encrypted or not.
2523 */
2524 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
2525 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
2526 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
2527 return true;
2528
2529 if (ieee80211_802_1x_port_control(rx) ||
2530 ieee80211_drop_unencrypted(rx, fc))
2531 return false;
2532
2533 return true;
2534 }
2535
2536 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2537 struct ieee80211_rx_data *rx)
2538 {
2539 struct ieee80211_sub_if_data *sdata = rx->sdata;
2540 struct net_device *dev = sdata->dev;
2541
2542 if (unlikely((skb->protocol == sdata->control_port_protocol ||
2543 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
2544 !sdata->control_port_no_preauth)) &&
2545 sdata->control_port_over_nl80211)) {
2546 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2547 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2548
2549 cfg80211_rx_control_port(dev, skb, noencrypt);
2550 dev_kfree_skb(skb);
2551 } else {
2552 memset(skb->cb, 0, sizeof(skb->cb));
2553
2554 /* deliver to local stack */
2555 if (rx->list)
2556 list_add_tail(&skb->list, rx->list);
2557 else
2558 netif_receive_skb(skb);
2559 }
2560 }
2561
2562 /*
2563 * requires that rx->skb is a frame with ethernet header
2564 */
2565 static void
2566 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2567 {
2568 struct ieee80211_sub_if_data *sdata = rx->sdata;
2569 struct net_device *dev = sdata->dev;
2570 struct sk_buff *skb, *xmit_skb;
2571 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2572 struct sta_info *dsta;
2573
2574 skb = rx->skb;
2575 xmit_skb = NULL;
2576
2577 dev_sw_netstats_rx_add(dev, skb->len);
2578
2579 if (rx->sta) {
2580 /* The seqno index has the same property as needed
2581 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2582 * for non-QoS-data frames. Here we know it's a data
2583 * frame, so count MSDUs.
2584 */
2585 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
2586 rx->sta->rx_stats.msdu[rx->seqno_idx]++;
2587 u64_stats_update_end(&rx->sta->rx_stats.syncp);
2588 }
2589
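	/* In AP/AP_VLAN mode (unless bridging is disabled), retransmit frames
	 * destined for other associated stations on the wireless medium;
	 * multicast frames are both delivered locally and retransmitted.
	 */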
2590 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2591 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2592 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2593 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2594 if (is_multicast_ether_addr(ehdr->h_dest) &&
2595 ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2596 /*
2597 * send multicast frames both to higher layers in
2598 * local net stack and back to the wireless medium
2599 */
2600 xmit_skb = skb_copy(skb, GFP_ATOMIC);
2601 if (!xmit_skb)
2602 net_info_ratelimited("%s: failed to clone multicast frame\n",
2603 dev->name);
2604 } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
2605 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
2606 dsta = sta_info_get(sdata, ehdr->h_dest);
2607 if (dsta) {
2608 /*
2609 * The destination station is associated to
2610 * this AP (in this VLAN), so send the frame
2611 * directly to it and do not pass it to local
2612 * net stack.
2613 */
2614 xmit_skb = skb;
2615 skb = NULL;
2616 }
2617 }
2618 }
2619
2620 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2621 if (skb) {
2622 /* 'align' will only take the values 0 or 2 here since all
2623 * frames are required to be aligned to 2-byte boundaries
2624 * when being passed to mac80211; the code here works just
2625 * as well if that isn't true, but mac80211 assumes it can
2626 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2627 */
2628 int align;
2629
2630 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2631 if (align) {
2632 if (WARN_ON(skb_headroom(skb) < 3)) {
2633 dev_kfree_skb(skb);
2634 skb = NULL;
2635 } else {
2636 u8 *data = skb->data;
2637 size_t len = skb_headlen(skb);
2638 skb->data -= align;
2639 memmove(skb->data, data, len);
2640 skb_set_tail_pointer(skb, len);
2641 }
2642 }
2643 }
2644 #endif
2645
2646 if (skb) {
2647 skb->protocol = eth_type_trans(skb, dev);
2648 ieee80211_deliver_skb_to_local_stack(skb, rx);
2649 }
2650
2651 if (xmit_skb) {
2652 /*
2653 * Send to wireless media and increase priority by 256 to
2654 * keep the received priority instead of reclassifying
2655 * the frame (see cfg80211_classify8021d).
2656 */
2657 xmit_skb->priority += 256;
2658 xmit_skb->protocol = htons(ETH_P_802_3);
2659 skb_reset_network_header(xmit_skb);
2660 skb_reset_mac_header(xmit_skb);
2661 dev_queue_xmit(xmit_skb);
2662 }
2663 }
2664
2665 static ieee80211_rx_result debug_noinline
2666 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2667 {
2668 struct net_device *dev = rx->sdata->dev;
2669 struct sk_buff *skb = rx->skb;
2670 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2671 __le16 fc = hdr->frame_control;
2672 struct sk_buff_head frame_list;
2673 struct ethhdr ethhdr;
2674 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2675
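	/* Select which inner A-MSDU subframe addresses must match the outer
	 * 802.11 header; a NULL pointer disables the corresponding check.
	 */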
2676 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2677 check_da = NULL;
2678 check_sa = NULL;
2679 } else switch (rx->sdata->vif.type) {
2680 case NL80211_IFTYPE_AP:
2681 case NL80211_IFTYPE_AP_VLAN:
2682 check_da = NULL;
2683 break;
2684 case NL80211_IFTYPE_STATION:
2685 if (!rx->sta ||
2686 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2687 check_sa = NULL;
2688 break;
2689 case NL80211_IFTYPE_MESH_POINT:
2690 check_sa = NULL;
2691 break;
2692 default:
2693 break;
2694 }
2695
2696 skb->dev = dev;
2697 __skb_queue_head_init(&frame_list);
2698
2699 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2700 rx->sdata->vif.addr,
2701 rx->sdata->vif.type,
2702 data_offset))
2703 return RX_DROP_UNUSABLE;
2704
2705 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2706 rx->sdata->vif.type,
2707 rx->local->hw.extra_tx_headroom,
2708 check_da, check_sa);
2709
2710 while (!skb_queue_empty(&frame_list)) {
2711 rx->skb = __skb_dequeue(&frame_list);
2712
2713 if (!ieee80211_frame_allowed(rx, fc)) {
2714 dev_kfree_skb(rx->skb);
2715 continue;
2716 }
2717
2718 ieee80211_deliver_skb(rx);
2719 }
2720
2721 return RX_QUEUED;
2722 }
2723
2724 static ieee80211_rx_result debug_noinline
2725 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2726 {
2727 struct sk_buff *skb = rx->skb;
2728 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2729 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2730 __le16 fc = hdr->frame_control;
2731
2732 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2733 return RX_CONTINUE;
2734
2735 if (unlikely(!ieee80211_is_data(fc)))
2736 return RX_CONTINUE;
2737
2738 if (unlikely(!ieee80211_is_data_present(fc)))
2739 return RX_DROP_MONITOR;
2740
2741 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2742 switch (rx->sdata->vif.type) {
2743 case NL80211_IFTYPE_AP_VLAN:
2744 if (!rx->sdata->u.vlan.sta)
2745 return RX_DROP_UNUSABLE;
2746 break;
2747 case NL80211_IFTYPE_STATION:
2748 if (!rx->sdata->u.mgd.use_4addr)
2749 return RX_DROP_UNUSABLE;
2750 break;
2751 default:
2752 return RX_DROP_UNUSABLE;
2753 }
2754 }
2755
2756 if (is_multicast_ether_addr(hdr->addr1))
2757 return RX_DROP_UNUSABLE;
2758
2759 return __ieee80211_rx_h_amsdu(rx, 0);
2760 }
2761
2762 #ifdef CONFIG_MAC80211_MESH
2763 static ieee80211_rx_result
2764 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2765 {
2766 struct ieee80211_hdr *fwd_hdr, *hdr;
2767 struct ieee80211_tx_info *info;
2768 struct ieee80211s_hdr *mesh_hdr;
2769 struct sk_buff *skb = rx->skb, *fwd_skb;
2770 struct ieee80211_local *local = rx->local;
2771 struct ieee80211_sub_if_data *sdata = rx->sdata;
2772 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2773 u16 ac, q, hdrlen;
2774 int tailroom = 0;
2775
2776 hdr = (struct ieee80211_hdr *) skb->data;
2777 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2778
2779 /* make sure fixed part of mesh header is there, also checks skb len */
2780 if (!pskb_may_pull(rx->skb, hdrlen + 6))
2781 return RX_DROP_MONITOR;
2782
2783 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2784
2785 /* make sure full mesh header is there, also checks skb len */
2786 if (!pskb_may_pull(rx->skb,
2787 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2788 return RX_DROP_MONITOR;
2789
2790 /* reload pointers */
2791 hdr = (struct ieee80211_hdr *) skb->data;
2792 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2793
2794 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2795 return RX_DROP_MONITOR;
2796
2797 /* frame is in RMC, don't forward */
2798 if (ieee80211_is_data(hdr->frame_control) &&
2799 is_multicast_ether_addr(hdr->addr1) &&
2800 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2801 return RX_DROP_MONITOR;
2802
2803 if (!ieee80211_is_data(hdr->frame_control))
2804 return RX_CONTINUE;
2805
2806 if (!mesh_hdr->ttl)
2807 return RX_DROP_MONITOR;
2808
2809 if (mesh_hdr->flags & MESH_FLAGS_AE) {
2810 struct mesh_path *mppath;
2811 char *proxied_addr;
2812 char *mpp_addr;
2813
2814 if (is_multicast_ether_addr(hdr->addr1)) {
2815 mpp_addr = hdr->addr3;
2816 proxied_addr = mesh_hdr->eaddr1;
2817 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2818 MESH_FLAGS_AE_A5_A6) {
2819 /* has_a4 already checked in ieee80211_rx_mesh_check */
2820 mpp_addr = hdr->addr4;
2821 proxied_addr = mesh_hdr->eaddr2;
2822 } else {
2823 return RX_DROP_MONITOR;
2824 }
2825
2826 rcu_read_lock();
2827 mppath = mpp_path_lookup(sdata, proxied_addr);
2828 if (!mppath) {
2829 mpp_path_add(sdata, proxied_addr, mpp_addr);
2830 } else {
2831 spin_lock_bh(&mppath->state_lock);
2832 if (!ether_addr_equal(mppath->mpp, mpp_addr))
2833 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2834 mppath->exp_time = jiffies;
2835 spin_unlock_bh(&mppath->state_lock);
2836 }
2837 rcu_read_unlock();
2838 }
2839
2840 /* Frame has reached destination. Don't forward */
2841 if (!is_multicast_ether_addr(hdr->addr1) &&
2842 ether_addr_equal(sdata->vif.addr, hdr->addr3))
2843 return RX_CONTINUE;
2844
2845 ac = ieee80211_select_queue_80211(sdata, skb, hdr);
2846 q = sdata->vif.hw_queue[ac];
2847 if (ieee80211_queue_stopped(&local->hw, q)) {
2848 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2849 return RX_DROP_MONITOR;
2850 }
2851 skb_set_queue_mapping(skb, q);
2852
2853 if (!--mesh_hdr->ttl) {
2854 if (!is_multicast_ether_addr(hdr->addr1))
2855 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
2856 dropped_frames_ttl);
2857 goto out;
2858 }
2859
2860 if (!ifmsh->mshcfg.dot11MeshForwarding)
2861 goto out;
2862
2863 if (sdata->crypto_tx_tailroom_needed_cnt)
2864 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2865
2866 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2867 sdata->encrypt_headroom,
2868 tailroom, GFP_ATOMIC);
2869 if (!fwd_skb)
2870 goto out;
2871
2872 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
2873 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2874 info = IEEE80211_SKB_CB(fwd_skb);
2875 memset(info, 0, sizeof(*info));
2876 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
2877 info->control.vif = &rx->sdata->vif;
2878 info->control.jiffies = jiffies;
2879 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2880 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2881 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2882 /* update power mode indication when forwarding */
2883 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2884 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2885 /* mesh power mode flags updated in mesh_nexthop_lookup */
2886 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2887 } else {
2888 /* unable to resolve next hop */
2889 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2890 fwd_hdr->addr3, 0,
2891 WLAN_REASON_MESH_PATH_NOFORWARD,
2892 fwd_hdr->addr2);
2893 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2894 kfree_skb(fwd_skb);
2895 return RX_DROP_MONITOR;
2896 }
2897
2898 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2899 ieee80211_add_pending_skb(local, fwd_skb);
2900 out:
2901 if (is_multicast_ether_addr(hdr->addr1))
2902 return RX_CONTINUE;
2903 return RX_DROP_MONITOR;
2904 }
2905 #endif
2906
2907 static ieee80211_rx_result debug_noinline
2908 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2909 {
2910 struct ieee80211_sub_if_data *sdata = rx->sdata;
2911 struct ieee80211_local *local = rx->local;
2912 struct net_device *dev = sdata->dev;
2913 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2914 __le16 fc = hdr->frame_control;
2915 bool port_control;
2916 int err;
2917
2918 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2919 return RX_CONTINUE;
2920
2921 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2922 return RX_DROP_MONITOR;
2923
2924 /*
2925 * Send unexpected-4addr-frame event to hostapd. For older versions,
2926 * also drop the frame to cooked monitor interfaces.
2927 */
2928 if (ieee80211_has_a4(hdr->frame_control) &&
2929 sdata->vif.type == NL80211_IFTYPE_AP) {
2930 if (rx->sta &&
2931 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2932 cfg80211_rx_unexpected_4addr_frame(
2933 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2934 return RX_DROP_MONITOR;
2935 }
2936
2937 err = __ieee80211_data_to_8023(rx, &port_control);
2938 if (unlikely(err))
2939 return RX_DROP_UNUSABLE;
2940
2941 if (!ieee80211_frame_allowed(rx, fc))
2942 return RX_DROP_MONITOR;
2943
2944 /* directly handle TDLS channel switch requests/responses */
2945 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
2946 cpu_to_be16(ETH_P_TDLS))) {
2947 struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
2948
2949 if (pskb_may_pull(rx->skb,
2950 offsetof(struct ieee80211_tdls_data, u)) &&
2951 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
2952 tf->category == WLAN_CATEGORY_TDLS &&
2953 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
2954 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
2955 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
2956 schedule_work(&local->tdls_chsw_work);
2957 if (rx->sta)
2958 rx->sta->rx_stats.packets++;
2959
2960 return RX_QUEUED;
2961 }
2962 }
2963
2964 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2965 unlikely(port_control) && sdata->bss) {
2966 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2967 u.ap);
2968 dev = sdata->dev;
2969 rx->sdata = sdata;
2970 }
2971
2972 rx->skb->dev = dev;
2973
2974 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2975 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2976 !is_multicast_ether_addr(
2977 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2978 (!local->scanning &&
2979 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
2980 mod_timer(&local->dynamic_ps_timer, jiffies +
2981 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2982
2983 ieee80211_deliver_skb(rx);
2984
2985 return RX_QUEUED;
2986 }
2987
2988 static ieee80211_rx_result debug_noinline
2989 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2990 {
2991 struct sk_buff *skb = rx->skb;
2992 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2993 struct tid_ampdu_rx *tid_agg_rx;
2994 u16 start_seq_num;
2995 u16 tid;
2996
2997 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2998 return RX_CONTINUE;
2999
3000 if (ieee80211_is_back_req(bar->frame_control)) {
3001 struct {
3002 __le16 control, start_seq_num;
3003 } __packed bar_data;
3004 struct ieee80211_event event = {
3005 .type = BAR_RX_EVENT,
3006 };
3007
3008 if (!rx->sta)
3009 return RX_DROP_MONITOR;
3010
3011 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
3012 &bar_data, sizeof(bar_data)))
3013 return RX_DROP_MONITOR;
3014
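		/* the TID is carried in the top four bits of the BAR control field */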
3015 tid = le16_to_cpu(bar_data.control) >> 12;
3016
3017 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
3018 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
3019 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
3020 WLAN_BACK_RECIPIENT,
3021 WLAN_REASON_QSTA_REQUIRE_SETUP);
3022
3023 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
3024 if (!tid_agg_rx)
3025 return RX_DROP_MONITOR;
3026
3027 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
3028 event.u.ba.tid = tid;
3029 event.u.ba.ssn = start_seq_num;
3030 event.u.ba.sta = &rx->sta->sta;
3031
3032 /* reset session timer */
3033 if (tid_agg_rx->timeout)
3034 mod_timer(&tid_agg_rx->session_timer,
3035 TU_TO_EXP_TIME(tid_agg_rx->timeout));
3036
3037 spin_lock(&tid_agg_rx->reorder_lock);
3038 /* release stored frames up to start of BAR */
3039 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
3040 start_seq_num, frames);
3041 spin_unlock(&tid_agg_rx->reorder_lock);
3042
3043 drv_event_callback(rx->local, rx->sdata, &event);
3044
3045 kfree_skb(skb);
3046 return RX_QUEUED;
3047 }
3048
3049 /*
3050 * After this point, we only want management frames,
3051 * so we can drop all remaining control frames to
3052 * cooked monitor interfaces.
3053 */
3054 return RX_DROP_MONITOR;
3055 }
3056
3057 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
3058 struct ieee80211_mgmt *mgmt,
3059 size_t len)
3060 {
3061 struct ieee80211_local *local = sdata->local;
3062 struct sk_buff *skb;
3063 struct ieee80211_mgmt *resp;
3064
3065 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
3066 /* Not addressed to our own unicast address */
3067 return;
3068 }
3069
3070 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
3071 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
3072 /* Not from the current AP or not associated yet. */
3073 return;
3074 }
3075
3076 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
3077 /* Too short SA Query request frame */
3078 return;
3079 }
3080
3081 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
3082 if (skb == NULL)
3083 return;
3084
3085 skb_reserve(skb, local->hw.extra_tx_headroom);
3086 resp = skb_put_zero(skb, 24);
3087 memcpy(resp->da, mgmt->sa, ETH_ALEN);
3088 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
3089 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
3090 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3091 IEEE80211_STYPE_ACTION);
3092 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
3093 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
3094 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
3095 memcpy(resp->u.action.u.sa_query.trans_id,
3096 mgmt->u.action.u.sa_query.trans_id,
3097 WLAN_SA_QUERY_TR_ID_LEN);
3098
3099 ieee80211_tx_skb(sdata, skb);
3100 }
3101
3102 static ieee80211_rx_result debug_noinline
3103 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
3104 {
3105 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3106 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3107
3108 if (ieee80211_is_s1g_beacon(mgmt->frame_control))
3109 return RX_CONTINUE;
3110
3111 /*
3112 * From here on, look only at management frames.
3113 * Data and control frames are already handled,
3114 * and unknown (reserved) frames are useless.
3115 */
3116 if (rx->skb->len < 24)
3117 return RX_DROP_MONITOR;
3118
3119 if (!ieee80211_is_mgmt(mgmt->frame_control))
3120 return RX_DROP_MONITOR;
3121
3122 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
3123 ieee80211_is_beacon(mgmt->frame_control) &&
3124 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
3125 int sig = 0;
3126
3127 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3128 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3129 sig = status->signal;
3130
3131 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
3132 rx->skb->data, rx->skb->len,
3133 ieee80211_rx_status_to_khz(status),
3134 sig);
3135 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
3136 }
3137
3138 if (ieee80211_drop_unencrypted_mgmt(rx))
3139 return RX_DROP_UNUSABLE;
3140
3141 return RX_CONTINUE;
3142 }
3143
3144 static ieee80211_rx_result debug_noinline
3145 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
3146 {
3147 struct ieee80211_local *local = rx->local;
3148 struct ieee80211_sub_if_data *sdata = rx->sdata;
3149 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3150 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3151 int len = rx->skb->len;
3152
3153 if (!ieee80211_is_action(mgmt->frame_control))
3154 return RX_CONTINUE;
3155
3156 /* drop too small frames */
3157 if (len < IEEE80211_MIN_ACTION_SIZE)
3158 return RX_DROP_UNUSABLE;
3159
3160 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
3161 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
3162 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
3163 return RX_DROP_UNUSABLE;
3164
3165 switch (mgmt->u.action.category) {
3166 case WLAN_CATEGORY_HT:
3167 /* reject HT action frames from stations not supporting HT */
3168 if (!rx->sta->sta.ht_cap.ht_supported)
3169 goto invalid;
3170
3171 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3172 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3173 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3174 sdata->vif.type != NL80211_IFTYPE_AP &&
3175 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3176 break;
3177
3178 /* verify action & smps_control/chanwidth are present */
3179 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3180 goto invalid;
3181
3182 switch (mgmt->u.action.u.ht_smps.action) {
3183 case WLAN_HT_ACTION_SMPS: {
3184 struct ieee80211_supported_band *sband;
3185 enum ieee80211_smps_mode smps_mode;
3186 struct sta_opmode_info sta_opmode = {};
3187
3188 if (sdata->vif.type != NL80211_IFTYPE_AP &&
3189 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
3190 goto handled;
3191
3192 /* convert to HT capability */
3193 switch (mgmt->u.action.u.ht_smps.smps_control) {
3194 case WLAN_HT_SMPS_CONTROL_DISABLED:
3195 smps_mode = IEEE80211_SMPS_OFF;
3196 break;
3197 case WLAN_HT_SMPS_CONTROL_STATIC:
3198 smps_mode = IEEE80211_SMPS_STATIC;
3199 break;
3200 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
3201 smps_mode = IEEE80211_SMPS_DYNAMIC;
3202 break;
3203 default:
3204 goto invalid;
3205 }
3206
3207 /* if no change do nothing */
3208 if (rx->sta->sta.smps_mode == smps_mode)
3209 goto handled;
3210 rx->sta->sta.smps_mode = smps_mode;
3211 sta_opmode.smps_mode =
3212 ieee80211_smps_mode_to_smps_mode(smps_mode);
3213 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
3214
3215 sband = rx->local->hw.wiphy->bands[status->band];
3216
3217 rate_control_rate_update(local, sband, rx->sta,
3218 IEEE80211_RC_SMPS_CHANGED);
3219 cfg80211_sta_opmode_change_notify(sdata->dev,
3220 rx->sta->addr,
3221 &sta_opmode,
3222 GFP_ATOMIC);
3223 goto handled;
3224 }
3225 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
3226 struct ieee80211_supported_band *sband;
3227 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
3228 enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
3229 struct sta_opmode_info sta_opmode = {};
3230
3231 /* If it doesn't support 40 MHz it can't change ... */
3232 if (!(rx->sta->sta.ht_cap.cap &
3233 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3234 goto handled;
3235
3236 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
3237 max_bw = IEEE80211_STA_RX_BW_20;
3238 else
3239 max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
3240
3241 /* set cur_max_bandwidth and recalc sta bw */
3242 rx->sta->cur_max_bandwidth = max_bw;
3243 new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
3244
3245 if (rx->sta->sta.bandwidth == new_bw)
3246 goto handled;
3247
3248 rx->sta->sta.bandwidth = new_bw;
3249 sband = rx->local->hw.wiphy->bands[status->band];
3250 sta_opmode.bw =
3251 ieee80211_sta_rx_bw_to_chan_width(rx->sta);
3252 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
3253
3254 rate_control_rate_update(local, sband, rx->sta,
3255 IEEE80211_RC_BW_CHANGED);
3256 cfg80211_sta_opmode_change_notify(sdata->dev,
3257 rx->sta->addr,
3258 &sta_opmode,
3259 GFP_ATOMIC);
3260 goto handled;
3261 }
3262 default:
3263 goto invalid;
3264 }
3265
3266 break;
3267 case WLAN_CATEGORY_PUBLIC:
3268 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3269 goto invalid;
3270 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3271 break;
3272 if (!rx->sta)
3273 break;
3274 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
3275 break;
3276 if (mgmt->u.action.u.ext_chan_switch.action_code !=
3277 WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3278 break;
3279 if (len < offsetof(struct ieee80211_mgmt,
3280 u.action.u.ext_chan_switch.variable))
3281 goto invalid;
3282 goto queue;
3283 case WLAN_CATEGORY_VHT:
3284 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3285 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3286 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3287 sdata->vif.type != NL80211_IFTYPE_AP &&
3288 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3289 break;
3290
3291 /* verify action code is present */
3292 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3293 goto invalid;
3294
3295 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3296 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3297 /* verify opmode is present */
3298 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3299 goto invalid;
3300 goto queue;
3301 }
3302 case WLAN_VHT_ACTION_GROUPID_MGMT: {
3303 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3304 goto invalid;
3305 goto queue;
3306 }
3307 default:
3308 break;
3309 }
3310 break;
3311 case WLAN_CATEGORY_BACK:
3312 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3313 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3314 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3315 sdata->vif.type != NL80211_IFTYPE_AP &&
3316 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3317 break;
3318
3319 /* verify action_code is present */
3320 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3321 break;
3322
3323 switch (mgmt->u.action.u.addba_req.action_code) {
3324 case WLAN_ACTION_ADDBA_REQ:
3325 if (len < (IEEE80211_MIN_ACTION_SIZE +
3326 sizeof(mgmt->u.action.u.addba_req)))
3327 goto invalid;
3328 break;
3329 case WLAN_ACTION_ADDBA_RESP:
3330 if (len < (IEEE80211_MIN_ACTION_SIZE +
3331 sizeof(mgmt->u.action.u.addba_resp)))
3332 goto invalid;
3333 break;
3334 case WLAN_ACTION_DELBA:
3335 if (len < (IEEE80211_MIN_ACTION_SIZE +
3336 sizeof(mgmt->u.action.u.delba)))
3337 goto invalid;
3338 break;
3339 default:
3340 goto invalid;
3341 }
3342
3343 goto queue;
3344 case WLAN_CATEGORY_SPECTRUM_MGMT:
3345 /* verify action_code is present */
3346 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3347 break;
3348
3349 switch (mgmt->u.action.u.measurement.action_code) {
3350 case WLAN_ACTION_SPCT_MSR_REQ:
3351 if (status->band != NL80211_BAND_5GHZ)
3352 break;
3353
3354 if (len < (IEEE80211_MIN_ACTION_SIZE +
3355 sizeof(mgmt->u.action.u.measurement)))
3356 break;
3357
3358 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3359 break;
3360
3361 ieee80211_process_measurement_req(sdata, mgmt, len);
3362 goto handled;
3363 case WLAN_ACTION_SPCT_CHL_SWITCH: {
3364 u8 *bssid;
3365 if (len < (IEEE80211_MIN_ACTION_SIZE +
3366 sizeof(mgmt->u.action.u.chan_switch)))
3367 break;
3368
3369 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3370 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3371 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3372 break;
3373
3374 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3375 bssid = sdata->u.mgd.bssid;
3376 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3377 bssid = sdata->u.ibss.bssid;
3378 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3379 bssid = mgmt->sa;
3380 else
3381 break;
3382
3383 if (!ether_addr_equal(mgmt->bssid, bssid))
3384 break;
3385
3386 goto queue;
3387 }
3388 }
3389 break;
3390 case WLAN_CATEGORY_SELF_PROTECTED:
3391 if (len < (IEEE80211_MIN_ACTION_SIZE +
3392 sizeof(mgmt->u.action.u.self_prot.action_code)))
3393 break;
3394
3395 switch (mgmt->u.action.u.self_prot.action_code) {
3396 case WLAN_SP_MESH_PEERING_OPEN:
3397 case WLAN_SP_MESH_PEERING_CLOSE:
3398 case WLAN_SP_MESH_PEERING_CONFIRM:
3399 if (!ieee80211_vif_is_mesh(&sdata->vif))
3400 goto invalid;
3401 if (sdata->u.mesh.user_mpm)
3402 /* userspace handles this frame */
3403 break;
3404 goto queue;
3405 case WLAN_SP_MGK_INFORM:
3406 case WLAN_SP_MGK_ACK:
3407 if (!ieee80211_vif_is_mesh(&sdata->vif))
3408 goto invalid;
3409 break;
3410 }
3411 break;
3412 case WLAN_CATEGORY_MESH_ACTION:
3413 if (len < (IEEE80211_MIN_ACTION_SIZE +
3414 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3415 break;
3416
3417 if (!ieee80211_vif_is_mesh(&sdata->vif))
3418 break;
3419 if (mesh_action_is_path_sel(mgmt) &&
3420 !mesh_path_sel_is_hwmp(sdata))
3421 break;
3422 goto queue;
3423 }
3424
3425 return RX_CONTINUE;
3426
3427 invalid:
3428 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3429 /* will be returned in the next handlers */
3430 return RX_CONTINUE;
3431
3432 handled:
3433 if (rx->sta)
3434 rx->sta->rx_stats.packets++;
3435 dev_kfree_skb(rx->skb);
3436 return RX_QUEUED;
3437
3438 queue:
3439 skb_queue_tail(&sdata->skb_queue, rx->skb);
3440 ieee80211_queue_work(&local->hw, &sdata->work);
3441 if (rx->sta)
3442 rx->sta->rx_stats.packets++;
3443 return RX_QUEUED;
3444 }
3445
3446 static ieee80211_rx_result debug_noinline
3447 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3448 {
3449 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3450 int sig = 0;
3451
3452 /* skip known-bad action frames and return them in the next handler */
3453 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3454 return RX_CONTINUE;
3455
3456 /*
3457 * Getting here means the kernel doesn't know how to handle
3458 * it, but maybe userspace does ... include returned frames
3459 * so userspace can register for those to know whether frames
3460 * it transmitted were processed or returned.
3461 */
3462
3463 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3464 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3465 sig = status->signal;
3466
3467 if (cfg80211_rx_mgmt_khz(&rx->sdata->wdev,
3468 ieee80211_rx_status_to_khz(status), sig,
3469 rx->skb->data, rx->skb->len, 0)) {
3470 if (rx->sta)
3471 rx->sta->rx_stats.packets++;
3472 dev_kfree_skb(rx->skb);
3473 return RX_QUEUED;
3474 }
3475
3476 return RX_CONTINUE;
3477 }
3478
3479 static ieee80211_rx_result debug_noinline
3480 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
3481 {
3482 struct ieee80211_sub_if_data *sdata = rx->sdata;
3483 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3484 int len = rx->skb->len;
3485
3486 if (!ieee80211_is_action(mgmt->frame_control))
3487 return RX_CONTINUE;
3488
3489 switch (mgmt->u.action.category) {
3490 case WLAN_CATEGORY_SA_QUERY:
3491 if (len < (IEEE80211_MIN_ACTION_SIZE +
3492 sizeof(mgmt->u.action.u.sa_query)))
3493 break;
3494
3495 switch (mgmt->u.action.u.sa_query.action) {
3496 case WLAN_ACTION_SA_QUERY_REQUEST:
3497 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3498 break;
3499 ieee80211_process_sa_query_req(sdata, mgmt, len);
3500 goto handled;
3501 }
3502 break;
3503 }
3504
3505 return RX_CONTINUE;
3506
3507 handled:
3508 if (rx->sta)
3509 rx->sta->rx_stats.packets++;
3510 dev_kfree_skb(rx->skb);
3511 return RX_QUEUED;
3512 }
3513
3514 static ieee80211_rx_result debug_noinline
3515 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3516 {
3517 struct ieee80211_local *local = rx->local;
3518 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3519 struct sk_buff *nskb;
3520 struct ieee80211_sub_if_data *sdata = rx->sdata;
3521 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3522
3523 if (!ieee80211_is_action(mgmt->frame_control))
3524 return RX_CONTINUE;
3525
3526 /*
3527 * For AP mode, hostapd is responsible for handling any action
3528 * frames that we didn't handle, including returning unknown
3529 * ones. For all other modes we will return them to the sender,
3530 * setting the 0x80 bit in the action category, as required by
3531 * 802.11-2012 9.24.4.
3532 * Newer versions of hostapd also use the management frame
3533 * registration mechanisms, but older ones still use cooked
3534 * monitor interfaces, so push all frames there.
3535 */
3536 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3537 (sdata->vif.type == NL80211_IFTYPE_AP ||
3538 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3539 return RX_DROP_MONITOR;
3540
3541 if (is_multicast_ether_addr(mgmt->da))
3542 return RX_DROP_MONITOR;
3543
3544 /* do not return rejected action frames */
3545 if (mgmt->u.action.category & 0x80)
3546 return RX_DROP_UNUSABLE;
3547
3548 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3549 GFP_ATOMIC);
3550 if (nskb) {
3551 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3552
3553 nmgmt->u.action.category |= 0x80;
3554 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3555 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3556
3557 memset(nskb->cb, 0, sizeof(nskb->cb));
3558
3559 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3560 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3561
3562 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3563 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3564 IEEE80211_TX_CTL_NO_CCK_RATE;
3565 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3566 info->hw_queue =
3567 local->hw.offchannel_tx_hw_queue;
3568 }
3569
3570 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
3571 status->band);
3572 }
3573 dev_kfree_skb(rx->skb);
3574 return RX_QUEUED;
3575 }
3576
3577 static ieee80211_rx_result debug_noinline
3578 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
3579 {
3580 struct ieee80211_sub_if_data *sdata = rx->sdata;
3581 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
3582
3583 if (!ieee80211_is_ext(hdr->frame_control))
3584 return RX_CONTINUE;
3585
3586 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3587 return RX_DROP_MONITOR;
3588
3589 /* for now only beacons are ext, so queue them */
3590 skb_queue_tail(&sdata->skb_queue, rx->skb);
3591 ieee80211_queue_work(&rx->local->hw, &sdata->work);
3592 if (rx->sta)
3593 rx->sta->rx_stats.packets++;
3594
3595 return RX_QUEUED;
3596 }
3597
3598 static ieee80211_rx_result debug_noinline
3599 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3600 {
3601 struct ieee80211_sub_if_data *sdata = rx->sdata;
3602 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3603 __le16 stype;
3604
3605 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
3606
3607 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3608 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3609 sdata->vif.type != NL80211_IFTYPE_OCB &&
3610 sdata->vif.type != NL80211_IFTYPE_STATION)
3611 return RX_DROP_MONITOR;
3612
3613 switch (stype) {
3614 case cpu_to_le16(IEEE80211_STYPE_AUTH):
3615 case cpu_to_le16(IEEE80211_STYPE_BEACON):
3616 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3617 /* process for all: mesh, mlme, ibss */
3618 break;
3619 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3620 if (is_multicast_ether_addr(mgmt->da) &&
3621 !is_broadcast_ether_addr(mgmt->da))
3622 return RX_DROP_MONITOR;
3623
3624 /* process only for station/IBSS */
3625 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3626 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3627 return RX_DROP_MONITOR;
3628 break;
3629 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3630 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3631 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3632 if (is_multicast_ether_addr(mgmt->da) &&
3633 !is_broadcast_ether_addr(mgmt->da))
3634 return RX_DROP_MONITOR;
3635
3636 /* process only for station */
3637 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3638 return RX_DROP_MONITOR;
3639 break;
3640 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3641 /* process only for ibss and mesh */
3642 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3643 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3644 return RX_DROP_MONITOR;
3645 break;
3646 default:
3647 return RX_DROP_MONITOR;
3648 }
3649
3650 /* queue up frame and kick off work to process it */
3651 skb_queue_tail(&sdata->skb_queue, rx->skb);
3652 ieee80211_queue_work(&rx->local->hw, &sdata->work);
3653 if (rx->sta)
3654 rx->sta->rx_stats.packets++;
3655
3656 return RX_QUEUED;
3657 }
3658
3659 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3660 struct ieee80211_rate *rate)
3661 {
3662 struct ieee80211_sub_if_data *sdata;
3663 struct ieee80211_local *local = rx->local;
3664 struct sk_buff *skb = rx->skb, *skb2;
3665 struct net_device *prev_dev = NULL;
3666 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3667 int needed_headroom;
3668
3669 /*
3670 * If cooked monitor has been processed already, then
3671 * don't do it again. If not, set the flag.
3672 */
3673 if (rx->flags & IEEE80211_RX_CMNTR)
3674 goto out_free_skb;
3675 rx->flags |= IEEE80211_RX_CMNTR;
3676
3677 /* If there are no cooked monitor interfaces, just free the SKB */
3678 if (!local->cooked_mntrs)
3679 goto out_free_skb;
3680
3681 /* vendor data has already been removed at this point */
3682 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3683 /* room for the radiotap header based on driver features */
3684 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3685
3686 if (skb_headroom(skb) < needed_headroom &&
3687 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3688 goto out_free_skb;
3689
3690 /* prepend radiotap information */
3691 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3692 false);
3693
3694 skb_reset_mac_header(skb);
3695 skb->ip_summed = CHECKSUM_UNNECESSARY;
3696 skb->pkt_type = PACKET_OTHERHOST;
3697 skb->protocol = htons(ETH_P_802_2);
3698
3699 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3700 if (!ieee80211_sdata_running(sdata))
3701 continue;
3702
3703 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3704 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3705 continue;
3706
3707 if (prev_dev) {
3708 skb2 = skb_clone(skb, GFP_ATOMIC);
3709 if (skb2) {
3710 skb2->dev = prev_dev;
3711 netif_receive_skb(skb2);
3712 }
3713 }
3714
3715 prev_dev = sdata->dev;
3716 dev_sw_netstats_rx_add(sdata->dev, skb->len);
3717 }
3718
3719 if (prev_dev) {
3720 skb->dev = prev_dev;
3721 netif_receive_skb(skb);
3722 return;
3723 }
3724
3725 out_free_skb:
3726 dev_kfree_skb(skb);
3727 }
3728
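/*
 * Map a handler result to the frame's final disposition: both
 * RX_DROP_MONITOR and RX_CONTINUE end up in the cooked monitor
 * path (the former additionally counts as a drop), RX_DROP_UNUSABLE
 * frees the frame, and RX_QUEUED means a handler took ownership of
 * it, so only a counter is updated here.
 */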
3729 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3730 ieee80211_rx_result res)
3731 {
3732 switch (res) {
3733 case RX_DROP_MONITOR:
3734 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3735 if (rx->sta)
3736 rx->sta->rx_stats.dropped++;
3737 fallthrough;
3738 case RX_CONTINUE: {
3739 struct ieee80211_rate *rate = NULL;
3740 struct ieee80211_supported_band *sband;
3741 struct ieee80211_rx_status *status;
3742
3743 status = IEEE80211_SKB_RXCB((rx->skb));
3744
3745 sband = rx->local->hw.wiphy->bands[status->band];
3746 if (status->encoding == RX_ENC_LEGACY)
3747 rate = &sband->bitrates[status->rate_idx];
3748
3749 ieee80211_rx_cooked_monitor(rx, rate);
3750 break;
3751 }
3752 case RX_DROP_UNUSABLE:
3753 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3754 if (rx->sta)
3755 rx->sta->rx_stats.dropped++;
3756 dev_kfree_skb(rx->skb);
3757 break;
3758 case RX_QUEUED:
3759 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
3760 break;
3761 }
3762 }
3763
3764 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3765 struct sk_buff_head *frames)
3766 {
3767 ieee80211_rx_result res = RX_DROP_MONITOR;
3768 struct sk_buff *skb;
3769
3770 #define CALL_RXH(rxh) \
3771 do { \
3772 res = rxh(rx); \
3773 if (res != RX_CONTINUE) \
3774 goto rxh_next; \
3775 } while (0)
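/*
 * CALL_RXH() runs one handler on the current frame; any result
 * other than RX_CONTINUE stops the pipeline for that frame and
 * jumps to the result handling below.
 */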
3776
3777 /* Lock here to avoid hitting all of the data used in the RX
3778 * path (e.g. key data, station data, ...) concurrently when
3779 * a frame is released from the reorder buffer due to timeout
3780 * from the timer, potentially concurrently with RX from the
3781 * driver.
3782 */
3783 spin_lock_bh(&rx->local->rx_path_lock);
3784
3785 while ((skb = __skb_dequeue(frames))) {
3786 /*
3787 * all the other fields are valid across frames
3788 * that belong to an A-MPDU since they are on the
3789 * same TID from the same station
3790 */
3791 rx->skb = skb;
3792
3793 CALL_RXH(ieee80211_rx_h_check_more_data);
3794 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
3795 CALL_RXH(ieee80211_rx_h_sta_process);
3796 CALL_RXH(ieee80211_rx_h_decrypt);
3797 CALL_RXH(ieee80211_rx_h_defragment);
3798 CALL_RXH(ieee80211_rx_h_michael_mic_verify);
3799 /* must be after MMIC verify so header is counted in MPDU mic */
3800 #ifdef CONFIG_MAC80211_MESH
3801 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
3802 CALL_RXH(ieee80211_rx_h_mesh_fwding);
3803 #endif
3804 CALL_RXH(ieee80211_rx_h_amsdu);
3805 CALL_RXH(ieee80211_rx_h_data);
3806
3807 /* special treatment -- needs the queue */
3808 res = ieee80211_rx_h_ctrl(rx, frames);
3809 if (res != RX_CONTINUE)
3810 goto rxh_next;
3811
3812 CALL_RXH(ieee80211_rx_h_mgmt_check);
3813 CALL_RXH(ieee80211_rx_h_action);
3814 CALL_RXH(ieee80211_rx_h_userspace_mgmt);
3815 CALL_RXH(ieee80211_rx_h_action_post_userspace);
3816 CALL_RXH(ieee80211_rx_h_action_return);
3817 CALL_RXH(ieee80211_rx_h_ext);
3818 CALL_RXH(ieee80211_rx_h_mgmt);
3819
3820 rxh_next:
3821 ieee80211_rx_handlers_result(rx, res);
3822
3823 #undef CALL_RXH
3824 }
3825
3826 spin_unlock_bh(&rx->local->rx_path_lock);
3827 }
3828
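/*
 * Entry point from the RX path for a single frame: run the duplicate
 * and sanity checks, feed the frame through A-MPDU reordering, then
 * run the remaining handlers on whatever the reorder buffer releases
 * (possibly nothing, possibly several frames).
 */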
3829 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
3830 {
3831 struct sk_buff_head reorder_release;
3832 ieee80211_rx_result res = RX_DROP_MONITOR;
3833
3834 __skb_queue_head_init(&reorder_release);
3835
3836 #define CALL_RXH(rxh) \
3837 do { \
3838 res = rxh(rx); \
3839 if (res != RX_CONTINUE) \
3840 goto rxh_next; \
3841 } while (0)
3842
3843 CALL_RXH(ieee80211_rx_h_check_dup);
3844 CALL_RXH(ieee80211_rx_h_check);
3845
3846 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
3847
3848 ieee80211_rx_handlers(rx, &reorder_release);
3849 return;
3850
3851 rxh_next:
3852 ieee80211_rx_handlers_result(rx, res);
3853
3854 #undef CALL_RXH
3855 }
3856
3857 /*
3858 * This function makes calls into the RX path, therefore
3859 * it has to be invoked under RCU read lock.
3860 */
3861 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3862 {
3863 struct sk_buff_head frames;
3864 struct ieee80211_rx_data rx = {
3865 .sta = sta,
3866 .sdata = sta->sdata,
3867 .local = sta->local,
3868 /* This is OK -- must be QoS data frame */
3869 .security_idx = tid,
3870 .seqno_idx = tid,
3871 };
3872 struct tid_ampdu_rx *tid_agg_rx;
3873
3874 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3875 if (!tid_agg_rx)
3876 return;
3877
3878 __skb_queue_head_init(&frames);
3879
3880 spin_lock(&tid_agg_rx->reorder_lock);
3881 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3882 spin_unlock(&tid_agg_rx->reorder_lock);
3883
3884 if (!skb_queue_empty(&frames)) {
3885 struct ieee80211_event event = {
3886 .type = BA_FRAME_TIMEOUT,
3887 .u.ba.tid = tid,
3888 .u.ba.sta = &sta->sta,
3889 };
3890 drv_event_callback(rx.local, rx.sdata, &event);
3891 }
3892
3893 ieee80211_rx_handlers(&rx, &frames);
3894 }
3895
3896 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
3897 u16 ssn, u64 filtered,
3898 u16 received_mpdus)
3899 {
3900 struct sta_info *sta;
3901 struct tid_ampdu_rx *tid_agg_rx;
3902 struct sk_buff_head frames;
3903 struct ieee80211_rx_data rx = {
3904 /* This is OK -- must be QoS data frame */
3905 .security_idx = tid,
3906 .seqno_idx = tid,
3907 };
3908 int i, diff;
3909
3910 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
3911 return;
3912
3913 __skb_queue_head_init(&frames);
3914
3915 sta = container_of(pubsta, struct sta_info, sta);
3916
3917 rx.sta = sta;
3918 rx.sdata = sta->sdata;
3919 rx.local = sta->local;
3920
3921 rcu_read_lock();
3922 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3923 if (!tid_agg_rx)
3924 goto out;
3925
3926 spin_lock_bh(&tid_agg_rx->reorder_lock);
3927
3928 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
3929 int release;
3930
3931 /* release all frames in the reorder buffer */
3932 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
3933 IEEE80211_SN_MODULO;
3934 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
3935 release, &frames);
3936 /* update ssn to match received ssn */
3937 tid_agg_rx->head_seq_num = ssn;
3938 } else {
3939 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
3940 &frames);
3941 }
3942
3943 /* handle the case where the received ssn is behind the MAC's ssn;
3944 * it can be up to tid_agg_rx->buf_size behind and still be valid */
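/* e.g. with head_seq_num = 10, ssn = 8 and buf_size = 64:
 * diff = 2, so the filtered bitmap is shifted right by two bits and
 * ssn is advanced to 10 before the per-slot bits are updated below.
 */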
3945 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
3946 if (diff >= tid_agg_rx->buf_size) {
3947 tid_agg_rx->reorder_buf_filtered = 0;
3948 goto release;
3949 }
3950 filtered = filtered >> diff;
3951 ssn += diff;
3952
3953 /* update bitmap */
3954 for (i = 0; i < tid_agg_rx->buf_size; i++) {
3955 int index = (ssn + i) % tid_agg_rx->buf_size;
3956
3957 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
3958 if (filtered & BIT_ULL(i))
3959 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
3960 }
3961
3962 /* now process also frames that the filter marking released */
3963 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3964
3965 release:
3966 spin_unlock_bh(&tid_agg_rx->reorder_lock);
3967
3968 ieee80211_rx_handlers(&rx, &frames);
3969
3970 out:
3971 rcu_read_unlock();
3972 }
3973 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
3974
3975 /* main receive path */
3976
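/*
 * Decide, per interface type, whether a received frame is relevant
 * to this interface at all (addressed to us, matching BSSID, allowed
 * frame class).  Frames that fail this check never enter the RX
 * handler chain for the interface.
 */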
3977 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3978 {
3979 struct ieee80211_sub_if_data *sdata = rx->sdata;
3980 struct sk_buff *skb = rx->skb;
3981 struct ieee80211_hdr *hdr = (void *)skb->data;
3982 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3983 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
3984 bool multicast = is_multicast_ether_addr(hdr->addr1) ||
3985 ieee80211_is_s1g_beacon(hdr->frame_control);
3986
3987 switch (sdata->vif.type) {
3988 case NL80211_IFTYPE_STATION:
3989 if (!bssid && !sdata->u.mgd.use_4addr)
3990 return false;
3991 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
3992 return false;
3993 if (multicast)
3994 return true;
3995 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3996 case NL80211_IFTYPE_ADHOC:
3997 if (!bssid)
3998 return false;
3999 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
4000 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
4001 return false;
4002 if (ieee80211_is_beacon(hdr->frame_control))
4003 return true;
4004 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
4005 return false;
4006 if (!multicast &&
4007 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
4008 return false;
4009 if (!rx->sta) {
4010 int rate_idx;
4011 if (status->encoding != RX_ENC_LEGACY)
4012 rate_idx = 0; /* TODO: HT/VHT rates */
4013 else
4014 rate_idx = status->rate_idx;
4015 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
4016 BIT(rate_idx));
4017 }
4018 return true;
4019 case NL80211_IFTYPE_OCB:
4020 if (!bssid)
4021 return false;
4022 if (!ieee80211_is_data_present(hdr->frame_control))
4023 return false;
4024 if (!is_broadcast_ether_addr(bssid))
4025 return false;
4026 if (!multicast &&
4027 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
4028 return false;
4029 if (!rx->sta) {
4030 int rate_idx;
4031 if (status->encoding != RX_ENC_LEGACY)
4032 rate_idx = 0; /* TODO: HT rates */
4033 else
4034 rate_idx = status->rate_idx;
4035 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
4036 BIT(rate_idx));
4037 }
4038 return true;
4039 case NL80211_IFTYPE_MESH_POINT:
4040 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
4041 return false;
4042 if (multicast)
4043 return true;
4044 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4045 case NL80211_IFTYPE_AP_VLAN:
4046 case NL80211_IFTYPE_AP:
4047 if (!bssid)
4048 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4049
4050 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
4051 /*
4052 * Accept public action frames even when the
4053 * BSSID doesn't match, this is used for P2P
4054 * and location updates. Note that mac80211
4055 * itself never looks at these frames.
4056 */
4057 if (!multicast &&
4058 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
4059 return false;
4060 if (ieee80211_is_public_action(hdr, skb->len))
4061 return true;
4062 return ieee80211_is_beacon(hdr->frame_control);
4063 }
4064
4065 if (!ieee80211_has_tods(hdr->frame_control)) {
4066 /* ignore data frames to TDLS-peers */
4067 if (ieee80211_is_data(hdr->frame_control))
4068 return false;
4069 /* ignore action frames to TDLS-peers */
4070 if (ieee80211_is_action(hdr->frame_control) &&
4071 !is_broadcast_ether_addr(bssid) &&
4072 !ether_addr_equal(bssid, hdr->addr1))
4073 return false;
4074 }
4075
4076 /*
4077 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
4078 * the BSSID - we've checked that already but may have accepted
4079 * the wildcard (ff:ff:ff:ff:ff:ff).
4080 *
4081 * It also says:
4082 * The BSSID of the Data frame is determined as follows:
4083 * a) If the STA is contained within an AP or is associated
4084 * with an AP, the BSSID is the address currently in use
4085 * by the STA contained in the AP.
4086 *
4087 * So we should not accept data frames with an address that's
4088 * multicast.
4089 *
4090 * Accepting it also opens a security problem because stations
4091 * could encrypt it with the GTK and inject traffic that way.
4092 */
4093 if (ieee80211_is_data(hdr->frame_control) && multicast)
4094 return false;
4095
4096 return true;
4097 case NL80211_IFTYPE_P2P_DEVICE:
4098 return ieee80211_is_public_action(hdr, skb->len) ||
4099 ieee80211_is_probe_req(hdr->frame_control) ||
4100 ieee80211_is_probe_resp(hdr->frame_control) ||
4101 ieee80211_is_beacon(hdr->frame_control);
4102 case NL80211_IFTYPE_NAN:
4103 /* Currently no frames on NAN interface are allowed */
4104 return false;
4105 default:
4106 break;
4107 }
4108
4109 WARN_ON_ONCE(1);
4110 return false;
4111 }
4112
4113 void ieee80211_check_fast_rx(struct sta_info *sta)
4114 {
4115 struct ieee80211_sub_if_data *sdata = sta->sdata;
4116 struct ieee80211_local *local = sdata->local;
4117 struct ieee80211_key *key;
4118 struct ieee80211_fast_rx fastrx = {
4119 .dev = sdata->dev,
4120 .vif_type = sdata->vif.type,
4121 .control_port_protocol = sdata->control_port_protocol,
4122 }, *old, *new = NULL;
4123 bool assign = false;
4124
4125 /* use sparse to check that we don't return without updating */
4126 __acquire(check_fast_rx);
4127
4128 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
4129 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
4130 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
4131 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
4132
4133 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
4134
4135 /* fast-rx doesn't do reordering */
4136 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
4137 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
4138 goto clear;
4139
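/*
 * Precompute, per interface type, where DA/SA live in the 802.11
 * header and which To/From-DS bit combination is expected, so that
 * the fast path can do the 802.11 -> 802.3 conversion with plain
 * offsets: a managed STA expects FromDS (a 4-addr STA additionally
 * ToDS), TDLS peers expect neither, and AP/AP_VLAN expect ToDS.
 */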
4140 switch (sdata->vif.type) {
4141 case NL80211_IFTYPE_STATION:
4142 if (sta->sta.tdls) {
4143 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4144 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4145 fastrx.expected_ds_bits = 0;
4146 } else {
4147 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4148 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
4149 fastrx.expected_ds_bits =
4150 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4151 }
4152
4153 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
4154 fastrx.expected_ds_bits |=
4155 cpu_to_le16(IEEE80211_FCTL_TODS);
4156 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4157 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4158 }
4159
4160 if (!sdata->u.mgd.powersave)
4161 break;
4162
4163 /* software powersave is a huge mess, avoid all of it */
4164 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
4165 goto clear;
4166 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
4167 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
4168 goto clear;
4169 break;
4170 case NL80211_IFTYPE_AP_VLAN:
4171 case NL80211_IFTYPE_AP:
4172 /* parallel-rx requires this, at least with calls to
4173 * ieee80211_sta_ps_transition()
4174 */
4175 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
4176 goto clear;
4177 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4178 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4179 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
4180
4181 fastrx.internal_forward =
4182 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
4183 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
4184 !sdata->u.vlan.sta);
4185
4186 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4187 sdata->u.vlan.sta) {
4188 fastrx.expected_ds_bits |=
4189 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4190 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4191 fastrx.internal_forward = 0;
4192 }
4193
4194 break;
4195 default:
4196 goto clear;
4197 }
4198
4199 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
4200 goto clear;
4201
4202 rcu_read_lock();
4203 key = rcu_dereference(sta->ptk[sta->ptk_idx]);
4204 if (!key)
4205 key = rcu_dereference(sdata->default_unicast_key);
4206 if (key) {
4207 switch (key->conf.cipher) {
4208 case WLAN_CIPHER_SUITE_TKIP:
4209 /* we don't want to deal with MMIC in fast-rx */
4210 goto clear_rcu;
4211 case WLAN_CIPHER_SUITE_CCMP:
4212 case WLAN_CIPHER_SUITE_CCMP_256:
4213 case WLAN_CIPHER_SUITE_GCMP:
4214 case WLAN_CIPHER_SUITE_GCMP_256:
4215 break;
4216 default:
4217 /* We also don't want to deal with
4218 * WEP or cipher scheme.
4219 */
4220 goto clear_rcu;
4221 }
4222
4223 fastrx.key = true;
4224 fastrx.icv_len = key->conf.icv_len;
4225 }
4226
4227 assign = true;
4228 clear_rcu:
4229 rcu_read_unlock();
4230 clear:
4231 __release(check_fast_rx);
4232
4233 if (assign)
4234 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
4235
4236 spin_lock_bh(&sta->lock);
4237 old = rcu_dereference_protected(sta->fast_rx, true);
4238 rcu_assign_pointer(sta->fast_rx, new);
4239 spin_unlock_bh(&sta->lock);
4240
4241 if (old)
4242 kfree_rcu(old, rcu_head);
4243 }
4244
4245 void ieee80211_clear_fast_rx(struct sta_info *sta)
4246 {
4247 struct ieee80211_fast_rx *old;
4248
4249 spin_lock_bh(&sta->lock);
4250 old = rcu_dereference_protected(sta->fast_rx, true);
4251 RCU_INIT_POINTER(sta->fast_rx, NULL);
4252 spin_unlock_bh(&sta->lock);
4253
4254 if (old)
4255 kfree_rcu(old, rcu_head);
4256 }
4257
4258 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4259 {
4260 struct ieee80211_local *local = sdata->local;
4261 struct sta_info *sta;
4262
4263 lockdep_assert_held(&local->sta_mtx);
4264
4265 list_for_each_entry(sta, &local->sta_list, list) {
4266 if (sdata != sta->sdata &&
4267 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
4268 continue;
4269 ieee80211_check_fast_rx(sta);
4270 }
4271 }
4272
4273 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4274 {
4275 struct ieee80211_local *local = sdata->local;
4276
4277 mutex_lock(&local->sta_mtx);
4278 __ieee80211_check_fast_rx_iface(sdata);
4279 mutex_unlock(&local->sta_mtx);
4280 }
4281
4282 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4283 struct ieee80211_fast_rx *fast_rx)
4284 {
4285 struct sk_buff *skb = rx->skb;
4286 struct ieee80211_hdr *hdr = (void *)skb->data;
4287 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4288 struct sta_info *sta = rx->sta;
4289 int orig_len = skb->len;
4290 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
4291 int snap_offs = hdrlen;
4292 struct {
4293 u8 snap[sizeof(rfc1042_header)];
4294 __be16 proto;
4295 } *payload __aligned(2);
4296 struct {
4297 u8 da[ETH_ALEN];
4298 u8 sa[ETH_ALEN];
4299 } addrs __aligned(2);
4300 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
4301
4302 if (fast_rx->uses_rss)
4303 stats = this_cpu_ptr(sta->pcpu_rx_stats);
4304
4305 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4306 * to a common data structure; drivers can implement that per queue
4307 * but we don't have that information in mac80211
4308 */
4309 if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4310 return false;
4311
4312 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4313
4314 /* If using encryption, we also need to have:
4315 * - PN_VALIDATED: similar, but the implementation is tricky
4316 * - DECRYPTED: necessary for PN_VALIDATED
4317 */
4318 if (fast_rx->key &&
4319 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4320 return false;
4321
4322 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4323 return false;
4324
4325 if (unlikely(ieee80211_is_frag(hdr)))
4326 return false;
4327
4328 /* Since our interface address cannot be multicast, this
4329 * implicitly also rejects multicast frames without the
4330 * explicit check.
4331 *
4332 * We shouldn't get any *data* frames not addressed to us
4333 * (AP mode will accept multicast *management* frames), but
4334 * punting here will make it go through the full checks in
4335 * ieee80211_accept_frame().
4336 */
4337 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4338 return false;
4339
4340 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4341 IEEE80211_FCTL_TODS)) !=
4342 fast_rx->expected_ds_bits)
4343 return false;
4344
4345 /* assign the key to drop unencrypted frames (later)
4346 * and strip the IV/MIC if necessary
4347 */
4348 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4349 /* GCMP header length is the same */
4350 snap_offs += IEEE80211_CCMP_HDR_LEN;
4351 }
4352
4353 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
4354 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4355 goto drop;
4356
4357 payload = (void *)(skb->data + snap_offs);
4358
4359 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4360 return false;
4361
4362 /* Don't handle these here since they require special code.
4363 * Accept AARP and IPX even though they should come with a
4364 * bridge-tunnel header - but if we get them this way then
4365 * there's little point in discarding them.
4366 */
4367 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4368 payload->proto == fast_rx->control_port_protocol))
4369 return false;
4370 }
4371
4372 /* after this point, don't punt to the slowpath! */
4373
4374 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4375 pskb_trim(skb, skb->len - fast_rx->icv_len))
4376 goto drop;
4377
4378 /* statistics part of ieee80211_rx_h_sta_process() */
4379 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4380 stats->last_signal = status->signal;
4381 if (!fast_rx->uses_rss)
4382 ewma_signal_add(&sta->rx_stats_avg.signal,
4383 -status->signal);
4384 }
4385
4386 if (status->chains) {
4387 int i;
4388
4389 stats->chains = status->chains;
4390 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4391 int signal = status->chain_signal[i];
4392
4393 if (!(status->chains & BIT(i)))
4394 continue;
4395
4396 stats->chain_signal_last[i] = signal;
4397 if (!fast_rx->uses_rss)
4398 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
4399 -signal);
4400 }
4401 }
4402 /* end of statistics */
4403
4404 if (rx->key && !ieee80211_has_protected(hdr->frame_control))
4405 goto drop;
4406
4407 if (status->rx_flags & IEEE80211_RX_AMSDU) {
4408 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
4409 RX_QUEUED)
4410 goto drop;
4411
4412 return true;
4413 }
4414
4415 stats->last_rx = jiffies;
4416 stats->last_rate = sta_stats_encode_rate(status);
4417
4418 stats->fragments++;
4419 stats->packets++;
4420
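/*
 * 802.11 -> Ethernet conversion in place: save DA/SA from the offsets
 * computed in ieee80211_check_fast_rx(), pull the 802.11 header plus
 * the SNAP header so only the ethertype remains, then push the two
 * addresses back in front of it.
 */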
4421 /* do the header conversion - first grab the addresses */
4422 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4423 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4424 /* remove the SNAP but leave the ethertype */
4425 skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4426 /* push the addresses in front */
4427 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
4428
4429 skb->dev = fast_rx->dev;
4430
4431 dev_sw_netstats_rx_add(fast_rx->dev, skb->len);
4432
4433 /* The seqno index has the same property as needed
4434 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4435 * for non-QoS-data frames. Here we know it's a data
4436 * frame, so count MSDUs.
4437 */
4438 u64_stats_update_begin(&stats->syncp);
4439 stats->msdu[rx->seqno_idx]++;
4440 stats->bytes += orig_len;
4441 u64_stats_update_end(&stats->syncp);
4442
4443 if (fast_rx->internal_forward) {
4444 struct sk_buff *xmit_skb = NULL;
4445 if (is_multicast_ether_addr(addrs.da)) {
4446 xmit_skb = skb_copy(skb, GFP_ATOMIC);
4447 } else if (!ether_addr_equal(addrs.da, addrs.sa) &&
4448 sta_info_get(rx->sdata, addrs.da)) {
4449 xmit_skb = skb;
4450 skb = NULL;
4451 }
4452
4453 if (xmit_skb) {
4454 /*
4455 * Send to wireless media and increase priority by 256
4456 * to keep the received priority instead of
4457 * reclassifying the frame (see cfg80211_classify8021d).
4458 */
4459 xmit_skb->priority += 256;
4460 xmit_skb->protocol = htons(ETH_P_802_3);
4461 skb_reset_network_header(xmit_skb);
4462 skb_reset_mac_header(xmit_skb);
4463 dev_queue_xmit(xmit_skb);
4464 }
4465
4466 if (!skb)
4467 return true;
4468 }
4469
4470 /* deliver to local stack */
4471 skb->protocol = eth_type_trans(skb, fast_rx->dev);
4472 memset(skb->cb, 0, sizeof(skb->cb));
4473 if (rx->list)
4474 list_add_tail(&skb->list, rx->list);
4475 else
4476 netif_receive_skb(skb);
4477
4478 return true;
4479 drop:
4480 dev_kfree_skb(skb);
4481 stats->dropped++;
4482 return true;
4483 }
4484
4485 /*
4486 * This function returns whether the SKB
4487 * was destined for RX processing, which,
4488 * if consume is true, is equivalent to whether
4489 * or not the skb was consumed.
4490 */
4491 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4492 struct sk_buff *skb, bool consume)
4493 {
4494 struct ieee80211_local *local = rx->local;
4495 struct ieee80211_sub_if_data *sdata = rx->sdata;
4496
4497 rx->skb = skb;
4498
4499 /* See if we can do fast-rx; if we have to copy we already lost,
4500 * so punt in that case. We should never have to deliver a data
4501 * frame to multiple interfaces anyway.
4502 *
4503 * We skip the ieee80211_accept_frame() call and do the necessary
4504 * checking inside ieee80211_invoke_fast_rx().
4505 */
4506 if (consume && rx->sta) {
4507 struct ieee80211_fast_rx *fast_rx;
4508
4509 fast_rx = rcu_dereference(rx->sta->fast_rx);
4510 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4511 return true;
4512 }
4513
4514 if (!ieee80211_accept_frame(rx))
4515 return false;
4516
4517 if (!consume) {
4518 skb = skb_copy(skb, GFP_ATOMIC);
4519 if (!skb) {
4520 if (net_ratelimit())
4521 wiphy_debug(local->hw.wiphy,
4522 "failed to copy skb for %s\n",
4523 sdata->name);
4524 return true;
4525 }
4526
4527 rx->skb = skb;
4528 }
4529
4530 ieee80211_invoke_rx_handlers(rx);
4531 return true;
4532 }
4533
4534 /*
4535 * This is the actual Rx frames handler. As it belongs to the Rx path it must
4536 * be called with rcu_read_lock protection.
4537 */
4538 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4539 struct ieee80211_sta *pubsta,
4540 struct sk_buff *skb,
4541 struct list_head *list)
4542 {
4543 struct ieee80211_local *local = hw_to_local(hw);
4544 struct ieee80211_sub_if_data *sdata;
4545 struct ieee80211_hdr *hdr;
4546 __le16 fc;
4547 struct ieee80211_rx_data rx;
4548 struct ieee80211_sub_if_data *prev;
4549 struct rhlist_head *tmp;
4550 int err = 0;
4551
4552 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4553 memset(&rx, 0, sizeof(rx));
4554 rx.skb = skb;
4555 rx.local = local;
4556 rx.list = list;
4557
4558 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4559 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4560
4561 if (ieee80211_is_mgmt(fc)) {
4562 /* drop frame if too short for header */
4563 if (skb->len < ieee80211_hdrlen(fc))
4564 err = -ENOBUFS;
4565 else
4566 err = skb_linearize(skb);
4567 } else {
4568 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
4569 }
4570
4571 if (err) {
4572 dev_kfree_skb(skb);
4573 return;
4574 }
4575
4576 hdr = (struct ieee80211_hdr *)skb->data;
4577 ieee80211_parse_qos(&rx);
4578 ieee80211_verify_alignment(&rx);
4579
4580 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
4581 ieee80211_is_beacon(hdr->frame_control) ||
4582 ieee80211_is_s1g_beacon(hdr->frame_control)))
4583 ieee80211_scan_rx(local, skb);
4584
4585 if (ieee80211_is_data(fc)) {
4586 struct sta_info *sta, *prev_sta;
4587
4588 if (pubsta) {
4589 rx.sta = container_of(pubsta, struct sta_info, sta);
4590 rx.sdata = rx.sta->sdata;
4591 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4592 return;
4593 goto out;
4594 }
4595
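/*
 * More than one station entry may match addr2 (e.g. the same peer
 * known on several interfaces); give every matching entry but the
 * last a copy of the frame, and let the last one consume the
 * original skb.
 */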
4596 prev_sta = NULL;
4597
4598 for_each_sta_info(local, hdr->addr2, sta, tmp) {
4599 if (!prev_sta) {
4600 prev_sta = sta;
4601 continue;
4602 }
4603
4604 rx.sta = prev_sta;
4605 rx.sdata = prev_sta->sdata;
4606 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4607
4608 prev_sta = sta;
4609 }
4610
4611 if (prev_sta) {
4612 rx.sta = prev_sta;
4613 rx.sdata = prev_sta->sdata;
4614
4615 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4616 return;
4617 goto out;
4618 }
4619 }
4620
4621 prev = NULL;
4622
4623 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4624 if (!ieee80211_sdata_running(sdata))
4625 continue;
4626
4627 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
4628 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4629 continue;
4630
4631 /*
4632 * frame is destined for this interface; handling of the last
4633 * matching interface is deferred until after the loop so that
4634 * we avoid copying the SKB one time too many
4635 */
4636
4637 if (!prev) {
4638 prev = sdata;
4639 continue;
4640 }
4641
4642 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4643 rx.sdata = prev;
4644 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4645
4646 prev = sdata;
4647 }
4648
4649 if (prev) {
4650 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4651 rx.sdata = prev;
4652
4653 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4654 return;
4655 }
4656
4657 out:
4658 dev_kfree_skb(skb);
4659 }
4660
4661 /*
4662 * This is the receive path handler. It is called by a low level driver when an
4663 * 802.11 MPDU is received from the hardware.
4664 */
4665 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4666 struct sk_buff *skb, struct list_head *list)
4667 {
4668 struct ieee80211_local *local = hw_to_local(hw);
4669 struct ieee80211_rate *rate = NULL;
4670 struct ieee80211_supported_band *sband;
4671 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4672
4673 WARN_ON_ONCE(softirq_count() == 0);
4674
4675 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
4676 goto drop;
4677
4678 sband = local->hw.wiphy->bands[status->band];
4679 if (WARN_ON(!sband))
4680 goto drop;
4681
4682 /*
4683 * If we're suspending, it is possible although not too likely
4684 * that we'd be receiving frames after having already partially
4685 * quiesced the stack. We can't process such frames then since
4686 * that might, for example, cause stations to be added or other
4687 * driver callbacks be invoked.
4688 */
4689 if (unlikely(local->quiescing || local->suspended))
4690 goto drop;
4691
4692 /* We might be during a HW reconfig, prevent Rx for the same reason */
4693 if (unlikely(local->in_reconfig))
4694 goto drop;
4695
4696 /*
4697 * The same happens when we're not even started,
4698 * but that's worth a warning.
4699 */
4700 if (WARN_ON(!local->started))
4701 goto drop;
4702
4703 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
4704 /*
4705 * Validate the rate, unless a PLCP error means that
4706 * we probably can't have a valid rate here anyway.
4707 */
4708
4709 switch (status->encoding) {
4710 case RX_ENC_HT:
4711 /*
4712 * rate_idx is MCS index, which can be [0-76]
4713 * as documented on:
4714 *
4715 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
4716 *
4717 * Anything else would be some sort of driver or
4718 * hardware error. The driver should catch hardware
4719 * errors.
4720 */
4721 if (WARN(status->rate_idx > 76,
4722 "Rate marked as an HT rate but passed "
4723 "status->rate_idx is not "
4724 "an MCS index [0-76]: %d (0x%02x)\n",
4725 status->rate_idx,
4726 status->rate_idx))
4727 goto drop;
4728 break;
4729 case RX_ENC_VHT:
4730 if (WARN_ONCE(status->rate_idx > 9 ||
4731 !status->nss ||
4732 status->nss > 8,
4733 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
4734 status->rate_idx, status->nss))
4735 goto drop;
4736 break;
4737 case RX_ENC_HE:
4738 if (WARN_ONCE(status->rate_idx > 11 ||
4739 !status->nss ||
4740 status->nss > 8,
4741 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
4742 status->rate_idx, status->nss))
4743 goto drop;
4744 break;
4745 default:
4746 WARN_ON_ONCE(1);
4747 fallthrough;
4748 case RX_ENC_LEGACY:
4749 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
4750 goto drop;
4751 rate = &sband->bitrates[status->rate_idx];
4752 }
4753 }
4754
4755 status->rx_flags = 0;
4756
4757 kcov_remote_start_common(skb_get_kcov_handle(skb));
4758
4759 /*
4760 * Frames with failed FCS/PLCP checksum are not returned;
4761 * all other frames are returned without the radiotap header
4762 * if it was previously present.
4763 * Also, frames with fewer than 16 bytes are dropped.
4764 */
4765 skb = ieee80211_rx_monitor(local, skb, rate);
4766 if (skb) {
4767 ieee80211_tpt_led_trig_rx(local,
4768 ((struct ieee80211_hdr *)skb->data)->frame_control,
4769 skb->len);
4770
4771 __ieee80211_rx_handle_packet(hw, pubsta, skb, list);
4772 }
4773
4774 kcov_remote_stop();
4775 return;
4776 drop:
4777 kfree_skb(skb);
4778 }
4779 EXPORT_SYMBOL(ieee80211_rx_list);
4780
4781 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4782 struct sk_buff *skb, struct napi_struct *napi)
4783 {
4784 struct sk_buff *tmp;
4785 LIST_HEAD(list);
4786
4787
4788 /*
4789 * key references and virtual interfaces are protected using RCU
4790 * and this requires that we are in a read-side RCU section during
4791 * receive processing
4792 */
4793 rcu_read_lock();
4794 ieee80211_rx_list(hw, pubsta, skb, &list);
4795 rcu_read_unlock();
4796
4797 if (!napi) {
4798 netif_receive_skb_list(&list);
4799 return;
4800 }
4801
4802 list_for_each_entry_safe(skb, tmp, &list, list) {
4803 skb_list_del_init(skb);
4804 napi_gro_receive(napi, skb);
4805 }
4806 }
4807 EXPORT_SYMBOL(ieee80211_rx_napi);
4808
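/*
 * Illustrative driver-side usage (not part of this file; the names
 * freq, rate_idx, napi and hw stand for caller-provided values): a
 * driver fills in the ieee80211_rx_status living in skb->cb before
 * handing the frame to mac80211, e.g.
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = NL80211_BAND_2GHZ;
 *	status->freq = freq;
 *	status->rate_idx = rate_idx;
 *	ieee80211_rx_napi(hw, NULL, skb, napi);
 *
 * From hard-IRQ context ieee80211_rx_irqsafe() below must be used
 * instead.
 */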
4809 /* This is a version of the rx handler that can be called from hard irq
4810 * context. Post the skb on the queue and schedule the tasklet */
4811 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
4812 {
4813 struct ieee80211_local *local = hw_to_local(hw);
4814
4815 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
4816
4817 skb->pkt_type = IEEE80211_RX_MSG;
4818 skb_queue_tail(&local->skb_queue, skb);
4819 tasklet_schedule(&local->tasklet);
4820 }
4821 EXPORT_SYMBOL(ieee80211_rx_irqsafe);