[mirror_ubuntu-jammy-kernel.git] net/mac80211/rx.c (blob c1343c028b767762dd894dd5c6c742c3b77de98a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2005-2006, Devicescape Software, Inc.
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9 * Copyright (C) 2018-2020 Intel Corporation
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <linux/bitops.h>
21 #include <net/mac80211.h>
22 #include <net/ieee80211_radiotap.h>
23 #include <asm/unaligned.h>
24
25 #include "ieee80211_i.h"
26 #include "driver-ops.h"
27 #include "led.h"
28 #include "mesh.h"
29 #include "wep.h"
30 #include "wpa.h"
31 #include "tkip.h"
32 #include "wme.h"
33 #include "rate.h"
34
35 /*
36 * monitor mode reception
37 *
38 * This function cleans up the SKB, i.e. it removes all the stuff
39 * only useful for monitoring.
40 */
41 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
42 unsigned int present_fcs_len,
43 unsigned int rtap_space)
44 {
45 struct ieee80211_hdr *hdr;
46 unsigned int hdrlen;
47 __le16 fc;
48
49 if (present_fcs_len)
50 __pskb_trim(skb, skb->len - present_fcs_len);
51 __pskb_pull(skb, rtap_space);
52
53 hdr = (void *)skb->data;
54 fc = hdr->frame_control;
55
56 /*
57 * Remove the HT-Control field (if present) on management
58 * frames after we've sent the frame to monitoring. We
59 * (currently) don't need it, and don't properly parse
60 * frames with it present, due to the assumption of a
61 * fixed management header length.
62 */
63 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
64 return skb;
65
66 hdrlen = ieee80211_hdrlen(fc);
67 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
68
69 if (!pskb_may_pull(skb, hdrlen)) {
70 dev_kfree_skb(skb);
71 return NULL;
72 }
73
74 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
75 hdrlen - IEEE80211_HT_CTL_LEN);
76 __pskb_pull(skb, IEEE80211_HT_CTL_LEN);
77
78 return skb;
79 }
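
/*
 * Informational note on the HT-Control removal above: the memmove() shifts
 * the header bytes that precede the HT Control field forward by
 * IEEE80211_HT_CTL_LEN, overwriting that field, and the now-duplicated
 * leading bytes are then pulled off the front of the skb.
 */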
80
81 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
82 unsigned int rtap_space)
83 {
84 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
85 struct ieee80211_hdr *hdr;
86
87 hdr = (void *)(skb->data + rtap_space);
88
89 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
90 RX_FLAG_FAILED_PLCP_CRC |
91 RX_FLAG_ONLY_MONITOR |
92 RX_FLAG_NO_PSDU))
93 return true;
94
95 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
96 return true;
97
98 if (ieee80211_is_ctl(hdr->frame_control) &&
99 !ieee80211_is_pspoll(hdr->frame_control) &&
100 !ieee80211_is_back_req(hdr->frame_control))
101 return true;
102
103 return false;
104 }
105
106 static int
107 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
108 struct ieee80211_rx_status *status,
109 struct sk_buff *skb)
110 {
111 int len;
112
113 /* always present fields */
114 len = sizeof(struct ieee80211_radiotap_header) + 8;
115
116 /* allocate extra bitmaps */
117 if (status->chains)
118 len += 4 * hweight8(status->chains);
119 /* vendor presence bitmap */
120 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
121 len += 4;
122
123 if (ieee80211_have_rx_timestamp(status)) {
124 len = ALIGN(len, 8);
125 len += 8;
126 }
127 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
128 len += 1;
129
130 /* antenna field, if we don't have per-chain info */
131 if (!status->chains)
132 len += 1;
133
134 /* padding for RX_FLAGS if necessary */
135 len = ALIGN(len, 2);
136
137 if (status->encoding == RX_ENC_HT) /* HT info */
138 len += 3;
139
140 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
141 len = ALIGN(len, 4);
142 len += 8;
143 }
144
145 if (status->encoding == RX_ENC_VHT) {
146 len = ALIGN(len, 2);
147 len += 12;
148 }
149
150 if (local->hw.radiotap_timestamp.units_pos >= 0) {
151 len = ALIGN(len, 8);
152 len += 12;
153 }
154
155 if (status->encoding == RX_ENC_HE &&
156 status->flag & RX_FLAG_RADIOTAP_HE) {
157 len = ALIGN(len, 2);
158 len += 12;
159 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
160 }
161
162 if (status->encoding == RX_ENC_HE &&
163 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
164 len = ALIGN(len, 2);
165 len += 12;
166 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
167 }
168
169 if (status->flag & RX_FLAG_NO_PSDU)
170 len += 1;
171
172 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
173 len = ALIGN(len, 2);
174 len += 4;
175 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
176 }
177
178 if (status->chains) {
179 /* antenna and antenna signal fields */
180 len += 2 * hweight8(status->chains);
181 }
182
183 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
184 struct ieee80211_vendor_radiotap *rtap;
185 int vendor_data_offset = 0;
186
187 /*
188 * The position to look at depends on the existence (or non-
189 * existence) of other elements, so take that into account...
190 */
191 if (status->flag & RX_FLAG_RADIOTAP_HE)
192 vendor_data_offset +=
193 sizeof(struct ieee80211_radiotap_he);
194 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
195 vendor_data_offset +=
196 sizeof(struct ieee80211_radiotap_he_mu);
197 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
198 vendor_data_offset +=
199 sizeof(struct ieee80211_radiotap_lsig);
200
201 rtap = (void *)&skb->data[vendor_data_offset];
202
203 /* alignment for fixed 6-byte vendor data header */
204 len = ALIGN(len, 2);
205 /* vendor data header */
206 len += 6;
207 if (WARN_ON(rtap->align == 0))
208 rtap->align = 1;
209 len = ALIGN(len, rtap->align);
210 len += rtap->len + rtap->pad;
211 }
212
213 return len;
214 }
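
/*
 * Informational note on the ALIGN()-then-add pattern above: radiotap
 * requires every field to start at an offset that is a multiple of its own
 * natural alignment, so each optional field first rounds the running
 * length up and then adds its size.  A minimal restatement of that rule as
 * a hypothetical helper (illustration only, not used anywhere):
 */
#if 0
static int radiotap_append_field(int len, int field_align, int field_size)
{
	/* pad to the field's natural alignment, then account for the field */
	return ALIGN(len, field_align) + field_size;
}
#endif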
215
216 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
217 struct sk_buff *skb,
218 int rtap_space)
219 {
220 struct {
221 struct ieee80211_hdr_3addr hdr;
222 u8 category;
223 u8 action_code;
224 } __packed __aligned(2) action;
225
226 if (!sdata)
227 return;
228
229 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
230
231 if (skb->len < rtap_space + sizeof(action) +
232 VHT_MUMIMO_GROUPS_DATA_LEN)
233 return;
234
235 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
236 return;
237
238 skb_copy_bits(skb, rtap_space, &action, sizeof(action));
239
240 if (!ieee80211_is_action(action.hdr.frame_control))
241 return;
242
243 if (action.category != WLAN_CATEGORY_VHT)
244 return;
245
246 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
247 return;
248
249 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
250 return;
251
252 skb = skb_copy(skb, GFP_ATOMIC);
253 if (!skb)
254 return;
255
256 skb_queue_tail(&sdata->skb_queue, skb);
257 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
258 }
259
260 /*
261 * ieee80211_add_rx_radiotap_header - add radiotap header
262 *
263 * add a radiotap header containing all the fields which the hardware provided.
264 */
265 static void
266 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
267 struct sk_buff *skb,
268 struct ieee80211_rate *rate,
269 int rtap_len, bool has_fcs)
270 {
271 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
272 struct ieee80211_radiotap_header *rthdr;
273 unsigned char *pos;
274 __le32 *it_present;
275 u32 it_present_val;
276 u16 rx_flags = 0;
277 u16 channel_flags = 0;
278 int mpdulen, chain;
279 unsigned long chains = status->chains;
280 struct ieee80211_vendor_radiotap rtap = {};
281 struct ieee80211_radiotap_he he = {};
282 struct ieee80211_radiotap_he_mu he_mu = {};
283 struct ieee80211_radiotap_lsig lsig = {};
284
285 if (status->flag & RX_FLAG_RADIOTAP_HE) {
286 he = *(struct ieee80211_radiotap_he *)skb->data;
287 skb_pull(skb, sizeof(he));
288 WARN_ON_ONCE(status->encoding != RX_ENC_HE);
289 }
290
291 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
292 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
293 skb_pull(skb, sizeof(he_mu));
294 }
295
296 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
297 lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
298 skb_pull(skb, sizeof(lsig));
299 }
300
301 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
302 rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
303 /* rtap.len and rtap.pad are undone immediately */
304 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
305 }
306
307 mpdulen = skb->len;
308 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
309 mpdulen += FCS_LEN;
310
311 rthdr = skb_push(skb, rtap_len);
312 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
313 it_present = &rthdr->it_present;
314
315 /* radiotap header, set always present flags */
316 rthdr->it_len = cpu_to_le16(rtap_len);
317 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
318 BIT(IEEE80211_RADIOTAP_CHANNEL) |
319 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
320
321 if (!status->chains)
322 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
323
324 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
325 it_present_val |=
326 BIT(IEEE80211_RADIOTAP_EXT) |
327 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
328 put_unaligned_le32(it_present_val, it_present);
329 it_present++;
330 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
331 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
332 }
333
334 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
335 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
336 BIT(IEEE80211_RADIOTAP_EXT);
337 put_unaligned_le32(it_present_val, it_present);
338 it_present++;
339 it_present_val = rtap.present;
340 }
341
342 put_unaligned_le32(it_present_val, it_present);
343
344 pos = (void *)(it_present + 1);
345
346 /* the order of the following fields is important */
347
348 /* IEEE80211_RADIOTAP_TSFT */
349 if (ieee80211_have_rx_timestamp(status)) {
350 /* padding */
351 while ((pos - (u8 *)rthdr) & 7)
352 *pos++ = 0;
353 put_unaligned_le64(
354 ieee80211_calculate_rx_timestamp(local, status,
355 mpdulen, 0),
356 pos);
357 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
358 pos += 8;
359 }
360
361 /* IEEE80211_RADIOTAP_FLAGS */
362 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
363 *pos |= IEEE80211_RADIOTAP_F_FCS;
364 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
365 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
366 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
367 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
368 pos++;
369
370 /* IEEE80211_RADIOTAP_RATE */
371 if (!rate || status->encoding != RX_ENC_LEGACY) {
372 /*
373 * Without rate information don't add it. If we have,
374 * MCS information is a separate field in radiotap,
375 * added below. The byte here is needed as padding
376 * for the channel though, so initialise it to 0.
377 */
378 *pos = 0;
379 } else {
380 int shift = 0;
381 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
382 if (status->bw == RATE_INFO_BW_10)
383 shift = 1;
384 else if (status->bw == RATE_INFO_BW_5)
385 shift = 2;
386 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
387 }
388 pos++;
389
390 /* IEEE80211_RADIOTAP_CHANNEL */
391         /* TODO: frequency offset in kHz */
392 put_unaligned_le16(status->freq, pos);
393 pos += 2;
394 if (status->bw == RATE_INFO_BW_10)
395 channel_flags |= IEEE80211_CHAN_HALF;
396 else if (status->bw == RATE_INFO_BW_5)
397 channel_flags |= IEEE80211_CHAN_QUARTER;
398
399 if (status->band == NL80211_BAND_5GHZ ||
400 status->band == NL80211_BAND_6GHZ)
401 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
402 else if (status->encoding != RX_ENC_LEGACY)
403 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
404 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
405 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
406 else if (rate)
407 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
408 else
409 channel_flags |= IEEE80211_CHAN_2GHZ;
410 put_unaligned_le16(channel_flags, pos);
411 pos += 2;
412
413 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
414 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
415 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
416 *pos = status->signal;
417 rthdr->it_present |=
418 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
419 pos++;
420 }
421
422 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
423
424 if (!status->chains) {
425 /* IEEE80211_RADIOTAP_ANTENNA */
426 *pos = status->antenna;
427 pos++;
428 }
429
430 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
431
432 /* IEEE80211_RADIOTAP_RX_FLAGS */
433 /* ensure 2 byte alignment for the 2 byte field as required */
434 if ((pos - (u8 *)rthdr) & 1)
435 *pos++ = 0;
436 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
437 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
438 put_unaligned_le16(rx_flags, pos);
439 pos += 2;
440
441 if (status->encoding == RX_ENC_HT) {
442 unsigned int stbc;
443
444 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
445 *pos++ = local->hw.radiotap_mcs_details;
446 *pos = 0;
447 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
448 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
449 if (status->bw == RATE_INFO_BW_40)
450 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
451 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
452 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
453 if (status->enc_flags & RX_ENC_FLAG_LDPC)
454 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
455 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
456 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
457 pos++;
458 *pos++ = status->rate_idx;
459 }
460
461 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
462 u16 flags = 0;
463
464 /* ensure 4 byte alignment */
465 while ((pos - (u8 *)rthdr) & 3)
466 pos++;
467 rthdr->it_present |=
468 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
469 put_unaligned_le32(status->ampdu_reference, pos);
470 pos += 4;
471 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
472 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
473 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
474 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
475 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
476 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
477 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
478 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
479 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
480 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
481 if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
482 flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
483 put_unaligned_le16(flags, pos);
484 pos += 2;
485 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
486 *pos++ = status->ampdu_delimiter_crc;
487 else
488 *pos++ = 0;
489 *pos++ = 0;
490 }
491
492 if (status->encoding == RX_ENC_VHT) {
493 u16 known = local->hw.radiotap_vht_details;
494
495 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
496 put_unaligned_le16(known, pos);
497 pos += 2;
498 /* flags */
499 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
500 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
501 /* in VHT, STBC is binary */
502 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
503 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
504 if (status->enc_flags & RX_ENC_FLAG_BF)
505 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
506 pos++;
507 /* bandwidth */
508 switch (status->bw) {
509 case RATE_INFO_BW_80:
510 *pos++ = 4;
511 break;
512 case RATE_INFO_BW_160:
513 *pos++ = 11;
514 break;
515 case RATE_INFO_BW_40:
516 *pos++ = 1;
517 break;
518 default:
519 *pos++ = 0;
520 }
521 /* MCS/NSS */
522 *pos = (status->rate_idx << 4) | status->nss;
523 pos += 4;
524 /* coding field */
525 if (status->enc_flags & RX_ENC_FLAG_LDPC)
526 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
527 pos++;
528 /* group ID */
529 pos++;
530 /* partial_aid */
531 pos += 2;
532 }
533
534 if (local->hw.radiotap_timestamp.units_pos >= 0) {
535 u16 accuracy = 0;
536 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
537
538 rthdr->it_present |=
539 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
540
541 /* ensure 8 byte alignment */
542 while ((pos - (u8 *)rthdr) & 7)
543 pos++;
544
545 put_unaligned_le64(status->device_timestamp, pos);
546 pos += sizeof(u64);
547
548 if (local->hw.radiotap_timestamp.accuracy >= 0) {
549 accuracy = local->hw.radiotap_timestamp.accuracy;
550 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
551 }
552 put_unaligned_le16(accuracy, pos);
553 pos += sizeof(u16);
554
555 *pos++ = local->hw.radiotap_timestamp.units_pos;
556 *pos++ = flags;
557 }
558
559 if (status->encoding == RX_ENC_HE &&
560 status->flag & RX_FLAG_RADIOTAP_HE) {
561 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
562
563 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
564 he.data6 |= HE_PREP(DATA6_NSTS,
565 FIELD_GET(RX_ENC_FLAG_STBC_MASK,
566 status->enc_flags));
567 he.data3 |= HE_PREP(DATA3_STBC, 1);
568 } else {
569 he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
570 }
571
572 #define CHECK_GI(s) \
573 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
574 (int)NL80211_RATE_INFO_HE_GI_##s)
575
576 CHECK_GI(0_8);
577 CHECK_GI(1_6);
578 CHECK_GI(3_2);
579
580 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
581 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
582 he.data3 |= HE_PREP(DATA3_CODING,
583 !!(status->enc_flags & RX_ENC_FLAG_LDPC));
584
585 he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
586
587 switch (status->bw) {
588 case RATE_INFO_BW_20:
589 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
590 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
591 break;
592 case RATE_INFO_BW_40:
593 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
594 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
595 break;
596 case RATE_INFO_BW_80:
597 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
598 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
599 break;
600 case RATE_INFO_BW_160:
601 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
602 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
603 break;
604 case RATE_INFO_BW_HE_RU:
605 #define CHECK_RU_ALLOC(s) \
606 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
607 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
608
609 CHECK_RU_ALLOC(26);
610 CHECK_RU_ALLOC(52);
611 CHECK_RU_ALLOC(106);
612 CHECK_RU_ALLOC(242);
613 CHECK_RU_ALLOC(484);
614 CHECK_RU_ALLOC(996);
615 CHECK_RU_ALLOC(2x996);
616
617 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
618 status->he_ru + 4);
619 break;
620 default:
621 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
622 }
623
624 /* ensure 2 byte alignment */
625 while ((pos - (u8 *)rthdr) & 1)
626 pos++;
627 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
628 memcpy(pos, &he, sizeof(he));
629 pos += sizeof(he);
630 }
631
632 if (status->encoding == RX_ENC_HE &&
633 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
634 /* ensure 2 byte alignment */
635 while ((pos - (u8 *)rthdr) & 1)
636 pos++;
637 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
638 memcpy(pos, &he_mu, sizeof(he_mu));
639 pos += sizeof(he_mu);
640 }
641
642 if (status->flag & RX_FLAG_NO_PSDU) {
643 rthdr->it_present |=
644 cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
645 *pos++ = status->zero_length_psdu_type;
646 }
647
648 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
649 /* ensure 2 byte alignment */
650 while ((pos - (u8 *)rthdr) & 1)
651 pos++;
652 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
653 memcpy(pos, &lsig, sizeof(lsig));
654 pos += sizeof(lsig);
655 }
656
657 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
658 *pos++ = status->chain_signal[chain];
659 *pos++ = chain;
660 }
661
662 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
663 /* ensure 2 byte alignment for the vendor field as required */
664 if ((pos - (u8 *)rthdr) & 1)
665 *pos++ = 0;
666 *pos++ = rtap.oui[0];
667 *pos++ = rtap.oui[1];
668 *pos++ = rtap.oui[2];
669 *pos++ = rtap.subns;
670 put_unaligned_le16(rtap.len, pos);
671 pos += 2;
672 /* align the actual payload as requested */
673 while ((pos - (u8 *)rthdr) & (rtap.align - 1))
674 *pos++ = 0;
675 /* data (and possible padding) already follows */
676 }
677 }
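
/*
 * Informational example for the it_present handling above: with two RX
 * chains and no vendor data, three 32-bit presence words are emitted.  The
 * first carries the always-present flags plus RADIOTAP_NAMESPACE | EXT,
 * the second carries ANTENNA | DBM_ANTSIGNAL for the first chain plus
 * RADIOTAP_NAMESPACE | EXT, and the final word carries ANTENNA |
 * DBM_ANTSIGNAL for the second chain without EXT, terminating the chain
 * of bitmaps.
 */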
678
679 static struct sk_buff *
680 ieee80211_make_monitor_skb(struct ieee80211_local *local,
681 struct sk_buff **origskb,
682 struct ieee80211_rate *rate,
683 int rtap_space, bool use_origskb)
684 {
685 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
686 int rt_hdrlen, needed_headroom;
687 struct sk_buff *skb;
688
689 /* room for the radiotap header based on driver features */
690 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
691 needed_headroom = rt_hdrlen - rtap_space;
692
693 if (use_origskb) {
694 /* only need to expand headroom if necessary */
695 skb = *origskb;
696 *origskb = NULL;
697
698 /*
699 * This shouldn't trigger often because most devices have an
700 * RX header they pull before we get here, and that should
701 * be big enough for our radiotap information. We should
702 * probably export the length to drivers so that we can have
703 * them allocate enough headroom to start with.
704 */
705 if (skb_headroom(skb) < needed_headroom &&
706 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
707 dev_kfree_skb(skb);
708 return NULL;
709 }
710 } else {
711 /*
712 * Need to make a copy and possibly remove radiotap header
713 * and FCS from the original.
714 */
715 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
716
717 if (!skb)
718 return NULL;
719 }
720
721 /* prepend radiotap information */
722 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
723
724 skb_reset_mac_header(skb);
725 skb->ip_summed = CHECKSUM_UNNECESSARY;
726 skb->pkt_type = PACKET_OTHERHOST;
727 skb->protocol = htons(ETH_P_802_2);
728
729 return skb;
730 }
731
732 /*
733 * This function copies a received frame to all monitor interfaces and
734 * returns a cleaned-up SKB that no longer includes the FCS nor the
735 * radiotap header the driver might have added.
736 */
737 static struct sk_buff *
738 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
739 struct ieee80211_rate *rate)
740 {
741 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
742 struct ieee80211_sub_if_data *sdata;
743 struct sk_buff *monskb = NULL;
744 int present_fcs_len = 0;
745 unsigned int rtap_space = 0;
746 struct ieee80211_sub_if_data *monitor_sdata =
747 rcu_dereference(local->monitor_sdata);
748 bool only_monitor = false;
749 unsigned int min_head_len;
750
751 if (status->flag & RX_FLAG_RADIOTAP_HE)
752 rtap_space += sizeof(struct ieee80211_radiotap_he);
753
754 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
755 rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
756
757 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
758 rtap_space += sizeof(struct ieee80211_radiotap_lsig);
759
760 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
761 struct ieee80211_vendor_radiotap *rtap =
762 (void *)(origskb->data + rtap_space);
763
764 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
765 }
766
767 min_head_len = rtap_space;
768
769 /*
770 * First, we may need to make a copy of the skb because
771 * (1) we need to modify it for radiotap (if not present), and
772 * (2) the other RX handlers will modify the skb we got.
773 *
774 * We don't need to, of course, if we aren't going to return
775 * the SKB because it has a bad FCS/PLCP checksum.
776 */
777
778 if (!(status->flag & RX_FLAG_NO_PSDU)) {
779 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
780 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
781 /* driver bug */
782 WARN_ON(1);
783 dev_kfree_skb(origskb);
784 return NULL;
785 }
786 present_fcs_len = FCS_LEN;
787 }
788
789 /* also consider the hdr->frame_control */
790 min_head_len += 2;
791 }
792
793 /* ensure that the expected data elements are in skb head */
794 if (!pskb_may_pull(origskb, min_head_len)) {
795 dev_kfree_skb(origskb);
796 return NULL;
797 }
798
799 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
800
801 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
802 if (only_monitor) {
803 dev_kfree_skb(origskb);
804 return NULL;
805 }
806
807 return ieee80211_clean_skb(origskb, present_fcs_len,
808 rtap_space);
809 }
810
811 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
812
813 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
814 bool last_monitor = list_is_last(&sdata->u.mntr.list,
815 &local->mon_list);
816
817 if (!monskb)
818 monskb = ieee80211_make_monitor_skb(local, &origskb,
819 rate, rtap_space,
820 only_monitor &&
821 last_monitor);
822
823 if (monskb) {
824 struct sk_buff *skb;
825
826 if (last_monitor) {
827 skb = monskb;
828 monskb = NULL;
829 } else {
830 skb = skb_clone(monskb, GFP_ATOMIC);
831 }
832
833 if (skb) {
834 skb->dev = sdata->dev;
835 dev_sw_netstats_rx_add(skb->dev, skb->len);
836 netif_receive_skb(skb);
837 }
838 }
839
840 if (last_monitor)
841 break;
842 }
843
844 /* this happens if last_monitor was erroneously false */
845 dev_kfree_skb(monskb);
846
847 /* ditto */
848 if (!origskb)
849 return NULL;
850
851 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
852 }
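
/*
 * Informational note on the delivery loop above: monskb is built once and
 * then cloned for every monitor interface except the last, which receives
 * monskb itself; the original skb is reused for that purpose (instead of
 * being copied) only when the frame is destined exclusively for monitor
 * interfaces and a single interface is on mon_list.
 */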
853
854 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
855 {
856 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
857 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
858 int tid, seqno_idx, security_idx;
859
860 /* does the frame have a qos control field? */
861 if (ieee80211_is_data_qos(hdr->frame_control)) {
862 u8 *qc = ieee80211_get_qos_ctl(hdr);
863 /* frame has qos control */
864 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
865 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
866 status->rx_flags |= IEEE80211_RX_AMSDU;
867
868 seqno_idx = tid;
869 security_idx = tid;
870 } else {
871 /*
872 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
873 *
874 * Sequence numbers for management frames, QoS data
875 * frames with a broadcast/multicast address in the
876 * Address 1 field, and all non-QoS data frames sent
877 * by QoS STAs are assigned using an additional single
878 * modulo-4096 counter, [...]
879 *
880 * We also use that counter for non-QoS STAs.
881 */
882 seqno_idx = IEEE80211_NUM_TIDS;
883 security_idx = 0;
884 if (ieee80211_is_mgmt(hdr->frame_control))
885 security_idx = IEEE80211_NUM_TIDS;
886 tid = 0;
887 }
888
889 rx->seqno_idx = seqno_idx;
890 rx->security_idx = security_idx;
891         /* Set skb->priority to the 802.1d tag if the highest-order bit of the TID is not set.
892 * For now, set skb->priority to 0 for other cases. */
893 rx->skb->priority = (tid > 7) ? 0 : tid;
894 }
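
/*
 * Worked example (informational): a QoS data frame whose QoS Control field
 * is 0x0005 carries TID 5, so seqno_idx and security_idx are both 5 and
 * skb->priority becomes 5; a non-QoS data frame instead uses the shared
 * seqno_idx IEEE80211_NUM_TIDS and priority 0.
 */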
895
896 /**
897 * DOC: Packet alignment
898 *
899 * Drivers always need to pass packets that are aligned to two-byte boundaries
900 * to the stack.
901 *
902  * Additionally, drivers should, if possible, align the payload data in a way that
903 * guarantees that the contained IP header is aligned to a four-byte
904 * boundary. In the case of regular frames, this simply means aligning the
905 * payload to a four-byte boundary (because either the IP header is directly
906 * contained, or IV/RFC1042 headers that have a length divisible by four are
907 * in front of it). If the payload data is not properly aligned and the
908 * architecture doesn't support efficient unaligned operations, mac80211
909 * will align the data.
910 *
911  * With A-MSDU frames, however, the payload data address must be two modulo
912 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
913 * push the IP header further back to a multiple of four again. Thankfully, the
914 * specs were sane enough this time around to require padding each A-MSDU
915 * subframe to a length that is a multiple of four.
916 *
917 * Padding like Atheros hardware adds which is between the 802.11 header and
918 * the payload is not supported, the driver is required to move the 802.11
919 * header to be directly in front of the payload in that case.
920 */
921 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
922 {
923 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
924 WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
925 #endif
926 }
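
/*
 * Illustrative sketch only (hypothetical driver code, not an API usage
 * mandated anywhere): following the "Packet alignment" note above, a
 * driver that copies frames into freshly allocated skbs could reserve two
 * bytes of headroom whenever the 802.11 header length is not a multiple of
 * four, so that the payload (and hence the IP header) ends up four-byte
 * aligned.
 */
#if 0
static void example_driver_align_rx(struct sk_buff *skb, unsigned int hdrlen)
{
	/* call before copying frame data into the (still empty) skb */
	if (hdrlen & 3)
		skb_reserve(skb, 2);
}
#endif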
927
928
929 /* rx handlers */
930
931 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
932 {
933 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
934
935 if (is_multicast_ether_addr(hdr->addr1))
936 return 0;
937
938 return ieee80211_is_robust_mgmt_frame(skb);
939 }
940
941
942 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
943 {
944 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
945
946 if (!is_multicast_ether_addr(hdr->addr1))
947 return 0;
948
949 return ieee80211_is_robust_mgmt_frame(skb);
950 }
951
952
953 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
954 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
955 {
956 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
957 struct ieee80211_mmie *mmie;
958 struct ieee80211_mmie_16 *mmie16;
959
960 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
961 return -1;
962
963 if (!ieee80211_is_robust_mgmt_frame(skb) &&
964 !ieee80211_is_beacon(hdr->frame_control))
965 return -1; /* not a robust management frame */
966
967 mmie = (struct ieee80211_mmie *)
968 (skb->data + skb->len - sizeof(*mmie));
969 if (mmie->element_id == WLAN_EID_MMIE &&
970 mmie->length == sizeof(*mmie) - 2)
971 return le16_to_cpu(mmie->key_id);
972
973 mmie16 = (struct ieee80211_mmie_16 *)
974 (skb->data + skb->len - sizeof(*mmie16));
975 if (skb->len >= 24 + sizeof(*mmie16) &&
976 mmie16->element_id == WLAN_EID_MMIE &&
977 mmie16->length == sizeof(*mmie16) - 2)
978 return le16_to_cpu(mmie16->key_id);
979
980 return -1;
981 }
982
983 static int ieee80211_get_keyid(struct sk_buff *skb,
984 const struct ieee80211_cipher_scheme *cs)
985 {
986 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
987 __le16 fc;
988 int hdrlen;
989 int minlen;
990 u8 key_idx_off;
991 u8 key_idx_shift;
992 u8 keyid;
993
994 fc = hdr->frame_control;
995 hdrlen = ieee80211_hdrlen(fc);
996
997 if (cs) {
998 minlen = hdrlen + cs->hdr_len;
999 key_idx_off = hdrlen + cs->key_idx_off;
1000 key_idx_shift = cs->key_idx_shift;
1001 } else {
1002 /* WEP, TKIP, CCMP and GCMP */
1003 minlen = hdrlen + IEEE80211_WEP_IV_LEN;
1004 key_idx_off = hdrlen + 3;
1005 key_idx_shift = 6;
1006 }
1007
1008 if (unlikely(skb->len < minlen))
1009 return -EINVAL;
1010
1011 skb_copy_bits(skb, key_idx_off, &keyid, 1);
1012
1013 if (cs)
1014 keyid &= cs->key_idx_mask;
1015 keyid >>= key_idx_shift;
1016
1017 /* cs could use more than the usual two bits for the keyid */
1018 if (unlikely(keyid >= NUM_DEFAULT_KEYS))
1019 return -EINVAL;
1020
1021 return keyid;
1022 }
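
/*
 * Worked example (informational): in the default (non-cipher-scheme) case
 * above the key index lives in the top two bits of the fourth IV octet, so
 * with key_idx_off = hdrlen + 3 and key_idx_shift = 6 an IV byte of 0x60
 * (ExtIV set, key bits 01) yields a keyid of 1.
 */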
1023
1024 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
1025 {
1026 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1027 char *dev_addr = rx->sdata->vif.addr;
1028
1029 if (ieee80211_is_data(hdr->frame_control)) {
1030 if (is_multicast_ether_addr(hdr->addr1)) {
1031 if (ieee80211_has_tods(hdr->frame_control) ||
1032 !ieee80211_has_fromds(hdr->frame_control))
1033 return RX_DROP_MONITOR;
1034 if (ether_addr_equal(hdr->addr3, dev_addr))
1035 return RX_DROP_MONITOR;
1036 } else {
1037 if (!ieee80211_has_a4(hdr->frame_control))
1038 return RX_DROP_MONITOR;
1039 if (ether_addr_equal(hdr->addr4, dev_addr))
1040 return RX_DROP_MONITOR;
1041 }
1042 }
1043
1044 /* If there is not an established peer link and this is not a peer link
1045          * establishment frame, beacon or probe, drop the frame.
1046 */
1047
1048 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
1049 struct ieee80211_mgmt *mgmt;
1050
1051 if (!ieee80211_is_mgmt(hdr->frame_control))
1052 return RX_DROP_MONITOR;
1053
1054 if (ieee80211_is_action(hdr->frame_control)) {
1055 u8 category;
1056
1057 /* make sure category field is present */
1058 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
1059 return RX_DROP_MONITOR;
1060
1061 mgmt = (struct ieee80211_mgmt *)hdr;
1062 category = mgmt->u.action.category;
1063 if (category != WLAN_CATEGORY_MESH_ACTION &&
1064 category != WLAN_CATEGORY_SELF_PROTECTED)
1065 return RX_DROP_MONITOR;
1066 return RX_CONTINUE;
1067 }
1068
1069 if (ieee80211_is_probe_req(hdr->frame_control) ||
1070 ieee80211_is_probe_resp(hdr->frame_control) ||
1071 ieee80211_is_beacon(hdr->frame_control) ||
1072 ieee80211_is_auth(hdr->frame_control))
1073 return RX_CONTINUE;
1074
1075 return RX_DROP_MONITOR;
1076 }
1077
1078 return RX_CONTINUE;
1079 }
1080
1081 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
1082 int index)
1083 {
1084 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
1085 struct sk_buff *tail = skb_peek_tail(frames);
1086 struct ieee80211_rx_status *status;
1087
1088 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
1089 return true;
1090
1091 if (!tail)
1092 return false;
1093
1094 status = IEEE80211_SKB_RXCB(tail);
1095 if (status->flag & RX_FLAG_AMSDU_MORE)
1096 return false;
1097
1098 return true;
1099 }
1100
1101 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
1102 struct tid_ampdu_rx *tid_agg_rx,
1103 int index,
1104 struct sk_buff_head *frames)
1105 {
1106 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
1107 struct sk_buff *skb;
1108 struct ieee80211_rx_status *status;
1109
1110 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1111
1112 if (skb_queue_empty(skb_list))
1113 goto no_frame;
1114
1115 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1116 __skb_queue_purge(skb_list);
1117 goto no_frame;
1118 }
1119
1120 /* release frames from the reorder ring buffer */
1121 tid_agg_rx->stored_mpdu_num--;
1122 while ((skb = __skb_dequeue(skb_list))) {
1123 status = IEEE80211_SKB_RXCB(skb);
1124 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
1125 __skb_queue_tail(frames, skb);
1126 }
1127
1128 no_frame:
1129 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
1130 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1131 }
1132
1133 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
1134 struct tid_ampdu_rx *tid_agg_rx,
1135 u16 head_seq_num,
1136 struct sk_buff_head *frames)
1137 {
1138 int index;
1139
1140 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1141
1142 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1143 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1144 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1145 frames);
1146 }
1147 }
1148
1149 /*
1150 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
1151 * the skb was added to the buffer longer than this time ago, the earlier
1152 * frames that have not yet been received are assumed to be lost and the skb
1153 * can be released for processing. This may also release other skb's from the
1154 * reorder buffer if there are no additional gaps between the frames.
1155 *
1156 * Callers must hold tid_agg_rx->reorder_lock.
1157 */
1158 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
1159
1160 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
1161 struct tid_ampdu_rx *tid_agg_rx,
1162 struct sk_buff_head *frames)
1163 {
1164 int index, i, j;
1165
1166 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1167
1168 /* release the buffer until next missing frame */
1169 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1170 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1171 tid_agg_rx->stored_mpdu_num) {
1172 /*
1173 * No buffers ready to be released, but check whether any
1174 * frames in the reorder buffer have timed out.
1175 */
1176 int skipped = 1;
1177 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1178 j = (j + 1) % tid_agg_rx->buf_size) {
1179 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1180 skipped++;
1181 continue;
1182 }
1183 if (skipped &&
1184 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1185 HT_RX_REORDER_BUF_TIMEOUT))
1186 goto set_release_timer;
1187
1188 /* don't leave incomplete A-MSDUs around */
1189 for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1190 i = (i + 1) % tid_agg_rx->buf_size)
1191 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1192
1193 ht_dbg_ratelimited(sdata,
1194 "release an RX reorder frame due to timeout on earlier frames\n");
1195 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1196 frames);
1197
1198 /*
1199 * Increment the head seq# also for the skipped slots.
1200 */
1201 tid_agg_rx->head_seq_num =
1202 (tid_agg_rx->head_seq_num +
1203 skipped) & IEEE80211_SN_MASK;
1204 skipped = 0;
1205 }
1206 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1207 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1208 frames);
1209 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1210 }
1211
1212 if (tid_agg_rx->stored_mpdu_num) {
1213 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1214
1215 for (; j != (index - 1) % tid_agg_rx->buf_size;
1216 j = (j + 1) % tid_agg_rx->buf_size) {
1217 if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1218 break;
1219 }
1220
1221 set_release_timer:
1222
1223 if (!tid_agg_rx->removed)
1224 mod_timer(&tid_agg_rx->reorder_timer,
1225 tid_agg_rx->reorder_time[j] + 1 +
1226 HT_RX_REORDER_BUF_TIMEOUT);
1227 } else {
1228 del_timer(&tid_agg_rx->reorder_timer);
1229 }
1230 }
1231
1232 /*
1233 * As this function belongs to the RX path it must be under
1234 * rcu_read_lock protection. It returns false if the frame
1235 * can be processed immediately, true if it was consumed.
1236 */
1237 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1238 struct tid_ampdu_rx *tid_agg_rx,
1239 struct sk_buff *skb,
1240 struct sk_buff_head *frames)
1241 {
1242 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1243 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1244 u16 sc = le16_to_cpu(hdr->seq_ctrl);
1245 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1246 u16 head_seq_num, buf_size;
1247 int index;
1248 bool ret = true;
1249
1250 spin_lock(&tid_agg_rx->reorder_lock);
1251
1252 /*
1253          * Offloaded BA sessions have no known starting sequence number, so pick
1254          * one from the first frame received for this TID after the BA session started.
1255 */
1256 if (unlikely(tid_agg_rx->auto_seq)) {
1257 tid_agg_rx->auto_seq = false;
1258 tid_agg_rx->ssn = mpdu_seq_num;
1259 tid_agg_rx->head_seq_num = mpdu_seq_num;
1260 }
1261
1262 buf_size = tid_agg_rx->buf_size;
1263 head_seq_num = tid_agg_rx->head_seq_num;
1264
1265 /*
1266 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1267 * be reordered.
1268 */
1269 if (unlikely(!tid_agg_rx->started)) {
1270 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1271 ret = false;
1272 goto out;
1273 }
1274 tid_agg_rx->started = true;
1275 }
1276
1277 /* frame with out of date sequence number */
1278 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1279 dev_kfree_skb(skb);
1280 goto out;
1281 }
1282
1283 /*
1284          * If the frame's sequence number exceeds our buffering window
1285          * size, release some previous frames to make room for this one.
1286 */
1287 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1288 head_seq_num = ieee80211_sn_inc(
1289 ieee80211_sn_sub(mpdu_seq_num, buf_size));
1290 /* release stored frames up to new head to stack */
1291 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1292 head_seq_num, frames);
1293 }
1294
1295 /* Now the new frame is always in the range of the reordering buffer */
1296
1297 index = mpdu_seq_num % tid_agg_rx->buf_size;
1298
1299 /* check if we already stored this frame */
1300 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1301 dev_kfree_skb(skb);
1302 goto out;
1303 }
1304
1305 /*
1306 * If the current MPDU is in the right order and nothing else
1307 * is stored we can process it directly, no need to buffer it.
1308 * If it is first but there's something stored, we may be able
1309 * to release frames after this one.
1310 */
1311 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1312 tid_agg_rx->stored_mpdu_num == 0) {
1313 if (!(status->flag & RX_FLAG_AMSDU_MORE))
1314 tid_agg_rx->head_seq_num =
1315 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1316 ret = false;
1317 goto out;
1318 }
1319
1320 /* put the frame in the reordering buffer */
1321 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1322 if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1323 tid_agg_rx->reorder_time[index] = jiffies;
1324 tid_agg_rx->stored_mpdu_num++;
1325 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1326 }
1327
1328 out:
1329 spin_unlock(&tid_agg_rx->reorder_lock);
1330 return ret;
1331 }
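
/*
 * Worked example (informational): with buf_size 64 and head_seq_num 100,
 * an MPDU with sequence number 170 falls outside the window 100..163, so
 * head_seq_num is advanced to ieee80211_sn_inc(170 - 64) = 107, slots
 * 100..106 are released to the stack, and the new frame is then handled at
 * index 170 % 64 within the (now shifted) window.
 */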
1332
1333 /*
1334  * Reorder MPDUs from A-MPDUs, keeping them in a buffer.  Frames that can
1335  * be processed immediately are appended to the 'frames' queue.
1336 */
1337 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1338 struct sk_buff_head *frames)
1339 {
1340 struct sk_buff *skb = rx->skb;
1341 struct ieee80211_local *local = rx->local;
1342 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1343 struct sta_info *sta = rx->sta;
1344 struct tid_ampdu_rx *tid_agg_rx;
1345 u16 sc;
1346 u8 tid, ack_policy;
1347
1348 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1349 is_multicast_ether_addr(hdr->addr1))
1350 goto dont_reorder;
1351
1352 /*
1353 * filter the QoS data rx stream according to
1354 * STA/TID and check if this STA/TID is on aggregation
1355 */
1356
1357 if (!sta)
1358 goto dont_reorder;
1359
1360 ack_policy = *ieee80211_get_qos_ctl(hdr) &
1361 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1362 tid = ieee80211_get_tid(hdr);
1363
1364 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1365 if (!tid_agg_rx) {
1366 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1367 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1368 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1369 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1370 WLAN_BACK_RECIPIENT,
1371 WLAN_REASON_QSTA_REQUIRE_SETUP);
1372 goto dont_reorder;
1373 }
1374
1375 /* qos null data frames are excluded */
1376 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1377 goto dont_reorder;
1378
1379 /* not part of a BA session */
1380 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1381 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
1382 goto dont_reorder;
1383
1384 /* new, potentially un-ordered, ampdu frame - process it */
1385
1386 /* reset session timer */
1387 if (tid_agg_rx->timeout)
1388 tid_agg_rx->last_rx = jiffies;
1389
1390 /* if this mpdu is fragmented - terminate rx aggregation session */
1391 sc = le16_to_cpu(hdr->seq_ctrl);
1392 if (sc & IEEE80211_SCTL_FRAG) {
1393 skb_queue_tail(&rx->sdata->skb_queue, skb);
1394 ieee80211_queue_work(&local->hw, &rx->sdata->work);
1395 return;
1396 }
1397
1398 /*
1399 * No locking needed -- we will only ever process one
1400 * RX packet at a time, and thus own tid_agg_rx. All
1401 * other code manipulating it needs to (and does) make
1402 * sure that we cannot get to it any more before doing
1403 * anything with it.
1404 */
1405 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1406 frames))
1407 return;
1408
1409 dont_reorder:
1410 __skb_queue_tail(frames, skb);
1411 }
1412
1413 static ieee80211_rx_result debug_noinline
1414 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1415 {
1416 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1417 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1418
1419 if (status->flag & RX_FLAG_DUP_VALIDATED)
1420 return RX_CONTINUE;
1421
1422 /*
1423 * Drop duplicate 802.11 retransmissions
1424 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1425 */
1426
1427 if (rx->skb->len < 24)
1428 return RX_CONTINUE;
1429
1430 if (ieee80211_is_ctl(hdr->frame_control) ||
1431 ieee80211_is_any_nullfunc(hdr->frame_control) ||
1432 is_multicast_ether_addr(hdr->addr1))
1433 return RX_CONTINUE;
1434
1435 if (!rx->sta)
1436 return RX_CONTINUE;
1437
1438 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1439 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1440 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1441 rx->sta->rx_stats.num_duplicates++;
1442 return RX_DROP_UNUSABLE;
1443 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1444 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1445 }
1446
1447 return RX_CONTINUE;
1448 }
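
/*
 * Informational note: seq_ctrl carries both the 12-bit sequence number and
 * the 4-bit fragment number, so a retransmission is only treated as a
 * duplicate here when the retry bit is set and both of those fields match
 * the last frame accepted for this seqno_idx.
 */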
1449
1450 static ieee80211_rx_result debug_noinline
1451 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1452 {
1453 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1454
1455 /* Drop disallowed frame classes based on STA auth/assoc state;
1456 * IEEE 802.11, Chap 5.5.
1457 *
1458 * mac80211 filters only based on association state, i.e. it drops
1459 * Class 3 frames from not associated stations. hostapd sends
1460 * deauth/disassoc frames when needed. In addition, hostapd is
1461 * responsible for filtering on both auth and assoc states.
1462 */
1463
1464 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1465 return ieee80211_rx_mesh_check(rx);
1466
1467 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1468 ieee80211_is_pspoll(hdr->frame_control)) &&
1469 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1470 rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1471 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1472 /*
1473 * accept port control frames from the AP even when it's not
1474 * yet marked ASSOC to prevent a race where we don't set the
1475 * assoc bit quickly enough before it sends the first frame
1476 */
1477 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1478 ieee80211_is_data_present(hdr->frame_control)) {
1479 unsigned int hdrlen;
1480 __be16 ethertype;
1481
1482 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1483
1484 if (rx->skb->len < hdrlen + 8)
1485 return RX_DROP_MONITOR;
1486
1487 skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1488 if (ethertype == rx->sdata->control_port_protocol)
1489 return RX_CONTINUE;
1490 }
1491
1492 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1493 cfg80211_rx_spurious_frame(rx->sdata->dev,
1494 hdr->addr2,
1495 GFP_ATOMIC))
1496 return RX_DROP_UNUSABLE;
1497
1498 return RX_DROP_MONITOR;
1499 }
1500
1501 return RX_CONTINUE;
1502 }
1503
1504
1505 static ieee80211_rx_result debug_noinline
1506 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1507 {
1508 struct ieee80211_local *local;
1509 struct ieee80211_hdr *hdr;
1510 struct sk_buff *skb;
1511
1512 local = rx->local;
1513 skb = rx->skb;
1514 hdr = (struct ieee80211_hdr *) skb->data;
1515
1516 if (!local->pspolling)
1517 return RX_CONTINUE;
1518
1519 if (!ieee80211_has_fromds(hdr->frame_control))
1520 /* this is not from AP */
1521 return RX_CONTINUE;
1522
1523 if (!ieee80211_is_data(hdr->frame_control))
1524 return RX_CONTINUE;
1525
1526 if (!ieee80211_has_moredata(hdr->frame_control)) {
1527 /* AP has no more frames buffered for us */
1528 local->pspolling = false;
1529 return RX_CONTINUE;
1530 }
1531
1532 /* more data bit is set, let's request a new frame from the AP */
1533 ieee80211_send_pspoll(local, rx->sdata);
1534
1535 return RX_CONTINUE;
1536 }
1537
1538 static void sta_ps_start(struct sta_info *sta)
1539 {
1540 struct ieee80211_sub_if_data *sdata = sta->sdata;
1541 struct ieee80211_local *local = sdata->local;
1542 struct ps_data *ps;
1543 int tid;
1544
1545 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1546 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1547 ps = &sdata->bss->ps;
1548 else
1549 return;
1550
1551 atomic_inc(&ps->num_sta_ps);
1552 set_sta_flag(sta, WLAN_STA_PS_STA);
1553 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1554 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1555 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1556 sta->sta.addr, sta->sta.aid);
1557
1558 ieee80211_clear_fast_xmit(sta);
1559
1560 if (!sta->sta.txq[0])
1561 return;
1562
1563 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
1564 struct ieee80211_txq *txq = sta->sta.txq[tid];
1565 struct txq_info *txqi = to_txq_info(txq);
1566
1567 spin_lock(&local->active_txq_lock[txq->ac]);
1568 if (!list_empty(&txqi->schedule_order))
1569 list_del_init(&txqi->schedule_order);
1570 spin_unlock(&local->active_txq_lock[txq->ac]);
1571
1572 if (txq_has_queue(txq))
1573 set_bit(tid, &sta->txq_buffered_tids);
1574 else
1575 clear_bit(tid, &sta->txq_buffered_tids);
1576 }
1577 }
1578
1579 static void sta_ps_end(struct sta_info *sta)
1580 {
1581 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1582 sta->sta.addr, sta->sta.aid);
1583
1584 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1585 /*
1586 * Clear the flag only if the other one is still set
1587 * so that the TX path won't start TX'ing new frames
1588 * directly ... In the case that the driver flag isn't
1589 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1590 */
1591 clear_sta_flag(sta, WLAN_STA_PS_STA);
1592 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1593 sta->sta.addr, sta->sta.aid);
1594 return;
1595 }
1596
1597 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1598 clear_sta_flag(sta, WLAN_STA_PS_STA);
1599 ieee80211_sta_ps_deliver_wakeup(sta);
1600 }
1601
1602 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1603 {
1604 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1605 bool in_ps;
1606
1607 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1608
1609 /* Don't let the same PS state be set twice */
1610 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1611 if ((start && in_ps) || (!start && !in_ps))
1612 return -EINVAL;
1613
1614 if (start)
1615 sta_ps_start(sta);
1616 else
1617 sta_ps_end(sta);
1618
1619 return 0;
1620 }
1621 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
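
/*
 * Usage sketch (illustrative only; the driver context and names below are
 * hypothetical): a driver that advertises AP_LINK_PS and tracks the PM bit
 * in hardware could report powersave transitions from its RX path roughly
 * like this.
 */
#if 0
static void example_driver_report_ps(struct ieee80211_sta *pubsta,
				     bool sleeping)
{
	/* returns -EINVAL if the station is already in the requested state */
	if (ieee80211_sta_ps_transition(pubsta, sleeping))
		pr_debug("PS transition ignored\n");
}
#endif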
1622
1623 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1624 {
1625 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1626
1627 if (test_sta_flag(sta, WLAN_STA_SP))
1628 return;
1629
1630 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1631 ieee80211_sta_ps_deliver_poll_response(sta);
1632 else
1633 set_sta_flag(sta, WLAN_STA_PSPOLL);
1634 }
1635 EXPORT_SYMBOL(ieee80211_sta_pspoll);
1636
1637 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1638 {
1639 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1640 int ac = ieee80211_ac_from_tid(tid);
1641
1642 /*
1643 * If this AC is not trigger-enabled do nothing unless the
1644 * driver is calling us after it already checked.
1645 *
1646 * NB: This could/should check a separate bitmap of trigger-
1647 * enabled queues, but for now we only implement uAPSD w/o
1648 * TSPEC changes to the ACs, so they're always the same.
1649 */
1650 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1651 tid != IEEE80211_NUM_TIDS)
1652 return;
1653
1654 /* if we are in a service period, do nothing */
1655 if (test_sta_flag(sta, WLAN_STA_SP))
1656 return;
1657
1658 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1659 ieee80211_sta_ps_deliver_uapsd(sta);
1660 else
1661 set_sta_flag(sta, WLAN_STA_UAPSD);
1662 }
1663 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1664
1665 static ieee80211_rx_result debug_noinline
1666 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1667 {
1668 struct ieee80211_sub_if_data *sdata = rx->sdata;
1669 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1670 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1671
1672 if (!rx->sta)
1673 return RX_CONTINUE;
1674
1675 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1676 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1677 return RX_CONTINUE;
1678
1679 /*
1680 * The device handles station powersave, so don't do anything about
1681          * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1682          * to mac80211 by the device, since it handles them).
1683 */
1684 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1685 return RX_CONTINUE;
1686
1687 /*
1688 * Don't do anything if the station isn't already asleep. In
1689 * the uAPSD case, the station will probably be marked asleep,
1690 * in the PS-Poll case the station must be confused ...
1691 */
1692 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1693 return RX_CONTINUE;
1694
1695 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1696 ieee80211_sta_pspoll(&rx->sta->sta);
1697
1698 /* Free PS Poll skb here instead of returning RX_DROP that would
1699                  * count as a dropped frame. */
1700 dev_kfree_skb(rx->skb);
1701
1702 return RX_QUEUED;
1703 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1704 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1705 ieee80211_has_pm(hdr->frame_control) &&
1706 (ieee80211_is_data_qos(hdr->frame_control) ||
1707 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1708 u8 tid = ieee80211_get_tid(hdr);
1709
1710 ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1711 }
1712
1713 return RX_CONTINUE;
1714 }
1715
1716 static ieee80211_rx_result debug_noinline
1717 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1718 {
1719 struct sta_info *sta = rx->sta;
1720 struct sk_buff *skb = rx->skb;
1721 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1722 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1723 int i;
1724
1725 if (!sta)
1726 return RX_CONTINUE;
1727
1728 /*
1729 * Update last_rx only for IBSS packets which are for the current
1730 * BSSID and for station already AUTHORIZED to avoid keeping the
1731 * current IBSS network alive in cases where other STAs start
1732 * using different BSSID. This will also give the station another
1733 * chance to restart the authentication/authorization in case
1734 * something went wrong the first time.
1735 */
1736 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1737 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1738 NL80211_IFTYPE_ADHOC);
1739 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1740 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1741 sta->rx_stats.last_rx = jiffies;
1742 if (ieee80211_is_data(hdr->frame_control) &&
1743 !is_multicast_ether_addr(hdr->addr1))
1744 sta->rx_stats.last_rate =
1745 sta_stats_encode_rate(status);
1746 }
1747 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1748 sta->rx_stats.last_rx = jiffies;
1749 } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
1750 !is_multicast_ether_addr(hdr->addr1)) {
1751 /*
1752                  * Mesh beacons will update last_rx if they are found to
1753 * match the current local configuration when processed.
1754 */
1755 sta->rx_stats.last_rx = jiffies;
1756 if (ieee80211_is_data(hdr->frame_control))
1757 sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1758 }
1759
1760 sta->rx_stats.fragments++;
1761
1762 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
1763 sta->rx_stats.bytes += rx->skb->len;
1764 u64_stats_update_end(&rx->sta->rx_stats.syncp);
1765
1766 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1767 sta->rx_stats.last_signal = status->signal;
1768 ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
1769 }
1770
1771 if (status->chains) {
1772 sta->rx_stats.chains = status->chains;
1773 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1774 int signal = status->chain_signal[i];
1775
1776 if (!(status->chains & BIT(i)))
1777 continue;
1778
1779 sta->rx_stats.chain_signal_last[i] = signal;
1780 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
1781 -signal);
1782 }
1783 }
1784
1785 if (ieee80211_is_s1g_beacon(hdr->frame_control))
1786 return RX_CONTINUE;
1787
1788 /*
1789 * Change STA power saving mode only at the end of a frame
1790 * exchange sequence, and only for a data or management
1791 * frame as specified in IEEE 802.11-2016 11.2.3.2
1792 */
1793 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1794 !ieee80211_has_morefrags(hdr->frame_control) &&
1795 !is_multicast_ether_addr(hdr->addr1) &&
1796 (ieee80211_is_mgmt(hdr->frame_control) ||
1797 ieee80211_is_data(hdr->frame_control)) &&
1798 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1799 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1800 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1801 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1802 if (!ieee80211_has_pm(hdr->frame_control))
1803 sta_ps_end(sta);
1804 } else {
1805 if (ieee80211_has_pm(hdr->frame_control))
1806 sta_ps_start(sta);
1807 }
1808 }
1809
1810 /* mesh power save support */
1811 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1812 ieee80211_mps_rx_h_sta_process(sta, hdr);
1813
1814 /*
1815 * Drop (qos-)data::nullfunc frames silently, since they
1816 * are used only to control station power saving mode.
1817 */
1818 if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1819 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1820
1821 /*
1822 * If we receive a 4-addr nullfunc frame from a STA
1823 * that has not yet been moved to a 4-addr STA VLAN,
1824 * send the event to userspace; for older hostapd,
1825 * drop the frame to the monitor interface.
1826 */
1827 if (ieee80211_has_a4(hdr->frame_control) &&
1828 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1829 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1830 !rx->sdata->u.vlan.sta))) {
1831 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1832 cfg80211_rx_unexpected_4addr_frame(
1833 rx->sdata->dev, sta->sta.addr,
1834 GFP_ATOMIC);
1835 return RX_DROP_MONITOR;
1836 }
1837 /*
1838 * Update counter and free packet here to avoid
1839 * counting this as a dropped packet.
1840 */
1841 sta->rx_stats.packets++;
1842 dev_kfree_skb(rx->skb);
1843 return RX_QUEUED;
1844 }
1845
1846 return RX_CONTINUE;
1847 } /* ieee80211_rx_h_sta_process */
1848
1849 static struct ieee80211_key *
1850 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
1851 {
1852 struct ieee80211_key *key = NULL;
1853 struct ieee80211_sub_if_data *sdata = rx->sdata;
1854 int idx2;
1855
1856 /* Make sure key gets set if either BIGTK key index is set so that
1857 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
1858 * Beacon frames and Beacon frames that claim to use another BIGTK key
1859 * index (i.e., a key that we do not have).
1860 */
1861
1862 if (idx < 0) {
1863 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1864 idx2 = idx + 1;
1865 } else {
1866 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1867 idx2 = idx + 1;
1868 else
1869 idx2 = idx - 1;
1870 }
1871
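/* try the STA's group key first, then the interface key, for idx and then idx2 */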
1872 if (rx->sta)
1873 key = rcu_dereference(rx->sta->gtk[idx]);
1874 if (!key)
1875 key = rcu_dereference(sdata->keys[idx]);
1876 if (!key && rx->sta)
1877 key = rcu_dereference(rx->sta->gtk[idx2]);
1878 if (!key)
1879 key = rcu_dereference(sdata->keys[idx2]);
1880
1881 return key;
1882 }
1883
1884 static ieee80211_rx_result debug_noinline
1885 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1886 {
1887 struct sk_buff *skb = rx->skb;
1888 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1889 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1890 int keyidx;
1891 ieee80211_rx_result result = RX_DROP_UNUSABLE;
1892 struct ieee80211_key *sta_ptk = NULL;
1893 struct ieee80211_key *ptk_idx = NULL;
1894 int mmie_keyidx = -1;
1895 __le16 fc;
1896 const struct ieee80211_cipher_scheme *cs = NULL;
1897
1898 if (ieee80211_is_ext(hdr->frame_control))
1899 return RX_CONTINUE;
1900
1901 /*
1902 * Key selection 101
1903 *
1904 * There are five types of keys:
1905 * - GTK (group keys)
1906 * - IGTK (group keys for management frames)
1907 * - BIGTK (group keys for Beacon frames)
1908 * - PTK (pairwise keys)
1909 * - STK (station-to-station pairwise keys)
1910 *
1911 * When selecting a key, we have to distinguish between multicast
1912 * (including broadcast) and unicast frames, the latter can only
1913 * use PTKs and STKs while the former always use GTKs, IGTKs, and
1914 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used,
1915 * then unicast frames can also use key indices like GTKs. Hence, if we
1916 * don't have a PTK/STK we check the key index for a WEP key.
1917 *
1918 * Note that in a regular BSS, multicast frames are sent only by
1919 * the AP; associated stations unicast the frame to the AP first,
1920 * which then multicasts it on their behalf.
1921 *
1922 * There is also a slight problem in IBSS mode: GTKs are negotiated
1923 * with each station, that is something we don't currently handle.
1924 * The spec seems to expect that one negotiates the same key with
1925 * every station but there's no such requirement; VLANs could be
1926 * possible.
1927 */
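/*
 * Illustrative summary of the selection below: a protected unicast
 * data frame from an associated STA uses that STA's PTK (chosen by
 * the key ID in the frame), while a protected group-addressed data
 * frame falls back to the per-STA GTK or the interface key for the
 * key index found in the frame.
 */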
1928
1929 /* start without a key */
1930 rx->key = NULL;
1931 fc = hdr->frame_control;
1932
1933 if (rx->sta) {
1934 int keyid = rx->sta->ptk_idx;
1935 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1936
1937 if (ieee80211_has_protected(fc)) {
1938 cs = rx->sta->cipher_scheme;
1939 keyid = ieee80211_get_keyid(rx->skb, cs);
1940
1941 if (unlikely(keyid < 0))
1942 return RX_DROP_UNUSABLE;
1943
1944 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
1945 }
1946 }
1947
1948 if (!ieee80211_has_protected(fc))
1949 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1950
1951 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1952 rx->key = ptk_idx ? ptk_idx : sta_ptk;
1953 if ((status->flag & RX_FLAG_DECRYPTED) &&
1954 (status->flag & RX_FLAG_IV_STRIPPED))
1955 return RX_CONTINUE;
1956 /* Skip decryption if the frame is not protected. */
1957 if (!ieee80211_has_protected(fc))
1958 return RX_CONTINUE;
1959 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
1960 /* Broadcast/multicast robust management frame / BIP */
1961 if ((status->flag & RX_FLAG_DECRYPTED) &&
1962 (status->flag & RX_FLAG_IV_STRIPPED))
1963 return RX_CONTINUE;
1964
1965 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
1966 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
1967 NUM_DEFAULT_BEACON_KEYS) {
1968 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
1969 skb->data,
1970 skb->len);
1971 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1972 }
1973
1974 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
1975 if (!rx->key)
1976 return RX_CONTINUE; /* Beacon protection not in use */
1977 } else if (mmie_keyidx >= 0) {
1978 /* Broadcast/multicast robust management frame / BIP */
1979 if ((status->flag & RX_FLAG_DECRYPTED) &&
1980 (status->flag & RX_FLAG_IV_STRIPPED))
1981 return RX_CONTINUE;
1982
1983 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1984 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1985 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1986 if (rx->sta) {
1987 if (ieee80211_is_group_privacy_action(skb) &&
1988 test_sta_flag(rx->sta, WLAN_STA_MFP))
1989 return RX_DROP_MONITOR;
1990
1991 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1992 }
1993 if (!rx->key)
1994 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1995 } else if (!ieee80211_has_protected(fc)) {
1996 /*
1997 * The frame was not protected, so skip decryption. However, we
1998 * need to set rx->key if there is a key that could have been
1999 * used so that the frame may be dropped if encryption would
2000 * have been expected.
2001 */
2002 struct ieee80211_key *key = NULL;
2003 struct ieee80211_sub_if_data *sdata = rx->sdata;
2004 int i;
2005
2006 if (ieee80211_is_beacon(fc)) {
2007 key = ieee80211_rx_get_bigtk(rx, -1);
2008 } else if (ieee80211_is_mgmt(fc) &&
2009 is_multicast_ether_addr(hdr->addr1)) {
2010 key = rcu_dereference(rx->sdata->default_mgmt_key);
2011 } else {
2012 if (rx->sta) {
2013 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2014 key = rcu_dereference(rx->sta->gtk[i]);
2015 if (key)
2016 break;
2017 }
2018 }
2019 if (!key) {
2020 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2021 key = rcu_dereference(sdata->keys[i]);
2022 if (key)
2023 break;
2024 }
2025 }
2026 }
2027 if (key)
2028 rx->key = key;
2029 return RX_CONTINUE;
2030 } else {
2031 /*
2032 * The device doesn't give us the IV so we won't be
2033 * able to look up the key. That's ok though, we
2034 * don't need to decrypt the frame, we just won't
2035 * be able to keep statistics accurate.
2036 * Except for key threshold notifications, should
2037 * we somehow allow the driver to tell us which key
2038 * the hardware used if this flag is set?
2039 */
2040 if ((status->flag & RX_FLAG_DECRYPTED) &&
2041 (status->flag & RX_FLAG_IV_STRIPPED))
2042 return RX_CONTINUE;
2043
2044 keyidx = ieee80211_get_keyid(rx->skb, cs);
2045
2046 if (unlikely(keyidx < 0))
2047 return RX_DROP_UNUSABLE;
2048
2049 /* check per-station GTK first, if multicast packet */
2050 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
2051 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
2052
2053 /* if not found, try default key */
2054 if (!rx->key) {
2055 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
2056
2057 /*
2058 * RSNA-protected unicast frames should always be
2059 * sent with pairwise or station-to-station keys,
2060 * but for WEP we allow using a key index as well.
2061 */
2062 if (rx->key &&
2063 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
2064 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
2065 !is_multicast_ether_addr(hdr->addr1))
2066 rx->key = NULL;
2067 }
2068 }
2069
2070 if (rx->key) {
2071 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
2072 return RX_DROP_MONITOR;
2073
2074 /* TODO: add threshold stuff again */
2075 } else {
2076 return RX_DROP_MONITOR;
2077 }
2078
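/*
 * Dispatch to the cipher-specific decrypt/verify routine; anything
 * not handled in software falls through to the driver's hardware
 * decrypt path.
 */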
2079 switch (rx->key->conf.cipher) {
2080 case WLAN_CIPHER_SUITE_WEP40:
2081 case WLAN_CIPHER_SUITE_WEP104:
2082 result = ieee80211_crypto_wep_decrypt(rx);
2083 break;
2084 case WLAN_CIPHER_SUITE_TKIP:
2085 result = ieee80211_crypto_tkip_decrypt(rx);
2086 break;
2087 case WLAN_CIPHER_SUITE_CCMP:
2088 result = ieee80211_crypto_ccmp_decrypt(
2089 rx, IEEE80211_CCMP_MIC_LEN);
2090 break;
2091 case WLAN_CIPHER_SUITE_CCMP_256:
2092 result = ieee80211_crypto_ccmp_decrypt(
2093 rx, IEEE80211_CCMP_256_MIC_LEN);
2094 break;
2095 case WLAN_CIPHER_SUITE_AES_CMAC:
2096 result = ieee80211_crypto_aes_cmac_decrypt(rx);
2097 break;
2098 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
2099 result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
2100 break;
2101 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2102 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2103 result = ieee80211_crypto_aes_gmac_decrypt(rx);
2104 break;
2105 case WLAN_CIPHER_SUITE_GCMP:
2106 case WLAN_CIPHER_SUITE_GCMP_256:
2107 result = ieee80211_crypto_gcmp_decrypt(rx);
2108 break;
2109 default:
2110 result = ieee80211_crypto_hw_decrypt(rx);
2111 }
2112
2113 /* the hdr variable is invalid after the decrypt handlers */
2114
2115 /* either the frame has been decrypted or will be dropped */
2116 status->flag |= RX_FLAG_DECRYPTED;
2117
2118 if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE))
2119 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2120 skb->data, skb->len);
2121
2122 return result;
2123 }
2124
2125 static inline struct ieee80211_fragment_entry *
2126 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
2127 unsigned int frag, unsigned int seq, int rx_queue,
2128 struct sk_buff **skb)
2129 {
2130 struct ieee80211_fragment_entry *entry;
2131
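/*
 * fragments[] is used as a small ring buffer: take the next slot
 * (wrapping at IEEE80211_FRAGMENT_MAX) and recycle it, purging any
 * partially reassembled frame it still holds.
 */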
2132 entry = &sdata->fragments[sdata->fragment_next++];
2133 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
2134 sdata->fragment_next = 0;
2135
2136 if (!skb_queue_empty(&entry->skb_list))
2137 __skb_queue_purge(&entry->skb_list);
2138
2139 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
2140 *skb = NULL;
2141 entry->first_frag_time = jiffies;
2142 entry->seq = seq;
2143 entry->rx_queue = rx_queue;
2144 entry->last_frag = frag;
2145 entry->check_sequential_pn = false;
2146 entry->extra_len = 0;
2147
2148 return entry;
2149 }
2150
2151 static inline struct ieee80211_fragment_entry *
2152 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
2153 unsigned int frag, unsigned int seq,
2154 int rx_queue, struct ieee80211_hdr *hdr)
2155 {
2156 struct ieee80211_fragment_entry *entry;
2157 int i, idx;
2158
2159 idx = sdata->fragment_next;
2160 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
2161 struct ieee80211_hdr *f_hdr;
2162 struct sk_buff *f_skb;
2163
2164 idx--;
2165 if (idx < 0)
2166 idx = IEEE80211_FRAGMENT_MAX - 1;
2167
2168 entry = &sdata->fragments[idx];
2169 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
2170 entry->rx_queue != rx_queue ||
2171 entry->last_frag + 1 != frag)
2172 continue;
2173
2174 f_skb = __skb_peek(&entry->skb_list);
2175 f_hdr = (struct ieee80211_hdr *) f_skb->data;
2176
2177 /*
2178 * Check ftype and addresses are equal, else check next fragment
2179 */
2180 if (((hdr->frame_control ^ f_hdr->frame_control) &
2181 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
2182 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
2183 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
2184 continue;
2185
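/* entries older than about two seconds (2 * HZ) are stale; purge and keep looking */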
2186 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
2187 __skb_queue_purge(&entry->skb_list);
2188 continue;
2189 }
2190 return entry;
2191 }
2192
2193 return NULL;
2194 }
2195
2196 static ieee80211_rx_result debug_noinline
2197 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2198 {
2199 struct ieee80211_hdr *hdr;
2200 u16 sc;
2201 __le16 fc;
2202 unsigned int frag, seq;
2203 struct ieee80211_fragment_entry *entry;
2204 struct sk_buff *skb;
2205
2206 hdr = (struct ieee80211_hdr *)rx->skb->data;
2207 fc = hdr->frame_control;
2208
2209 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
2210 return RX_CONTINUE;
2211
2212 sc = le16_to_cpu(hdr->seq_ctrl);
2213 frag = sc & IEEE80211_SCTL_FRAG;
2214
2215 if (is_multicast_ether_addr(hdr->addr1)) {
2216 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
2217 goto out_no_led;
2218 }
2219
2220 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2221 goto out;
2222
2223 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2224
2225 if (skb_linearize(rx->skb))
2226 return RX_DROP_UNUSABLE;
2227
2228 /*
2229 * skb_linearize() might change the skb->data and
2230 * previously cached variables (in this case, hdr) need to
2231 * be refreshed with the new data.
2232 */
2233 hdr = (struct ieee80211_hdr *)rx->skb->data;
2234 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2235
2236 if (frag == 0) {
2237 /* This is the first fragment of a new frame. */
2238 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
2239 rx->seqno_idx, &(rx->skb));
2240 if (rx->key &&
2241 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2242 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2243 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2244 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2245 ieee80211_has_protected(fc)) {
2246 int queue = rx->security_idx;
2247
2248 /* Store CCMP/GCMP PN so that we can verify that the
2249 * next fragment has a sequential PN value.
2250 */
2251 entry->check_sequential_pn = true;
2252 memcpy(entry->last_pn,
2253 rx->key->u.ccmp.rx_pn[queue],
2254 IEEE80211_CCMP_PN_LEN);
2255 BUILD_BUG_ON(offsetof(struct ieee80211_key,
2256 u.ccmp.rx_pn) !=
2257 offsetof(struct ieee80211_key,
2258 u.gcmp.rx_pn));
2259 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2260 sizeof(rx->key->u.gcmp.rx_pn[queue]));
2261 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2262 IEEE80211_GCMP_PN_LEN);
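/*
 * The BUILD_BUG_ONs above ensure the CCMP and GCMP PN state share
 * the same layout and length, so the u.ccmp fields can be used for
 * both ciphers here.
 */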
2263 }
2264 return RX_QUEUED;
2265 }
2266
2267 /* This is a fragment for a frame that should already be pending in
2268 * the fragment cache. Add this fragment to the end of the pending entry.
2269 */
2270 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
2271 rx->seqno_idx, hdr);
2272 if (!entry) {
2273 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2274 return RX_DROP_MONITOR;
2275 }
2276
2277 /* "The receiver shall discard MSDUs and MMPDUs whose constituent
2278 * MPDU PN values are not incrementing in steps of 1."
2279 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2280 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2281 */
2282 if (entry->check_sequential_pn) {
2283 int i;
2284 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2285 int queue;
2286
2287 if (!rx->key ||
2288 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
2289 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
2290 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
2291 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
2292 return RX_DROP_UNUSABLE;
2293 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
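/*
 * Compute last_pn + 1: the PN is treated as a big-endian counter,
 * so increment the last byte and carry toward the front.
 */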
2294 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2295 pn[i]++;
2296 if (pn[i])
2297 break;
2298 }
2299 queue = rx->security_idx;
2300 rpn = rx->key->u.ccmp.rx_pn[queue];
2301 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2302 return RX_DROP_UNUSABLE;
2303 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2304 }
2305
2306 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2307 __skb_queue_tail(&entry->skb_list, rx->skb);
2308 entry->last_frag = frag;
2309 entry->extra_len += rx->skb->len;
2310 if (ieee80211_has_morefrags(fc)) {
2311 rx->skb = NULL;
2312 return RX_QUEUED;
2313 }
2314
2315 rx->skb = __skb_dequeue(&entry->skb_list);
2316 if (skb_tailroom(rx->skb) < entry->extra_len) {
2317 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2318 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2319 GFP_ATOMIC))) {
2320 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2321 __skb_queue_purge(&entry->skb_list);
2322 return RX_DROP_UNUSABLE;
2323 }
2324 }
2325 while ((skb = __skb_dequeue(&entry->skb_list))) {
2326 skb_put_data(rx->skb, skb->data, skb->len);
2327 dev_kfree_skb(skb);
2328 }
2329
2330 out:
2331 ieee80211_led_rx(rx->local);
2332 out_no_led:
2333 if (rx->sta)
2334 rx->sta->rx_stats.packets++;
2335 return RX_CONTINUE;
2336 }
2337
2338 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2339 {
2340 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2341 return -EACCES;
2342
2343 return 0;
2344 }
2345
2346 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2347 {
2348 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
2349 struct sk_buff *skb = rx->skb;
2350 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2351
2352 /*
2353 * Pass through unencrypted frames if the hardware has
2354 * decrypted them already.
2355 */
2356 if (status->flag & RX_FLAG_DECRYPTED)
2357 return 0;
2358
2359 /* check mesh EAPOL frames first */
2360 if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
2361 ieee80211_is_data(fc))) {
2362 struct ieee80211s_hdr *mesh_hdr;
2363 u16 hdr_len = ieee80211_hdrlen(fc);
2364 u16 ethertype_offset;
2365 __be16 ethertype;
2366
2367 if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
2368 goto drop_check;
2369
2370 /* make sure fixed part of mesh header is there, also checks skb len */
2371 if (!pskb_may_pull(rx->skb, hdr_len + 6))
2372 goto drop_check;
2373
2374 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
2375 ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
2376 sizeof(rfc1042_header);
2377
2378 if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
2379 ethertype == rx->sdata->control_port_protocol)
2380 return 0;
2381 }
2382
2383 drop_check:
2384 /* Drop unencrypted frames if key is set. */
2385 if (unlikely(!ieee80211_has_protected(fc) &&
2386 !ieee80211_is_any_nullfunc(fc) &&
2387 ieee80211_is_data(fc) && rx->key))
2388 return -EACCES;
2389
2390 return 0;
2391 }
2392
2393 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2394 {
2395 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2396 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2397 __le16 fc = hdr->frame_control;
2398
2399 /*
2400 * Pass through unencrypted frames if the hardware has
2401 * decrypted them already.
2402 */
2403 if (status->flag & RX_FLAG_DECRYPTED)
2404 return 0;
2405
2406 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2407 if (unlikely(!ieee80211_has_protected(fc) &&
2408 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2409 rx->key)) {
2410 if (ieee80211_is_deauth(fc) ||
2411 ieee80211_is_disassoc(fc))
2412 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2413 rx->skb->data,
2414 rx->skb->len);
2415 return -EACCES;
2416 }
2417 /* BIP does not use the Protected field, so we need to check the MMIE */
2418 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2419 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2420 if (ieee80211_is_deauth(fc) ||
2421 ieee80211_is_disassoc(fc))
2422 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2423 rx->skb->data,
2424 rx->skb->len);
2425 return -EACCES;
2426 }
2427 if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
2428 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2429 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2430 rx->skb->data,
2431 rx->skb->len);
2432 return -EACCES;
2433 }
2434 /*
2435 * When using MFP, Action frames are not allowed prior to
2436 * having configured keys.
2437 */
2438 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2439 ieee80211_is_robust_mgmt_frame(rx->skb)))
2440 return -EACCES;
2441 }
2442
2443 return 0;
2444 }
2445
2446 static int
2447 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2448 {
2449 struct ieee80211_sub_if_data *sdata = rx->sdata;
2450 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2451 bool check_port_control = false;
2452 struct ethhdr *ehdr;
2453 int ret;
2454
2455 *port_control = false;
2456 if (ieee80211_has_a4(hdr->frame_control) &&
2457 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2458 return -1;
2459
2460 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2461 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2462
2463 if (!sdata->u.mgd.use_4addr)
2464 return -1;
2465 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
2466 check_port_control = true;
2467 }
2468
2469 if (is_multicast_ether_addr(hdr->addr1) &&
2470 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2471 return -1;
2472
2473 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2474 if (ret < 0)
2475 return ret;
2476
2477 ehdr = (struct ethhdr *) rx->skb->data;
2478 if (ehdr->h_proto == rx->sdata->control_port_protocol)
2479 *port_control = true;
2480 else if (check_port_control)
2481 return -1;
2482
2483 return 0;
2484 }
2485
2486 /*
2487 * requires that rx->skb is a frame with ethernet header
2488 */
2489 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2490 {
2491 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2492 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2493 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2494
2495 /*
2496 * Allow EAPOL frames to us/the PAE group address regardless
2497 * of whether the frame was encrypted or not.
2498 */
2499 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
2500 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
2501 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
2502 return true;
2503
2504 if (ieee80211_802_1x_port_control(rx) ||
2505 ieee80211_drop_unencrypted(rx, fc))
2506 return false;
2507
2508 return true;
2509 }
2510
2511 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2512 struct ieee80211_rx_data *rx)
2513 {
2514 struct ieee80211_sub_if_data *sdata = rx->sdata;
2515 struct net_device *dev = sdata->dev;
2516
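/*
 * EAPOL (and, unless disabled, pre-auth) frames may be handed to the
 * nl80211 control port instead of the regular network stack when the
 * interface was configured with control_port_over_nl80211.
 */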
2517 if (unlikely((skb->protocol == sdata->control_port_protocol ||
2518 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
2519 !sdata->control_port_no_preauth)) &&
2520 sdata->control_port_over_nl80211)) {
2521 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2522 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2523
2524 cfg80211_rx_control_port(dev, skb, noencrypt);
2525 dev_kfree_skb(skb);
2526 } else {
2527 memset(skb->cb, 0, sizeof(skb->cb));
2528
2529 /* deliver to local stack */
2530 if (rx->list)
2531 list_add_tail(&skb->list, rx->list);
2532 else
2533 netif_receive_skb(skb);
2534 }
2535 }
2536
2537 /*
2538 * requires that rx->skb is a frame with ethernet header
2539 */
2540 static void
2541 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2542 {
2543 struct ieee80211_sub_if_data *sdata = rx->sdata;
2544 struct net_device *dev = sdata->dev;
2545 struct sk_buff *skb, *xmit_skb;
2546 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2547 struct sta_info *dsta;
2548
2549 skb = rx->skb;
2550 xmit_skb = NULL;
2551
2552 dev_sw_netstats_rx_add(dev, skb->len);
2553
2554 if (rx->sta) {
2555 /* The seqno index has the same property as needed
2556 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2557 * for non-QoS-data frames. Here we know it's a data
2558 * frame, so count MSDUs.
2559 */
2560 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
2561 rx->sta->rx_stats.msdu[rx->seqno_idx]++;
2562 u64_stats_update_end(&rx->sta->rx_stats.syncp);
2563 }
2564
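/*
 * In AP/AP_VLAN mode (unless bridging is disabled), frames destined
 * for other associated stations are sent back out over the wireless
 * medium via xmit_skb; multicast frames may be both delivered
 * locally and retransmitted.
 */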
2565 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2566 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2567 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2568 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2569 if (is_multicast_ether_addr(ehdr->h_dest) &&
2570 ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2571 /*
2572 * send multicast frames both to higher layers in
2573 * local net stack and back to the wireless medium
2574 */
2575 xmit_skb = skb_copy(skb, GFP_ATOMIC);
2576 if (!xmit_skb)
2577 net_info_ratelimited("%s: failed to clone multicast frame\n",
2578 dev->name);
2579 } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
2580 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
2581 dsta = sta_info_get(sdata, ehdr->h_dest);
2582 if (dsta) {
2583 /*
2584 * The destination station is associated to
2585 * this AP (in this VLAN), so send the frame
2586 * directly to it and do not pass it to local
2587 * net stack.
2588 */
2589 xmit_skb = skb;
2590 skb = NULL;
2591 }
2592 }
2593 }
2594
2595 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2596 if (skb) {
2597 /* 'align' will only take the values 0 or 2 here since all
2598 * frames are required to be aligned to 2-byte boundaries
2599 * when being passed to mac80211; the code here works just
2600 * as well if that isn't true, but mac80211 assumes it can
2601 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2602 */
2603 int align;
2604
2605 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2606 if (align) {
2607 if (WARN_ON(skb_headroom(skb) < 3)) {
2608 dev_kfree_skb(skb);
2609 skb = NULL;
2610 } else {
2611 u8 *data = skb->data;
2612 size_t len = skb_headlen(skb);
2613 skb->data -= align;
2614 memmove(skb->data, data, len);
2615 skb_set_tail_pointer(skb, len);
2616 }
2617 }
2618 }
2619 #endif
2620
2621 if (skb) {
2622 skb->protocol = eth_type_trans(skb, dev);
2623 ieee80211_deliver_skb_to_local_stack(skb, rx);
2624 }
2625
2626 if (xmit_skb) {
2627 /*
2628 * Send to wireless media and increase priority by 256 to
2629 * keep the received priority instead of reclassifying
2630 * the frame (see cfg80211_classify8021d).
2631 */
2632 xmit_skb->priority += 256;
2633 xmit_skb->protocol = htons(ETH_P_802_3);
2634 skb_reset_network_header(xmit_skb);
2635 skb_reset_mac_header(xmit_skb);
2636 dev_queue_xmit(xmit_skb);
2637 }
2638 }
2639
2640 static ieee80211_rx_result debug_noinline
2641 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2642 {
2643 struct net_device *dev = rx->sdata->dev;
2644 struct sk_buff *skb = rx->skb;
2645 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2646 __le16 fc = hdr->frame_control;
2647 struct sk_buff_head frame_list;
2648 struct ethhdr ethhdr;
2649 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2650
2651 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2652 check_da = NULL;
2653 check_sa = NULL;
2654 } else switch (rx->sdata->vif.type) {
2655 case NL80211_IFTYPE_AP:
2656 case NL80211_IFTYPE_AP_VLAN:
2657 check_da = NULL;
2658 break;
2659 case NL80211_IFTYPE_STATION:
2660 if (!rx->sta ||
2661 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2662 check_sa = NULL;
2663 break;
2664 case NL80211_IFTYPE_MESH_POINT:
2665 check_sa = NULL;
2666 break;
2667 default:
2668 break;
2669 }
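/*
 * A NULL check_da/check_sa disables the corresponding inner-address
 * check when the A-MSDU subframes are converted to 802.3 frames
 * below.
 */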
2670
2671 skb->dev = dev;
2672 __skb_queue_head_init(&frame_list);
2673
2674 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2675 rx->sdata->vif.addr,
2676 rx->sdata->vif.type,
2677 data_offset))
2678 return RX_DROP_UNUSABLE;
2679
2680 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2681 rx->sdata->vif.type,
2682 rx->local->hw.extra_tx_headroom,
2683 check_da, check_sa);
2684
2685 while (!skb_queue_empty(&frame_list)) {
2686 rx->skb = __skb_dequeue(&frame_list);
2687
2688 if (!ieee80211_frame_allowed(rx, fc)) {
2689 dev_kfree_skb(rx->skb);
2690 continue;
2691 }
2692
2693 ieee80211_deliver_skb(rx);
2694 }
2695
2696 return RX_QUEUED;
2697 }
2698
2699 static ieee80211_rx_result debug_noinline
2700 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2701 {
2702 struct sk_buff *skb = rx->skb;
2703 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2704 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2705 __le16 fc = hdr->frame_control;
2706
2707 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2708 return RX_CONTINUE;
2709
2710 if (unlikely(!ieee80211_is_data(fc)))
2711 return RX_CONTINUE;
2712
2713 if (unlikely(!ieee80211_is_data_present(fc)))
2714 return RX_DROP_MONITOR;
2715
2716 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2717 switch (rx->sdata->vif.type) {
2718 case NL80211_IFTYPE_AP_VLAN:
2719 if (!rx->sdata->u.vlan.sta)
2720 return RX_DROP_UNUSABLE;
2721 break;
2722 case NL80211_IFTYPE_STATION:
2723 if (!rx->sdata->u.mgd.use_4addr)
2724 return RX_DROP_UNUSABLE;
2725 break;
2726 default:
2727 return RX_DROP_UNUSABLE;
2728 }
2729 }
2730
2731 if (is_multicast_ether_addr(hdr->addr1))
2732 return RX_DROP_UNUSABLE;
2733
2734 return __ieee80211_rx_h_amsdu(rx, 0);
2735 }
2736
2737 #ifdef CONFIG_MAC80211_MESH
2738 static ieee80211_rx_result
2739 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2740 {
2741 struct ieee80211_hdr *fwd_hdr, *hdr;
2742 struct ieee80211_tx_info *info;
2743 struct ieee80211s_hdr *mesh_hdr;
2744 struct sk_buff *skb = rx->skb, *fwd_skb;
2745 struct ieee80211_local *local = rx->local;
2746 struct ieee80211_sub_if_data *sdata = rx->sdata;
2747 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2748 u16 ac, q, hdrlen;
2749 int tailroom = 0;
2750
2751 hdr = (struct ieee80211_hdr *) skb->data;
2752 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2753
2754 /* make sure fixed part of mesh header is there, also checks skb len */
2755 if (!pskb_may_pull(rx->skb, hdrlen + 6))
2756 return RX_DROP_MONITOR;
2757
2758 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2759
2760 /* make sure full mesh header is there, also checks skb len */
2761 if (!pskb_may_pull(rx->skb,
2762 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2763 return RX_DROP_MONITOR;
2764
2765 /* reload pointers */
2766 hdr = (struct ieee80211_hdr *) skb->data;
2767 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2768
2769 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2770 return RX_DROP_MONITOR;
2771
2772 /* frame is in RMC, don't forward */
2773 if (ieee80211_is_data(hdr->frame_control) &&
2774 is_multicast_ether_addr(hdr->addr1) &&
2775 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2776 return RX_DROP_MONITOR;
2777
2778 if (!ieee80211_is_data(hdr->frame_control))
2779 return RX_CONTINUE;
2780
2781 if (!mesh_hdr->ttl)
2782 return RX_DROP_MONITOR;
2783
2784 if (mesh_hdr->flags & MESH_FLAGS_AE) {
2785 struct mesh_path *mppath;
2786 char *proxied_addr;
2787 char *mpp_addr;
2788
2789 if (is_multicast_ether_addr(hdr->addr1)) {
2790 mpp_addr = hdr->addr3;
2791 proxied_addr = mesh_hdr->eaddr1;
2792 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2793 MESH_FLAGS_AE_A5_A6) {
2794 /* has_a4 already checked in ieee80211_rx_mesh_check */
2795 mpp_addr = hdr->addr4;
2796 proxied_addr = mesh_hdr->eaddr2;
2797 } else {
2798 return RX_DROP_MONITOR;
2799 }
2800
2801 rcu_read_lock();
2802 mppath = mpp_path_lookup(sdata, proxied_addr);
2803 if (!mppath) {
2804 mpp_path_add(sdata, proxied_addr, mpp_addr);
2805 } else {
2806 spin_lock_bh(&mppath->state_lock);
2807 if (!ether_addr_equal(mppath->mpp, mpp_addr))
2808 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2809 mppath->exp_time = jiffies;
2810 spin_unlock_bh(&mppath->state_lock);
2811 }
2812 rcu_read_unlock();
2813 }
2814
2815 /* Frame has reached destination. Don't forward */
2816 if (!is_multicast_ether_addr(hdr->addr1) &&
2817 ether_addr_equal(sdata->vif.addr, hdr->addr3))
2818 return RX_CONTINUE;
2819
2820 ac = ieee80211_select_queue_80211(sdata, skb, hdr);
2821 q = sdata->vif.hw_queue[ac];
2822 if (ieee80211_queue_stopped(&local->hw, q)) {
2823 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2824 return RX_DROP_MONITOR;
2825 }
2826 skb_set_queue_mapping(skb, q);
2827
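/*
 * Decrement the mesh TTL; once it reaches zero the frame is no
 * longer forwarded (group-addressed frames are still delivered
 * locally via the out label).
 */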
2828 if (!--mesh_hdr->ttl) {
2829 if (!is_multicast_ether_addr(hdr->addr1))
2830 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
2831 dropped_frames_ttl);
2832 goto out;
2833 }
2834
2835 if (!ifmsh->mshcfg.dot11MeshForwarding)
2836 goto out;
2837
2838 if (sdata->crypto_tx_tailroom_needed_cnt)
2839 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2840
2841 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2842 sdata->encrypt_headroom,
2843 tailroom, GFP_ATOMIC);
2844 if (!fwd_skb)
2845 goto out;
2846
2847 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
2848 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2849 info = IEEE80211_SKB_CB(fwd_skb);
2850 memset(info, 0, sizeof(*info));
2851 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
2852 info->control.vif = &rx->sdata->vif;
2853 info->control.jiffies = jiffies;
2854 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2855 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2856 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2857 /* update power mode indication when forwarding */
2858 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2859 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2860 /* mesh power mode flags updated in mesh_nexthop_lookup */
2861 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2862 } else {
2863 /* unable to resolve next hop */
2864 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2865 fwd_hdr->addr3, 0,
2866 WLAN_REASON_MESH_PATH_NOFORWARD,
2867 fwd_hdr->addr2);
2868 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2869 kfree_skb(fwd_skb);
2870 return RX_DROP_MONITOR;
2871 }
2872
2873 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2874 ieee80211_add_pending_skb(local, fwd_skb);
2875 out:
2876 if (is_multicast_ether_addr(hdr->addr1))
2877 return RX_CONTINUE;
2878 return RX_DROP_MONITOR;
2879 }
2880 #endif
2881
2882 static ieee80211_rx_result debug_noinline
2883 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2884 {
2885 struct ieee80211_sub_if_data *sdata = rx->sdata;
2886 struct ieee80211_local *local = rx->local;
2887 struct net_device *dev = sdata->dev;
2888 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2889 __le16 fc = hdr->frame_control;
2890 bool port_control;
2891 int err;
2892
2893 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2894 return RX_CONTINUE;
2895
2896 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2897 return RX_DROP_MONITOR;
2898
2899 /*
2900 * Send unexpected-4addr-frame event to hostapd. For older versions,
2901 * also drop the frame to cooked monitor interfaces.
2902 */
2903 if (ieee80211_has_a4(hdr->frame_control) &&
2904 sdata->vif.type == NL80211_IFTYPE_AP) {
2905 if (rx->sta &&
2906 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2907 cfg80211_rx_unexpected_4addr_frame(
2908 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2909 return RX_DROP_MONITOR;
2910 }
2911
2912 err = __ieee80211_data_to_8023(rx, &port_control);
2913 if (unlikely(err))
2914 return RX_DROP_UNUSABLE;
2915
2916 if (!ieee80211_frame_allowed(rx, fc))
2917 return RX_DROP_MONITOR;
2918
2919 /* directly handle TDLS channel switch requests/responses */
2920 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
2921 cpu_to_be16(ETH_P_TDLS))) {
2922 struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
2923
2924 if (pskb_may_pull(rx->skb,
2925 offsetof(struct ieee80211_tdls_data, u)) &&
2926 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
2927 tf->category == WLAN_CATEGORY_TDLS &&
2928 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
2929 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
2930 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
2931 schedule_work(&local->tdls_chsw_work);
2932 if (rx->sta)
2933 rx->sta->rx_stats.packets++;
2934
2935 return RX_QUEUED;
2936 }
2937 }
2938
2939 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2940 unlikely(port_control) && sdata->bss) {
2941 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2942 u.ap);
2943 dev = sdata->dev;
2944 rx->sdata = sdata;
2945 }
2946
2947 rx->skb->dev = dev;
2948
2949 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2950 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2951 !is_multicast_ether_addr(
2952 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2953 (!local->scanning &&
2954 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
2955 mod_timer(&local->dynamic_ps_timer, jiffies +
2956 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2957
2958 ieee80211_deliver_skb(rx);
2959
2960 return RX_QUEUED;
2961 }
2962
2963 static ieee80211_rx_result debug_noinline
2964 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2965 {
2966 struct sk_buff *skb = rx->skb;
2967 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2968 struct tid_ampdu_rx *tid_agg_rx;
2969 u16 start_seq_num;
2970 u16 tid;
2971
2972 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2973 return RX_CONTINUE;
2974
2975 if (ieee80211_is_back_req(bar->frame_control)) {
2976 struct {
2977 __le16 control, start_seq_num;
2978 } __packed bar_data;
2979 struct ieee80211_event event = {
2980 .type = BAR_RX_EVENT,
2981 };
2982
2983 if (!rx->sta)
2984 return RX_DROP_MONITOR;
2985
2986 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2987 &bar_data, sizeof(bar_data)))
2988 return RX_DROP_MONITOR;
2989
2990 tid = le16_to_cpu(bar_data.control) >> 12;
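/* the TID is carried in the top four bits of the BAR control field */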
2991
2992 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
2993 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
2994 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
2995 WLAN_BACK_RECIPIENT,
2996 WLAN_REASON_QSTA_REQUIRE_SETUP);
2997
2998 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2999 if (!tid_agg_rx)
3000 return RX_DROP_MONITOR;
3001
3002 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
3003 event.u.ba.tid = tid;
3004 event.u.ba.ssn = start_seq_num;
3005 event.u.ba.sta = &rx->sta->sta;
3006
3007 /* reset session timer */
3008 if (tid_agg_rx->timeout)
3009 mod_timer(&tid_agg_rx->session_timer,
3010 TU_TO_EXP_TIME(tid_agg_rx->timeout));
3011
3012 spin_lock(&tid_agg_rx->reorder_lock);
3013 /* release stored frames up to start of BAR */
3014 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
3015 start_seq_num, frames);
3016 spin_unlock(&tid_agg_rx->reorder_lock);
3017
3018 drv_event_callback(rx->local, rx->sdata, &event);
3019
3020 kfree_skb(skb);
3021 return RX_QUEUED;
3022 }
3023
3024 /*
3025 * After this point, we only want management frames,
3026 * so we can drop all remaining control frames to
3027 * cooked monitor interfaces.
3028 */
3029 return RX_DROP_MONITOR;
3030 }
3031
3032 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
3033 struct ieee80211_mgmt *mgmt,
3034 size_t len)
3035 {
3036 struct ieee80211_local *local = sdata->local;
3037 struct sk_buff *skb;
3038 struct ieee80211_mgmt *resp;
3039
3040 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
3041 /* Not addressed to our own unicast address */
3042 return;
3043 }
3044
3045 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
3046 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
3047 /* Not from the current AP or not associated yet. */
3048 return;
3049 }
3050
3051 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
3052 /* Too short SA Query request frame */
3053 return;
3054 }
3055
3056 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
3057 if (skb == NULL)
3058 return;
3059
3060 skb_reserve(skb, local->hw.extra_tx_headroom);
3061 resp = skb_put_zero(skb, 24);
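/* 24 bytes covers the fixed management header; the SA Query action body is reserved and filled in below */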
3062 memcpy(resp->da, mgmt->sa, ETH_ALEN);
3063 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
3064 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
3065 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3066 IEEE80211_STYPE_ACTION);
3067 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
3068 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
3069 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
3070 memcpy(resp->u.action.u.sa_query.trans_id,
3071 mgmt->u.action.u.sa_query.trans_id,
3072 WLAN_SA_QUERY_TR_ID_LEN);
3073
3074 ieee80211_tx_skb(sdata, skb);
3075 }
3076
3077 static ieee80211_rx_result debug_noinline
3078 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
3079 {
3080 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3081 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3082
3083 if (ieee80211_is_s1g_beacon(mgmt->frame_control))
3084 return RX_CONTINUE;
3085
3086 /*
3087 * From here on, look only at management frames.
3088 * Data and control frames are already handled,
3089 * and unknown (reserved) frames are useless.
3090 */
3091 if (rx->skb->len < 24)
3092 return RX_DROP_MONITOR;
3093
3094 if (!ieee80211_is_mgmt(mgmt->frame_control))
3095 return RX_DROP_MONITOR;
3096
3097 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
3098 ieee80211_is_beacon(mgmt->frame_control) &&
3099 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
3100 int sig = 0;
3101
3102 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3103 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3104 sig = status->signal;
3105
3106 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
3107 rx->skb->data, rx->skb->len,
3108 ieee80211_rx_status_to_khz(status),
3109 sig);
3110 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
3111 }
3112
3113 if (ieee80211_drop_unencrypted_mgmt(rx))
3114 return RX_DROP_UNUSABLE;
3115
3116 return RX_CONTINUE;
3117 }
3118
3119 static ieee80211_rx_result debug_noinline
3120 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
3121 {
3122 struct ieee80211_local *local = rx->local;
3123 struct ieee80211_sub_if_data *sdata = rx->sdata;
3124 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3125 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3126 int len = rx->skb->len;
3127
3128 if (!ieee80211_is_action(mgmt->frame_control))
3129 return RX_CONTINUE;
3130
3131 /* drop too small frames */
3132 if (len < IEEE80211_MIN_ACTION_SIZE)
3133 return RX_DROP_UNUSABLE;
3134
3135 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
3136 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
3137 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
3138 return RX_DROP_UNUSABLE;
3139
3140 switch (mgmt->u.action.category) {
3141 case WLAN_CATEGORY_HT:
3142 /* reject HT action frames from stations not supporting HT */
3143 if (!rx->sta->sta.ht_cap.ht_supported)
3144 goto invalid;
3145
3146 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3147 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3148 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3149 sdata->vif.type != NL80211_IFTYPE_AP &&
3150 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3151 break;
3152
3153 /* verify action & smps_control/chanwidth are present */
3154 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3155 goto invalid;
3156
3157 switch (mgmt->u.action.u.ht_smps.action) {
3158 case WLAN_HT_ACTION_SMPS: {
3159 struct ieee80211_supported_band *sband;
3160 enum ieee80211_smps_mode smps_mode;
3161 struct sta_opmode_info sta_opmode = {};
3162
3163 if (sdata->vif.type != NL80211_IFTYPE_AP &&
3164 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
3165 goto handled;
3166
3167 /* convert to HT capability */
3168 switch (mgmt->u.action.u.ht_smps.smps_control) {
3169 case WLAN_HT_SMPS_CONTROL_DISABLED:
3170 smps_mode = IEEE80211_SMPS_OFF;
3171 break;
3172 case WLAN_HT_SMPS_CONTROL_STATIC:
3173 smps_mode = IEEE80211_SMPS_STATIC;
3174 break;
3175 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
3176 smps_mode = IEEE80211_SMPS_DYNAMIC;
3177 break;
3178 default:
3179 goto invalid;
3180 }
3181
3182 /* if no change do nothing */
3183 if (rx->sta->sta.smps_mode == smps_mode)
3184 goto handled;
3185 rx->sta->sta.smps_mode = smps_mode;
3186 sta_opmode.smps_mode =
3187 ieee80211_smps_mode_to_smps_mode(smps_mode);
3188 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
3189
3190 sband = rx->local->hw.wiphy->bands[status->band];
3191
3192 rate_control_rate_update(local, sband, rx->sta,
3193 IEEE80211_RC_SMPS_CHANGED);
3194 cfg80211_sta_opmode_change_notify(sdata->dev,
3195 rx->sta->addr,
3196 &sta_opmode,
3197 GFP_ATOMIC);
3198 goto handled;
3199 }
3200 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
3201 struct ieee80211_supported_band *sband;
3202 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
3203 enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
3204 struct sta_opmode_info sta_opmode = {};
3205
3206 /* If it doesn't support 40 MHz it can't change ... */
3207 if (!(rx->sta->sta.ht_cap.cap &
3208 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3209 goto handled;
3210
3211 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
3212 max_bw = IEEE80211_STA_RX_BW_20;
3213 else
3214 max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
3215
3216 /* set cur_max_bandwidth and recalc sta bw */
3217 rx->sta->cur_max_bandwidth = max_bw;
3218 new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
3219
3220 if (rx->sta->sta.bandwidth == new_bw)
3221 goto handled;
3222
3223 rx->sta->sta.bandwidth = new_bw;
3224 sband = rx->local->hw.wiphy->bands[status->band];
3225 sta_opmode.bw =
3226 ieee80211_sta_rx_bw_to_chan_width(rx->sta);
3227 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
3228
3229 rate_control_rate_update(local, sband, rx->sta,
3230 IEEE80211_RC_BW_CHANGED);
3231 cfg80211_sta_opmode_change_notify(sdata->dev,
3232 rx->sta->addr,
3233 &sta_opmode,
3234 GFP_ATOMIC);
3235 goto handled;
3236 }
3237 default:
3238 goto invalid;
3239 }
3240
3241 break;
3242 case WLAN_CATEGORY_PUBLIC:
3243 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3244 goto invalid;
3245 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3246 break;
3247 if (!rx->sta)
3248 break;
3249 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
3250 break;
3251 if (mgmt->u.action.u.ext_chan_switch.action_code !=
3252 WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3253 break;
3254 if (len < offsetof(struct ieee80211_mgmt,
3255 u.action.u.ext_chan_switch.variable))
3256 goto invalid;
3257 goto queue;
3258 case WLAN_CATEGORY_VHT:
3259 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3260 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3261 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3262 sdata->vif.type != NL80211_IFTYPE_AP &&
3263 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3264 break;
3265
3266 /* verify action code is present */
3267 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3268 goto invalid;
3269
3270 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3271 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3272 /* verify opmode is present */
3273 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3274 goto invalid;
3275 goto queue;
3276 }
3277 case WLAN_VHT_ACTION_GROUPID_MGMT: {
3278 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3279 goto invalid;
3280 goto queue;
3281 }
3282 default:
3283 break;
3284 }
3285 break;
3286 case WLAN_CATEGORY_BACK:
3287 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3288 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3289 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3290 sdata->vif.type != NL80211_IFTYPE_AP &&
3291 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3292 break;
3293
3294 /* verify action_code is present */
3295 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3296 break;
3297
3298 switch (mgmt->u.action.u.addba_req.action_code) {
3299 case WLAN_ACTION_ADDBA_REQ:
3300 if (len < (IEEE80211_MIN_ACTION_SIZE +
3301 sizeof(mgmt->u.action.u.addba_req)))
3302 goto invalid;
3303 break;
3304 case WLAN_ACTION_ADDBA_RESP:
3305 if (len < (IEEE80211_MIN_ACTION_SIZE +
3306 sizeof(mgmt->u.action.u.addba_resp)))
3307 goto invalid;
3308 break;
3309 case WLAN_ACTION_DELBA:
3310 if (len < (IEEE80211_MIN_ACTION_SIZE +
3311 sizeof(mgmt->u.action.u.delba)))
3312 goto invalid;
3313 break;
3314 default:
3315 goto invalid;
3316 }
3317
3318 goto queue;
3319 case WLAN_CATEGORY_SPECTRUM_MGMT:
3320 /* verify action_code is present */
3321 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3322 break;
3323
3324 switch (mgmt->u.action.u.measurement.action_code) {
3325 case WLAN_ACTION_SPCT_MSR_REQ:
3326 if (status->band != NL80211_BAND_5GHZ)
3327 break;
3328
3329 if (len < (IEEE80211_MIN_ACTION_SIZE +
3330 sizeof(mgmt->u.action.u.measurement)))
3331 break;
3332
3333 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3334 break;
3335
3336 ieee80211_process_measurement_req(sdata, mgmt, len);
3337 goto handled;
3338 case WLAN_ACTION_SPCT_CHL_SWITCH: {
3339 u8 *bssid;
3340 if (len < (IEEE80211_MIN_ACTION_SIZE +
3341 sizeof(mgmt->u.action.u.chan_switch)))
3342 break;
3343
3344 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3345 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3346 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3347 break;
3348
3349 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3350 bssid = sdata->u.mgd.bssid;
3351 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3352 bssid = sdata->u.ibss.bssid;
3353 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3354 bssid = mgmt->sa;
3355 else
3356 break;
3357
3358 if (!ether_addr_equal(mgmt->bssid, bssid))
3359 break;
3360
3361 goto queue;
3362 }
3363 }
3364 break;
3365 case WLAN_CATEGORY_SELF_PROTECTED:
3366 if (len < (IEEE80211_MIN_ACTION_SIZE +
3367 sizeof(mgmt->u.action.u.self_prot.action_code)))
3368 break;
3369
3370 switch (mgmt->u.action.u.self_prot.action_code) {
3371 case WLAN_SP_MESH_PEERING_OPEN:
3372 case WLAN_SP_MESH_PEERING_CLOSE:
3373 case WLAN_SP_MESH_PEERING_CONFIRM:
3374 if (!ieee80211_vif_is_mesh(&sdata->vif))
3375 goto invalid;
3376 if (sdata->u.mesh.user_mpm)
3377 /* userspace handles this frame */
3378 break;
3379 goto queue;
3380 case WLAN_SP_MGK_INFORM:
3381 case WLAN_SP_MGK_ACK:
3382 if (!ieee80211_vif_is_mesh(&sdata->vif))
3383 goto invalid;
3384 break;
3385 }
3386 break;
3387 case WLAN_CATEGORY_MESH_ACTION:
3388 if (len < (IEEE80211_MIN_ACTION_SIZE +
3389 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3390 break;
3391
3392 if (!ieee80211_vif_is_mesh(&sdata->vif))
3393 break;
3394 if (mesh_action_is_path_sel(mgmt) &&
3395 !mesh_path_sel_is_hwmp(sdata))
3396 break;
3397 goto queue;
3398 }
3399
3400 return RX_CONTINUE;
3401
3402 invalid:
3403 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3404 /* the frame will be returned to the sender by a later handler */
3405 return RX_CONTINUE;
3406
3407 handled:
3408 if (rx->sta)
3409 rx->sta->rx_stats.packets++;
3410 dev_kfree_skb(rx->skb);
3411 return RX_QUEUED;
3412
3413 queue:
3414 skb_queue_tail(&sdata->skb_queue, rx->skb);
3415 ieee80211_queue_work(&local->hw, &sdata->work);
3416 if (rx->sta)
3417 rx->sta->rx_stats.packets++;
3418 return RX_QUEUED;
3419 }
3420
3421 static ieee80211_rx_result debug_noinline
3422 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3423 {
3424 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3425 int sig = 0;
3426
3427 /* skip known-bad action frames and return them in the next handler */
3428 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3429 return RX_CONTINUE;
3430
3431 /*
3432 * Getting here means the kernel doesn't know how to handle
3433 * it, but maybe userspace does ... include returned frames
3434 * so userspace can register for those to know whether the ones
3435 * it transmitted were processed or returned.
3436 */
3437
3438 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3439 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3440 sig = status->signal;
3441
3442 if (cfg80211_rx_mgmt_khz(&rx->sdata->wdev,
3443 ieee80211_rx_status_to_khz(status), sig,
3444 rx->skb->data, rx->skb->len, 0)) {
3445 if (rx->sta)
3446 rx->sta->rx_stats.packets++;
3447 dev_kfree_skb(rx->skb);
3448 return RX_QUEUED;
3449 }
3450
3451 return RX_CONTINUE;
3452 }
3453
3454 static ieee80211_rx_result debug_noinline
3455 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
3456 {
3457 struct ieee80211_sub_if_data *sdata = rx->sdata;
3458 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3459 int len = rx->skb->len;
3460
3461 if (!ieee80211_is_action(mgmt->frame_control))
3462 return RX_CONTINUE;
3463
3464 switch (mgmt->u.action.category) {
3465 case WLAN_CATEGORY_SA_QUERY:
3466 if (len < (IEEE80211_MIN_ACTION_SIZE +
3467 sizeof(mgmt->u.action.u.sa_query)))
3468 break;
3469
3470 switch (mgmt->u.action.u.sa_query.action) {
3471 case WLAN_ACTION_SA_QUERY_REQUEST:
3472 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3473 break;
3474 ieee80211_process_sa_query_req(sdata, mgmt, len);
3475 goto handled;
3476 }
3477 break;
3478 }
3479
3480 return RX_CONTINUE;
3481
3482 handled:
3483 if (rx->sta)
3484 rx->sta->rx_stats.packets++;
3485 dev_kfree_skb(rx->skb);
3486 return RX_QUEUED;
3487 }
3488
3489 static ieee80211_rx_result debug_noinline
3490 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3491 {
3492 struct ieee80211_local *local = rx->local;
3493 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3494 struct sk_buff *nskb;
3495 struct ieee80211_sub_if_data *sdata = rx->sdata;
3496 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3497
3498 if (!ieee80211_is_action(mgmt->frame_control))
3499 return RX_CONTINUE;
3500
3501 /*
3502 * For AP mode, hostapd is responsible for handling any action
3503 * frames that we didn't handle, including returning unknown
3504 * ones. For all other modes we will return them to the sender,
3505 * setting the 0x80 bit in the action category, as required by
3506 * 802.11-2012 9.24.4.
3507 * Newer versions of hostapd shall also use the management frame
3508 * registration mechanisms, but older ones still use cooked
3509 * monitor interfaces so push all frames there.
3510 */
3511 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3512 (sdata->vif.type == NL80211_IFTYPE_AP ||
3513 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3514 return RX_DROP_MONITOR;
3515
3516 if (is_multicast_ether_addr(mgmt->da))
3517 return RX_DROP_MONITOR;
3518
3519 /* do not return rejected action frames */
3520 if (mgmt->u.action.category & 0x80)
3521 return RX_DROP_UNUSABLE;
3522
3523 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3524 GFP_ATOMIC);
3525 if (nskb) {
3526 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3527
3528 nmgmt->u.action.category |= 0x80;
3529 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3530 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3531
3532 memset(nskb->cb, 0, sizeof(nskb->cb));
3533
3534 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3535 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3536
3537 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3538 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3539 IEEE80211_TX_CTL_NO_CCK_RATE;
3540 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3541 info->hw_queue =
3542 local->hw.offchannel_tx_hw_queue;
3543 }
3544
3545 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
3546 status->band);
3547 }
3548 dev_kfree_skb(rx->skb);
3549 return RX_QUEUED;
3550 }
3551
3552 static ieee80211_rx_result debug_noinline
3553 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
3554 {
3555 struct ieee80211_sub_if_data *sdata = rx->sdata;
3556 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
3557
3558 if (!ieee80211_is_ext(hdr->frame_control))
3559 return RX_CONTINUE;
3560
3561 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3562 return RX_DROP_MONITOR;
3563
3564 /* for now only beacons are ext, so queue them */
3565 skb_queue_tail(&sdata->skb_queue, rx->skb);
3566 ieee80211_queue_work(&rx->local->hw, &sdata->work);
3567 if (rx->sta)
3568 rx->sta->rx_stats.packets++;
3569
3570 return RX_QUEUED;
3571 }
3572
3573 static ieee80211_rx_result debug_noinline
3574 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3575 {
3576 struct ieee80211_sub_if_data *sdata = rx->sdata;
3577 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3578 __le16 stype;
3579
3580 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
3581
3582 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3583 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3584 sdata->vif.type != NL80211_IFTYPE_OCB &&
3585 sdata->vif.type != NL80211_IFTYPE_STATION)
3586 return RX_DROP_MONITOR;
3587
3588 switch (stype) {
3589 case cpu_to_le16(IEEE80211_STYPE_AUTH):
3590 case cpu_to_le16(IEEE80211_STYPE_BEACON):
3591 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3592 /* process for all: mesh, mlme, ibss */
3593 break;
3594 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3595 if (is_multicast_ether_addr(mgmt->da) &&
3596 !is_broadcast_ether_addr(mgmt->da))
3597 return RX_DROP_MONITOR;
3598
3599 /* process only for station/IBSS */
3600 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3601 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3602 return RX_DROP_MONITOR;
3603 break;
3604 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3605 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3606 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3607 if (is_multicast_ether_addr(mgmt->da) &&
3608 !is_broadcast_ether_addr(mgmt->da))
3609 return RX_DROP_MONITOR;
3610
3611 /* process only for station */
3612 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3613 return RX_DROP_MONITOR;
3614 break;
3615 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3616 /* process only for ibss and mesh */
3617 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3618 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3619 return RX_DROP_MONITOR;
3620 break;
3621 default:
3622 return RX_DROP_MONITOR;
3623 }
3624
3625 /* queue up frame and kick off work to process it */
3626 skb_queue_tail(&sdata->skb_queue, rx->skb);
3627 ieee80211_queue_work(&rx->local->hw, &sdata->work);
3628 if (rx->sta)
3629 rx->sta->rx_stats.packets++;
3630
3631 return RX_QUEUED;
3632 }
3633
3634 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3635 struct ieee80211_rate *rate)
3636 {
3637 struct ieee80211_sub_if_data *sdata;
3638 struct ieee80211_local *local = rx->local;
3639 struct sk_buff *skb = rx->skb, *skb2;
3640 struct net_device *prev_dev = NULL;
3641 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3642 int needed_headroom;
3643
3644 /*
3645 * If cooked monitor has been processed already, then
3646 * don't do it again. If not, set the flag.
3647 */
3648 if (rx->flags & IEEE80211_RX_CMNTR)
3649 goto out_free_skb;
3650 rx->flags |= IEEE80211_RX_CMNTR;
3651
3652 /* If there are no cooked monitor interfaces, just free the SKB */
3653 if (!local->cooked_mntrs)
3654 goto out_free_skb;
3655
3656 /* vendor data has long been removed by this point */
3657 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3658 /* room for the radiotap header based on driver features */
3659 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3660
3661 if (skb_headroom(skb) < needed_headroom &&
3662 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3663 goto out_free_skb;
3664
3665 /* prepend radiotap information */
3666 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3667 false);
3668
3669 skb_reset_mac_header(skb);
3670 skb->ip_summed = CHECKSUM_UNNECESSARY;
3671 skb->pkt_type = PACKET_OTHERHOST;
3672 skb->protocol = htons(ETH_P_802_2);
3673
3674 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3675 if (!ieee80211_sdata_running(sdata))
3676 continue;
3677
3678 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3679 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3680 continue;
3681
3682 if (prev_dev) {
3683 skb2 = skb_clone(skb, GFP_ATOMIC);
3684 if (skb2) {
3685 skb2->dev = prev_dev;
3686 netif_receive_skb(skb2);
3687 }
3688 }
3689
3690 prev_dev = sdata->dev;
3691 dev_sw_netstats_rx_add(sdata->dev, skb->len);
3692 }
3693
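/*
 * The loop above clones the skb for every matching cooked monitor
 * except the last one it finds; the original skb is handed to that
 * last interface below, saving one copy.
 */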
3694 if (prev_dev) {
3695 skb->dev = prev_dev;
3696 netif_receive_skb(skb);
3697 return;
3698 }
3699
3700 out_free_skb:
3701 dev_kfree_skb(skb);
3702 }
3703
3704 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3705 ieee80211_rx_result res)
3706 {
3707 switch (res) {
3708 case RX_DROP_MONITOR:
3709 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3710 if (rx->sta)
3711 rx->sta->rx_stats.dropped++;
3712 fallthrough;
3713 case RX_CONTINUE: {
3714 struct ieee80211_rate *rate = NULL;
3715 struct ieee80211_supported_band *sband;
3716 struct ieee80211_rx_status *status;
3717
3718 status = IEEE80211_SKB_RXCB((rx->skb));
3719
3720 sband = rx->local->hw.wiphy->bands[status->band];
3721 if (status->encoding == RX_ENC_LEGACY)
3722 rate = &sband->bitrates[status->rate_idx];
3723
3724 ieee80211_rx_cooked_monitor(rx, rate);
3725 break;
3726 }
3727 case RX_DROP_UNUSABLE:
3728 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3729 if (rx->sta)
3730 rx->sta->rx_stats.dropped++;
3731 dev_kfree_skb(rx->skb);
3732 break;
3733 case RX_QUEUED:
3734 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
3735 break;
3736 }
3737 }
3738
3739 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3740 struct sk_buff_head *frames)
3741 {
3742 ieee80211_rx_result res = RX_DROP_MONITOR;
3743 struct sk_buff *skb;
3744
3745 #define CALL_RXH(rxh) \
3746 do { \
3747 res = rxh(rx); \
3748 if (res != RX_CONTINUE) \
3749 goto rxh_next; \
3750 } while (0)
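/*
 * For illustration, CALL_RXH(ieee80211_rx_h_decrypt) expands to
 *
 *	res = ieee80211_rx_h_decrypt(rx);
 *	if (res != RX_CONTINUE)
 *		goto rxh_next;
 *
 * so the first handler that returns something other than RX_CONTINUE
 * short-circuits the chain and its result is handled at rxh_next.
 */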
3751
3752 /* Lock here to avoid hitting all of the data used in the RX
3753 * path (e.g. key data, station data, ...) concurrently when
3754 * a frame is released from the reorder buffer due to timeout
3755 * from the timer, potentially concurrently with RX from the
3756 * driver.
3757 */
3758 spin_lock_bh(&rx->local->rx_path_lock);
3759
3760 while ((skb = __skb_dequeue(frames))) {
3761 /*
3762 * All the other fields are valid across frames
3763 * that belong to an A-MPDU since they are on the
3764 * same TID from the same station.
3765 */
3766 rx->skb = skb;
3767
3768 CALL_RXH(ieee80211_rx_h_check_more_data);
3769 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
3770 CALL_RXH(ieee80211_rx_h_sta_process);
3771 CALL_RXH(ieee80211_rx_h_decrypt);
3772 CALL_RXH(ieee80211_rx_h_defragment);
3773 CALL_RXH(ieee80211_rx_h_michael_mic_verify);
3774 /* must be after MMIC verify so header is counted in MPDU mic */
3775 #ifdef CONFIG_MAC80211_MESH
3776 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
3777 CALL_RXH(ieee80211_rx_h_mesh_fwding);
3778 #endif
3779 CALL_RXH(ieee80211_rx_h_amsdu);
3780 CALL_RXH(ieee80211_rx_h_data);
3781
3782 /* special treatment -- needs the queue */
3783 res = ieee80211_rx_h_ctrl(rx, frames);
3784 if (res != RX_CONTINUE)
3785 goto rxh_next;
3786
3787 CALL_RXH(ieee80211_rx_h_mgmt_check);
3788 CALL_RXH(ieee80211_rx_h_action);
3789 CALL_RXH(ieee80211_rx_h_userspace_mgmt);
3790 CALL_RXH(ieee80211_rx_h_action_post_userspace);
3791 CALL_RXH(ieee80211_rx_h_action_return);
3792 CALL_RXH(ieee80211_rx_h_ext);
3793 CALL_RXH(ieee80211_rx_h_mgmt);
3794
3795 rxh_next:
3796 ieee80211_rx_handlers_result(rx, res);
3797
3798 #undef CALL_RXH
3799 }
3800
3801 spin_unlock_bh(&rx->local->rx_path_lock);
3802 }
3803
3804 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
3805 {
3806 struct sk_buff_head reorder_release;
3807 ieee80211_rx_result res = RX_DROP_MONITOR;
3808
3809 __skb_queue_head_init(&reorder_release);
3810
3811 #define CALL_RXH(rxh) \
3812 do { \
3813 res = rxh(rx); \
3814 if (res != RX_CONTINUE) \
3815 goto rxh_next; \
3816 } while (0)
3817
3818 CALL_RXH(ieee80211_rx_h_check_dup);
3819 CALL_RXH(ieee80211_rx_h_check);
3820
3821 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
3822
3823 ieee80211_rx_handlers(rx, &reorder_release);
3824 return;
3825
3826 rxh_next:
3827 ieee80211_rx_handlers_result(rx, res);
3828
3829 #undef CALL_RXH
3830 }
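/*
 * To summarize the split above: ieee80211_rx_h_check_dup() and
 * ieee80211_rx_h_check() run on every MPDU as it comes in, then the
 * A-MPDU reorder code may release zero or more frames, and each
 * released frame runs through the full handler chain in
 * ieee80211_rx_handlers().
 */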
3831
3832 /*
3833 * This function makes calls into the RX path, therefore
3834 * it has to be invoked under RCU read lock.
3835 */
3836 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3837 {
3838 struct sk_buff_head frames;
3839 struct ieee80211_rx_data rx = {
3840 .sta = sta,
3841 .sdata = sta->sdata,
3842 .local = sta->local,
3843 /* This is OK -- must be QoS data frame */
3844 .security_idx = tid,
3845 .seqno_idx = tid,
3846 };
3847 struct tid_ampdu_rx *tid_agg_rx;
3848
3849 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3850 if (!tid_agg_rx)
3851 return;
3852
3853 __skb_queue_head_init(&frames);
3854
3855 spin_lock(&tid_agg_rx->reorder_lock);
3856 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3857 spin_unlock(&tid_agg_rx->reorder_lock);
3858
3859 if (!skb_queue_empty(&frames)) {
3860 struct ieee80211_event event = {
3861 .type = BA_FRAME_TIMEOUT,
3862 .u.ba.tid = tid,
3863 .u.ba.sta = &sta->sta,
3864 };
3865 drv_event_callback(rx.local, rx.sdata, &event);
3866 }
3867
3868 ieee80211_rx_handlers(&rx, &frames);
3869 }
3870
3871 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
3872 u16 ssn, u64 filtered,
3873 u16 received_mpdus)
3874 {
3875 struct sta_info *sta;
3876 struct tid_ampdu_rx *tid_agg_rx;
3877 struct sk_buff_head frames;
3878 struct ieee80211_rx_data rx = {
3879 /* This is OK -- must be QoS data frame */
3880 .security_idx = tid,
3881 .seqno_idx = tid,
3882 };
3883 int i, diff;
3884
3885 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
3886 return;
3887
3888 __skb_queue_head_init(&frames);
3889
3890 sta = container_of(pubsta, struct sta_info, sta);
3891
3892 rx.sta = sta;
3893 rx.sdata = sta->sdata;
3894 rx.local = sta->local;
3895
3896 rcu_read_lock();
3897 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3898 if (!tid_agg_rx)
3899 goto out;
3900
3901 spin_lock_bh(&tid_agg_rx->reorder_lock);
3902
3903 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
3904 int release;
3905
3906 /* release all frames in the reorder buffer */
3907 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
3908 IEEE80211_SN_MODULO;
3909 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
3910 release, &frames);
3911 /* update ssn to match received ssn */
3912 tid_agg_rx->head_seq_num = ssn;
3913 } else {
3914 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
3915 &frames);
3916 }
3917
3918 /* Handle the case where the received SSN is behind the MAC's SSN;
3919 * it can be up to tid_agg_rx->buf_size behind and still be valid. */
3920 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
3921 if (diff >= tid_agg_rx->buf_size) {
3922 tid_agg_rx->reorder_buf_filtered = 0;
3923 goto release;
3924 }
3925 filtered = filtered >> diff;
3926 ssn += diff;
3927
3928 /* update bitmap */
3929 for (i = 0; i < tid_agg_rx->buf_size; i++) {
3930 int index = (ssn + i) % tid_agg_rx->buf_size;
3931
3932 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
3933 if (filtered & BIT_ULL(i))
3934 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
3935 }
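/*
 * Worked example with hypothetical numbers: for buf_size = 64,
 * head_seq_num = 110 and a reported ssn = 100, diff = 10, which is
 * within the buffer, so the filtered bitmap is shifted right by 10 and
 * ssn becomes 110.  Bit i of the shifted bitmap then describes
 * sequence number 110 + i and lands in reorder slot (110 + i) % 64
 * of reorder_buf_filtered.
 */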
3936
3937 /* now process also frames that the filter marking released */
3938 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3939
3940 release:
3941 spin_unlock_bh(&tid_agg_rx->reorder_lock);
3942
3943 ieee80211_rx_handlers(&rx, &frames);
3944
3945 out:
3946 rcu_read_unlock();
3947 }
3948 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
3949
3950 /* main receive path */
3951
3952 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3953 {
3954 struct ieee80211_sub_if_data *sdata = rx->sdata;
3955 struct sk_buff *skb = rx->skb;
3956 struct ieee80211_hdr *hdr = (void *)skb->data;
3957 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3958 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
3959 bool multicast = is_multicast_ether_addr(hdr->addr1) ||
3960 ieee80211_is_s1g_beacon(hdr->frame_control);
3961
3962 switch (sdata->vif.type) {
3963 case NL80211_IFTYPE_STATION:
3964 if (!bssid && !sdata->u.mgd.use_4addr)
3965 return false;
3966 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
3967 return false;
3968 if (multicast)
3969 return true;
3970 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3971 case NL80211_IFTYPE_ADHOC:
3972 if (!bssid)
3973 return false;
3974 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3975 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3976 return false;
3977 if (ieee80211_is_beacon(hdr->frame_control))
3978 return true;
3979 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
3980 return false;
3981 if (!multicast &&
3982 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3983 return false;
3984 if (!rx->sta) {
3985 int rate_idx;
3986 if (status->encoding != RX_ENC_LEGACY)
3987 rate_idx = 0; /* TODO: HT/VHT rates */
3988 else
3989 rate_idx = status->rate_idx;
3990 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
3991 BIT(rate_idx));
3992 }
3993 return true;
3994 case NL80211_IFTYPE_OCB:
3995 if (!bssid)
3996 return false;
3997 if (!ieee80211_is_data_present(hdr->frame_control))
3998 return false;
3999 if (!is_broadcast_ether_addr(bssid))
4000 return false;
4001 if (!multicast &&
4002 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
4003 return false;
4004 if (!rx->sta) {
4005 int rate_idx;
4006 if (status->encoding != RX_ENC_LEGACY)
4007 rate_idx = 0; /* TODO: HT rates */
4008 else
4009 rate_idx = status->rate_idx;
4010 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
4011 BIT(rate_idx));
4012 }
4013 return true;
4014 case NL80211_IFTYPE_MESH_POINT:
4015 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
4016 return false;
4017 if (multicast)
4018 return true;
4019 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4020 case NL80211_IFTYPE_AP_VLAN:
4021 case NL80211_IFTYPE_AP:
4022 if (!bssid)
4023 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4024
4025 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
4026 /*
4027 * Accept public action frames even when the
4028 * BSSID doesn't match, this is used for P2P
4029 * and location updates. Note that mac80211
4030 * itself never looks at these frames.
4031 */
4032 if (!multicast &&
4033 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
4034 return false;
4035 if (ieee80211_is_public_action(hdr, skb->len))
4036 return true;
4037 return ieee80211_is_beacon(hdr->frame_control);
4038 }
4039
4040 if (!ieee80211_has_tods(hdr->frame_control)) {
4041 /* ignore data frames to TDLS-peers */
4042 if (ieee80211_is_data(hdr->frame_control))
4043 return false;
4044 /* ignore action frames to TDLS-peers */
4045 if (ieee80211_is_action(hdr->frame_control) &&
4046 !is_broadcast_ether_addr(bssid) &&
4047 !ether_addr_equal(bssid, hdr->addr1))
4048 return false;
4049 }
4050
4051 /*
4052 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
4053 * the BSSID - we've checked that already but may have accepted
4054 * the wildcard (ff:ff:ff:ff:ff:ff).
4055 *
4056 * It also says:
4057 * The BSSID of the Data frame is determined as follows:
4058 * a) If the STA is contained within an AP or is associated
4059 * with an AP, the BSSID is the address currently in use
4060 * by the STA contained in the AP.
4061 *
4062 * So we should not accept data frames with an address that's
4063 * multicast.
4064 *
4065 * Accepting it also opens a security problem because stations
4066 * could encrypt it with the GTK and inject traffic that way.
4067 */
4068 if (ieee80211_is_data(hdr->frame_control) && multicast)
4069 return false;
4070
4071 return true;
4072 case NL80211_IFTYPE_P2P_DEVICE:
4073 return ieee80211_is_public_action(hdr, skb->len) ||
4074 ieee80211_is_probe_req(hdr->frame_control) ||
4075 ieee80211_is_probe_resp(hdr->frame_control) ||
4076 ieee80211_is_beacon(hdr->frame_control);
4077 case NL80211_IFTYPE_NAN:
4078 /* Currently no frames on NAN interface are allowed */
4079 return false;
4080 default:
4081 break;
4082 }
4083
4084 WARN_ON_ONCE(1);
4085 return false;
4086 }
4087
4088 void ieee80211_check_fast_rx(struct sta_info *sta)
4089 {
4090 struct ieee80211_sub_if_data *sdata = sta->sdata;
4091 struct ieee80211_local *local = sdata->local;
4092 struct ieee80211_key *key;
4093 struct ieee80211_fast_rx fastrx = {
4094 .dev = sdata->dev,
4095 .vif_type = sdata->vif.type,
4096 .control_port_protocol = sdata->control_port_protocol,
4097 }, *old, *new = NULL;
4098 bool set_offload = false;
4099 bool assign = false;
4100 bool offload;
4101
4102 /* use sparse to check that we don't return without updating */
4103 __acquire(check_fast_rx);
4104
4105 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
4106 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
4107 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
4108 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
4109
4110 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
4111
4112 /* fast-rx doesn't do reordering */
4113 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
4114 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
4115 goto clear;
4116
4117 switch (sdata->vif.type) {
4118 case NL80211_IFTYPE_STATION:
4119 if (sta->sta.tdls) {
4120 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4121 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4122 fastrx.expected_ds_bits = 0;
4123 } else {
4124 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4125 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
4126 fastrx.expected_ds_bits =
4127 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4128 }
4129
4130 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
4131 fastrx.expected_ds_bits |=
4132 cpu_to_le16(IEEE80211_FCTL_TODS);
4133 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4134 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4135 }
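/*
 * For example, a plain managed STA (no TDLS, no 4-addr) ends up
 * requiring FromDS only, with DA taken from addr1 and SA from addr3
 * of the 802.11 header; the 4-addr case above additionally requires
 * ToDS and moves DA/SA to addr3/addr4.
 */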
4136
4137 if (!sdata->u.mgd.powersave)
4138 break;
4139
4140 /* software powersave is a huge mess, avoid all of it */
4141 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
4142 goto clear;
4143 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
4144 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
4145 goto clear;
4146 break;
4147 case NL80211_IFTYPE_AP_VLAN:
4148 case NL80211_IFTYPE_AP:
4149 /* parallel-rx requires this, at least with calls to
4150 * ieee80211_sta_ps_transition()
4151 */
4152 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
4153 goto clear;
4154 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4155 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4156 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
4157
4158 fastrx.internal_forward =
4159 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
4160 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
4161 !sdata->u.vlan.sta);
4162
4163 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4164 sdata->u.vlan.sta) {
4165 fastrx.expected_ds_bits |=
4166 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4167 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4168 fastrx.internal_forward = 0;
4169 }
4170
4171 break;
4172 default:
4173 goto clear;
4174 }
4175
4176 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
4177 goto clear;
4178
4179 rcu_read_lock();
4180 key = rcu_dereference(sta->ptk[sta->ptk_idx]);
4181 if (!key)
4182 key = rcu_dereference(sdata->default_unicast_key);
4183 if (key) {
4184 switch (key->conf.cipher) {
4185 case WLAN_CIPHER_SUITE_TKIP:
4186 /* we don't want to deal with MMIC in fast-rx */
4187 goto clear_rcu;
4188 case WLAN_CIPHER_SUITE_CCMP:
4189 case WLAN_CIPHER_SUITE_CCMP_256:
4190 case WLAN_CIPHER_SUITE_GCMP:
4191 case WLAN_CIPHER_SUITE_GCMP_256:
4192 break;
4193 default:
4194 /* We also don't want to deal with
4195 * WEP or cipher scheme.
4196 */
4197 goto clear_rcu;
4198 }
4199
4200 fastrx.key = true;
4201 fastrx.icv_len = key->conf.icv_len;
4202 }
4203
4204 assign = true;
4205 clear_rcu:
4206 rcu_read_unlock();
4207 clear:
4208 __release(check_fast_rx);
4209
4210 if (assign)
4211 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
4212
4213 offload = assign &&
4214 (sdata->vif.offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED);
4215
4216 if (offload)
4217 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4218 else
4219 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4220
4221 if (set_offload)
4222 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign);
4223
4224 spin_lock_bh(&sta->lock);
4225 old = rcu_dereference_protected(sta->fast_rx, true);
4226 rcu_assign_pointer(sta->fast_rx, new);
4227 spin_unlock_bh(&sta->lock);
4228
4229 if (old)
4230 kfree_rcu(old, rcu_head);
4231 }
4232
4233 void ieee80211_clear_fast_rx(struct sta_info *sta)
4234 {
4235 struct ieee80211_fast_rx *old;
4236
4237 spin_lock_bh(&sta->lock);
4238 old = rcu_dereference_protected(sta->fast_rx, true);
4239 RCU_INIT_POINTER(sta->fast_rx, NULL);
4240 spin_unlock_bh(&sta->lock);
4241
4242 if (old)
4243 kfree_rcu(old, rcu_head);
4244 }
4245
4246 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4247 {
4248 struct ieee80211_local *local = sdata->local;
4249 struct sta_info *sta;
4250
4251 lockdep_assert_held(&local->sta_mtx);
4252
4253 list_for_each_entry(sta, &local->sta_list, list) {
4254 if (sdata != sta->sdata &&
4255 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
4256 continue;
4257 ieee80211_check_fast_rx(sta);
4258 }
4259 }
4260
4261 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4262 {
4263 struct ieee80211_local *local = sdata->local;
4264
4265 mutex_lock(&local->sta_mtx);
4266 __ieee80211_check_fast_rx_iface(sdata);
4267 mutex_unlock(&local->sta_mtx);
4268 }
4269
4270 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
4271 struct ieee80211_fast_rx *fast_rx,
4272 int orig_len)
4273 {
4274 struct ieee80211_sta_rx_stats *stats;
4275 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
4276 struct sta_info *sta = rx->sta;
4277 struct sk_buff *skb = rx->skb;
4278 void *sa = skb->data + ETH_ALEN;
4279 void *da = skb->data;
4280
4281 stats = &sta->rx_stats;
4282 if (fast_rx->uses_rss)
4283 stats = this_cpu_ptr(sta->pcpu_rx_stats);
4284
4285 /* statistics part of ieee80211_rx_h_sta_process() */
4286 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4287 stats->last_signal = status->signal;
4288 if (!fast_rx->uses_rss)
4289 ewma_signal_add(&sta->rx_stats_avg.signal,
4290 -status->signal);
4291 }
4292
4293 if (status->chains) {
4294 int i;
4295
4296 stats->chains = status->chains;
4297 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4298 int signal = status->chain_signal[i];
4299
4300 if (!(status->chains & BIT(i)))
4301 continue;
4302
4303 stats->chain_signal_last[i] = signal;
4304 if (!fast_rx->uses_rss)
4305 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
4306 -signal);
4307 }
4308 }
4309 /* end of statistics */
4310
4311 stats->last_rx = jiffies;
4312 stats->last_rate = sta_stats_encode_rate(status);
4313
4314 stats->fragments++;
4315 stats->packets++;
4316
4317 skb->dev = fast_rx->dev;
4318
4319 dev_sw_netstats_rx_add(fast_rx->dev, skb->len);
4320
4321 /* The seqno index has the same property as needed
4322 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4323 * for non-QoS-data frames. Here we know it's a data
4324 * frame, so count MSDUs.
4325 */
4326 u64_stats_update_begin(&stats->syncp);
4327 stats->msdu[rx->seqno_idx]++;
4328 stats->bytes += orig_len;
4329 u64_stats_update_end(&stats->syncp);
4330
4331 if (fast_rx->internal_forward) {
4332 struct sk_buff *xmit_skb = NULL;
4333 if (is_multicast_ether_addr(da)) {
4334 xmit_skb = skb_copy(skb, GFP_ATOMIC);
4335 } else if (!ether_addr_equal(da, sa) &&
4336 sta_info_get(rx->sdata, da)) {
4337 xmit_skb = skb;
4338 skb = NULL;
4339 }
4340
4341 if (xmit_skb) {
4342 /*
4343 * Send to wireless media and increase priority by 256
4344 * to keep the received priority instead of
4345 * reclassifying the frame (see cfg80211_classify8021d).
4346 */
4347 xmit_skb->priority += 256;
4348 xmit_skb->protocol = htons(ETH_P_802_3);
4349 skb_reset_network_header(xmit_skb);
4350 skb_reset_mac_header(xmit_skb);
4351 dev_queue_xmit(xmit_skb);
4352 }
4353
4354 if (!skb)
4355 return;
4356 }
4357
4358 /* deliver to local stack */
4359 skb->protocol = eth_type_trans(skb, fast_rx->dev);
4360 memset(skb->cb, 0, sizeof(skb->cb));
4361 if (rx->list)
4362 list_add_tail(&skb->list, rx->list);
4363 else
4364 netif_receive_skb(skb);
4365
4366 }
4367
4368 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4369 struct ieee80211_fast_rx *fast_rx)
4370 {
4371 struct sk_buff *skb = rx->skb;
4372 struct ieee80211_hdr *hdr = (void *)skb->data;
4373 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4374 struct sta_info *sta = rx->sta;
4375 int orig_len = skb->len;
4376 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
4377 int snap_offs = hdrlen;
4378 struct {
4379 u8 snap[sizeof(rfc1042_header)];
4380 __be16 proto;
4381 } *payload __aligned(2);
4382 struct {
4383 u8 da[ETH_ALEN];
4384 u8 sa[ETH_ALEN];
4385 } addrs __aligned(2);
4386 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
4387
4388 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4389 * to a common data structure; drivers can implement that per queue
4390 * but we don't have that information in mac80211
4391 */
4392 if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4393 return false;
4394
4395 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4396
4397 /* If using encryption, we also need to have:
4398 * - PN_VALIDATED: similar, but the implementation is tricky
4399 * - DECRYPTED: necessary for PN_VALIDATED
4400 */
4401 if (fast_rx->key &&
4402 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4403 return false;
4404
4405 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4406 return false;
4407
4408 if (unlikely(ieee80211_is_frag(hdr)))
4409 return false;
4410
4411 /* Since our interface address cannot be multicast, this
4412 * implicitly also rejects multicast frames without the
4413 * explicit check.
4414 *
4415 * We shouldn't get any *data* frames not addressed to us
4416 * (AP mode will accept multicast *management* frames), but
4417 * punting here will make it go through the full checks in
4418 * ieee80211_accept_frame().
4419 */
4420 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4421 return false;
4422
4423 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4424 IEEE80211_FCTL_TODS)) !=
4425 fast_rx->expected_ds_bits)
4426 return false;
4427
4428 /* assign the key to drop unencrypted frames (later)
4429 * and strip the IV/MIC if necessary
4430 */
4431 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4432 /* GCMP header length is the same */
4433 snap_offs += IEEE80211_CCMP_HDR_LEN;
4434 }
4435
4436 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
4437 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4438 goto drop;
4439
4440 payload = (void *)(skb->data + snap_offs);
4441
4442 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4443 return false;
4444
4445 /* Don't handle these here since they require special code.
4446 * Accept AARP and IPX even though they should come with a
4447 * bridge-tunnel header - but if we get them this way then
4448 * there's little point in discarding them.
4449 */
4450 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4451 payload->proto == fast_rx->control_port_protocol))
4452 return false;
4453 }
4454
4455 /* after this point, don't punt to the slowpath! */
4456
4457 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4458 pskb_trim(skb, skb->len - fast_rx->icv_len))
4459 goto drop;
4460
4461 if (rx->key && !ieee80211_has_protected(hdr->frame_control))
4462 goto drop;
4463
4464 if (status->rx_flags & IEEE80211_RX_AMSDU) {
4465 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
4466 RX_QUEUED)
4467 goto drop;
4468
4469 return true;
4470 }
4471
4472 /* do the header conversion - first grab the addresses */
4473 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4474 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4475 /* remove the SNAP but leave the ethertype */
4476 skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4477 /* push the addresses in front */
4478 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
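/*
 * At this point the received MPDU
 *
 *	[802.11 hdr][CCMP/GCMP hdr, if any][RFC 1042 SNAP][ethertype][payload]
 *
 * has been rewritten in place into an Ethernet-style frame
 *
 *	[DA][SA][ethertype][payload]
 *
 * which is the layout ieee80211_rx_8023() below expects.
 */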
4479
4480 ieee80211_rx_8023(rx, fast_rx, orig_len);
4481
4482 return true;
4483 drop:
4484 dev_kfree_skb(skb);
4485 if (fast_rx->uses_rss)
4486 stats = this_cpu_ptr(sta->pcpu_rx_stats);
4487
4488 stats->dropped++;
4489 return true;
4490 }
4491
4492 /*
4493 * This function returns whether or not the SKB
4494 * was destined for RX processing, which,
4495 * if consume is true, is equivalent to whether
4496 * or not the skb was consumed.
4497 */
4498 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4499 struct sk_buff *skb, bool consume)
4500 {
4501 struct ieee80211_local *local = rx->local;
4502 struct ieee80211_sub_if_data *sdata = rx->sdata;
4503
4504 rx->skb = skb;
4505
4506 /* See if we can do fast-rx; if we have to copy we already lost,
4507 * so punt in that case. We should never have to deliver a data
4508 * frame to multiple interfaces anyway.
4509 *
4510 * We skip the ieee80211_accept_frame() call and do the necessary
4511 * checking inside ieee80211_invoke_fast_rx().
4512 */
4513 if (consume && rx->sta) {
4514 struct ieee80211_fast_rx *fast_rx;
4515
4516 fast_rx = rcu_dereference(rx->sta->fast_rx);
4517 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4518 return true;
4519 }
4520
4521 if (!ieee80211_accept_frame(rx))
4522 return false;
4523
4524 if (!consume) {
4525 skb = skb_copy(skb, GFP_ATOMIC);
4526 if (!skb) {
4527 if (net_ratelimit())
4528 wiphy_debug(local->hw.wiphy,
4529 "failed to copy skb for %s\n",
4530 sdata->name);
4531 return true;
4532 }
4533
4534 rx->skb = skb;
4535 }
4536
4537 ieee80211_invoke_rx_handlers(rx);
4538 return true;
4539 }
4540
4541 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
4542 struct ieee80211_sta *pubsta,
4543 struct sk_buff *skb,
4544 struct list_head *list)
4545 {
4546 struct ieee80211_local *local = hw_to_local(hw);
4547 struct ieee80211_fast_rx *fast_rx;
4548 struct ieee80211_rx_data rx;
4549
4550 memset(&rx, 0, sizeof(rx));
4551 rx.skb = skb;
4552 rx.local = local;
4553 rx.list = list;
4554
4555 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4556
4557 /* drop frame if too short for header */
4558 if (skb->len < sizeof(struct ethhdr))
4559 goto drop;
4560
4561 if (!pubsta)
4562 goto drop;
4563
4564 rx.sta = container_of(pubsta, struct sta_info, sta);
4565 rx.sdata = rx.sta->sdata;
4566
4567 fast_rx = rcu_dereference(rx.sta->fast_rx);
4568 if (!fast_rx)
4569 goto drop;
4570
4571 ieee80211_rx_8023(&rx, fast_rx, skb->len);
4572 return;
4573
4574 drop:
4575 dev_kfree_skb(skb);
4576 }
4577
4578 /*
4579 * This is the actual Rx frame handler. As it belongs to the Rx path, it
4580 * must be called with rcu_read_lock protection.
4581 */
4582 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4583 struct ieee80211_sta *pubsta,
4584 struct sk_buff *skb,
4585 struct list_head *list)
4586 {
4587 struct ieee80211_local *local = hw_to_local(hw);
4588 struct ieee80211_sub_if_data *sdata;
4589 struct ieee80211_hdr *hdr;
4590 __le16 fc;
4591 struct ieee80211_rx_data rx;
4592 struct ieee80211_sub_if_data *prev;
4593 struct rhlist_head *tmp;
4594 int err = 0;
4595
4596 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4597 memset(&rx, 0, sizeof(rx));
4598 rx.skb = skb;
4599 rx.local = local;
4600 rx.list = list;
4601
4602 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4603 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4604
4605 if (ieee80211_is_mgmt(fc)) {
4606 /* drop frame if too short for header */
4607 if (skb->len < ieee80211_hdrlen(fc))
4608 err = -ENOBUFS;
4609 else
4610 err = skb_linearize(skb);
4611 } else {
4612 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
4613 }
4614
4615 if (err) {
4616 dev_kfree_skb(skb);
4617 return;
4618 }
4619
4620 hdr = (struct ieee80211_hdr *)skb->data;
4621 ieee80211_parse_qos(&rx);
4622 ieee80211_verify_alignment(&rx);
4623
4624 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
4625 ieee80211_is_beacon(hdr->frame_control) ||
4626 ieee80211_is_s1g_beacon(hdr->frame_control)))
4627 ieee80211_scan_rx(local, skb);
4628
4629 if (ieee80211_is_data(fc)) {
4630 struct sta_info *sta, *prev_sta;
4631
4632 if (pubsta) {
4633 rx.sta = container_of(pubsta, struct sta_info, sta);
4634 rx.sdata = rx.sta->sdata;
4635 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4636 return;
4637 goto out;
4638 }
4639
4640 prev_sta = NULL;
4641
4642 for_each_sta_info(local, hdr->addr2, sta, tmp) {
4643 if (!prev_sta) {
4644 prev_sta = sta;
4645 continue;
4646 }
4647
4648 rx.sta = prev_sta;
4649 rx.sdata = prev_sta->sdata;
4650 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4651
4652 prev_sta = sta;
4653 }
4654
4655 if (prev_sta) {
4656 rx.sta = prev_sta;
4657 rx.sdata = prev_sta->sdata;
4658
4659 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4660 return;
4661 goto out;
4662 }
4663 }
4664
4665 prev = NULL;
4666
4667 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4668 if (!ieee80211_sdata_running(sdata))
4669 continue;
4670
4671 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
4672 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4673 continue;
4674
4675 /*
4676 * frame is destined for this interface, but if it's
4677 * not also for the previous one we handle that after
4678 * the loop to avoid copying the SKB more often than necessary
4679 */
4680
4681 if (!prev) {
4682 prev = sdata;
4683 continue;
4684 }
4685
4686 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4687 rx.sdata = prev;
4688 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4689
4690 prev = sdata;
4691 }
4692
4693 if (prev) {
4694 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4695 rx.sdata = prev;
4696
4697 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4698 return;
4699 }
4700
4701 out:
4702 dev_kfree_skb(skb);
4703 }
4704
4705 /*
4706 * This is the receive path handler. It is called by a low level driver when an
4707 * 802.11 MPDU is received from the hardware.
4708 */
4709 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4710 struct sk_buff *skb, struct list_head *list)
4711 {
4712 struct ieee80211_local *local = hw_to_local(hw);
4713 struct ieee80211_rate *rate = NULL;
4714 struct ieee80211_supported_band *sband;
4715 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4716
4717 WARN_ON_ONCE(softirq_count() == 0);
4718
4719 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
4720 goto drop;
4721
4722 sband = local->hw.wiphy->bands[status->band];
4723 if (WARN_ON(!sband))
4724 goto drop;
4725
4726 /*
4727 * If we're suspending, it is possible although not too likely
4728 * that we'd be receiving frames after having already partially
4729 * quiesced the stack. We can't process such frames then since
4730 * that might, for example, cause stations to be added or other
4731 * driver callbacks to be invoked.
4732 */
4733 if (unlikely(local->quiescing || local->suspended))
4734 goto drop;
4735
4736 /* We might be during a HW reconfig, prevent Rx for the same reason */
4737 if (unlikely(local->in_reconfig))
4738 goto drop;
4739
4740 /*
4741 * The same happens when we're not even started,
4742 * but that's worth a warning.
4743 */
4744 if (WARN_ON(!local->started))
4745 goto drop;
4746
4747 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
4748 /*
4749 * Validate the rate, unless a PLCP error means that
4750 * we probably can't have a valid rate here anyway.
4751 */
4752
4753 switch (status->encoding) {
4754 case RX_ENC_HT:
4755 /*
4756 * rate_idx is MCS index, which can be [0-76]
4757 * as documented on:
4758 *
4759 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
4760 *
4761 * Anything else would be some sort of driver or
4762 * hardware error. The driver should catch hardware
4763 * errors.
4764 */
4765 if (WARN(status->rate_idx > 76,
4766 "Rate marked as an HT rate but passed "
4767 "status->rate_idx is not "
4768 "an MCS index [0-76]: %d (0x%02x)\n",
4769 status->rate_idx,
4770 status->rate_idx))
4771 goto drop;
4772 break;
4773 case RX_ENC_VHT:
4774 if (WARN_ONCE(status->rate_idx > 9 ||
4775 !status->nss ||
4776 status->nss > 8,
4777 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
4778 status->rate_idx, status->nss))
4779 goto drop;
4780 break;
4781 case RX_ENC_HE:
4782 if (WARN_ONCE(status->rate_idx > 11 ||
4783 !status->nss ||
4784 status->nss > 8,
4785 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
4786 status->rate_idx, status->nss))
4787 goto drop;
4788 break;
4789 default:
4790 WARN_ON_ONCE(1);
4791 fallthrough;
4792 case RX_ENC_LEGACY:
4793 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
4794 goto drop;
4795 rate = &sband->bitrates[status->rate_idx];
4796 }
4797 }
4798
4799 status->rx_flags = 0;
4800
4801 kcov_remote_start_common(skb_get_kcov_handle(skb));
4802
4803 /*
4804 * Frames with failed FCS/PLCP checksum are not returned;
4805 * all other frames are returned without a radiotap header
4806 * if one was previously present.
4807 * Also, frames shorter than 16 bytes are dropped.
4808 */
4809 if (!(status->flag & RX_FLAG_8023))
4810 skb = ieee80211_rx_monitor(local, skb, rate);
4811 if (skb) {
4812 ieee80211_tpt_led_trig_rx(local,
4813 ((struct ieee80211_hdr *)skb->data)->frame_control,
4814 skb->len);
4815
4816 if (status->flag & RX_FLAG_8023)
4817 __ieee80211_rx_handle_8023(hw, pubsta, skb, list);
4818 else
4819 __ieee80211_rx_handle_packet(hw, pubsta, skb, list);
4820 }
4821
4822 kcov_remote_stop();
4823 return;
4824 drop:
4825 kfree_skb(skb);
4826 }
4827 EXPORT_SYMBOL(ieee80211_rx_list);
4828
4829 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4830 struct sk_buff *skb, struct napi_struct *napi)
4831 {
4832 struct sk_buff *tmp;
4833 LIST_HEAD(list);
4834
4835
4836 /*
4837 * key references and virtual interfaces are protected using RCU
4838 * and this requires that we are in a read-side RCU section during
4839 * receive processing
4840 */
4841 rcu_read_lock();
4842 ieee80211_rx_list(hw, pubsta, skb, &list);
4843 rcu_read_unlock();
4844
4845 if (!napi) {
4846 netif_receive_skb_list(&list);
4847 return;
4848 }
4849
4850 list_for_each_entry_safe(skb, tmp, &list, list) {
4851 skb_list_del_init(skb);
4852 napi_gro_receive(napi, skb);
4853 }
4854 }
4855 EXPORT_SYMBOL(ieee80211_rx_napi);
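/*
 * Illustrative driver-side sketch (not part of mac80211, variable names
 * are hypothetical): a driver completing reception in its NAPI poll
 * routine would typically fill in the RX status in the skb's control
 * buffer and then hand the frame over, e.g.
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = NL80211_BAND_2GHZ;
 *	status->rate_idx = 0;
 *	ieee80211_rx_napi(hw, NULL, skb, napi);
 *
 * The band/rate values above are placeholders only.  Passing a NULL
 * station is fine; __ieee80211_rx_handle_packet() then looks the
 * station up by the transmitter address (addr2) itself.
 */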
4856
4857 /* This is a version of the rx handler that can be called from hard irq
4858 * context. Post the skb on the queue and schedule the tasklet */
4859 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
4860 {
4861 struct ieee80211_local *local = hw_to_local(hw);
4862
4863 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
4864
4865 skb->pkt_type = IEEE80211_RX_MSG;
4866 skb_queue_tail(&local->skb_queue, skb);
4867 tasklet_schedule(&local->tasklet);
4868 }
4869 EXPORT_SYMBOL(ieee80211_rx_irqsafe);