net/mac80211/rx.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2005-2006, Devicescape Software, Inc.
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9 * Copyright (C) 2018-2020 Intel Corporation
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <linux/kcov.h>
21 #include <linux/bitops.h>
22 #include <net/mac80211.h>
23 #include <net/ieee80211_radiotap.h>
24 #include <asm/unaligned.h>
25
26 #include "ieee80211_i.h"
27 #include "driver-ops.h"
28 #include "led.h"
29 #include "mesh.h"
30 #include "wep.h"
31 #include "wpa.h"
32 #include "tkip.h"
33 #include "wme.h"
34 #include "rate.h"
35
36 /*
37 * monitor mode reception
38 *
39 * This function cleans up the SKB, i.e. it removes all the stuff
40 * only useful for monitoring.
41 */
42 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
43 unsigned int present_fcs_len,
44 unsigned int rtap_space)
45 {
46 struct ieee80211_hdr *hdr;
47 unsigned int hdrlen;
48 __le16 fc;
49
50 if (present_fcs_len)
51 __pskb_trim(skb, skb->len - present_fcs_len);
52 __pskb_pull(skb, rtap_space);
53
54 hdr = (void *)skb->data;
55 fc = hdr->frame_control;
56
57 /*
58 * Remove the HT-Control field (if present) on management
59 * frames after we've sent the frame to monitoring. We
60 * (currently) don't need it, and don't properly parse
61 * frames with it present, due to the assumption of a
62 * fixed management header length.
63 */
64 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
65 return skb;
66
67 hdrlen = ieee80211_hdrlen(fc);
68 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
69
70 if (!pskb_may_pull(skb, hdrlen)) {
71 dev_kfree_skb(skb);
72 return NULL;
73 }
74
75 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
76 hdrlen - IEEE80211_HT_CTL_LEN);
77 __pskb_pull(skb, IEEE80211_HT_CTL_LEN);
78
79 return skb;
80 }
81
82 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
83 unsigned int rtap_space)
84 {
85 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
86 struct ieee80211_hdr *hdr;
87
88 hdr = (void *)(skb->data + rtap_space);
89
90 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
91 RX_FLAG_FAILED_PLCP_CRC |
92 RX_FLAG_ONLY_MONITOR |
93 RX_FLAG_NO_PSDU))
94 return true;
95
96 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
97 return true;
98
99 if (ieee80211_is_ctl(hdr->frame_control) &&
100 !ieee80211_is_pspoll(hdr->frame_control) &&
101 !ieee80211_is_back_req(hdr->frame_control))
102 return true;
103
104 return false;
105 }
106
107 static int
108 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
109 struct ieee80211_rx_status *status,
110 struct sk_buff *skb)
111 {
112 int len;
113
114 /* always present fields */
115 len = sizeof(struct ieee80211_radiotap_header) + 8;
116
117 /* allocate extra bitmaps */
118 if (status->chains)
119 len += 4 * hweight8(status->chains);
120 /* vendor presence bitmap */
121 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
122 len += 4;
123
124 if (ieee80211_have_rx_timestamp(status)) {
125 len = ALIGN(len, 8);
126 len += 8;
127 }
128 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
129 len += 1;
130
131 /* antenna field, if we don't have per-chain info */
132 if (!status->chains)
133 len += 1;
134
135 /* padding for RX_FLAGS if necessary */
136 len = ALIGN(len, 2);
137
138 if (status->encoding == RX_ENC_HT) /* HT info */
139 len += 3;
140
141 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
142 len = ALIGN(len, 4);
143 len += 8;
144 }
145
146 if (status->encoding == RX_ENC_VHT) {
147 len = ALIGN(len, 2);
148 len += 12;
149 }
150
151 if (local->hw.radiotap_timestamp.units_pos >= 0) {
152 len = ALIGN(len, 8);
153 len += 12;
154 }
155
156 if (status->encoding == RX_ENC_HE &&
157 status->flag & RX_FLAG_RADIOTAP_HE) {
158 len = ALIGN(len, 2);
159 len += 12;
160 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
161 }
162
163 if (status->encoding == RX_ENC_HE &&
164 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
165 len = ALIGN(len, 2);
166 len += 12;
167 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
168 }
169
170 if (status->flag & RX_FLAG_NO_PSDU)
171 len += 1;
172
173 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
174 len = ALIGN(len, 2);
175 len += 4;
176 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
177 }
178
179 if (status->chains) {
180 /* antenna and antenna signal fields */
181 len += 2 * hweight8(status->chains);
182 }
183
184 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
185 struct ieee80211_vendor_radiotap *rtap;
186 int vendor_data_offset = 0;
187
188 /*
189 * The position to look at depends on the existence (or non-
190 * existence) of other elements, so take that into account...
191 */
192 if (status->flag & RX_FLAG_RADIOTAP_HE)
193 vendor_data_offset +=
194 sizeof(struct ieee80211_radiotap_he);
195 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
196 vendor_data_offset +=
197 sizeof(struct ieee80211_radiotap_he_mu);
198 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
199 vendor_data_offset +=
200 sizeof(struct ieee80211_radiotap_lsig);
201
202 rtap = (void *)&skb->data[vendor_data_offset];
203
204 /* alignment for fixed 6-byte vendor data header */
205 len = ALIGN(len, 2);
206 /* vendor data header */
207 len += 6;
208 if (WARN_ON(rtap->align == 0))
209 rtap->align = 1;
210 len = ALIGN(len, rtap->align);
211 len += rtap->len + rtap->pad;
212 }
213
214 return len;
215 }
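/*
 * Illustrative sketch (not part of the real code flow): the accounting in
 * ieee80211_rx_radiotap_hdrlen() follows the radiotap rule that every field
 * is aligned to its natural alignment, measured from the start of the
 * radiotap header.  The hypothetical helper below shows that rule for a
 * made-up field set (TSFT + flags + rate + channel + RX_FLAGS); it is a
 * sketch for illustration only and is never compiled.
 */
#if 0
static unsigned int example_radiotap_len(void)
{
	unsigned int len = sizeof(struct ieee80211_radiotap_header); /* 8 */

	len = ALIGN(len, 8);	/* TSFT: u64, 8-byte aligned */
	len += 8;
	len += 1;		/* flags: u8 */
	len += 1;		/* rate: u8 */
	len = ALIGN(len, 2);	/* channel: u16 freq + u16 flags */
	len += 4;
	len = ALIGN(len, 2);	/* RX_FLAGS: u16 */
	len += 2;

	return len;		/* 24 bytes for this field set */
}
#endif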
216
217 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
218 struct sk_buff *skb,
219 int rtap_space)
220 {
221 struct {
222 struct ieee80211_hdr_3addr hdr;
223 u8 category;
224 u8 action_code;
225 } __packed __aligned(2) action;
226
227 if (!sdata)
228 return;
229
230 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
231
232 if (skb->len < rtap_space + sizeof(action) +
233 VHT_MUMIMO_GROUPS_DATA_LEN)
234 return;
235
236 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
237 return;
238
239 skb_copy_bits(skb, rtap_space, &action, sizeof(action));
240
241 if (!ieee80211_is_action(action.hdr.frame_control))
242 return;
243
244 if (action.category != WLAN_CATEGORY_VHT)
245 return;
246
247 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
248 return;
249
250 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
251 return;
252
253 skb = skb_copy(skb, GFP_ATOMIC);
254 if (!skb)
255 return;
256
257 skb_queue_tail(&sdata->skb_queue, skb);
258 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
259 }
260
261 /*
262 * ieee80211_add_rx_radiotap_header - add radiotap header
263 *
264 * add a radiotap header containing all the fields which the hardware provided.
265 */
266 static void
267 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
268 struct sk_buff *skb,
269 struct ieee80211_rate *rate,
270 int rtap_len, bool has_fcs)
271 {
272 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
273 struct ieee80211_radiotap_header *rthdr;
274 unsigned char *pos;
275 __le32 *it_present;
276 u32 it_present_val;
277 u16 rx_flags = 0;
278 u16 channel_flags = 0;
279 int mpdulen, chain;
280 unsigned long chains = status->chains;
281 struct ieee80211_vendor_radiotap rtap = {};
282 struct ieee80211_radiotap_he he = {};
283 struct ieee80211_radiotap_he_mu he_mu = {};
284 struct ieee80211_radiotap_lsig lsig = {};
285
286 if (status->flag & RX_FLAG_RADIOTAP_HE) {
287 he = *(struct ieee80211_radiotap_he *)skb->data;
288 skb_pull(skb, sizeof(he));
289 WARN_ON_ONCE(status->encoding != RX_ENC_HE);
290 }
291
292 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
293 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
294 skb_pull(skb, sizeof(he_mu));
295 }
296
297 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
298 lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
299 skb_pull(skb, sizeof(lsig));
300 }
301
302 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
303 rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
304 /* rtap.len and rtap.pad are undone immediately */
305 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
306 }
307
308 mpdulen = skb->len;
309 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
310 mpdulen += FCS_LEN;
311
312 rthdr = skb_push(skb, rtap_len);
313 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
314 it_present = &rthdr->it_present;
315
316 /* radiotap header, set always present flags */
317 rthdr->it_len = cpu_to_le16(rtap_len);
318 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
319 BIT(IEEE80211_RADIOTAP_CHANNEL) |
320 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
321
322 if (!status->chains)
323 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
324
325 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
326 it_present_val |=
327 BIT(IEEE80211_RADIOTAP_EXT) |
328 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
329 put_unaligned_le32(it_present_val, it_present);
330 it_present++;
331 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
332 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
333 }
334
335 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
336 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
337 BIT(IEEE80211_RADIOTAP_EXT);
338 put_unaligned_le32(it_present_val, it_present);
339 it_present++;
340 it_present_val = rtap.present;
341 }
342
343 put_unaligned_le32(it_present_val, it_present);
344
345 pos = (void *)(it_present + 1);
346
347 /* the order of the following fields is important */
348
349 /* IEEE80211_RADIOTAP_TSFT */
350 if (ieee80211_have_rx_timestamp(status)) {
351 /* padding */
352 while ((pos - (u8 *)rthdr) & 7)
353 *pos++ = 0;
354 put_unaligned_le64(
355 ieee80211_calculate_rx_timestamp(local, status,
356 mpdulen, 0),
357 pos);
358 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
359 pos += 8;
360 }
361
362 /* IEEE80211_RADIOTAP_FLAGS */
363 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
364 *pos |= IEEE80211_RADIOTAP_F_FCS;
365 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
366 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
367 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
368 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
369 pos++;
370
371 /* IEEE80211_RADIOTAP_RATE */
372 if (!rate || status->encoding != RX_ENC_LEGACY) {
373 /*
374 * Without rate information don't add it. If we have,
375 * MCS information is a separate field in radiotap,
376 * added below. The byte here is needed as padding
377 * for the channel though, so initialise it to 0.
378 */
379 *pos = 0;
380 } else {
381 int shift = 0;
382 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
383 if (status->bw == RATE_INFO_BW_10)
384 shift = 1;
385 else if (status->bw == RATE_INFO_BW_5)
386 shift = 2;
387 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
388 }
389 pos++;
390
391 /* IEEE80211_RADIOTAP_CHANNEL */
392 /* TODO: frequency offset in KHz */
393 put_unaligned_le16(status->freq, pos);
394 pos += 2;
395 if (status->bw == RATE_INFO_BW_10)
396 channel_flags |= IEEE80211_CHAN_HALF;
397 else if (status->bw == RATE_INFO_BW_5)
398 channel_flags |= IEEE80211_CHAN_QUARTER;
399
400 if (status->band == NL80211_BAND_5GHZ ||
401 status->band == NL80211_BAND_6GHZ)
402 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
403 else if (status->encoding != RX_ENC_LEGACY)
404 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
405 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
406 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
407 else if (rate)
408 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
409 else
410 channel_flags |= IEEE80211_CHAN_2GHZ;
411 put_unaligned_le16(channel_flags, pos);
412 pos += 2;
413
414 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
415 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
416 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
417 *pos = status->signal;
418 rthdr->it_present |=
419 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
420 pos++;
421 }
422
423 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
424
425 if (!status->chains) {
426 /* IEEE80211_RADIOTAP_ANTENNA */
427 *pos = status->antenna;
428 pos++;
429 }
430
431 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
432
433 /* IEEE80211_RADIOTAP_RX_FLAGS */
434 /* ensure 2 byte alignment for the 2 byte field as required */
435 if ((pos - (u8 *)rthdr) & 1)
436 *pos++ = 0;
437 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
438 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
439 put_unaligned_le16(rx_flags, pos);
440 pos += 2;
441
442 if (status->encoding == RX_ENC_HT) {
443 unsigned int stbc;
444
445 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
446 *pos++ = local->hw.radiotap_mcs_details;
447 *pos = 0;
448 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
449 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
450 if (status->bw == RATE_INFO_BW_40)
451 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
452 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
453 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
454 if (status->enc_flags & RX_ENC_FLAG_LDPC)
455 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
456 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
457 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
458 pos++;
459 *pos++ = status->rate_idx;
460 }
461
462 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
463 u16 flags = 0;
464
465 /* ensure 4 byte alignment */
466 while ((pos - (u8 *)rthdr) & 3)
467 pos++;
468 rthdr->it_present |=
469 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
470 put_unaligned_le32(status->ampdu_reference, pos);
471 pos += 4;
472 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
473 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
474 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
475 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
476 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
477 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
478 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
479 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
480 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
481 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
482 if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
483 flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
484 put_unaligned_le16(flags, pos);
485 pos += 2;
486 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
487 *pos++ = status->ampdu_delimiter_crc;
488 else
489 *pos++ = 0;
490 *pos++ = 0;
491 }
492
493 if (status->encoding == RX_ENC_VHT) {
494 u16 known = local->hw.radiotap_vht_details;
495
496 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
497 put_unaligned_le16(known, pos);
498 pos += 2;
499 /* flags */
500 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
501 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
502 /* in VHT, STBC is binary */
503 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
504 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
505 if (status->enc_flags & RX_ENC_FLAG_BF)
506 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
507 pos++;
508 /* bandwidth */
509 switch (status->bw) {
510 case RATE_INFO_BW_80:
511 *pos++ = 4;
512 break;
513 case RATE_INFO_BW_160:
514 *pos++ = 11;
515 break;
516 case RATE_INFO_BW_40:
517 *pos++ = 1;
518 break;
519 default:
520 *pos++ = 0;
521 }
522 /* MCS/NSS */
523 *pos = (status->rate_idx << 4) | status->nss;
524 pos += 4;
525 /* coding field */
526 if (status->enc_flags & RX_ENC_FLAG_LDPC)
527 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
528 pos++;
529 /* group ID */
530 pos++;
531 /* partial_aid */
532 pos += 2;
533 }
534
535 if (local->hw.radiotap_timestamp.units_pos >= 0) {
536 u16 accuracy = 0;
537 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
538
539 rthdr->it_present |=
540 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
541
542 /* ensure 8 byte alignment */
543 while ((pos - (u8 *)rthdr) & 7)
544 pos++;
545
546 put_unaligned_le64(status->device_timestamp, pos);
547 pos += sizeof(u64);
548
549 if (local->hw.radiotap_timestamp.accuracy >= 0) {
550 accuracy = local->hw.radiotap_timestamp.accuracy;
551 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
552 }
553 put_unaligned_le16(accuracy, pos);
554 pos += sizeof(u16);
555
556 *pos++ = local->hw.radiotap_timestamp.units_pos;
557 *pos++ = flags;
558 }
559
560 if (status->encoding == RX_ENC_HE &&
561 status->flag & RX_FLAG_RADIOTAP_HE) {
562 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
563
564 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
565 he.data6 |= HE_PREP(DATA6_NSTS,
566 FIELD_GET(RX_ENC_FLAG_STBC_MASK,
567 status->enc_flags));
568 he.data3 |= HE_PREP(DATA3_STBC, 1);
569 } else {
570 he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
571 }
572
573 #define CHECK_GI(s) \
574 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
575 (int)NL80211_RATE_INFO_HE_GI_##s)
576
577 CHECK_GI(0_8);
578 CHECK_GI(1_6);
579 CHECK_GI(3_2);
580
581 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
582 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
583 he.data3 |= HE_PREP(DATA3_CODING,
584 !!(status->enc_flags & RX_ENC_FLAG_LDPC));
585
586 he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
587
588 switch (status->bw) {
589 case RATE_INFO_BW_20:
590 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
591 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
592 break;
593 case RATE_INFO_BW_40:
594 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
595 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
596 break;
597 case RATE_INFO_BW_80:
598 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
599 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
600 break;
601 case RATE_INFO_BW_160:
602 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
603 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
604 break;
605 case RATE_INFO_BW_HE_RU:
606 #define CHECK_RU_ALLOC(s) \
607 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
608 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
609
610 CHECK_RU_ALLOC(26);
611 CHECK_RU_ALLOC(52);
612 CHECK_RU_ALLOC(106);
613 CHECK_RU_ALLOC(242);
614 CHECK_RU_ALLOC(484);
615 CHECK_RU_ALLOC(996);
616 CHECK_RU_ALLOC(2x996);
617
618 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
619 status->he_ru + 4);
620 break;
621 default:
622 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
623 }
624
625 /* ensure 2 byte alignment */
626 while ((pos - (u8 *)rthdr) & 1)
627 pos++;
628 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
629 memcpy(pos, &he, sizeof(he));
630 pos += sizeof(he);
631 }
632
633 if (status->encoding == RX_ENC_HE &&
634 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
635 /* ensure 2 byte alignment */
636 while ((pos - (u8 *)rthdr) & 1)
637 pos++;
638 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
639 memcpy(pos, &he_mu, sizeof(he_mu));
640 pos += sizeof(he_mu);
641 }
642
643 if (status->flag & RX_FLAG_NO_PSDU) {
644 rthdr->it_present |=
645 cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
646 *pos++ = status->zero_length_psdu_type;
647 }
648
649 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
650 /* ensure 2 byte alignment */
651 while ((pos - (u8 *)rthdr) & 1)
652 pos++;
653 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
654 memcpy(pos, &lsig, sizeof(lsig));
655 pos += sizeof(lsig);
656 }
657
658 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
659 *pos++ = status->chain_signal[chain];
660 *pos++ = chain;
661 }
662
663 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
664 /* ensure 2 byte alignment for the vendor field as required */
665 if ((pos - (u8 *)rthdr) & 1)
666 *pos++ = 0;
667 *pos++ = rtap.oui[0];
668 *pos++ = rtap.oui[1];
669 *pos++ = rtap.oui[2];
670 *pos++ = rtap.subns;
671 put_unaligned_le16(rtap.len, pos);
672 pos += 2;
673 /* align the actual payload as requested */
674 while ((pos - (u8 *)rthdr) & (rtap.align - 1))
675 *pos++ = 0;
676 /* data (and possible padding) already follows */
677 }
678 }
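/*
 * Illustrative sketch (not part of the real code flow): the loop above
 * chains additional 32-bit it_present words by setting the
 * IEEE80211_RADIOTAP_EXT bit in every word that is followed by another
 * one.  A consumer walking the header could count the presence words
 * roughly like this; a simplified, hypothetical helper, not the canonical
 * radiotap parser.
 */
#if 0
static int example_count_present_words(const __le32 *it_present)
{
	int n = 1;

	/* the EXT bit (bit 31) means "another presence word follows" */
	while (get_unaligned_le32(it_present) & BIT(IEEE80211_RADIOTAP_EXT)) {
		it_present++;
		n++;
	}

	return n;
}
#endif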
679
680 static struct sk_buff *
681 ieee80211_make_monitor_skb(struct ieee80211_local *local,
682 struct sk_buff **origskb,
683 struct ieee80211_rate *rate,
684 int rtap_space, bool use_origskb)
685 {
686 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
687 int rt_hdrlen, needed_headroom;
688 struct sk_buff *skb;
689
690 /* room for the radiotap header based on driver features */
691 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
692 needed_headroom = rt_hdrlen - rtap_space;
693
694 if (use_origskb) {
695 /* only need to expand headroom if necessary */
696 skb = *origskb;
697 *origskb = NULL;
698
699 /*
700 * This shouldn't trigger often because most devices have an
701 * RX header they pull before we get here, and that should
702 * be big enough for our radiotap information. We should
703 * probably export the length to drivers so that we can have
704 * them allocate enough headroom to start with.
705 */
706 if (skb_headroom(skb) < needed_headroom &&
707 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
708 dev_kfree_skb(skb);
709 return NULL;
710 }
711 } else {
712 /*
713 * Need to make a copy and possibly remove radiotap header
714 * and FCS from the original.
715 */
716 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
717
718 if (!skb)
719 return NULL;
720 }
721
722 /* prepend radiotap information */
723 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
724
725 skb_reset_mac_header(skb);
726 skb->ip_summed = CHECKSUM_UNNECESSARY;
727 skb->pkt_type = PACKET_OTHERHOST;
728 skb->protocol = htons(ETH_P_802_2);
729
730 return skb;
731 }
732
733 /*
734 * This function copies a received frame to all monitor interfaces and
735 * returns a cleaned-up SKB that no longer includes the FCS nor the
736 * radiotap header the driver might have added.
737 */
738 static struct sk_buff *
739 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
740 struct ieee80211_rate *rate)
741 {
742 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
743 struct ieee80211_sub_if_data *sdata;
744 struct sk_buff *monskb = NULL;
745 int present_fcs_len = 0;
746 unsigned int rtap_space = 0;
747 struct ieee80211_sub_if_data *monitor_sdata =
748 rcu_dereference(local->monitor_sdata);
749 bool only_monitor = false;
750 unsigned int min_head_len;
751
752 if (status->flag & RX_FLAG_RADIOTAP_HE)
753 rtap_space += sizeof(struct ieee80211_radiotap_he);
754
755 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
756 rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
757
758 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
759 rtap_space += sizeof(struct ieee80211_radiotap_lsig);
760
761 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
762 struct ieee80211_vendor_radiotap *rtap =
763 (void *)(origskb->data + rtap_space);
764
765 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
766 }
767
768 min_head_len = rtap_space;
769
770 /*
771 * First, we may need to make a copy of the skb because
772 * (1) we need to modify it for radiotap (if not present), and
773 * (2) the other RX handlers will modify the skb we got.
774 *
775 * We don't need to, of course, if we aren't going to return
776 * the SKB because it has a bad FCS/PLCP checksum.
777 */
778
779 if (!(status->flag & RX_FLAG_NO_PSDU)) {
780 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
781 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
782 /* driver bug */
783 WARN_ON(1);
784 dev_kfree_skb(origskb);
785 return NULL;
786 }
787 present_fcs_len = FCS_LEN;
788 }
789
790 /* also consider the hdr->frame_control */
791 min_head_len += 2;
792 }
793
794 /* ensure that the expected data elements are in skb head */
795 if (!pskb_may_pull(origskb, min_head_len)) {
796 dev_kfree_skb(origskb);
797 return NULL;
798 }
799
800 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
801
802 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
803 if (only_monitor) {
804 dev_kfree_skb(origskb);
805 return NULL;
806 }
807
808 return ieee80211_clean_skb(origskb, present_fcs_len,
809 rtap_space);
810 }
811
812 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
813
814 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
815 bool last_monitor = list_is_last(&sdata->u.mntr.list,
816 &local->mon_list);
817
818 if (!monskb)
819 monskb = ieee80211_make_monitor_skb(local, &origskb,
820 rate, rtap_space,
821 only_monitor &&
822 last_monitor);
823
824 if (monskb) {
825 struct sk_buff *skb;
826
827 if (last_monitor) {
828 skb = monskb;
829 monskb = NULL;
830 } else {
831 skb = skb_clone(monskb, GFP_ATOMIC);
832 }
833
834 if (skb) {
835 skb->dev = sdata->dev;
836 dev_sw_netstats_rx_add(skb->dev, skb->len);
837 netif_receive_skb(skb);
838 }
839 }
840
841 if (last_monitor)
842 break;
843 }
844
845 /* this happens if last_monitor was erroneously false */
846 dev_kfree_skb(monskb);
847
848 /* ditto */
849 if (!origskb)
850 return NULL;
851
852 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
853 }
854
855 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
856 {
857 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
858 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
859 int tid, seqno_idx, security_idx;
860
861 /* does the frame have a qos control field? */
862 if (ieee80211_is_data_qos(hdr->frame_control)) {
863 u8 *qc = ieee80211_get_qos_ctl(hdr);
864 /* frame has qos control */
865 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
866 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
867 status->rx_flags |= IEEE80211_RX_AMSDU;
868
869 seqno_idx = tid;
870 security_idx = tid;
871 } else {
872 /*
873 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
874 *
875 * Sequence numbers for management frames, QoS data
876 * frames with a broadcast/multicast address in the
877 * Address 1 field, and all non-QoS data frames sent
878 * by QoS STAs are assigned using an additional single
879 * modulo-4096 counter, [...]
880 *
881 * We also use that counter for non-QoS STAs.
882 */
883 seqno_idx = IEEE80211_NUM_TIDS;
884 security_idx = 0;
885 if (ieee80211_is_mgmt(hdr->frame_control))
886 security_idx = IEEE80211_NUM_TIDS;
887 tid = 0;
888 }
889
890 rx->seqno_idx = seqno_idx;
891 rx->security_idx = security_idx;
892 /* Set skb->priority to the 802.1D tag if the highest-order bit of the TID
893 * is not set. For now, set skb->priority to 0 for other cases. */
894 rx->skb->priority = (tid > 7) ? 0 : tid;
895 }
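/*
 * Illustrative sketch (not part of the real code flow): the QoS Control
 * field parsed above carries the TID in the low four bits of its first
 * octet and the "A-MSDU present" flag in bit 7.  For a raw first octet
 * qc, the extraction amounts to the following; a hypothetical helper for
 * illustration only.
 */
#if 0
static void example_parse_qos_ctl(u8 qc, u8 *tid, bool *is_amsdu)
{
	*tid = qc & IEEE80211_QOS_CTL_TID_MASK;			/* 0x0f */
	*is_amsdu = !!(qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT);	/* 0x80 */
}
#endif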
896
897 /**
898 * DOC: Packet alignment
899 *
900 * Drivers always need to pass packets that are aligned to two-byte boundaries
901 * to the stack.
902 *
903 * Additionally, drivers should, if possible, align the payload data in a way that
904 * guarantees that the contained IP header is aligned to a four-byte
905 * boundary. In the case of regular frames, this simply means aligning the
906 * payload to a four-byte boundary (because either the IP header is directly
907 * contained, or IV/RFC1042 headers that have a length divisible by four are
908 * in front of it). If the payload data is not properly aligned and the
909 * architecture doesn't support efficient unaligned operations, mac80211
910 * will align the data.
911 *
912 * With A-MSDU frames, however, the payload data address modulo four must
913 * equal two, because there are 14-byte 802.3 headers within the A-MSDU frames that
914 * push the IP header further back to a multiple of four again. Thankfully, the
915 * specs were sane enough this time around to require padding each A-MSDU
916 * subframe to a length that is a multiple of four.
917 *
918 * Padding such as that added by Atheros hardware between the 802.11 header
919 * and the payload is not supported; in that case the driver is required to
920 * move the 802.11 header so that it sits directly in front of the payload.
921 */
922 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
923 {
924 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
925 WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
926 #endif
927 }
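/*
 * Illustrative sketch of the A-MSDU rule from the "Packet alignment" DOC
 * block above (not part of the real code flow): a payload address of two
 * modulo four means each 14-byte 802.3 subframe header ends on a four-byte
 * boundary, so the IP header it carries is four-byte aligned.  Hypothetical
 * helper, for illustration only.
 */
#if 0
static bool example_amsdu_payload_aligned(const u8 *payload)
{
	/* (2 + 14) % 4 == 0, so the IP header following the 802.3
	 * subframe header lands on a four-byte boundary
	 */
	return ((unsigned long)payload & 3) == 2;
}
#endif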
928
929
930 /* rx handlers */
931
932 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
933 {
934 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
935
936 if (is_multicast_ether_addr(hdr->addr1))
937 return 0;
938
939 return ieee80211_is_robust_mgmt_frame(skb);
940 }
941
942
943 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
944 {
945 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
946
947 if (!is_multicast_ether_addr(hdr->addr1))
948 return 0;
949
950 return ieee80211_is_robust_mgmt_frame(skb);
951 }
952
953
954 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
955 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
956 {
957 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
958 struct ieee80211_mmie *mmie;
959 struct ieee80211_mmie_16 *mmie16;
960
961 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
962 return -1;
963
964 if (!ieee80211_is_robust_mgmt_frame(skb) &&
965 !ieee80211_is_beacon(hdr->frame_control))
966 return -1; /* not a robust management frame */
967
968 mmie = (struct ieee80211_mmie *)
969 (skb->data + skb->len - sizeof(*mmie));
970 if (mmie->element_id == WLAN_EID_MMIE &&
971 mmie->length == sizeof(*mmie) - 2)
972 return le16_to_cpu(mmie->key_id);
973
974 mmie16 = (struct ieee80211_mmie_16 *)
975 (skb->data + skb->len - sizeof(*mmie16));
976 if (skb->len >= 24 + sizeof(*mmie16) &&
977 mmie16->element_id == WLAN_EID_MMIE &&
978 mmie16->length == sizeof(*mmie16) - 2)
979 return le16_to_cpu(mmie16->key_id);
980
981 return -1;
982 }
983
984 static int ieee80211_get_keyid(struct sk_buff *skb,
985 const struct ieee80211_cipher_scheme *cs)
986 {
987 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
988 __le16 fc;
989 int hdrlen;
990 int minlen;
991 u8 key_idx_off;
992 u8 key_idx_shift;
993 u8 keyid;
994
995 fc = hdr->frame_control;
996 hdrlen = ieee80211_hdrlen(fc);
997
998 if (cs) {
999 minlen = hdrlen + cs->hdr_len;
1000 key_idx_off = hdrlen + cs->key_idx_off;
1001 key_idx_shift = cs->key_idx_shift;
1002 } else {
1003 /* WEP, TKIP, CCMP and GCMP */
1004 minlen = hdrlen + IEEE80211_WEP_IV_LEN;
1005 key_idx_off = hdrlen + 3;
1006 key_idx_shift = 6;
1007 }
1008
1009 if (unlikely(skb->len < minlen))
1010 return -EINVAL;
1011
1012 skb_copy_bits(skb, key_idx_off, &keyid, 1);
1013
1014 if (cs)
1015 keyid &= cs->key_idx_mask;
1016 keyid >>= key_idx_shift;
1017
1018 /* cs could use more than the usual two bits for the keyid */
1019 if (unlikely(keyid >= NUM_DEFAULT_KEYS))
1020 return -EINVAL;
1021
1022 return keyid;
1023 }
1024
1025 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
1026 {
1027 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1028 char *dev_addr = rx->sdata->vif.addr;
1029
1030 if (ieee80211_is_data(hdr->frame_control)) {
1031 if (is_multicast_ether_addr(hdr->addr1)) {
1032 if (ieee80211_has_tods(hdr->frame_control) ||
1033 !ieee80211_has_fromds(hdr->frame_control))
1034 return RX_DROP_MONITOR;
1035 if (ether_addr_equal(hdr->addr3, dev_addr))
1036 return RX_DROP_MONITOR;
1037 } else {
1038 if (!ieee80211_has_a4(hdr->frame_control))
1039 return RX_DROP_MONITOR;
1040 if (ether_addr_equal(hdr->addr4, dev_addr))
1041 return RX_DROP_MONITOR;
1042 }
1043 }
1044
1045 /* If there is not an established peer link and this is not a peer link
1046 * establishment frame, beacon or probe, drop the frame.
1047 */
1048
1049 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
1050 struct ieee80211_mgmt *mgmt;
1051
1052 if (!ieee80211_is_mgmt(hdr->frame_control))
1053 return RX_DROP_MONITOR;
1054
1055 if (ieee80211_is_action(hdr->frame_control)) {
1056 u8 category;
1057
1058 /* make sure category field is present */
1059 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
1060 return RX_DROP_MONITOR;
1061
1062 mgmt = (struct ieee80211_mgmt *)hdr;
1063 category = mgmt->u.action.category;
1064 if (category != WLAN_CATEGORY_MESH_ACTION &&
1065 category != WLAN_CATEGORY_SELF_PROTECTED)
1066 return RX_DROP_MONITOR;
1067 return RX_CONTINUE;
1068 }
1069
1070 if (ieee80211_is_probe_req(hdr->frame_control) ||
1071 ieee80211_is_probe_resp(hdr->frame_control) ||
1072 ieee80211_is_beacon(hdr->frame_control) ||
1073 ieee80211_is_auth(hdr->frame_control))
1074 return RX_CONTINUE;
1075
1076 return RX_DROP_MONITOR;
1077 }
1078
1079 return RX_CONTINUE;
1080 }
1081
1082 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
1083 int index)
1084 {
1085 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
1086 struct sk_buff *tail = skb_peek_tail(frames);
1087 struct ieee80211_rx_status *status;
1088
1089 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
1090 return true;
1091
1092 if (!tail)
1093 return false;
1094
1095 status = IEEE80211_SKB_RXCB(tail);
1096 if (status->flag & RX_FLAG_AMSDU_MORE)
1097 return false;
1098
1099 return true;
1100 }
1101
1102 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
1103 struct tid_ampdu_rx *tid_agg_rx,
1104 int index,
1105 struct sk_buff_head *frames)
1106 {
1107 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
1108 struct sk_buff *skb;
1109 struct ieee80211_rx_status *status;
1110
1111 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1112
1113 if (skb_queue_empty(skb_list))
1114 goto no_frame;
1115
1116 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1117 __skb_queue_purge(skb_list);
1118 goto no_frame;
1119 }
1120
1121 /* release frames from the reorder ring buffer */
1122 tid_agg_rx->stored_mpdu_num--;
1123 while ((skb = __skb_dequeue(skb_list))) {
1124 status = IEEE80211_SKB_RXCB(skb);
1125 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
1126 __skb_queue_tail(frames, skb);
1127 }
1128
1129 no_frame:
1130 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
1131 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1132 }
1133
1134 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
1135 struct tid_ampdu_rx *tid_agg_rx,
1136 u16 head_seq_num,
1137 struct sk_buff_head *frames)
1138 {
1139 int index;
1140
1141 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1142
1143 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1144 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1145 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1146 frames);
1147 }
1148 }
1149
1150 /*
1151 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
1152 * the skb was added to the buffer longer than this time ago, the earlier
1153 * frames that have not yet been received are assumed to be lost and the skb
1154 * can be released for processing. This may also release other skb's from the
1155 * reorder buffer if there are no additional gaps between the frames.
1156 *
1157 * Callers must hold tid_agg_rx->reorder_lock.
1158 */
1159 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
1160
1161 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
1162 struct tid_ampdu_rx *tid_agg_rx,
1163 struct sk_buff_head *frames)
1164 {
1165 int index, i, j;
1166
1167 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1168
1169 /* release the buffer until next missing frame */
1170 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1171 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1172 tid_agg_rx->stored_mpdu_num) {
1173 /*
1174 * No buffers ready to be released, but check whether any
1175 * frames in the reorder buffer have timed out.
1176 */
1177 int skipped = 1;
1178 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1179 j = (j + 1) % tid_agg_rx->buf_size) {
1180 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1181 skipped++;
1182 continue;
1183 }
1184 if (skipped &&
1185 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1186 HT_RX_REORDER_BUF_TIMEOUT))
1187 goto set_release_timer;
1188
1189 /* don't leave incomplete A-MSDUs around */
1190 for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1191 i = (i + 1) % tid_agg_rx->buf_size)
1192 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1193
1194 ht_dbg_ratelimited(sdata,
1195 "release an RX reorder frame due to timeout on earlier frames\n");
1196 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1197 frames);
1198
1199 /*
1200 * Increment the head seq# also for the skipped slots.
1201 */
1202 tid_agg_rx->head_seq_num =
1203 (tid_agg_rx->head_seq_num +
1204 skipped) & IEEE80211_SN_MASK;
1205 skipped = 0;
1206 }
1207 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1208 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1209 frames);
1210 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1211 }
1212
1213 if (tid_agg_rx->stored_mpdu_num) {
1214 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1215
1216 for (; j != (index - 1) % tid_agg_rx->buf_size;
1217 j = (j + 1) % tid_agg_rx->buf_size) {
1218 if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1219 break;
1220 }
1221
1222 set_release_timer:
1223
1224 if (!tid_agg_rx->removed)
1225 mod_timer(&tid_agg_rx->reorder_timer,
1226 tid_agg_rx->reorder_time[j] + 1 +
1227 HT_RX_REORDER_BUF_TIMEOUT);
1228 } else {
1229 del_timer(&tid_agg_rx->reorder_timer);
1230 }
1231 }
1232
1233 /*
1234 * As this function belongs to the RX path it must be under
1235 * rcu_read_lock protection. It returns false if the frame
1236 * can be processed immediately, true if it was consumed.
1237 */
1238 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1239 struct tid_ampdu_rx *tid_agg_rx,
1240 struct sk_buff *skb,
1241 struct sk_buff_head *frames)
1242 {
1243 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1244 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1245 u16 sc = le16_to_cpu(hdr->seq_ctrl);
1246 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1247 u16 head_seq_num, buf_size;
1248 int index;
1249 bool ret = true;
1250
1251 spin_lock(&tid_agg_rx->reorder_lock);
1252
1253 /*
1254 * Offloaded BA sessions have no known starting sequence number so pick
1255 * one from the first Rxed frame for this tid after BA was started.
1256 */
1257 if (unlikely(tid_agg_rx->auto_seq)) {
1258 tid_agg_rx->auto_seq = false;
1259 tid_agg_rx->ssn = mpdu_seq_num;
1260 tid_agg_rx->head_seq_num = mpdu_seq_num;
1261 }
1262
1263 buf_size = tid_agg_rx->buf_size;
1264 head_seq_num = tid_agg_rx->head_seq_num;
1265
1266 /*
1267 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1268 * be reordered.
1269 */
1270 if (unlikely(!tid_agg_rx->started)) {
1271 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1272 ret = false;
1273 goto out;
1274 }
1275 tid_agg_rx->started = true;
1276 }
1277
1278 /* frame with out of date sequence number */
1279 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1280 dev_kfree_skb(skb);
1281 goto out;
1282 }
1283
1284 /*
1285 * If the frame's sequence number exceeds our buffering window
1286 * size, release some previous frames to make room for this one.
1287 */
1288 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1289 head_seq_num = ieee80211_sn_inc(
1290 ieee80211_sn_sub(mpdu_seq_num, buf_size));
1291 /* release stored frames up to new head to stack */
1292 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1293 head_seq_num, frames);
1294 }
1295
1296 /* Now the new frame is always in the range of the reordering buffer */
1297
1298 index = mpdu_seq_num % tid_agg_rx->buf_size;
1299
1300 /* check if we already stored this frame */
1301 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1302 dev_kfree_skb(skb);
1303 goto out;
1304 }
1305
1306 /*
1307 * If the current MPDU is in the right order and nothing else
1308 * is stored we can process it directly, no need to buffer it.
1309 * If it is first but there's something stored, we may be able
1310 * to release frames after this one.
1311 */
1312 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1313 tid_agg_rx->stored_mpdu_num == 0) {
1314 if (!(status->flag & RX_FLAG_AMSDU_MORE))
1315 tid_agg_rx->head_seq_num =
1316 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1317 ret = false;
1318 goto out;
1319 }
1320
1321 /* put the frame in the reordering buffer */
1322 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1323 if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1324 tid_agg_rx->reorder_time[index] = jiffies;
1325 tid_agg_rx->stored_mpdu_num++;
1326 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1327 }
1328
1329 out:
1330 spin_unlock(&tid_agg_rx->reorder_lock);
1331 return ret;
1332 }
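/*
 * Illustrative sketch (not part of the real code flow): the window logic
 * above relies on modulo-4096 sequence-number arithmetic.  The helpers it
 * uses behave roughly as below; this is a simplified restatement for
 * illustration only, the real definitions live elsewhere in mac80211.
 */
#if 0
static u16 example_sn_inc(u16 sn)
{
	return (sn + 1) & IEEE80211_SN_MASK;
}

static u16 example_sn_sub(u16 a, u16 b)
{
	return (a - b) & IEEE80211_SN_MASK;
}

static bool example_sn_less(u16 a, u16 b)
{
	/* "a comes before b" within half of the 4096-wide sequence space */
	return ((a - b) & IEEE80211_SN_MASK) > 2048;
}
#endif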
1333
1334 /*
1335 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that
1336 * need no reordering are added directly to the frames queue.
1337 */
1338 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1339 struct sk_buff_head *frames)
1340 {
1341 struct sk_buff *skb = rx->skb;
1342 struct ieee80211_local *local = rx->local;
1343 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1344 struct sta_info *sta = rx->sta;
1345 struct tid_ampdu_rx *tid_agg_rx;
1346 u16 sc;
1347 u8 tid, ack_policy;
1348
1349 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1350 is_multicast_ether_addr(hdr->addr1))
1351 goto dont_reorder;
1352
1353 /*
1354 * filter the QoS data rx stream according to
1355 * STA/TID and check if this STA/TID is on aggregation
1356 */
1357
1358 if (!sta)
1359 goto dont_reorder;
1360
1361 ack_policy = *ieee80211_get_qos_ctl(hdr) &
1362 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1363 tid = ieee80211_get_tid(hdr);
1364
1365 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1366 if (!tid_agg_rx) {
1367 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1368 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1369 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1370 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1371 WLAN_BACK_RECIPIENT,
1372 WLAN_REASON_QSTA_REQUIRE_SETUP);
1373 goto dont_reorder;
1374 }
1375
1376 /* qos null data frames are excluded */
1377 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1378 goto dont_reorder;
1379
1380 /* not part of a BA session */
1381 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1382 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
1383 goto dont_reorder;
1384
1385 /* new, potentially un-ordered, ampdu frame - process it */
1386
1387 /* reset session timer */
1388 if (tid_agg_rx->timeout)
1389 tid_agg_rx->last_rx = jiffies;
1390
1391 /* if this mpdu is fragmented - terminate rx aggregation session */
1392 sc = le16_to_cpu(hdr->seq_ctrl);
1393 if (sc & IEEE80211_SCTL_FRAG) {
1394 skb_queue_tail(&rx->sdata->skb_queue, skb);
1395 ieee80211_queue_work(&local->hw, &rx->sdata->work);
1396 return;
1397 }
1398
1399 /*
1400 * No locking needed -- we will only ever process one
1401 * RX packet at a time, and thus own tid_agg_rx. All
1402 * other code manipulating it needs to (and does) make
1403 * sure that we cannot get to it any more before doing
1404 * anything with it.
1405 */
1406 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1407 frames))
1408 return;
1409
1410 dont_reorder:
1411 __skb_queue_tail(frames, skb);
1412 }
1413
1414 static ieee80211_rx_result debug_noinline
1415 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1416 {
1417 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1418 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1419
1420 if (status->flag & RX_FLAG_DUP_VALIDATED)
1421 return RX_CONTINUE;
1422
1423 /*
1424 * Drop duplicate 802.11 retransmissions
1425 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1426 */
1427
1428 if (rx->skb->len < 24)
1429 return RX_CONTINUE;
1430
1431 if (ieee80211_is_ctl(hdr->frame_control) ||
1432 ieee80211_is_any_nullfunc(hdr->frame_control) ||
1433 is_multicast_ether_addr(hdr->addr1))
1434 return RX_CONTINUE;
1435
1436 if (!rx->sta)
1437 return RX_CONTINUE;
1438
1439 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1440 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1441 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1442 rx->sta->rx_stats.num_duplicates++;
1443 return RX_DROP_UNUSABLE;
1444 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1445 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1446 }
1447
1448 return RX_CONTINUE;
1449 }
1450
1451 static ieee80211_rx_result debug_noinline
1452 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1453 {
1454 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1455
1456 /* Drop disallowed frame classes based on STA auth/assoc state;
1457 * IEEE 802.11, Chap 5.5.
1458 *
1459 * mac80211 filters only based on association state, i.e. it drops
1460 * Class 3 frames from not associated stations. hostapd sends
1461 * deauth/disassoc frames when needed. In addition, hostapd is
1462 * responsible for filtering on both auth and assoc states.
1463 */
1464
1465 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1466 return ieee80211_rx_mesh_check(rx);
1467
1468 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1469 ieee80211_is_pspoll(hdr->frame_control)) &&
1470 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1471 rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1472 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1473 /*
1474 * accept port control frames from the AP even when it's not
1475 * yet marked ASSOC to prevent a race where we don't set the
1476 * assoc bit quickly enough before it sends the first frame
1477 */
1478 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1479 ieee80211_is_data_present(hdr->frame_control)) {
1480 unsigned int hdrlen;
1481 __be16 ethertype;
1482
1483 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1484
1485 if (rx->skb->len < hdrlen + 8)
1486 return RX_DROP_MONITOR;
1487
1488 skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1489 if (ethertype == rx->sdata->control_port_protocol)
1490 return RX_CONTINUE;
1491 }
1492
1493 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1494 cfg80211_rx_spurious_frame(rx->sdata->dev,
1495 hdr->addr2,
1496 GFP_ATOMIC))
1497 return RX_DROP_UNUSABLE;
1498
1499 return RX_DROP_MONITOR;
1500 }
1501
1502 return RX_CONTINUE;
1503 }
1504
1505
1506 static ieee80211_rx_result debug_noinline
1507 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1508 {
1509 struct ieee80211_local *local;
1510 struct ieee80211_hdr *hdr;
1511 struct sk_buff *skb;
1512
1513 local = rx->local;
1514 skb = rx->skb;
1515 hdr = (struct ieee80211_hdr *) skb->data;
1516
1517 if (!local->pspolling)
1518 return RX_CONTINUE;
1519
1520 if (!ieee80211_has_fromds(hdr->frame_control))
1521 /* this is not from AP */
1522 return RX_CONTINUE;
1523
1524 if (!ieee80211_is_data(hdr->frame_control))
1525 return RX_CONTINUE;
1526
1527 if (!ieee80211_has_moredata(hdr->frame_control)) {
1528 /* AP has no more frames buffered for us */
1529 local->pspolling = false;
1530 return RX_CONTINUE;
1531 }
1532
1533 /* more data bit is set, let's request a new frame from the AP */
1534 ieee80211_send_pspoll(local, rx->sdata);
1535
1536 return RX_CONTINUE;
1537 }
1538
1539 static void sta_ps_start(struct sta_info *sta)
1540 {
1541 struct ieee80211_sub_if_data *sdata = sta->sdata;
1542 struct ieee80211_local *local = sdata->local;
1543 struct ps_data *ps;
1544 int tid;
1545
1546 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1547 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1548 ps = &sdata->bss->ps;
1549 else
1550 return;
1551
1552 atomic_inc(&ps->num_sta_ps);
1553 set_sta_flag(sta, WLAN_STA_PS_STA);
1554 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1555 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1556 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1557 sta->sta.addr, sta->sta.aid);
1558
1559 ieee80211_clear_fast_xmit(sta);
1560
1561 if (!sta->sta.txq[0])
1562 return;
1563
1564 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
1565 struct ieee80211_txq *txq = sta->sta.txq[tid];
1566 struct txq_info *txqi = to_txq_info(txq);
1567
1568 spin_lock(&local->active_txq_lock[txq->ac]);
1569 if (!list_empty(&txqi->schedule_order))
1570 list_del_init(&txqi->schedule_order);
1571 spin_unlock(&local->active_txq_lock[txq->ac]);
1572
1573 if (txq_has_queue(txq))
1574 set_bit(tid, &sta->txq_buffered_tids);
1575 else
1576 clear_bit(tid, &sta->txq_buffered_tids);
1577 }
1578 }
1579
1580 static void sta_ps_end(struct sta_info *sta)
1581 {
1582 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1583 sta->sta.addr, sta->sta.aid);
1584
1585 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1586 /*
1587 * Clear the flag only if the other one is still set
1588 * so that the TX path won't start TX'ing new frames
1589 * directly ... In the case that the driver flag isn't
1590 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1591 */
1592 clear_sta_flag(sta, WLAN_STA_PS_STA);
1593 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1594 sta->sta.addr, sta->sta.aid);
1595 return;
1596 }
1597
1598 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1599 clear_sta_flag(sta, WLAN_STA_PS_STA);
1600 ieee80211_sta_ps_deliver_wakeup(sta);
1601 }
1602
1603 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1604 {
1605 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1606 bool in_ps;
1607
1608 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1609
1610 /* Don't let the same PS state be set twice */
1611 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1612 if ((start && in_ps) || (!start && !in_ps))
1613 return -EINVAL;
1614
1615 if (start)
1616 sta_ps_start(sta);
1617 else
1618 sta_ps_end(sta);
1619
1620 return 0;
1621 }
1622 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
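/*
 * Illustrative sketch (not part of the real code flow): a driver that
 * advertises IEEE80211_HW_AP_LINK_PS and learns about power-save
 * transitions from its firmware might call the exported helper above
 * roughly like this.  The surrounding function name is hypothetical.
 */
#if 0
static void example_fw_ps_event(struct ieee80211_sta *pubsta, bool sleeping)
{
	/* -EINVAL just means the same PS state was reported twice */
	if (ieee80211_sta_ps_transition(pubsta, sleeping))
		pr_debug("PS state for %pM was already %d\n",
			 pubsta->addr, sleeping);
}
#endif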
1623
1624 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1625 {
1626 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1627
1628 if (test_sta_flag(sta, WLAN_STA_SP))
1629 return;
1630
1631 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1632 ieee80211_sta_ps_deliver_poll_response(sta);
1633 else
1634 set_sta_flag(sta, WLAN_STA_PSPOLL);
1635 }
1636 EXPORT_SYMBOL(ieee80211_sta_pspoll);
1637
1638 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1639 {
1640 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1641 int ac = ieee80211_ac_from_tid(tid);
1642
1643 /*
1644 * If this AC is not trigger-enabled do nothing unless the
1645 * driver is calling us after it already checked.
1646 *
1647 * NB: This could/should check a separate bitmap of trigger-
1648 * enabled queues, but for now we only implement uAPSD w/o
1649 * TSPEC changes to the ACs, so they're always the same.
1650 */
1651 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1652 tid != IEEE80211_NUM_TIDS)
1653 return;
1654
1655 /* if we are in a service period, do nothing */
1656 if (test_sta_flag(sta, WLAN_STA_SP))
1657 return;
1658
1659 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1660 ieee80211_sta_ps_deliver_uapsd(sta);
1661 else
1662 set_sta_flag(sta, WLAN_STA_UAPSD);
1663 }
1664 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1665
1666 static ieee80211_rx_result debug_noinline
1667 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1668 {
1669 struct ieee80211_sub_if_data *sdata = rx->sdata;
1670 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1671 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1672
1673 if (!rx->sta)
1674 return RX_CONTINUE;
1675
1676 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1677 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1678 return RX_CONTINUE;
1679
1680 /*
1681 * The device handles station powersave, so don't do anything about
1682 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1683 * from the device to mac80211 since they're already handled there).
1684 */
1685 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1686 return RX_CONTINUE;
1687
1688 /*
1689 * Don't do anything if the station isn't already asleep. In
1690 * the uAPSD case, the station will probably be marked asleep,
1691 * in the PS-Poll case the station must be confused ...
1692 */
1693 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1694 return RX_CONTINUE;
1695
1696 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1697 ieee80211_sta_pspoll(&rx->sta->sta);
1698
1699 /* Free PS Poll skb here instead of returning RX_DROP that would
1700 * count as a dropped frame. */
1701 dev_kfree_skb(rx->skb);
1702
1703 return RX_QUEUED;
1704 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1705 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1706 ieee80211_has_pm(hdr->frame_control) &&
1707 (ieee80211_is_data_qos(hdr->frame_control) ||
1708 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1709 u8 tid = ieee80211_get_tid(hdr);
1710
1711 ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1712 }
1713
1714 return RX_CONTINUE;
1715 }
1716
1717 static ieee80211_rx_result debug_noinline
1718 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1719 {
1720 struct sta_info *sta = rx->sta;
1721 struct sk_buff *skb = rx->skb;
1722 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1723 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1724 int i;
1725
1726 if (!sta)
1727 return RX_CONTINUE;
1728
1729 /*
1730 * Update last_rx only for IBSS packets which are for the current
1731 * BSSID and for stations already AUTHORIZED to avoid keeping the
1732 * current IBSS network alive in cases where other STAs start
1733 * using a different BSSID. This will also give the station another
1734 * chance to restart the authentication/authorization in case
1735 * something went wrong the first time.
1736 */
1737 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1738 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1739 NL80211_IFTYPE_ADHOC);
1740 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1741 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1742 sta->rx_stats.last_rx = jiffies;
1743 if (ieee80211_is_data(hdr->frame_control) &&
1744 !is_multicast_ether_addr(hdr->addr1))
1745 sta->rx_stats.last_rate =
1746 sta_stats_encode_rate(status);
1747 }
1748 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1749 sta->rx_stats.last_rx = jiffies;
1750 } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
1751 !is_multicast_ether_addr(hdr->addr1)) {
1752 /*
1753 * Mesh beacons will update last_rx if they are found to
1754 * match the current local configuration when processed.
1755 */
1756 sta->rx_stats.last_rx = jiffies;
1757 if (ieee80211_is_data(hdr->frame_control))
1758 sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1759 }
1760
1761 sta->rx_stats.fragments++;
1762
1763 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
1764 sta->rx_stats.bytes += rx->skb->len;
1765 u64_stats_update_end(&rx->sta->rx_stats.syncp);
1766
1767 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1768 sta->rx_stats.last_signal = status->signal;
1769 ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
1770 }
1771
1772 if (status->chains) {
1773 sta->rx_stats.chains = status->chains;
1774 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1775 int signal = status->chain_signal[i];
1776
1777 if (!(status->chains & BIT(i)))
1778 continue;
1779
1780 sta->rx_stats.chain_signal_last[i] = signal;
1781 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
1782 -signal);
1783 }
1784 }
1785
1786 if (ieee80211_is_s1g_beacon(hdr->frame_control))
1787 return RX_CONTINUE;
1788
1789 /*
1790 * Change STA power saving mode only at the end of a frame
1791 * exchange sequence, and only for a data or management
1792 * frame as specified in IEEE 802.11-2016 11.2.3.2
1793 */
1794 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1795 !ieee80211_has_morefrags(hdr->frame_control) &&
1796 !is_multicast_ether_addr(hdr->addr1) &&
1797 (ieee80211_is_mgmt(hdr->frame_control) ||
1798 ieee80211_is_data(hdr->frame_control)) &&
1799 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1800 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1801 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1802 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1803 if (!ieee80211_has_pm(hdr->frame_control))
1804 sta_ps_end(sta);
1805 } else {
1806 if (ieee80211_has_pm(hdr->frame_control))
1807 sta_ps_start(sta);
1808 }
1809 }
1810
1811 /* mesh power save support */
1812 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1813 ieee80211_mps_rx_h_sta_process(sta, hdr);
1814
1815 /*
1816 * Drop (qos-)data::nullfunc frames silently, since they
1817 * are used only to control station power saving mode.
1818 */
1819 if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1820 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1821
1822 /*
1823 * If we receive a 4-addr nullfunc frame from a STA
1824 * that was not moved to a 4-addr STA vlan yet, send
1825 * the event to userspace; for older hostapd, drop
1826 * the frame to the monitor interface.
1827 */
1828 if (ieee80211_has_a4(hdr->frame_control) &&
1829 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1830 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1831 !rx->sdata->u.vlan.sta))) {
1832 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1833 cfg80211_rx_unexpected_4addr_frame(
1834 rx->sdata->dev, sta->sta.addr,
1835 GFP_ATOMIC);
1836 return RX_DROP_MONITOR;
1837 }
1838 /*
1839 * Update counter and free packet here to avoid
1840 * counting this as a dropped packed.
1841 */
1842 sta->rx_stats.packets++;
1843 dev_kfree_skb(rx->skb);
1844 return RX_QUEUED;
1845 }
1846
1847 return RX_CONTINUE;
1848 } /* ieee80211_rx_h_sta_process */
1849
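/*
 * Look up the BIGTK (beacon protection key) for the given key index.
 * With idx < 0 both BIGTK slots (the two indices starting at
 * NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) are tried; otherwise the
 * requested index is tried first and the other BIGTK slot second.
 * Per-station group keys take precedence over the interface keys.
 */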
1850 static struct ieee80211_key *
1851 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
1852 {
1853 struct ieee80211_key *key = NULL;
1854 struct ieee80211_sub_if_data *sdata = rx->sdata;
1855 int idx2;
1856
1857 /* Make sure key gets set if either BIGTK key index is set so that
1858 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
1859 * Beacon frames and Beacon frames that claim to use another BIGTK key
1860 * index (i.e., a key that we do not have).
1861 */
1862
1863 if (idx < 0) {
1864 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1865 idx2 = idx + 1;
1866 } else {
1867 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1868 idx2 = idx + 1;
1869 else
1870 idx2 = idx - 1;
1871 }
1872
1873 if (rx->sta)
1874 key = rcu_dereference(rx->sta->gtk[idx]);
1875 if (!key)
1876 key = rcu_dereference(sdata->keys[idx]);
1877 if (!key && rx->sta)
1878 key = rcu_dereference(rx->sta->gtk[idx2]);
1879 if (!key)
1880 key = rcu_dereference(sdata->keys[idx2]);
1881
1882 return key;
1883 }
1884
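/*
 * RX handler: select the key for this frame (PTK, GTK, IGTK or BIGTK,
 * see "Key selection 101" below) and run the matching cipher-specific
 * decrypt handler; protected frames for which no usable key is found
 * are dropped to cooked monitor.
 */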
1885 static ieee80211_rx_result debug_noinline
1886 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1887 {
1888 struct sk_buff *skb = rx->skb;
1889 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1890 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1891 int keyidx;
1892 ieee80211_rx_result result = RX_DROP_UNUSABLE;
1893 struct ieee80211_key *sta_ptk = NULL;
1894 struct ieee80211_key *ptk_idx = NULL;
1895 int mmie_keyidx = -1;
1896 __le16 fc;
1897 const struct ieee80211_cipher_scheme *cs = NULL;
1898
1899 if (ieee80211_is_ext(hdr->frame_control))
1900 return RX_CONTINUE;
1901
1902 /*
1903 * Key selection 101
1904 *
1905 * There are five types of keys:
1906 * - GTK (group keys)
1907 * - IGTK (group keys for management frames)
1908 * - BIGTK (group keys for Beacon frames)
1909 * - PTK (pairwise keys)
1910 * - STK (station-to-station pairwise keys)
1911 *
1912 * When selecting a key, we have to distinguish between multicast
1913 * (including broadcast) and unicast frames; the latter can only
1914 * use PTKs and STKs while the former always use GTKs, IGTKs, and
1915 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used,
1916 * then unicast frames can also use key indices like GTKs. Hence, if we
1917 * don't have a PTK/STK we check the key index for a WEP key.
1918 *
1919 * Note that in a regular BSS, multicast frames are sent by the
1920 * AP only; associated stations unicast the frame to the AP first,
1921 * which then multicasts it on their behalf.
1922 *
1923 * There is also a slight problem in IBSS mode: GTKs are negotiated
1924 * with each station, which is something we don't currently handle.
1925 * The spec seems to expect that one negotiates the same key with
1926 * every station but there's no such requirement; VLANs could be
1927 * possible.
1928 */
1929
1930 /* start without a key */
1931 rx->key = NULL;
1932 fc = hdr->frame_control;
1933
1934 if (rx->sta) {
1935 int keyid = rx->sta->ptk_idx;
1936 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1937
1938 if (ieee80211_has_protected(fc)) {
1939 cs = rx->sta->cipher_scheme;
1940 keyid = ieee80211_get_keyid(rx->skb, cs);
1941
1942 if (unlikely(keyid < 0))
1943 return RX_DROP_UNUSABLE;
1944
1945 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
1946 }
1947 }
1948
1949 if (!ieee80211_has_protected(fc))
1950 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1951
1952 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1953 rx->key = ptk_idx ? ptk_idx : sta_ptk;
1954 if ((status->flag & RX_FLAG_DECRYPTED) &&
1955 (status->flag & RX_FLAG_IV_STRIPPED))
1956 return RX_CONTINUE;
1957 /* Skip decryption if the frame is not protected. */
1958 if (!ieee80211_has_protected(fc))
1959 return RX_CONTINUE;
1960 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
1961 /* Broadcast/multicast robust management frame / BIP */
1962 if ((status->flag & RX_FLAG_DECRYPTED) &&
1963 (status->flag & RX_FLAG_IV_STRIPPED))
1964 return RX_CONTINUE;
1965
1966 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
1967 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
1968 NUM_DEFAULT_BEACON_KEYS) {
1969 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
1970 skb->data,
1971 skb->len);
1972 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1973 }
1974
1975 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
1976 if (!rx->key)
1977 return RX_CONTINUE; /* Beacon protection not in use */
1978 } else if (mmie_keyidx >= 0) {
1979 /* Broadcast/multicast robust management frame / BIP */
1980 if ((status->flag & RX_FLAG_DECRYPTED) &&
1981 (status->flag & RX_FLAG_IV_STRIPPED))
1982 return RX_CONTINUE;
1983
1984 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1985 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1986 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1987 if (rx->sta) {
1988 if (ieee80211_is_group_privacy_action(skb) &&
1989 test_sta_flag(rx->sta, WLAN_STA_MFP))
1990 return RX_DROP_MONITOR;
1991
1992 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1993 }
1994 if (!rx->key)
1995 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1996 } else if (!ieee80211_has_protected(fc)) {
1997 /*
1998 * The frame was not protected, so skip decryption. However, we
1999 * need to set rx->key if there is a key that could have been
2000 * used so that the frame may be dropped if encryption would
2001 * have been expected.
2002 */
2003 struct ieee80211_key *key = NULL;
2004 struct ieee80211_sub_if_data *sdata = rx->sdata;
2005 int i;
2006
2007 if (ieee80211_is_beacon(fc)) {
2008 key = ieee80211_rx_get_bigtk(rx, -1);
2009 } else if (ieee80211_is_mgmt(fc) &&
2010 is_multicast_ether_addr(hdr->addr1)) {
2011 key = rcu_dereference(rx->sdata->default_mgmt_key);
2012 } else {
2013 if (rx->sta) {
2014 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2015 key = rcu_dereference(rx->sta->gtk[i]);
2016 if (key)
2017 break;
2018 }
2019 }
2020 if (!key) {
2021 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2022 key = rcu_dereference(sdata->keys[i]);
2023 if (key)
2024 break;
2025 }
2026 }
2027 }
2028 if (key)
2029 rx->key = key;
2030 return RX_CONTINUE;
2031 } else {
2032 /*
2033 * The device doesn't give us the IV, so we won't be
2034 * able to look up the key. That's OK though; we
2035 * don't need to decrypt the frame, we just won't
2036 * be able to keep statistics accurate.
2037 * Except for key threshold notifications: should
2038 * we somehow allow the driver to tell us which key
2039 * the hardware used if this flag is set?
2040 */
2041 if ((status->flag & RX_FLAG_DECRYPTED) &&
2042 (status->flag & RX_FLAG_IV_STRIPPED))
2043 return RX_CONTINUE;
2044
2045 keyidx = ieee80211_get_keyid(rx->skb, cs);
2046
2047 if (unlikely(keyidx < 0))
2048 return RX_DROP_UNUSABLE;
2049
2050 /* check per-station GTK first, if multicast packet */
2051 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
2052 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
2053
2054 /* if not found, try default key */
2055 if (!rx->key) {
2056 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
2057
2058 /*
2059 * RSNA-protected unicast frames should always be
2060 * sent with pairwise or station-to-station keys,
2061 * but for WEP we allow using a key index as well.
2062 */
2063 if (rx->key &&
2064 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
2065 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
2066 !is_multicast_ether_addr(hdr->addr1))
2067 rx->key = NULL;
2068 }
2069 }
2070
2071 if (rx->key) {
2072 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
2073 return RX_DROP_MONITOR;
2074
2075 /* TODO: add threshold stuff again */
2076 } else {
2077 return RX_DROP_MONITOR;
2078 }
2079
2080 switch (rx->key->conf.cipher) {
2081 case WLAN_CIPHER_SUITE_WEP40:
2082 case WLAN_CIPHER_SUITE_WEP104:
2083 result = ieee80211_crypto_wep_decrypt(rx);
2084 break;
2085 case WLAN_CIPHER_SUITE_TKIP:
2086 result = ieee80211_crypto_tkip_decrypt(rx);
2087 break;
2088 case WLAN_CIPHER_SUITE_CCMP:
2089 result = ieee80211_crypto_ccmp_decrypt(
2090 rx, IEEE80211_CCMP_MIC_LEN);
2091 break;
2092 case WLAN_CIPHER_SUITE_CCMP_256:
2093 result = ieee80211_crypto_ccmp_decrypt(
2094 rx, IEEE80211_CCMP_256_MIC_LEN);
2095 break;
2096 case WLAN_CIPHER_SUITE_AES_CMAC:
2097 result = ieee80211_crypto_aes_cmac_decrypt(rx);
2098 break;
2099 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
2100 result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
2101 break;
2102 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2103 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2104 result = ieee80211_crypto_aes_gmac_decrypt(rx);
2105 break;
2106 case WLAN_CIPHER_SUITE_GCMP:
2107 case WLAN_CIPHER_SUITE_GCMP_256:
2108 result = ieee80211_crypto_gcmp_decrypt(rx);
2109 break;
2110 default:
2111 result = ieee80211_crypto_hw_decrypt(rx);
2112 }
2113
2114 /* the hdr variable is invalid after the decrypt handlers */
2115
2116 /* either the frame has been decrypted or will be dropped */
2117 status->flag |= RX_FLAG_DECRYPTED;
2118
2119 if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE))
2120 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2121 skb->data, skb->len);
2122
2123 return result;
2124 }
2125
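/*
 * Take the next slot in the interface's fragment cache (round-robin,
 * purging whatever an overwritten entry still held) and store the
 * first fragment of a new frame in it.
 */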
2126 static inline struct ieee80211_fragment_entry *
2127 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
2128 unsigned int frag, unsigned int seq, int rx_queue,
2129 struct sk_buff **skb)
2130 {
2131 struct ieee80211_fragment_entry *entry;
2132
2133 entry = &sdata->fragments[sdata->fragment_next++];
2134 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
2135 sdata->fragment_next = 0;
2136
2137 if (!skb_queue_empty(&entry->skb_list))
2138 __skb_queue_purge(&entry->skb_list);
2139
2140 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
2141 *skb = NULL;
2142 entry->first_frag_time = jiffies;
2143 entry->seq = seq;
2144 entry->rx_queue = rx_queue;
2145 entry->last_frag = frag;
2146 entry->check_sequential_pn = false;
2147 entry->extra_len = 0;
2148
2149 return entry;
2150 }
2151
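/*
 * Find the pending fragment cache entry that this fragment continues:
 * sequence number, RX queue, fragment number, frame type and
 * addresses must all match. Entries older than two seconds are
 * purged instead of matched.
 */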
2152 static inline struct ieee80211_fragment_entry *
2153 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
2154 unsigned int frag, unsigned int seq,
2155 int rx_queue, struct ieee80211_hdr *hdr)
2156 {
2157 struct ieee80211_fragment_entry *entry;
2158 int i, idx;
2159
2160 idx = sdata->fragment_next;
2161 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
2162 struct ieee80211_hdr *f_hdr;
2163 struct sk_buff *f_skb;
2164
2165 idx--;
2166 if (idx < 0)
2167 idx = IEEE80211_FRAGMENT_MAX - 1;
2168
2169 entry = &sdata->fragments[idx];
2170 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
2171 entry->rx_queue != rx_queue ||
2172 entry->last_frag + 1 != frag)
2173 continue;
2174
2175 f_skb = __skb_peek(&entry->skb_list);
2176 f_hdr = (struct ieee80211_hdr *) f_skb->data;
2177
2178 /*
2179 * Check ftype and addresses are equal, else check next fragment
2180 */
2181 if (((hdr->frame_control ^ f_hdr->frame_control) &
2182 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
2183 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
2184 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
2185 continue;
2186
2187 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
2188 __skb_queue_purge(&entry->skb_list);
2189 continue;
2190 }
2191 return entry;
2192 }
2193
2194 return NULL;
2195 }
2196
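/*
 * RX handler: defragmentation. The first fragment of a frame opens a
 * fragment cache entry (remembering the CCMP/GCMP PN so that later
 * fragments can be required to have sequential PNs); subsequent
 * fragments are appended to the matching entry, and the full frame is
 * reassembled once the fragment without the "more fragments" bit
 * arrives.
 */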
2197 static ieee80211_rx_result debug_noinline
2198 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2199 {
2200 struct ieee80211_hdr *hdr;
2201 u16 sc;
2202 __le16 fc;
2203 unsigned int frag, seq;
2204 struct ieee80211_fragment_entry *entry;
2205 struct sk_buff *skb;
2206
2207 hdr = (struct ieee80211_hdr *)rx->skb->data;
2208 fc = hdr->frame_control;
2209
2210 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
2211 return RX_CONTINUE;
2212
2213 sc = le16_to_cpu(hdr->seq_ctrl);
2214 frag = sc & IEEE80211_SCTL_FRAG;
2215
2216 if (is_multicast_ether_addr(hdr->addr1)) {
2217 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
2218 goto out_no_led;
2219 }
2220
2221 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2222 goto out;
2223
2224 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2225
2226 if (skb_linearize(rx->skb))
2227 return RX_DROP_UNUSABLE;
2228
2229 /*
2230 * skb_linearize() might change the skb->data and
2231 * previously cached variables (in this case, hdr) need to
2232 * be refreshed with the new data.
2233 */
2234 hdr = (struct ieee80211_hdr *)rx->skb->data;
2235 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2236
2237 if (frag == 0) {
2238 /* This is the first fragment of a new frame. */
2239 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
2240 rx->seqno_idx, &(rx->skb));
2241 if (rx->key &&
2242 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2243 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2244 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2245 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2246 ieee80211_has_protected(fc)) {
2247 int queue = rx->security_idx;
2248
2249 /* Store CCMP/GCMP PN so that we can verify that the
2250 * next fragment has a sequential PN value.
2251 */
2252 entry->check_sequential_pn = true;
2253 memcpy(entry->last_pn,
2254 rx->key->u.ccmp.rx_pn[queue],
2255 IEEE80211_CCMP_PN_LEN);
2256 BUILD_BUG_ON(offsetof(struct ieee80211_key,
2257 u.ccmp.rx_pn) !=
2258 offsetof(struct ieee80211_key,
2259 u.gcmp.rx_pn));
2260 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2261 sizeof(rx->key->u.gcmp.rx_pn[queue]));
2262 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2263 IEEE80211_GCMP_PN_LEN);
2264 }
2265 return RX_QUEUED;
2266 }
2267
2268 /* This is a fragment for a frame that should already be pending in
2269 * the fragment cache. Add this fragment to the end of the pending entry.
2270 */
2271 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
2272 rx->seqno_idx, hdr);
2273 if (!entry) {
2274 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2275 return RX_DROP_MONITOR;
2276 }
2277
2278 /* "The receiver shall discard MSDUs and MMPDUs whose constituent
2279 * MPDU PN values are not incrementing in steps of 1."
2280 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2281 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2282 */
2283 if (entry->check_sequential_pn) {
2284 int i;
2285 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2286 int queue;
2287
2288 if (!rx->key ||
2289 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
2290 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
2291 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
2292 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
2293 return RX_DROP_UNUSABLE;
2294 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
2295 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2296 pn[i]++;
2297 if (pn[i])
2298 break;
2299 }
2300 queue = rx->security_idx;
2301 rpn = rx->key->u.ccmp.rx_pn[queue];
2302 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2303 return RX_DROP_UNUSABLE;
2304 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2305 }
2306
2307 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2308 __skb_queue_tail(&entry->skb_list, rx->skb);
2309 entry->last_frag = frag;
2310 entry->extra_len += rx->skb->len;
2311 if (ieee80211_has_morefrags(fc)) {
2312 rx->skb = NULL;
2313 return RX_QUEUED;
2314 }
2315
2316 rx->skb = __skb_dequeue(&entry->skb_list);
2317 if (skb_tailroom(rx->skb) < entry->extra_len) {
2318 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2319 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2320 GFP_ATOMIC))) {
2321 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2322 __skb_queue_purge(&entry->skb_list);
2323 return RX_DROP_UNUSABLE;
2324 }
2325 }
2326 while ((skb = __skb_dequeue(&entry->skb_list))) {
2327 skb_put_data(rx->skb, skb->data, skb->len);
2328 dev_kfree_skb(skb);
2329 }
2330
2331 out:
2332 ieee80211_led_rx(rx->local);
2333 out_no_led:
2334 if (rx->sta)
2335 rx->sta->rx_stats.packets++;
2336 return RX_CONTINUE;
2337 }
2338
2339 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2340 {
2341 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2342 return -EACCES;
2343
2344 return 0;
2345 }
2346
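/*
 * Drop unencrypted data frames (other than nullfunc) when a key is
 * configured for the receiver. Frames already decrypted by hardware
 * pass, as do mesh control-port (EAPOL) frames addressed to us.
 */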
2347 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2348 {
2349 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
2350 struct sk_buff *skb = rx->skb;
2351 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2352
2353 /*
2354 * Pass through unencrypted frames if the hardware has
2355 * decrypted them already.
2356 */
2357 if (status->flag & RX_FLAG_DECRYPTED)
2358 return 0;
2359
2360 /* check mesh EAPOL frames first */
2361 if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
2362 ieee80211_is_data(fc))) {
2363 struct ieee80211s_hdr *mesh_hdr;
2364 u16 hdr_len = ieee80211_hdrlen(fc);
2365 u16 ethertype_offset;
2366 __be16 ethertype;
2367
2368 if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
2369 goto drop_check;
2370
2371 /* make sure fixed part of mesh header is there, also checks skb len */
2372 if (!pskb_may_pull(rx->skb, hdr_len + 6))
2373 goto drop_check;
2374
2375 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
2376 ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
2377 sizeof(rfc1042_header);
2378
2379 if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
2380 ethertype == rx->sdata->control_port_protocol)
2381 return 0;
2382 }
2383
2384 drop_check:
2385 /* Drop unencrypted frames if key is set. */
2386 if (unlikely(!ieee80211_has_protected(fc) &&
2387 !ieee80211_is_any_nullfunc(fc) &&
2388 ieee80211_is_data(fc) && rx->key))
2389 return -EACCES;
2390
2391 return 0;
2392 }
2393
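/*
 * Enforce management frame protection: when the peer uses MFP, drop
 * unprotected robust management frames, multicast robust management
 * frames without an MMIE, beacons without an MMIE while a beacon
 * protection key is set, and robust Action frames received before
 * keys are configured. Dropped deauth/disassoc/beacon frames are
 * reported to cfg80211.
 */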
2394 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2395 {
2396 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2397 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2398 __le16 fc = hdr->frame_control;
2399
2400 /*
2401 * Pass through unencrypted frames if the hardware has
2402 * decrypted them already.
2403 */
2404 if (status->flag & RX_FLAG_DECRYPTED)
2405 return 0;
2406
2407 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2408 if (unlikely(!ieee80211_has_protected(fc) &&
2409 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2410 rx->key)) {
2411 if (ieee80211_is_deauth(fc) ||
2412 ieee80211_is_disassoc(fc))
2413 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2414 rx->skb->data,
2415 rx->skb->len);
2416 return -EACCES;
2417 }
2418 /* BIP does not use Protected field, so need to check MMIE */
2419 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2420 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2421 if (ieee80211_is_deauth(fc) ||
2422 ieee80211_is_disassoc(fc))
2423 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2424 rx->skb->data,
2425 rx->skb->len);
2426 return -EACCES;
2427 }
2428 if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
2429 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2430 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2431 rx->skb->data,
2432 rx->skb->len);
2433 return -EACCES;
2434 }
2435 /*
2436 * When using MFP, Action frames are not allowed prior to
2437 * having configured keys.
2438 */
2439 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2440 ieee80211_is_robust_mgmt_frame(rx->skb)))
2441 return -EACCES;
2442 }
2443
2444 return 0;
2445 }
2446
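/*
 * Convert the 802.11 data frame in rx->skb into an 802.3 frame,
 * rejecting 4-address frames on interfaces not configured for them,
 * and report whether the result carries the control port (EAPOL)
 * protocol.
 */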
2447 static int
2448 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2449 {
2450 struct ieee80211_sub_if_data *sdata = rx->sdata;
2451 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2452 bool check_port_control = false;
2453 struct ethhdr *ehdr;
2454 int ret;
2455
2456 *port_control = false;
2457 if (ieee80211_has_a4(hdr->frame_control) &&
2458 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2459 return -1;
2460
2461 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2462 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2463
2464 if (!sdata->u.mgd.use_4addr)
2465 return -1;
2466 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
2467 check_port_control = true;
2468 }
2469
2470 if (is_multicast_ether_addr(hdr->addr1) &&
2471 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2472 return -1;
2473
2474 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2475 if (ret < 0)
2476 return ret;
2477
2478 ehdr = (struct ethhdr *) rx->skb->data;
2479 if (ehdr->h_proto == rx->sdata->control_port_protocol)
2480 *port_control = true;
2481 else if (check_port_control)
2482 return -1;
2483
2484 return 0;
2485 }
2486
2487 /*
2488 * requires that rx->skb is a frame with ethernet header
2489 */
2490 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2491 {
2492 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2493 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2494 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2495
2496 /*
2497 * Allow EAPOL frames to us/the PAE group address regardless
2498 * of whether the frame was encrypted or not.
2499 */
2500 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
2501 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
2502 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
2503 return true;
2504
2505 if (ieee80211_802_1x_port_control(rx) ||
2506 ieee80211_drop_unencrypted(rx, fc))
2507 return false;
2508
2509 return true;
2510 }
2511
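/*
 * Hand a converted frame to the host: control port (EAPOL, and
 * optionally preauth) frames go to userspace over nl80211 when
 * control_port_over_nl80211 is set, everything else goes to the
 * normal network stack (batched on rx->list when one is provided).
 */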
2512 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2513 struct ieee80211_rx_data *rx)
2514 {
2515 struct ieee80211_sub_if_data *sdata = rx->sdata;
2516 struct net_device *dev = sdata->dev;
2517
2518 if (unlikely((skb->protocol == sdata->control_port_protocol ||
2519 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
2520 !sdata->control_port_no_preauth)) &&
2521 sdata->control_port_over_nl80211)) {
2522 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2523 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2524
2525 cfg80211_rx_control_port(dev, skb, noencrypt);
2526 dev_kfree_skb(skb);
2527 } else {
2528 memset(skb->cb, 0, sizeof(skb->cb));
2529
2530 /* deliver to local stack */
2531 if (rx->list)
2532 list_add_tail(&skb->list, rx->list);
2533 else
2534 netif_receive_skb(skb);
2535 }
2536 }
2537
2538 /*
2539 * requires that rx->skb is a frame with ethernet header
2540 */
2541 static void
2542 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2543 {
2544 struct ieee80211_sub_if_data *sdata = rx->sdata;
2545 struct net_device *dev = sdata->dev;
2546 struct sk_buff *skb, *xmit_skb;
2547 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2548 struct sta_info *dsta;
2549
2550 skb = rx->skb;
2551 xmit_skb = NULL;
2552
2553 dev_sw_netstats_rx_add(dev, skb->len);
2554
2555 if (rx->sta) {
2556 /* The seqno index has the same property as needed
2557 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2558 * for non-QoS-data frames. Here we know it's a data
2559 * frame, so count MSDUs.
2560 */
2561 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
2562 rx->sta->rx_stats.msdu[rx->seqno_idx]++;
2563 u64_stats_update_end(&rx->sta->rx_stats.syncp);
2564 }
2565
2566 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2567 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2568 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2569 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2570 if (is_multicast_ether_addr(ehdr->h_dest) &&
2571 ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2572 /*
2573 * send multicast frames both to higher layers in
2574 * the local net stack and back to the wireless medium
2575 */
2576 xmit_skb = skb_copy(skb, GFP_ATOMIC);
2577 if (!xmit_skb)
2578 net_info_ratelimited("%s: failed to clone multicast frame\n",
2579 dev->name);
2580 } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
2581 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
2582 dsta = sta_info_get(sdata, ehdr->h_dest);
2583 if (dsta) {
2584 /*
2585 * The destination station is associated to
2586 * this AP (in this VLAN), so send the frame
2587 * directly to it and do not pass it to local
2588 * net stack.
2589 */
2590 xmit_skb = skb;
2591 skb = NULL;
2592 }
2593 }
2594 }
2595
2596 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2597 if (skb) {
2598 /* 'align' will only take the values 0 or 2 here since all
2599 * frames are required to be aligned to 2-byte boundaries
2600 * when being passed to mac80211; the code here works just
2601 * as well if that isn't true, but mac80211 assumes it can
2602 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2603 */
2604 int align;
2605
2606 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2607 if (align) {
2608 if (WARN_ON(skb_headroom(skb) < 3)) {
2609 dev_kfree_skb(skb);
2610 skb = NULL;
2611 } else {
2612 u8 *data = skb->data;
2613 size_t len = skb_headlen(skb);
2614 skb->data -= align;
2615 memmove(skb->data, data, len);
2616 skb_set_tail_pointer(skb, len);
2617 }
2618 }
2619 }
2620 #endif
2621
2622 if (skb) {
2623 skb->protocol = eth_type_trans(skb, dev);
2624 ieee80211_deliver_skb_to_local_stack(skb, rx);
2625 }
2626
2627 if (xmit_skb) {
2628 /*
2629 * Send to wireless media and increase priority by 256 to
2630 * keep the received priority instead of reclassifying
2631 * the frame (see cfg80211_classify8021d).
2632 */
2633 xmit_skb->priority += 256;
2634 xmit_skb->protocol = htons(ETH_P_802_3);
2635 skb_reset_network_header(xmit_skb);
2636 skb_reset_mac_header(xmit_skb);
2637 dev_queue_xmit(xmit_skb);
2638 }
2639 }
2640
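/*
 * Unpack an A-MSDU: convert the outer frame, split it into individual
 * MSDUs with ieee80211_amsdu_to_8023s() (validating DA/SA depending on
 * the interface type) and deliver each subframe that passes the usual
 * frame_allowed checks.
 */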
2641 static ieee80211_rx_result debug_noinline
2642 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2643 {
2644 struct net_device *dev = rx->sdata->dev;
2645 struct sk_buff *skb = rx->skb;
2646 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2647 __le16 fc = hdr->frame_control;
2648 struct sk_buff_head frame_list;
2649 struct ethhdr ethhdr;
2650 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2651
2652 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2653 check_da = NULL;
2654 check_sa = NULL;
2655 } else switch (rx->sdata->vif.type) {
2656 case NL80211_IFTYPE_AP:
2657 case NL80211_IFTYPE_AP_VLAN:
2658 check_da = NULL;
2659 break;
2660 case NL80211_IFTYPE_STATION:
2661 if (!rx->sta ||
2662 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2663 check_sa = NULL;
2664 break;
2665 case NL80211_IFTYPE_MESH_POINT:
2666 check_sa = NULL;
2667 break;
2668 default:
2669 break;
2670 }
2671
2672 skb->dev = dev;
2673 __skb_queue_head_init(&frame_list);
2674
2675 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2676 rx->sdata->vif.addr,
2677 rx->sdata->vif.type,
2678 data_offset))
2679 return RX_DROP_UNUSABLE;
2680
2681 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2682 rx->sdata->vif.type,
2683 rx->local->hw.extra_tx_headroom,
2684 check_da, check_sa);
2685
2686 while (!skb_queue_empty(&frame_list)) {
2687 rx->skb = __skb_dequeue(&frame_list);
2688
2689 if (!ieee80211_frame_allowed(rx, fc)) {
2690 dev_kfree_skb(rx->skb);
2691 continue;
2692 }
2693
2694 ieee80211_deliver_skb(rx);
2695 }
2696
2697 return RX_QUEUED;
2698 }
2699
2700 static ieee80211_rx_result debug_noinline
2701 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2702 {
2703 struct sk_buff *skb = rx->skb;
2704 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2705 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2706 __le16 fc = hdr->frame_control;
2707
2708 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2709 return RX_CONTINUE;
2710
2711 if (unlikely(!ieee80211_is_data(fc)))
2712 return RX_CONTINUE;
2713
2714 if (unlikely(!ieee80211_is_data_present(fc)))
2715 return RX_DROP_MONITOR;
2716
2717 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2718 switch (rx->sdata->vif.type) {
2719 case NL80211_IFTYPE_AP_VLAN:
2720 if (!rx->sdata->u.vlan.sta)
2721 return RX_DROP_UNUSABLE;
2722 break;
2723 case NL80211_IFTYPE_STATION:
2724 if (!rx->sdata->u.mgd.use_4addr)
2725 return RX_DROP_UNUSABLE;
2726 break;
2727 default:
2728 return RX_DROP_UNUSABLE;
2729 }
2730 }
2731
2732 if (is_multicast_ether_addr(hdr->addr1))
2733 return RX_DROP_UNUSABLE;
2734
2735 return __ieee80211_rx_h_amsdu(rx, 0);
2736 }
2737
2738 #ifdef CONFIG_MAC80211_MESH
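/*
 * Mesh forwarding: learn proxy paths from address-extension headers,
 * let frames destined to this interface continue up the stack, and
 * forward other data frames along the mesh path (decrementing the TTL)
 * when dot11MeshForwarding is enabled.
 */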
2739 static ieee80211_rx_result
2740 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2741 {
2742 struct ieee80211_hdr *fwd_hdr, *hdr;
2743 struct ieee80211_tx_info *info;
2744 struct ieee80211s_hdr *mesh_hdr;
2745 struct sk_buff *skb = rx->skb, *fwd_skb;
2746 struct ieee80211_local *local = rx->local;
2747 struct ieee80211_sub_if_data *sdata = rx->sdata;
2748 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2749 u16 ac, q, hdrlen;
2750 int tailroom = 0;
2751
2752 hdr = (struct ieee80211_hdr *) skb->data;
2753 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2754
2755 /* make sure fixed part of mesh header is there, also checks skb len */
2756 if (!pskb_may_pull(rx->skb, hdrlen + 6))
2757 return RX_DROP_MONITOR;
2758
2759 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2760
2761 /* make sure full mesh header is there, also checks skb len */
2762 if (!pskb_may_pull(rx->skb,
2763 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2764 return RX_DROP_MONITOR;
2765
2766 /* reload pointers */
2767 hdr = (struct ieee80211_hdr *) skb->data;
2768 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2769
2770 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2771 return RX_DROP_MONITOR;
2772
2773 /* frame is in RMC, don't forward */
2774 if (ieee80211_is_data(hdr->frame_control) &&
2775 is_multicast_ether_addr(hdr->addr1) &&
2776 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2777 return RX_DROP_MONITOR;
2778
2779 if (!ieee80211_is_data(hdr->frame_control))
2780 return RX_CONTINUE;
2781
2782 if (!mesh_hdr->ttl)
2783 return RX_DROP_MONITOR;
2784
2785 if (mesh_hdr->flags & MESH_FLAGS_AE) {
2786 struct mesh_path *mppath;
2787 char *proxied_addr;
2788 char *mpp_addr;
2789
2790 if (is_multicast_ether_addr(hdr->addr1)) {
2791 mpp_addr = hdr->addr3;
2792 proxied_addr = mesh_hdr->eaddr1;
2793 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2794 MESH_FLAGS_AE_A5_A6) {
2795 /* has_a4 already checked in ieee80211_rx_mesh_check */
2796 mpp_addr = hdr->addr4;
2797 proxied_addr = mesh_hdr->eaddr2;
2798 } else {
2799 return RX_DROP_MONITOR;
2800 }
2801
2802 rcu_read_lock();
2803 mppath = mpp_path_lookup(sdata, proxied_addr);
2804 if (!mppath) {
2805 mpp_path_add(sdata, proxied_addr, mpp_addr);
2806 } else {
2807 spin_lock_bh(&mppath->state_lock);
2808 if (!ether_addr_equal(mppath->mpp, mpp_addr))
2809 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2810 mppath->exp_time = jiffies;
2811 spin_unlock_bh(&mppath->state_lock);
2812 }
2813 rcu_read_unlock();
2814 }
2815
2816 /* Frame has reached destination. Don't forward */
2817 if (!is_multicast_ether_addr(hdr->addr1) &&
2818 ether_addr_equal(sdata->vif.addr, hdr->addr3))
2819 return RX_CONTINUE;
2820
2821 ac = ieee80211_select_queue_80211(sdata, skb, hdr);
2822 q = sdata->vif.hw_queue[ac];
2823 if (ieee80211_queue_stopped(&local->hw, q)) {
2824 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2825 return RX_DROP_MONITOR;
2826 }
2827 skb_set_queue_mapping(skb, q);
2828
2829 if (!--mesh_hdr->ttl) {
2830 if (!is_multicast_ether_addr(hdr->addr1))
2831 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
2832 dropped_frames_ttl);
2833 goto out;
2834 }
2835
2836 if (!ifmsh->mshcfg.dot11MeshForwarding)
2837 goto out;
2838
2839 if (sdata->crypto_tx_tailroom_needed_cnt)
2840 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2841
2842 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2843 sdata->encrypt_headroom,
2844 tailroom, GFP_ATOMIC);
2845 if (!fwd_skb)
2846 goto out;
2847
2848 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
2849 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2850 info = IEEE80211_SKB_CB(fwd_skb);
2851 memset(info, 0, sizeof(*info));
2852 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
2853 info->control.vif = &rx->sdata->vif;
2854 info->control.jiffies = jiffies;
2855 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2856 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2857 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2858 /* update power mode indication when forwarding */
2859 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2860 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2861 /* mesh power mode flags updated in mesh_nexthop_lookup */
2862 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2863 } else {
2864 /* unable to resolve next hop */
2865 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2866 fwd_hdr->addr3, 0,
2867 WLAN_REASON_MESH_PATH_NOFORWARD,
2868 fwd_hdr->addr2);
2869 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2870 kfree_skb(fwd_skb);
2871 return RX_DROP_MONITOR;
2872 }
2873
2874 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2875 ieee80211_add_pending_skb(local, fwd_skb);
2876 out:
2877 if (is_multicast_ether_addr(hdr->addr1))
2878 return RX_CONTINUE;
2879 return RX_DROP_MONITOR;
2880 }
2881 #endif
2882
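/*
 * RX handler for data frames: convert to 802.3, apply the 802.1X port
 * and encryption checks, divert TDLS channel-switch frames to the TDLS
 * work, and deliver everything else to the owning interface (possibly
 * the underlying AP interface for VLAN control-port frames).
 */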
2883 static ieee80211_rx_result debug_noinline
2884 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2885 {
2886 struct ieee80211_sub_if_data *sdata = rx->sdata;
2887 struct ieee80211_local *local = rx->local;
2888 struct net_device *dev = sdata->dev;
2889 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2890 __le16 fc = hdr->frame_control;
2891 bool port_control;
2892 int err;
2893
2894 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2895 return RX_CONTINUE;
2896
2897 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2898 return RX_DROP_MONITOR;
2899
2900 /*
2901 * Send unexpected-4addr-frame event to hostapd. For older versions,
2902 * also drop the frame to cooked monitor interfaces.
2903 */
2904 if (ieee80211_has_a4(hdr->frame_control) &&
2905 sdata->vif.type == NL80211_IFTYPE_AP) {
2906 if (rx->sta &&
2907 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2908 cfg80211_rx_unexpected_4addr_frame(
2909 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2910 return RX_DROP_MONITOR;
2911 }
2912
2913 err = __ieee80211_data_to_8023(rx, &port_control);
2914 if (unlikely(err))
2915 return RX_DROP_UNUSABLE;
2916
2917 if (!ieee80211_frame_allowed(rx, fc))
2918 return RX_DROP_MONITOR;
2919
2920 /* directly handle TDLS channel switch requests/responses */
2921 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
2922 cpu_to_be16(ETH_P_TDLS))) {
2923 struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
2924
2925 if (pskb_may_pull(rx->skb,
2926 offsetof(struct ieee80211_tdls_data, u)) &&
2927 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
2928 tf->category == WLAN_CATEGORY_TDLS &&
2929 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
2930 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
2931 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
2932 schedule_work(&local->tdls_chsw_work);
2933 if (rx->sta)
2934 rx->sta->rx_stats.packets++;
2935
2936 return RX_QUEUED;
2937 }
2938 }
2939
2940 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2941 unlikely(port_control) && sdata->bss) {
2942 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2943 u.ap);
2944 dev = sdata->dev;
2945 rx->sdata = sdata;
2946 }
2947
2948 rx->skb->dev = dev;
2949
2950 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2951 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2952 !is_multicast_ether_addr(
2953 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2954 (!local->scanning &&
2955 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
2956 mod_timer(&local->dynamic_ps_timer, jiffies +
2957 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2958
2959 ieee80211_deliver_skb(rx);
2960
2961 return RX_QUEUED;
2962 }
2963
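/*
 * RX handler for control frames: only BlockAck requests are handled
 * here, releasing buffered frames from the reorder buffer up to the
 * BAR's starting sequence number; all other control frames end up in
 * cooked monitor.
 */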
2964 static ieee80211_rx_result debug_noinline
2965 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2966 {
2967 struct sk_buff *skb = rx->skb;
2968 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2969 struct tid_ampdu_rx *tid_agg_rx;
2970 u16 start_seq_num;
2971 u16 tid;
2972
2973 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2974 return RX_CONTINUE;
2975
2976 if (ieee80211_is_back_req(bar->frame_control)) {
2977 struct {
2978 __le16 control, start_seq_num;
2979 } __packed bar_data;
2980 struct ieee80211_event event = {
2981 .type = BAR_RX_EVENT,
2982 };
2983
2984 if (!rx->sta)
2985 return RX_DROP_MONITOR;
2986
2987 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2988 &bar_data, sizeof(bar_data)))
2989 return RX_DROP_MONITOR;
2990
2991 tid = le16_to_cpu(bar_data.control) >> 12;
2992
2993 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
2994 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
2995 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
2996 WLAN_BACK_RECIPIENT,
2997 WLAN_REASON_QSTA_REQUIRE_SETUP);
2998
2999 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
3000 if (!tid_agg_rx)
3001 return RX_DROP_MONITOR;
3002
3003 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
3004 event.u.ba.tid = tid;
3005 event.u.ba.ssn = start_seq_num;
3006 event.u.ba.sta = &rx->sta->sta;
3007
3008 /* reset session timer */
3009 if (tid_agg_rx->timeout)
3010 mod_timer(&tid_agg_rx->session_timer,
3011 TU_TO_EXP_TIME(tid_agg_rx->timeout));
3012
3013 spin_lock(&tid_agg_rx->reorder_lock);
3014 /* release stored frames up to start of BAR */
3015 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
3016 start_seq_num, frames);
3017 spin_unlock(&tid_agg_rx->reorder_lock);
3018
3019 drv_event_callback(rx->local, rx->sdata, &event);
3020
3021 kfree_skb(skb);
3022 return RX_QUEUED;
3023 }
3024
3025 /*
3026 * After this point, we only want management frames,
3027 * so we can drop all remaining control frames to
3028 * cooked monitor interfaces.
3029 */
3030 return RX_DROP_MONITOR;
3031 }
3032
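/*
 * Answer an SA Query Request from the current AP with an SA Query
 * Response that echoes the transaction ID.
 */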
3033 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
3034 struct ieee80211_mgmt *mgmt,
3035 size_t len)
3036 {
3037 struct ieee80211_local *local = sdata->local;
3038 struct sk_buff *skb;
3039 struct ieee80211_mgmt *resp;
3040
3041 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
3042 /* Not to own unicast address */
3043 return;
3044 }
3045
3046 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
3047 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
3048 /* Not from the current AP or not associated yet. */
3049 return;
3050 }
3051
3052 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
3053 /* Too short SA Query request frame */
3054 return;
3055 }
3056
3057 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
3058 if (skb == NULL)
3059 return;
3060
3061 skb_reserve(skb, local->hw.extra_tx_headroom);
3062 resp = skb_put_zero(skb, 24);
3063 memcpy(resp->da, mgmt->sa, ETH_ALEN);
3064 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
3065 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
3066 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3067 IEEE80211_STYPE_ACTION);
3068 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
3069 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
3070 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
3071 memcpy(resp->u.action.u.sa_query.trans_id,
3072 mgmt->u.action.u.sa_query.trans_id,
3073 WLAN_SA_QUERY_TR_ID_LEN);
3074
3075 ieee80211_tx_skb(sdata, skb);
3076 }
3077
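/*
 * Basic management frame checks: drop frames that are too short or not
 * management frames, report OBSS beacons received on AP interfaces to
 * cfg80211, and apply the management frame protection rules.
 */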
3078 static ieee80211_rx_result debug_noinline
3079 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
3080 {
3081 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3082 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3083
3084 if (ieee80211_is_s1g_beacon(mgmt->frame_control))
3085 return RX_CONTINUE;
3086
3087 /*
3088 * From here on, look only at management frames.
3089 * Data and control frames are already handled,
3090 * and unknown (reserved) frames are useless.
3091 */
3092 if (rx->skb->len < 24)
3093 return RX_DROP_MONITOR;
3094
3095 if (!ieee80211_is_mgmt(mgmt->frame_control))
3096 return RX_DROP_MONITOR;
3097
3098 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
3099 ieee80211_is_beacon(mgmt->frame_control) &&
3100 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
3101 int sig = 0;
3102
3103 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3104 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3105 sig = status->signal;
3106
3107 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
3108 rx->skb->data, rx->skb->len,
3109 ieee80211_rx_status_to_khz(status),
3110 sig);
3111 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
3112 }
3113
3114 if (ieee80211_drop_unencrypted_mgmt(rx))
3115 return RX_DROP_UNUSABLE;
3116
3117 return RX_CONTINUE;
3118 }
3119
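/*
 * Dispatch Action frames by category (HT SMPS / channel width
 * notifications, public action, VHT, BlockAck setup and teardown,
 * spectrum management, self-protected and mesh action), validating the
 * minimum length for each variant before handling it here or queueing
 * it to the interface work.
 */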
3120 static ieee80211_rx_result debug_noinline
3121 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
3122 {
3123 struct ieee80211_local *local = rx->local;
3124 struct ieee80211_sub_if_data *sdata = rx->sdata;
3125 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3126 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3127 int len = rx->skb->len;
3128
3129 if (!ieee80211_is_action(mgmt->frame_control))
3130 return RX_CONTINUE;
3131
3132 /* drop too small frames */
3133 if (len < IEEE80211_MIN_ACTION_SIZE)
3134 return RX_DROP_UNUSABLE;
3135
3136 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
3137 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
3138 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
3139 return RX_DROP_UNUSABLE;
3140
3141 switch (mgmt->u.action.category) {
3142 case WLAN_CATEGORY_HT:
3143 /* reject HT action frames from stations not supporting HT */
3144 if (!rx->sta->sta.ht_cap.ht_supported)
3145 goto invalid;
3146
3147 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3148 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3149 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3150 sdata->vif.type != NL80211_IFTYPE_AP &&
3151 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3152 break;
3153
3154 /* verify action & smps_control/chanwidth are present */
3155 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3156 goto invalid;
3157
3158 switch (mgmt->u.action.u.ht_smps.action) {
3159 case WLAN_HT_ACTION_SMPS: {
3160 struct ieee80211_supported_band *sband;
3161 enum ieee80211_smps_mode smps_mode;
3162 struct sta_opmode_info sta_opmode = {};
3163
3164 if (sdata->vif.type != NL80211_IFTYPE_AP &&
3165 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
3166 goto handled;
3167
3168 /* convert to HT capability */
3169 switch (mgmt->u.action.u.ht_smps.smps_control) {
3170 case WLAN_HT_SMPS_CONTROL_DISABLED:
3171 smps_mode = IEEE80211_SMPS_OFF;
3172 break;
3173 case WLAN_HT_SMPS_CONTROL_STATIC:
3174 smps_mode = IEEE80211_SMPS_STATIC;
3175 break;
3176 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
3177 smps_mode = IEEE80211_SMPS_DYNAMIC;
3178 break;
3179 default:
3180 goto invalid;
3181 }
3182
3183 /* if no change do nothing */
3184 if (rx->sta->sta.smps_mode == smps_mode)
3185 goto handled;
3186 rx->sta->sta.smps_mode = smps_mode;
3187 sta_opmode.smps_mode =
3188 ieee80211_smps_mode_to_smps_mode(smps_mode);
3189 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
3190
3191 sband = rx->local->hw.wiphy->bands[status->band];
3192
3193 rate_control_rate_update(local, sband, rx->sta,
3194 IEEE80211_RC_SMPS_CHANGED);
3195 cfg80211_sta_opmode_change_notify(sdata->dev,
3196 rx->sta->addr,
3197 &sta_opmode,
3198 GFP_ATOMIC);
3199 goto handled;
3200 }
3201 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
3202 struct ieee80211_supported_band *sband;
3203 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
3204 enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
3205 struct sta_opmode_info sta_opmode = {};
3206
3207 /* If it doesn't support 40 MHz it can't change ... */
3208 if (!(rx->sta->sta.ht_cap.cap &
3209 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3210 goto handled;
3211
3212 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
3213 max_bw = IEEE80211_STA_RX_BW_20;
3214 else
3215 max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
3216
3217 /* set cur_max_bandwidth and recalc sta bw */
3218 rx->sta->cur_max_bandwidth = max_bw;
3219 new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
3220
3221 if (rx->sta->sta.bandwidth == new_bw)
3222 goto handled;
3223
3224 rx->sta->sta.bandwidth = new_bw;
3225 sband = rx->local->hw.wiphy->bands[status->band];
3226 sta_opmode.bw =
3227 ieee80211_sta_rx_bw_to_chan_width(rx->sta);
3228 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
3229
3230 rate_control_rate_update(local, sband, rx->sta,
3231 IEEE80211_RC_BW_CHANGED);
3232 cfg80211_sta_opmode_change_notify(sdata->dev,
3233 rx->sta->addr,
3234 &sta_opmode,
3235 GFP_ATOMIC);
3236 goto handled;
3237 }
3238 default:
3239 goto invalid;
3240 }
3241
3242 break;
3243 case WLAN_CATEGORY_PUBLIC:
3244 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3245 goto invalid;
3246 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3247 break;
3248 if (!rx->sta)
3249 break;
3250 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
3251 break;
3252 if (mgmt->u.action.u.ext_chan_switch.action_code !=
3253 WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3254 break;
3255 if (len < offsetof(struct ieee80211_mgmt,
3256 u.action.u.ext_chan_switch.variable))
3257 goto invalid;
3258 goto queue;
3259 case WLAN_CATEGORY_VHT:
3260 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3261 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3262 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3263 sdata->vif.type != NL80211_IFTYPE_AP &&
3264 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3265 break;
3266
3267 /* verify action code is present */
3268 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3269 goto invalid;
3270
3271 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3272 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3273 /* verify opmode is present */
3274 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3275 goto invalid;
3276 goto queue;
3277 }
3278 case WLAN_VHT_ACTION_GROUPID_MGMT: {
3279 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3280 goto invalid;
3281 goto queue;
3282 }
3283 default:
3284 break;
3285 }
3286 break;
3287 case WLAN_CATEGORY_BACK:
3288 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3289 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3290 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3291 sdata->vif.type != NL80211_IFTYPE_AP &&
3292 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3293 break;
3294
3295 /* verify action_code is present */
3296 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3297 break;
3298
3299 switch (mgmt->u.action.u.addba_req.action_code) {
3300 case WLAN_ACTION_ADDBA_REQ:
3301 if (len < (IEEE80211_MIN_ACTION_SIZE +
3302 sizeof(mgmt->u.action.u.addba_req)))
3303 goto invalid;
3304 break;
3305 case WLAN_ACTION_ADDBA_RESP:
3306 if (len < (IEEE80211_MIN_ACTION_SIZE +
3307 sizeof(mgmt->u.action.u.addba_resp)))
3308 goto invalid;
3309 break;
3310 case WLAN_ACTION_DELBA:
3311 if (len < (IEEE80211_MIN_ACTION_SIZE +
3312 sizeof(mgmt->u.action.u.delba)))
3313 goto invalid;
3314 break;
3315 default:
3316 goto invalid;
3317 }
3318
3319 goto queue;
3320 case WLAN_CATEGORY_SPECTRUM_MGMT:
3321 /* verify action_code is present */
3322 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3323 break;
3324
3325 switch (mgmt->u.action.u.measurement.action_code) {
3326 case WLAN_ACTION_SPCT_MSR_REQ:
3327 if (status->band != NL80211_BAND_5GHZ)
3328 break;
3329
3330 if (len < (IEEE80211_MIN_ACTION_SIZE +
3331 sizeof(mgmt->u.action.u.measurement)))
3332 break;
3333
3334 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3335 break;
3336
3337 ieee80211_process_measurement_req(sdata, mgmt, len);
3338 goto handled;
3339 case WLAN_ACTION_SPCT_CHL_SWITCH: {
3340 u8 *bssid;
3341 if (len < (IEEE80211_MIN_ACTION_SIZE +
3342 sizeof(mgmt->u.action.u.chan_switch)))
3343 break;
3344
3345 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3346 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3347 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3348 break;
3349
3350 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3351 bssid = sdata->u.mgd.bssid;
3352 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3353 bssid = sdata->u.ibss.bssid;
3354 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3355 bssid = mgmt->sa;
3356 else
3357 break;
3358
3359 if (!ether_addr_equal(mgmt->bssid, bssid))
3360 break;
3361
3362 goto queue;
3363 }
3364 }
3365 break;
3366 case WLAN_CATEGORY_SELF_PROTECTED:
3367 if (len < (IEEE80211_MIN_ACTION_SIZE +
3368 sizeof(mgmt->u.action.u.self_prot.action_code)))
3369 break;
3370
3371 switch (mgmt->u.action.u.self_prot.action_code) {
3372 case WLAN_SP_MESH_PEERING_OPEN:
3373 case WLAN_SP_MESH_PEERING_CLOSE:
3374 case WLAN_SP_MESH_PEERING_CONFIRM:
3375 if (!ieee80211_vif_is_mesh(&sdata->vif))
3376 goto invalid;
3377 if (sdata->u.mesh.user_mpm)
3378 /* userspace handles this frame */
3379 break;
3380 goto queue;
3381 case WLAN_SP_MGK_INFORM:
3382 case WLAN_SP_MGK_ACK:
3383 if (!ieee80211_vif_is_mesh(&sdata->vif))
3384 goto invalid;
3385 break;
3386 }
3387 break;
3388 case WLAN_CATEGORY_MESH_ACTION:
3389 if (len < (IEEE80211_MIN_ACTION_SIZE +
3390 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3391 break;
3392
3393 if (!ieee80211_vif_is_mesh(&sdata->vif))
3394 break;
3395 if (mesh_action_is_path_sel(mgmt) &&
3396 !mesh_path_sel_is_hwmp(sdata))
3397 break;
3398 goto queue;
3399 }
3400
3401 return RX_CONTINUE;
3402
3403 invalid:
3404 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3405 /* will return in the next handlers */
3406 return RX_CONTINUE;
3407
3408 handled:
3409 if (rx->sta)
3410 rx->sta->rx_stats.packets++;
3411 dev_kfree_skb(rx->skb);
3412 return RX_QUEUED;
3413
3414 queue:
3415 skb_queue_tail(&sdata->skb_queue, rx->skb);
3416 ieee80211_queue_work(&local->hw, &sdata->work);
3417 if (rx->sta)
3418 rx->sta->rx_stats.packets++;
3419 return RX_QUEUED;
3420 }
3421
3422 static ieee80211_rx_result debug_noinline
3423 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3424 {
3425 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3426 int sig = 0;
3427
3428 /* skip known-bad action frames and return them in the next handler */
3429 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3430 return RX_CONTINUE;
3431
3432 /*
3433 * Getting here means the kernel doesn't know how to handle
3434 * it, but maybe userspace does ... include returned frames
3435 * so userspace can register for those to know whether frames
3436 * it transmitted were processed or returned.
3437 */
3438
3439 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3440 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3441 sig = status->signal;
3442
3443 if (cfg80211_rx_mgmt_khz(&rx->sdata->wdev,
3444 ieee80211_rx_status_to_khz(status), sig,
3445 rx->skb->data, rx->skb->len, 0)) {
3446 if (rx->sta)
3447 rx->sta->rx_stats.packets++;
3448 dev_kfree_skb(rx->skb);
3449 return RX_QUEUED;
3450 }
3451
3452 return RX_CONTINUE;
3453 }
3454
3455 static ieee80211_rx_result debug_noinline
3456 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
3457 {
3458 struct ieee80211_sub_if_data *sdata = rx->sdata;
3459 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3460 int len = rx->skb->len;
3461
3462 if (!ieee80211_is_action(mgmt->frame_control))
3463 return RX_CONTINUE;
3464
3465 switch (mgmt->u.action.category) {
3466 case WLAN_CATEGORY_SA_QUERY:
3467 if (len < (IEEE80211_MIN_ACTION_SIZE +
3468 sizeof(mgmt->u.action.u.sa_query)))
3469 break;
3470
3471 switch (mgmt->u.action.u.sa_query.action) {
3472 case WLAN_ACTION_SA_QUERY_REQUEST:
3473 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3474 break;
3475 ieee80211_process_sa_query_req(sdata, mgmt, len);
3476 goto handled;
3477 }
3478 break;
3479 }
3480
3481 return RX_CONTINUE;
3482
3483 handled:
3484 if (rx->sta)
3485 rx->sta->rx_stats.packets++;
3486 dev_kfree_skb(rx->skb);
3487 return RX_QUEUED;
3488 }
3489
3490 static ieee80211_rx_result debug_noinline
3491 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3492 {
3493 struct ieee80211_local *local = rx->local;
3494 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3495 struct sk_buff *nskb;
3496 struct ieee80211_sub_if_data *sdata = rx->sdata;
3497 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3498
3499 if (!ieee80211_is_action(mgmt->frame_control))
3500 return RX_CONTINUE;
3501
3502 /*
3503 * For AP mode, hostapd is responsible for handling any action
3504 * frames that we didn't handle, including returning unknown
3505 * ones. For all other modes we will return them to the sender,
3506 * setting the 0x80 bit in the action category, as required by
3507 * 802.11-2012 9.24.4.
3508 * Newer versions of hostapd shall also use the management frame
3509 * registration mechanisms, but older ones still use cooked
3510 * monitor interfaces, so push all frames there.
3511 */
3512 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3513 (sdata->vif.type == NL80211_IFTYPE_AP ||
3514 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3515 return RX_DROP_MONITOR;
3516
3517 if (is_multicast_ether_addr(mgmt->da))
3518 return RX_DROP_MONITOR;
3519
3520 /* do not return rejected action frames */
3521 if (mgmt->u.action.category & 0x80)
3522 return RX_DROP_UNUSABLE;
3523
3524 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3525 GFP_ATOMIC);
3526 if (nskb) {
3527 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3528
3529 nmgmt->u.action.category |= 0x80;
3530 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3531 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3532
3533 memset(nskb->cb, 0, sizeof(nskb->cb));
3534
3535 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3536 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3537
3538 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3539 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3540 IEEE80211_TX_CTL_NO_CCK_RATE;
3541 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3542 info->hw_queue =
3543 local->hw.offchannel_tx_hw_queue;
3544 }
3545
3546 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
3547 status->band);
3548 }
3549 dev_kfree_skb(rx->skb);
3550 return RX_QUEUED;
3551 }
3552
3553 static ieee80211_rx_result debug_noinline
3554 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
3555 {
3556 struct ieee80211_sub_if_data *sdata = rx->sdata;
3557 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
3558
3559 if (!ieee80211_is_ext(hdr->frame_control))
3560 return RX_CONTINUE;
3561
3562 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3563 return RX_DROP_MONITOR;
3564
3565 /* for now only beacons are ext, so queue them */
3566 skb_queue_tail(&sdata->skb_queue, rx->skb);
3567 ieee80211_queue_work(&rx->local->hw, &sdata->work);
3568 if (rx->sta)
3569 rx->sta->rx_stats.packets++;
3570
3571 return RX_QUEUED;
3572 }
3573
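/*
 * Queue the remaining management frames (auth, beacon, probe
 * request/response, (re)assoc response, deauth, disassoc) to the
 * interface work, after filtering on interface type and subtype.
 */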
3574 static ieee80211_rx_result debug_noinline
3575 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3576 {
3577 struct ieee80211_sub_if_data *sdata = rx->sdata;
3578 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3579 __le16 stype;
3580
3581 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
3582
3583 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3584 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3585 sdata->vif.type != NL80211_IFTYPE_OCB &&
3586 sdata->vif.type != NL80211_IFTYPE_STATION)
3587 return RX_DROP_MONITOR;
3588
3589 switch (stype) {
3590 case cpu_to_le16(IEEE80211_STYPE_AUTH):
3591 case cpu_to_le16(IEEE80211_STYPE_BEACON):
3592 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3593 /* process for all: mesh, mlme, ibss */
3594 break;
3595 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3596 if (is_multicast_ether_addr(mgmt->da) &&
3597 !is_broadcast_ether_addr(mgmt->da))
3598 return RX_DROP_MONITOR;
3599
3600 /* process only for station/IBSS */
3601 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3602 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3603 return RX_DROP_MONITOR;
3604 break;
3605 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3606 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3607 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3608 if (is_multicast_ether_addr(mgmt->da) &&
3609 !is_broadcast_ether_addr(mgmt->da))
3610 return RX_DROP_MONITOR;
3611
3612 /* process only for station */
3613 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3614 return RX_DROP_MONITOR;
3615 break;
3616 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3617 /* process only for ibss and mesh */
3618 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3619 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3620 return RX_DROP_MONITOR;
3621 break;
3622 default:
3623 return RX_DROP_MONITOR;
3624 }
3625
3626 /* queue up frame and kick off work to process it */
3627 skb_queue_tail(&sdata->skb_queue, rx->skb);
3628 ieee80211_queue_work(&rx->local->hw, &sdata->work);
3629 if (rx->sta)
3630 rx->sta->rx_stats.packets++;
3631
3632 return RX_QUEUED;
3633 }
3634
3635 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3636 struct ieee80211_rate *rate)
3637 {
3638 struct ieee80211_sub_if_data *sdata;
3639 struct ieee80211_local *local = rx->local;
3640 struct sk_buff *skb = rx->skb, *skb2;
3641 struct net_device *prev_dev = NULL;
3642 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3643 int needed_headroom;
3644
3645 /*
3646 * If cooked monitor has been processed already, then
3647 * don't do it again. If not, set the flag.
3648 */
3649 if (rx->flags & IEEE80211_RX_CMNTR)
3650 goto out_free_skb;
3651 rx->flags |= IEEE80211_RX_CMNTR;
3652
3653 /* If there are no cooked monitor interfaces, just free the SKB */
3654 if (!local->cooked_mntrs)
3655 goto out_free_skb;
3656
3657 /* vendor data has long been removed at this point */
3658 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3659 /* room for the radiotap header based on driver features */
3660 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3661
3662 if (skb_headroom(skb) < needed_headroom &&
3663 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3664 goto out_free_skb;
3665
3666 /* prepend radiotap information */
3667 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3668 false);
3669
3670 skb_reset_mac_header(skb);
3671 skb->ip_summed = CHECKSUM_UNNECESSARY;
3672 skb->pkt_type = PACKET_OTHERHOST;
3673 skb->protocol = htons(ETH_P_802_2);
3674
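/*
 * Hand a clone of the skb to every running cooked monitor interface
 * except the last one found; the original skb is delivered to that
 * last interface below, saving one copy.
 */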
3675 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3676 if (!ieee80211_sdata_running(sdata))
3677 continue;
3678
3679 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3680 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3681 continue;
3682
3683 if (prev_dev) {
3684 skb2 = skb_clone(skb, GFP_ATOMIC);
3685 if (skb2) {
3686 skb2->dev = prev_dev;
3687 netif_receive_skb(skb2);
3688 }
3689 }
3690
3691 prev_dev = sdata->dev;
3692 dev_sw_netstats_rx_add(sdata->dev, skb->len);
3693 }
3694
3695 if (prev_dev) {
3696 skb->dev = prev_dev;
3697 netif_receive_skb(skb);
3698 return;
3699 }
3700
3701 out_free_skb:
3702 dev_kfree_skb(skb);
3703 }
3704
3705 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3706 ieee80211_rx_result res)
3707 {
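/*
 * RX_DROP_MONITOR falls through to RX_CONTINUE so that dropped frames
 * are still delivered to cooked monitor interfaces, just like frames
 * that went through the whole handler chain without being consumed.
 */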
3708 switch (res) {
3709 case RX_DROP_MONITOR:
3710 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3711 if (rx->sta)
3712 rx->sta->rx_stats.dropped++;
3713 fallthrough;
3714 case RX_CONTINUE: {
3715 struct ieee80211_rate *rate = NULL;
3716 struct ieee80211_supported_band *sband;
3717 struct ieee80211_rx_status *status;
3718
3719 status = IEEE80211_SKB_RXCB((rx->skb));
3720
3721 sband = rx->local->hw.wiphy->bands[status->band];
3722 if (status->encoding == RX_ENC_LEGACY)
3723 rate = &sband->bitrates[status->rate_idx];
3724
3725 ieee80211_rx_cooked_monitor(rx, rate);
3726 break;
3727 }
3728 case RX_DROP_UNUSABLE:
3729 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3730 if (rx->sta)
3731 rx->sta->rx_stats.dropped++;
3732 dev_kfree_skb(rx->skb);
3733 break;
3734 case RX_QUEUED:
3735 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
3736 break;
3737 }
3738 }
3739
3740 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3741 struct sk_buff_head *frames)
3742 {
3743 ieee80211_rx_result res = RX_DROP_MONITOR;
3744 struct sk_buff *skb;
3745
3746 #define CALL_RXH(rxh) \
3747 do { \
3748 res = rxh(rx); \
3749 if (res != RX_CONTINUE) \
3750 goto rxh_next; \
3751 } while (0)
3752
3753 /* Take the lock here to avoid concurrent access to the data
3754 * used in the RX path (e.g. key data, station data, ...) when
3755 * a frame is released from the reorder buffer due to a
3756 * timeout from the reorder timer, potentially in parallel
3757 * with RX from the driver.
3758 */
3759 spin_lock_bh(&rx->local->rx_path_lock);
3760
3761 while ((skb = __skb_dequeue(frames))) {
3762 /*
3763 * all the other fields are valid across frames
3764 * that belong to an A-MPDU since they are on the
3765 * same TID from the same station
3766 */
3767 rx->skb = skb;
3768
3769 CALL_RXH(ieee80211_rx_h_check_more_data);
3770 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
3771 CALL_RXH(ieee80211_rx_h_sta_process);
3772 CALL_RXH(ieee80211_rx_h_decrypt);
3773 CALL_RXH(ieee80211_rx_h_defragment);
3774 CALL_RXH(ieee80211_rx_h_michael_mic_verify);
3775 /* must be after MMIC verify so the header is counted in the MPDU MIC */
3776 #ifdef CONFIG_MAC80211_MESH
3777 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
3778 CALL_RXH(ieee80211_rx_h_mesh_fwding);
3779 #endif
3780 CALL_RXH(ieee80211_rx_h_amsdu);
3781 CALL_RXH(ieee80211_rx_h_data);
3782
3783 /* special treatment -- needs the queue */
3784 res = ieee80211_rx_h_ctrl(rx, frames);
3785 if (res != RX_CONTINUE)
3786 goto rxh_next;
3787
3788 CALL_RXH(ieee80211_rx_h_mgmt_check);
3789 CALL_RXH(ieee80211_rx_h_action);
3790 CALL_RXH(ieee80211_rx_h_userspace_mgmt);
3791 CALL_RXH(ieee80211_rx_h_action_post_userspace);
3792 CALL_RXH(ieee80211_rx_h_action_return);
3793 CALL_RXH(ieee80211_rx_h_ext);
3794 CALL_RXH(ieee80211_rx_h_mgmt);
3795
3796 rxh_next:
3797 ieee80211_rx_handlers_result(rx, res);
3798
3799 #undef CALL_RXH
3800 }
3801
3802 spin_unlock_bh(&rx->local->rx_path_lock);
3803 }
3804
3805 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
3806 {
3807 struct sk_buff_head reorder_release;
3808 ieee80211_rx_result res = RX_DROP_MONITOR;
3809
3810 __skb_queue_head_init(&reorder_release);
3811
3812 #define CALL_RXH(rxh) \
3813 do { \
3814 res = rxh(rx); \
3815 if (res != RX_CONTINUE) \
3816 goto rxh_next; \
3817 } while (0)
3818
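/* these two handlers run before any A-MPDU reordering is done */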
3819 CALL_RXH(ieee80211_rx_h_check_dup);
3820 CALL_RXH(ieee80211_rx_h_check);
3821
3822 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
3823
3824 ieee80211_rx_handlers(rx, &reorder_release);
3825 return;
3826
3827 rxh_next:
3828 ieee80211_rx_handlers_result(rx, res);
3829
3830 #undef CALL_RXH
3831 }
3832
3833 /*
3834 * This function makes calls into the RX path, therefore
3835 * it has to be invoked under RCU read lock.
3836 */
3837 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3838 {
3839 struct sk_buff_head frames;
3840 struct ieee80211_rx_data rx = {
3841 .sta = sta,
3842 .sdata = sta->sdata,
3843 .local = sta->local,
3844 /* This is OK -- must be QoS data frame */
3845 .security_idx = tid,
3846 .seqno_idx = tid,
3847 };
3848 struct tid_ampdu_rx *tid_agg_rx;
3849
3850 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3851 if (!tid_agg_rx)
3852 return;
3853
3854 __skb_queue_head_init(&frames);
3855
3856 spin_lock(&tid_agg_rx->reorder_lock);
3857 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3858 spin_unlock(&tid_agg_rx->reorder_lock);
3859
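/* notify the driver if any frames were released due to the timeout */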
3860 if (!skb_queue_empty(&frames)) {
3861 struct ieee80211_event event = {
3862 .type = BA_FRAME_TIMEOUT,
3863 .u.ba.tid = tid,
3864 .u.ba.sta = &sta->sta,
3865 };
3866 drv_event_callback(rx.local, rx.sdata, &event);
3867 }
3868
3869 ieee80211_rx_handlers(&rx, &frames);
3870 }
3871
3872 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
3873 u16 ssn, u64 filtered,
3874 u16 received_mpdus)
3875 {
3876 struct sta_info *sta;
3877 struct tid_ampdu_rx *tid_agg_rx;
3878 struct sk_buff_head frames;
3879 struct ieee80211_rx_data rx = {
3880 /* This is OK -- must be QoS data frame */
3881 .security_idx = tid,
3882 .seqno_idx = tid,
3883 };
3884 int i, diff;
3885
3886 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
3887 return;
3888
3889 __skb_queue_head_init(&frames);
3890
3891 sta = container_of(pubsta, struct sta_info, sta);
3892
3893 rx.sta = sta;
3894 rx.sdata = sta->sdata;
3895 rx.local = sta->local;
3896
3897 rcu_read_lock();
3898 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3899 if (!tid_agg_rx)
3900 goto out;
3901
3902 spin_lock_bh(&tid_agg_rx->reorder_lock);
3903
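/*
 * If at least half the sequence number space worth of MPDUs was
 * received, release everything in the reorder buffer and move the
 * window straight to the reported SSN.
 */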
3904 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
3905 int release;
3906
3907 /* release all frames in the reorder buffer */
3908 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
3909 IEEE80211_SN_MODULO;
3910 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
3911 release, &frames);
3912 /* update ssn to match received ssn */
3913 tid_agg_rx->head_seq_num = ssn;
3914 } else {
3915 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
3916 &frames);
3917 }
3918
3919 /* handle the case where the received ssn is behind the mac ssn;
3920 * it can be up to tid_agg_rx->buf_size behind and still be valid */
3921 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
3922 if (diff >= tid_agg_rx->buf_size) {
3923 tid_agg_rx->reorder_buf_filtered = 0;
3924 goto release;
3925 }
3926 filtered = filtered >> diff;
3927 ssn += diff;
3928
3929 /* update bitmap */
3930 for (i = 0; i < tid_agg_rx->buf_size; i++) {
3931 int index = (ssn + i) % tid_agg_rx->buf_size;
3932
3933 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
3934 if (filtered & BIT_ULL(i))
3935 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
3936 }
3937
3938 /* now also process frames that the filter marking released */
3939 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3940
3941 release:
3942 spin_unlock_bh(&tid_agg_rx->reorder_lock);
3943
3944 ieee80211_rx_handlers(&rx, &frames);
3945
3946 out:
3947 rcu_read_unlock();
3948 }
3949 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
3950
3951 /* main receive path */
3952
3953 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3954 {
3955 struct ieee80211_sub_if_data *sdata = rx->sdata;
3956 struct sk_buff *skb = rx->skb;
3957 struct ieee80211_hdr *hdr = (void *)skb->data;
3958 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3959 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
3960 bool multicast = is_multicast_ether_addr(hdr->addr1) ||
3961 ieee80211_is_s1g_beacon(hdr->frame_control);
3962
3963 switch (sdata->vif.type) {
3964 case NL80211_IFTYPE_STATION:
3965 if (!bssid && !sdata->u.mgd.use_4addr)
3966 return false;
3967 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
3968 return false;
3969 if (multicast)
3970 return true;
3971 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3972 case NL80211_IFTYPE_ADHOC:
3973 if (!bssid)
3974 return false;
3975 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3976 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3977 return false;
3978 if (ieee80211_is_beacon(hdr->frame_control))
3979 return true;
3980 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
3981 return false;
3982 if (!multicast &&
3983 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3984 return false;
3985 if (!rx->sta) {
3986 int rate_idx;
3987 if (status->encoding != RX_ENC_LEGACY)
3988 rate_idx = 0; /* TODO: HT/VHT rates */
3989 else
3990 rate_idx = status->rate_idx;
3991 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
3992 BIT(rate_idx));
3993 }
3994 return true;
3995 case NL80211_IFTYPE_OCB:
3996 if (!bssid)
3997 return false;
3998 if (!ieee80211_is_data_present(hdr->frame_control))
3999 return false;
4000 if (!is_broadcast_ether_addr(bssid))
4001 return false;
4002 if (!multicast &&
4003 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
4004 return false;
4005 if (!rx->sta) {
4006 int rate_idx;
4007 if (status->encoding != RX_ENC_LEGACY)
4008 rate_idx = 0; /* TODO: HT rates */
4009 else
4010 rate_idx = status->rate_idx;
4011 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
4012 BIT(rate_idx));
4013 }
4014 return true;
4015 case NL80211_IFTYPE_MESH_POINT:
4016 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
4017 return false;
4018 if (multicast)
4019 return true;
4020 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4021 case NL80211_IFTYPE_AP_VLAN:
4022 case NL80211_IFTYPE_AP:
4023 if (!bssid)
4024 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4025
4026 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
4027 /*
4028 * Accept public action frames even when the
4029 * BSSID doesn't match; this is used for P2P
4030 * and location updates. Note that mac80211
4031 * itself never looks at these frames.
4032 */
4033 if (!multicast &&
4034 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
4035 return false;
4036 if (ieee80211_is_public_action(hdr, skb->len))
4037 return true;
4038 return ieee80211_is_beacon(hdr->frame_control);
4039 }
4040
4041 if (!ieee80211_has_tods(hdr->frame_control)) {
4042 /* ignore data frames to TDLS peers */
4043 if (ieee80211_is_data(hdr->frame_control))
4044 return false;
4045 /* ignore action frames to TDLS peers */
4046 if (ieee80211_is_action(hdr->frame_control) &&
4047 !is_broadcast_ether_addr(bssid) &&
4048 !ether_addr_equal(bssid, hdr->addr1))
4049 return false;
4050 }
4051
4052 /*
4053 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
4054 * the BSSID - we've checked that already but may have accepted
4055 * the wildcard (ff:ff:ff:ff:ff:ff).
4056 *
4057 * It also says:
4058 * The BSSID of the Data frame is determined as follows:
4059 * a) If the STA is contained within an AP or is associated
4060 * with an AP, the BSSID is the address currently in use
4061 * by the STA contained in the AP.
4062 *
4063 * So we should not accept data frames with an address that's
4064 * multicast.
4065 *
4066 * Accepting it also opens a security problem because stations
4067 * could encrypt it with the GTK and inject traffic that way.
4068 */
4069 if (ieee80211_is_data(hdr->frame_control) && multicast)
4070 return false;
4071
4072 return true;
4073 case NL80211_IFTYPE_P2P_DEVICE:
4074 return ieee80211_is_public_action(hdr, skb->len) ||
4075 ieee80211_is_probe_req(hdr->frame_control) ||
4076 ieee80211_is_probe_resp(hdr->frame_control) ||
4077 ieee80211_is_beacon(hdr->frame_control);
4078 case NL80211_IFTYPE_NAN:
4079 /* Currently no frames are allowed on the NAN interface */
4080 return false;
4081 default:
4082 break;
4083 }
4084
4085 WARN_ON_ONCE(1);
4086 return false;
4087 }
4088
4089 void ieee80211_check_fast_rx(struct sta_info *sta)
4090 {
4091 struct ieee80211_sub_if_data *sdata = sta->sdata;
4092 struct ieee80211_local *local = sdata->local;
4093 struct ieee80211_key *key;
4094 struct ieee80211_fast_rx fastrx = {
4095 .dev = sdata->dev,
4096 .vif_type = sdata->vif.type,
4097 .control_port_protocol = sdata->control_port_protocol,
4098 }, *old, *new = NULL;
4099 bool set_offload = false;
4100 bool assign = false;
4101 bool offload;
4102
4103 /* use sparse to check that we don't return without updating */
4104 __acquire(check_fast_rx);
4105
4106 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
4107 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
4108 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
4109 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
4110
4111 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
4112
4113 /* fast-rx doesn't do reordering */
4114 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
4115 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
4116 goto clear;
4117
4118 switch (sdata->vif.type) {
4119 case NL80211_IFTYPE_STATION:
4120 if (sta->sta.tdls) {
4121 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4122 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4123 fastrx.expected_ds_bits = 0;
4124 } else {
4125 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4126 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
4127 fastrx.expected_ds_bits =
4128 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4129 }
4130
4131 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
4132 fastrx.expected_ds_bits |=
4133 cpu_to_le16(IEEE80211_FCTL_TODS);
4134 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4135 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4136 }
4137
4138 if (!sdata->u.mgd.powersave)
4139 break;
4140
4141 /* software powersave is a huge mess, avoid all of it */
4142 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
4143 goto clear;
4144 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
4145 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
4146 goto clear;
4147 break;
4148 case NL80211_IFTYPE_AP_VLAN:
4149 case NL80211_IFTYPE_AP:
4150 /* parallel-rx requires this, at least with calls to
4151 * ieee80211_sta_ps_transition()
4152 */
4153 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
4154 goto clear;
4155 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4156 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4157 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
4158
4159 fastrx.internal_forward =
4160 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
4161 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
4162 !sdata->u.vlan.sta);
4163
4164 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4165 sdata->u.vlan.sta) {
4166 fastrx.expected_ds_bits |=
4167 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4168 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4169 fastrx.internal_forward = 0;
4170 }
4171
4172 break;
4173 default:
4174 goto clear;
4175 }
4176
4177 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
4178 goto clear;
4179
4180 rcu_read_lock();
4181 key = rcu_dereference(sta->ptk[sta->ptk_idx]);
4182 if (!key)
4183 key = rcu_dereference(sdata->default_unicast_key);
4184 if (key) {
4185 switch (key->conf.cipher) {
4186 case WLAN_CIPHER_SUITE_TKIP:
4187 /* we don't want to deal with MMIC in fast-rx */
4188 goto clear_rcu;
4189 case WLAN_CIPHER_SUITE_CCMP:
4190 case WLAN_CIPHER_SUITE_CCMP_256:
4191 case WLAN_CIPHER_SUITE_GCMP:
4192 case WLAN_CIPHER_SUITE_GCMP_256:
4193 break;
4194 default:
4195 /* We also don't want to deal with
4196 * WEP or cipher scheme.
4197 */
4198 goto clear_rcu;
4199 }
4200
4201 fastrx.key = true;
4202 fastrx.icv_len = key->conf.icv_len;
4203 }
4204
4205 assign = true;
4206 clear_rcu:
4207 rcu_read_unlock();
4208 clear:
4209 __release(check_fast_rx);
4210
4211 if (assign)
4212 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
4213
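/* only enable decap offload if fast-rx can be used for this STA */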
4214 offload = assign &&
4215 (sdata->vif.offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED);
4216
4217 if (offload)
4218 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4219 else
4220 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4221
4222 if (set_offload)
4223 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign);
4224
4225 spin_lock_bh(&sta->lock);
4226 old = rcu_dereference_protected(sta->fast_rx, true);
4227 rcu_assign_pointer(sta->fast_rx, new);
4228 spin_unlock_bh(&sta->lock);
4229
4230 if (old)
4231 kfree_rcu(old, rcu_head);
4232 }
4233
4234 void ieee80211_clear_fast_rx(struct sta_info *sta)
4235 {
4236 struct ieee80211_fast_rx *old;
4237
4238 spin_lock_bh(&sta->lock);
4239 old = rcu_dereference_protected(sta->fast_rx, true);
4240 RCU_INIT_POINTER(sta->fast_rx, NULL);
4241 spin_unlock_bh(&sta->lock);
4242
4243 if (old)
4244 kfree_rcu(old, rcu_head);
4245 }
4246
4247 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4248 {
4249 struct ieee80211_local *local = sdata->local;
4250 struct sta_info *sta;
4251
4252 lockdep_assert_held(&local->sta_mtx);
4253
4254 list_for_each_entry(sta, &local->sta_list, list) {
4255 if (sdata != sta->sdata &&
4256 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
4257 continue;
4258 ieee80211_check_fast_rx(sta);
4259 }
4260 }
4261
4262 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4263 {
4264 struct ieee80211_local *local = sdata->local;
4265
4266 mutex_lock(&local->sta_mtx);
4267 __ieee80211_check_fast_rx_iface(sdata);
4268 mutex_unlock(&local->sta_mtx);
4269 }
4270
4271 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
4272 struct ieee80211_fast_rx *fast_rx,
4273 int orig_len)
4274 {
4275 struct ieee80211_sta_rx_stats *stats;
4276 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
4277 struct sta_info *sta = rx->sta;
4278 struct sk_buff *skb = rx->skb;
4279 void *sa = skb->data + ETH_ALEN;
4280 void *da = skb->data;
4281
4282 stats = &sta->rx_stats;
4283 if (fast_rx->uses_rss)
4284 stats = this_cpu_ptr(sta->pcpu_rx_stats);
4285
4286 /* statistics part of ieee80211_rx_h_sta_process() */
4287 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4288 stats->last_signal = status->signal;
4289 if (!fast_rx->uses_rss)
4290 ewma_signal_add(&sta->rx_stats_avg.signal,
4291 -status->signal);
4292 }
4293
4294 if (status->chains) {
4295 int i;
4296
4297 stats->chains = status->chains;
4298 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4299 int signal = status->chain_signal[i];
4300
4301 if (!(status->chains & BIT(i)))
4302 continue;
4303
4304 stats->chain_signal_last[i] = signal;
4305 if (!fast_rx->uses_rss)
4306 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
4307 -signal);
4308 }
4309 }
4310 /* end of statistics */
4311
4312 stats->last_rx = jiffies;
4313 stats->last_rate = sta_stats_encode_rate(status);
4314
4315 stats->fragments++;
4316 stats->packets++;
4317
4318 skb->dev = fast_rx->dev;
4319
4320 dev_sw_netstats_rx_add(fast_rx->dev, skb->len);
4321
4322 /* The seqno index has the same property that we need
4323 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4324 * for non-QoS-data frames. Here we know it's a data
4325 * frame, so count MSDUs.
4326 */
4327 u64_stats_update_begin(&stats->syncp);
4328 stats->msdu[rx->seqno_idx]++;
4329 stats->bytes += orig_len;
4330 u64_stats_update_end(&stats->syncp);
4331
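/*
 * If bridging is enabled for this (AP) interface, send frames destined
 * for other associated stations (and a copy of multicast frames) back
 * out over the wireless medium.
 */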
4332 if (fast_rx->internal_forward) {
4333 struct sk_buff *xmit_skb = NULL;
4334 if (is_multicast_ether_addr(da)) {
4335 xmit_skb = skb_copy(skb, GFP_ATOMIC);
4336 } else if (!ether_addr_equal(da, sa) &&
4337 sta_info_get(rx->sdata, da)) {
4338 xmit_skb = skb;
4339 skb = NULL;
4340 }
4341
4342 if (xmit_skb) {
4343 /*
4344 * Send to wireless media and increase priority by 256
4345 * to keep the received priority instead of
4346 * reclassifying the frame (see cfg80211_classify8021d).
4347 */
4348 xmit_skb->priority += 256;
4349 xmit_skb->protocol = htons(ETH_P_802_3);
4350 skb_reset_network_header(xmit_skb);
4351 skb_reset_mac_header(xmit_skb);
4352 dev_queue_xmit(xmit_skb);
4353 }
4354
4355 if (!skb)
4356 return;
4357 }
4358
4359 /* deliver to local stack */
4360 skb->protocol = eth_type_trans(skb, fast_rx->dev);
4361 memset(skb->cb, 0, sizeof(skb->cb));
4362 if (rx->list)
4363 list_add_tail(&skb->list, rx->list);
4364 else
4365 netif_receive_skb(skb);
4366
4367 }
4368
4369 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4370 struct ieee80211_fast_rx *fast_rx)
4371 {
4372 struct sk_buff *skb = rx->skb;
4373 struct ieee80211_hdr *hdr = (void *)skb->data;
4374 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4375 struct sta_info *sta = rx->sta;
4376 int orig_len = skb->len;
4377 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
4378 int snap_offs = hdrlen;
4379 struct {
4380 u8 snap[sizeof(rfc1042_header)];
4381 __be16 proto;
4382 } *payload __aligned(2);
4383 struct {
4384 u8 da[ETH_ALEN];
4385 u8 sa[ETH_ALEN];
4386 } addrs __aligned(2);
4387 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
4388
4389 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4390 * to a common data structure; drivers can implement that per queue
4391 * but we don't have that information in mac80211
4392 */
4393 if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4394 return false;
4395
4396 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4397
4398 /* If using encryption, we also need to have:
4399 * - PN_VALIDATED: similar, but the implementation is tricky
4400 * - DECRYPTED: necessary for PN_VALIDATED
4401 */
4402 if (fast_rx->key &&
4403 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4404 return false;
4405
4406 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4407 return false;
4408
4409 if (unlikely(ieee80211_is_frag(hdr)))
4410 return false;
4411
4412 /* Since our interface address cannot be multicast, this
4413 * implicitly also rejects multicast frames without the
4414 * explicit check.
4415 *
4416 * We shouldn't get any *data* frames not addressed to us
4417 * (AP mode will accept multicast *management* frames), but
4418 * punting here will make it go through the full checks in
4419 * ieee80211_accept_frame().
4420 */
4421 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4422 return false;
4423
4424 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4425 IEEE80211_FCTL_TODS)) !=
4426 fast_rx->expected_ds_bits)
4427 return false;
4428
4429 /* assign the key to drop unencrypted frames (later)
4430 * and strip the IV/MIC if necessary
4431 */
4432 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4433 /* GCMP header length is the same */
4434 snap_offs += IEEE80211_CCMP_HDR_LEN;
4435 }
4436
4437 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
4438 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4439 goto drop;
4440
4441 payload = (void *)(skb->data + snap_offs);
4442
4443 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4444 return false;
4445
4446 /* Don't handle these here since they require special code.
4447 * Accept AARP and IPX even though they should come with a
4448 * bridge-tunnel header - but if we get them this way then
4449 * there's little point in discarding them.
4450 */
4451 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4452 payload->proto == fast_rx->control_port_protocol))
4453 return false;
4454 }
4455
4456 /* after this point, don't punt to the slowpath! */
4457
4458 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4459 pskb_trim(skb, skb->len - fast_rx->icv_len))
4460 goto drop;
4461
4462 if (rx->key && !ieee80211_has_protected(hdr->frame_control))
4463 goto drop;
4464
4465 if (status->rx_flags & IEEE80211_RX_AMSDU) {
4466 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
4467 RX_QUEUED)
4468 goto drop;
4469
4470 return true;
4471 }
4472
4473 /* do the header conversion - first grab the addresses */
4474 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4475 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4476 /* remove the SNAP but leave the ethertype */
4477 skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4478 /* push the addresses in front */
4479 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
4480
4481 ieee80211_rx_8023(rx, fast_rx, orig_len);
4482
4483 return true;
4484 drop:
4485 dev_kfree_skb(skb);
4486 if (fast_rx->uses_rss)
4487 stats = this_cpu_ptr(sta->pcpu_rx_stats);
4488
4489 stats->dropped++;
4490 return true;
4491 }
4492
4493 /*
4494 * This function returns whether or not the SKB
4495 * was destined for RX processing, which,
4496 * if consume is true, is equivalent to whether
4497 * or not the skb was consumed.
4498 */
4499 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4500 struct sk_buff *skb, bool consume)
4501 {
4502 struct ieee80211_local *local = rx->local;
4503 struct ieee80211_sub_if_data *sdata = rx->sdata;
4504
4505 rx->skb = skb;
4506
4507 /* See if we can do fast-rx; if we have to copy we already lost,
4508 * so punt in that case. We should never have to deliver a data
4509 * frame to multiple interfaces anyway.
4510 *
4511 * We skip the ieee80211_accept_frame() call and do the necessary
4512 * checking inside ieee80211_invoke_fast_rx().
4513 */
4514 if (consume && rx->sta) {
4515 struct ieee80211_fast_rx *fast_rx;
4516
4517 fast_rx = rcu_dereference(rx->sta->fast_rx);
4518 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4519 return true;
4520 }
4521
4522 if (!ieee80211_accept_frame(rx))
4523 return false;
4524
4525 if (!consume) {
4526 skb = skb_copy(skb, GFP_ATOMIC);
4527 if (!skb) {
4528 if (net_ratelimit())
4529 wiphy_debug(local->hw.wiphy,
4530 "failed to copy skb for %s\n",
4531 sdata->name);
4532 return true;
4533 }
4534
4535 rx->skb = skb;
4536 }
4537
4538 ieee80211_invoke_rx_handlers(rx);
4539 return true;
4540 }
4541
4542 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
4543 struct ieee80211_sta *pubsta,
4544 struct sk_buff *skb,
4545 struct list_head *list)
4546 {
4547 struct ieee80211_local *local = hw_to_local(hw);
4548 struct ieee80211_fast_rx *fast_rx;
4549 struct ieee80211_rx_data rx;
4550
4551 memset(&rx, 0, sizeof(rx));
4552 rx.skb = skb;
4553 rx.local = local;
4554 rx.list = list;
4555
4556 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4557
4558 /* drop frame if too short for header */
4559 if (skb->len < sizeof(struct ethhdr))
4560 goto drop;
4561
4562 if (!pubsta)
4563 goto drop;
4564
4565 rx.sta = container_of(pubsta, struct sta_info, sta);
4566 rx.sdata = rx.sta->sdata;
4567
4568 fast_rx = rcu_dereference(rx.sta->fast_rx);
4569 if (!fast_rx)
4570 goto drop;
4571
4572 ieee80211_rx_8023(&rx, fast_rx, skb->len);
4573 return;
4574
4575 drop:
4576 dev_kfree_skb(skb);
4577 }
4578
4579 /*
4580 * This is the actual Rx frame handler. As it belongs to the Rx path, it
4581 * must be called with rcu_read_lock protection.
4582 */
4583 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4584 struct ieee80211_sta *pubsta,
4585 struct sk_buff *skb,
4586 struct list_head *list)
4587 {
4588 struct ieee80211_local *local = hw_to_local(hw);
4589 struct ieee80211_sub_if_data *sdata;
4590 struct ieee80211_hdr *hdr;
4591 __le16 fc;
4592 struct ieee80211_rx_data rx;
4593 struct ieee80211_sub_if_data *prev;
4594 struct rhlist_head *tmp;
4595 int err = 0;
4596
4597 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4598 memset(&rx, 0, sizeof(rx));
4599 rx.skb = skb;
4600 rx.local = local;
4601 rx.list = list;
4602
4603 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4604 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4605
4606 if (ieee80211_is_mgmt(fc)) {
4607 /* drop frame if too short for header */
4608 if (skb->len < ieee80211_hdrlen(fc))
4609 err = -ENOBUFS;
4610 else
4611 err = skb_linearize(skb);
4612 } else {
4613 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
4614 }
4615
4616 if (err) {
4617 dev_kfree_skb(skb);
4618 return;
4619 }
4620
4621 hdr = (struct ieee80211_hdr *)skb->data;
4622 ieee80211_parse_qos(&rx);
4623 ieee80211_verify_alignment(&rx);
4624
4625 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
4626 ieee80211_is_beacon(hdr->frame_control) ||
4627 ieee80211_is_s1g_beacon(hdr->frame_control)))
4628 ieee80211_scan_rx(local, skb);
4629
4630 if (ieee80211_is_data(fc)) {
4631 struct sta_info *sta, *prev_sta;
4632
4633 if (pubsta) {
4634 rx.sta = container_of(pubsta, struct sta_info, sta);
4635 rx.sdata = rx.sta->sdata;
4636 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4637 return;
4638 goto out;
4639 }
4640
4641 prev_sta = NULL;
4642
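/*
 * There may be one station entry per interface for this transmitter;
 * hand a copy of the frame to each of them and let the last match
 * consume the original skb below.
 */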
4643 for_each_sta_info(local, hdr->addr2, sta, tmp) {
4644 if (!prev_sta) {
4645 prev_sta = sta;
4646 continue;
4647 }
4648
4649 rx.sta = prev_sta;
4650 rx.sdata = prev_sta->sdata;
4651 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4652
4653 prev_sta = sta;
4654 }
4655
4656 if (prev_sta) {
4657 rx.sta = prev_sta;
4658 rx.sdata = prev_sta->sdata;
4659
4660 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4661 return;
4662 goto out;
4663 }
4664 }
4665
4666 prev = NULL;
4667
4668 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4669 if (!ieee80211_sdata_running(sdata))
4670 continue;
4671
4672 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
4673 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4674 continue;
4675
4676 /*
4677 * the frame is destined for this interface, but if it's
4678 * not also for the previous one, we handle that after
4679 * the loop to avoid copying the SKB one time too many
4680 */
4681
4682 if (!prev) {
4683 prev = sdata;
4684 continue;
4685 }
4686
4687 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4688 rx.sdata = prev;
4689 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4690
4691 prev = sdata;
4692 }
4693
4694 if (prev) {
4695 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4696 rx.sdata = prev;
4697
4698 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4699 return;
4700 }
4701
4702 out:
4703 dev_kfree_skb(skb);
4704 }
4705
4706 /*
4707 * This is the receive path handler. It is called by a low level driver when an
4708 * 802.11 MPDU is received from the hardware.
4709 */
4710 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4711 struct sk_buff *skb, struct list_head *list)
4712 {
4713 struct ieee80211_local *local = hw_to_local(hw);
4714 struct ieee80211_rate *rate = NULL;
4715 struct ieee80211_supported_band *sband;
4716 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4717
4718 WARN_ON_ONCE(softirq_count() == 0);
4719
4720 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
4721 goto drop;
4722
4723 sband = local->hw.wiphy->bands[status->band];
4724 if (WARN_ON(!sband))
4725 goto drop;
4726
4727 /*
4728 * If we're suspending, it is possible although not too likely
4729 * that we'd be receiving frames after having already partially
4730 * quiesced the stack. We can't process such frames then since
4731 * that might, for example, cause stations to be added or other
4732 * driver callbacks to be invoked.
4733 */
4734 if (unlikely(local->quiescing || local->suspended))
4735 goto drop;
4736
4737 /* We might be during a HW reconfig, prevent Rx for the same reason */
4738 if (unlikely(local->in_reconfig))
4739 goto drop;
4740
4741 /*
4742 * The same happens when we're not even started,
4743 * but that's worth a warning.
4744 */
4745 if (WARN_ON(!local->started))
4746 goto drop;
4747
4748 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
4749 /*
4750 * Validate the rate, unless a PLCP error means that
4751 * we probably can't have a valid rate here anyway.
4752 */
4753
4754 switch (status->encoding) {
4755 case RX_ENC_HT:
4756 /*
4757 * rate_idx is MCS index, which can be [0-76]
4758 * as documented on:
4759 *
4760 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
4761 *
4762 * Anything else would be some sort of driver or
4763 * hardware error. The driver should catch hardware
4764 * errors.
4765 */
4766 if (WARN(status->rate_idx > 76,
4767 "Rate marked as an HT rate but passed "
4768 "status->rate_idx is not "
4769 "an MCS index [0-76]: %d (0x%02x)\n",
4770 status->rate_idx,
4771 status->rate_idx))
4772 goto drop;
4773 break;
4774 case RX_ENC_VHT:
4775 if (WARN_ONCE(status->rate_idx > 9 ||
4776 !status->nss ||
4777 status->nss > 8,
4778 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
4779 status->rate_idx, status->nss))
4780 goto drop;
4781 break;
4782 case RX_ENC_HE:
4783 if (WARN_ONCE(status->rate_idx > 11 ||
4784 !status->nss ||
4785 status->nss > 8,
4786 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
4787 status->rate_idx, status->nss))
4788 goto drop;
4789 break;
4790 default:
4791 WARN_ON_ONCE(1);
4792 fallthrough;
4793 case RX_ENC_LEGACY:
4794 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
4795 goto drop;
4796 rate = &sband->bitrates[status->rate_idx];
4797 }
4798 }
4799
4800 status->rx_flags = 0;
4801
4802 kcov_remote_start_common(skb_get_kcov_handle(skb));
4803
4804 /*
4805 * Frames with a failed FCS/PLCP checksum are not returned;
4806 * all other frames are returned without the radiotap header
4807 * if one was previously present.
4808 * Also, frames shorter than 16 bytes are dropped.
4809 */
4810 if (!(status->flag & RX_FLAG_8023))
4811 skb = ieee80211_rx_monitor(local, skb, rate);
4812 if (skb) {
4813 ieee80211_tpt_led_trig_rx(local,
4814 ((struct ieee80211_hdr *)skb->data)->frame_control,
4815 skb->len);
4816
4817 if (status->flag & RX_FLAG_8023)
4818 __ieee80211_rx_handle_8023(hw, pubsta, skb, list);
4819 else
4820 __ieee80211_rx_handle_packet(hw, pubsta, skb, list);
4821 }
4822
4823 kcov_remote_stop();
4824 return;
4825 drop:
4826 kfree_skb(skb);
4827 }
4828 EXPORT_SYMBOL(ieee80211_rx_list);
4829
4830 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4831 struct sk_buff *skb, struct napi_struct *napi)
4832 {
4833 struct sk_buff *tmp;
4834 LIST_HEAD(list);
4835
4837 /*
4838 * key references and virtual interfaces are protected using RCU,
4839 * and this requires that we are in a read-side RCU section during
4840 * receive processing.
4841 */
4842 rcu_read_lock();
4843 ieee80211_rx_list(hw, pubsta, skb, &list);
4844 rcu_read_unlock();
4845
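/* without a NAPI context, just hand the whole list to the stack directly */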
4846 if (!napi) {
4847 netif_receive_skb_list(&list);
4848 return;
4849 }
4850
4851 list_for_each_entry_safe(skb, tmp, &list, list) {
4852 skb_list_del_init(skb);
4853 napi_gro_receive(napi, skb);
4854 }
4855 }
4856 EXPORT_SYMBOL(ieee80211_rx_napi);
4857
4858 /* This is a version of the rx handler that can be called from hard IRQ
4859 * context. Post the skb on the queue and schedule the tasklet. */
4860 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
4861 {
4862 struct ieee80211_local *local = hw_to_local(hw);
4863
4864 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
4865
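/* mark the skb as an RX message for the tasklet before queueing it */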
4866 skb->pkt_type = IEEE80211_RX_MSG;
4867 skb_queue_tail(&local->skb_queue, skb);
4868 tasklet_schedule(&local->tasklet);
4869 }
4870 EXPORT_SYMBOL(ieee80211_rx_irqsafe);