1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2005-2006, Devicescape Software, Inc.
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright (C) 2018-2021 Intel Corporation
10 * Transmit and frame generation functions.
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/skbuff.h>
16 #include <linux/if_vlan.h>
17 #include <linux/etherdevice.h>
18 #include <linux/bitmap.h>
19 #include <linux/rcupdate.h>
20 #include <linux/export.h>
21 #include <linux/timekeeping.h>
22 #include <net/net_namespace.h>
23 #include <net/ieee80211_radiotap.h>
24 #include <net/cfg80211.h>
25 #include <net/mac80211.h>
26 #include <net/codel.h>
27 #include <net/codel_impl.h>
28 #include <asm/unaligned.h>
29 #include <net/fq_impl.h>
31 #include "ieee80211_i.h"
32 #include "driver-ops.h"
42 static __le16
ieee80211_duration(struct ieee80211_tx_data
*tx
,
43 struct sk_buff
*skb
, int group_addr
,
46 int rate
, mrate
, erp
, dur
, i
, shift
= 0;
47 struct ieee80211_rate
*txrate
;
48 struct ieee80211_local
*local
= tx
->local
;
49 struct ieee80211_supported_band
*sband
;
50 struct ieee80211_hdr
*hdr
;
51 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
52 struct ieee80211_chanctx_conf
*chanctx_conf
;
55 /* assume HW handles this */
56 if (tx
->rate
.flags
& (IEEE80211_TX_RC_MCS
| IEEE80211_TX_RC_VHT_MCS
))
60 chanctx_conf
= rcu_dereference(tx
->sdata
->vif
.chanctx_conf
);
62 shift
= ieee80211_chandef_get_shift(&chanctx_conf
->def
);
63 rate_flags
= ieee80211_chandef_rate_flags(&chanctx_conf
->def
);
68 if (WARN_ON_ONCE(tx
->rate
.idx
< 0))
71 sband
= local
->hw
.wiphy
->bands
[info
->band
];
72 txrate
= &sband
->bitrates
[tx
->rate
.idx
];
74 erp
= txrate
->flags
& IEEE80211_RATE_ERP_G
;
76 /* device is expected to do this */
77 if (sband
->band
== NL80211_BAND_S1GHZ
)
81 * data and mgmt (except PS Poll):
83 * - during contention period:
84 * if addr1 is group address: 0
85 * if more fragments = 0 and addr1 is individual address: time to
86 * transmit one ACK plus SIFS
87 * if more fragments = 1 and addr1 is individual address: time to
88 * transmit next fragment plus 2 x ACK plus 3 x SIFS
91 * - control response frame (CTS or ACK) shall be transmitted using the
92 * same rate as the immediately previous frame in the frame exchange
93 * sequence, if this rate belongs to the PHY mandatory rates, or else
94 * at the highest possible rate belonging to the PHY rates in the
97 hdr
= (struct ieee80211_hdr
*)skb
->data
;
98 if (ieee80211_is_ctl(hdr
->frame_control
)) {
99 /* TODO: These control frames are not currently sent by
100 * mac80211, but should they be implemented, this function
101 * needs to be updated to support duration field calculation.
103 * RTS: time needed to transmit pending data/mgmt frame plus
104 * one CTS frame plus one ACK frame plus 3 x SIFS
105 * CTS: duration of immediately previous RTS minus time
106 * required to transmit CTS and its SIFS
107 * ACK: 0 if immediately previous directed data/mgmt had
108 * more=0, with more=1 duration in ACK frame is duration
109 * from previous frame minus time needed to transmit ACK
111 * PS Poll: BIT(15) | BIT(14) | aid
117 if (0 /* FIX: data/mgmt during CFP */)
118 return cpu_to_le16(32768);
120 if (group_addr
) /* Group address as the destination - no ACK */
123 /* Individual destination address:
124 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
125 * CTS and ACK frames shall be transmitted using the highest rate in
126 * basic rate set that is less than or equal to the rate of the
127 * immediately previous frame and that is using the same modulation
128 * (CCK or OFDM). If no basic rate set matches with these requirements,
129 * the highest mandatory rate of the PHY that is less than or equal to
130 * the rate of the previous frame is used.
131 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
134 /* use lowest available if everything fails */
135 mrate
= sband
->bitrates
[0].bitrate
;
136 for (i
= 0; i
< sband
->n_bitrates
; i
++) {
137 struct ieee80211_rate
*r
= &sband
->bitrates
[i
];
139 if (r
->bitrate
> txrate
->bitrate
)
142 if ((rate_flags
& r
->flags
) != rate_flags
)
145 if (tx
->sdata
->vif
.bss_conf
.basic_rates
& BIT(i
))
146 rate
= DIV_ROUND_UP(r
->bitrate
, 1 << shift
);
148 switch (sband
->band
) {
149 case NL80211_BAND_2GHZ
: {
151 if (tx
->sdata
->flags
& IEEE80211_SDATA_OPERATING_GMODE
)
152 flag
= IEEE80211_RATE_MANDATORY_G
;
154 flag
= IEEE80211_RATE_MANDATORY_B
;
159 case NL80211_BAND_5GHZ
:
160 case NL80211_BAND_6GHZ
:
161 if (r
->flags
& IEEE80211_RATE_MANDATORY_A
)
164 case NL80211_BAND_S1GHZ
:
165 case NL80211_BAND_60GHZ
:
166 /* TODO, for now fall through */
167 case NUM_NL80211_BANDS
:
173 /* No matching basic rate found; use highest suitable mandatory
175 rate
= DIV_ROUND_UP(mrate
, 1 << shift
);
178 /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
179 if (ieee80211_is_data_qos(hdr
->frame_control
) &&
180 *(ieee80211_get_qos_ctl(hdr
)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK
)
183 /* Time needed to transmit ACK
184 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
185 * to closest integer */
186 dur
= ieee80211_frame_duration(sband
->band
, 10, rate
, erp
,
187 tx
->sdata
->vif
.bss_conf
.use_short_preamble
,
191 /* Frame is fragmented: duration increases with time needed to
192 * transmit next fragment plus ACK and 2 x SIFS. */
193 dur
*= 2; /* ACK + SIFS */
195 dur
+= ieee80211_frame_duration(sband
->band
, next_frag_len
,
196 txrate
->bitrate
, erp
,
197 tx
->sdata
->vif
.bss_conf
.use_short_preamble
,
201 return cpu_to_le16(dur
);
205 static ieee80211_tx_result debug_noinline
206 ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data
*tx
)
208 struct ieee80211_local
*local
= tx
->local
;
209 struct ieee80211_if_managed
*ifmgd
;
210 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx
->skb
);
212 /* driver doesn't support power save */
213 if (!ieee80211_hw_check(&local
->hw
, SUPPORTS_PS
))
216 /* hardware does dynamic power save */
217 if (ieee80211_hw_check(&local
->hw
, SUPPORTS_DYNAMIC_PS
))
220 /* dynamic power save disabled */
221 if (local
->hw
.conf
.dynamic_ps_timeout
<= 0)
224 /* we are scanning, don't enable power save */
228 if (!local
->ps_sdata
)
231 /* No point if we're going to suspend */
232 if (local
->quiescing
)
235 /* dynamic ps is supported only in managed mode */
236 if (tx
->sdata
->vif
.type
!= NL80211_IFTYPE_STATION
)
239 if (unlikely(info
->flags
& IEEE80211_TX_INTFL_OFFCHAN_TX_OK
))
242 ifmgd
= &tx
->sdata
->u
.mgd
;
245 * Don't wakeup from power save if u-apsd is enabled, voip ac has
246 * u-apsd enabled and the frame is in voip class. This effectively
247 * means that even if all access categories have u-apsd enabled, in
248 * practise u-apsd is only used with the voip ac. This is a
249 * workaround for the case when received voip class packets do not
250 * have correct qos tag for some reason, due the network or the
253 * Note: ifmgd->uapsd_queues access is racy here. If the value is
254 * changed via debugfs, user needs to reassociate manually to have
255 * everything in sync.
257 if ((ifmgd
->flags
& IEEE80211_STA_UAPSD_ENABLED
) &&
258 (ifmgd
->uapsd_queues
& IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
) &&
259 skb_get_queue_mapping(tx
->skb
) == IEEE80211_AC_VO
)
262 if (local
->hw
.conf
.flags
& IEEE80211_CONF_PS
) {
263 ieee80211_stop_queues_by_reason(&local
->hw
,
264 IEEE80211_MAX_QUEUE_MAP
,
265 IEEE80211_QUEUE_STOP_REASON_PS
,
267 ifmgd
->flags
&= ~IEEE80211_STA_NULLFUNC_ACKED
;
268 ieee80211_queue_work(&local
->hw
,
269 &local
->dynamic_ps_disable_work
);
272 /* Don't restart the timer if we're not disassociated */
273 if (!ifmgd
->associated
)
276 mod_timer(&local
->dynamic_ps_timer
, jiffies
+
277 msecs_to_jiffies(local
->hw
.conf
.dynamic_ps_timeout
));
282 static ieee80211_tx_result debug_noinline
283 ieee80211_tx_h_check_assoc(struct ieee80211_tx_data
*tx
)
286 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)tx
->skb
->data
;
287 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx
->skb
);
290 if (unlikely(info
->flags
& IEEE80211_TX_CTL_INJECTED
))
293 if (unlikely(test_bit(SCAN_SW_SCANNING
, &tx
->local
->scanning
)) &&
294 test_bit(SDATA_STATE_OFFCHANNEL
, &tx
->sdata
->state
) &&
295 !ieee80211_is_probe_req(hdr
->frame_control
) &&
296 !ieee80211_is_any_nullfunc(hdr
->frame_control
))
298 * When software scanning only nullfunc frames (to notify
299 * the sleep state to the AP) and probe requests (for the
300 * active scan) are allowed, all other frames should not be
301 * sent and we should not get here, but if we do
302 * nonetheless, drop them to avoid sending them
303 * off-channel. See the link below and
304 * ieee80211_start_scan() for more.
306 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
310 if (tx
->sdata
->vif
.type
== NL80211_IFTYPE_OCB
)
313 if (tx
->flags
& IEEE80211_TX_PS_BUFFERED
)
317 assoc
= test_sta_flag(tx
->sta
, WLAN_STA_ASSOC
);
319 if (likely(tx
->flags
& IEEE80211_TX_UNICAST
)) {
320 if (unlikely(!assoc
&&
321 ieee80211_is_data(hdr
->frame_control
))) {
322 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
323 sdata_info(tx
->sdata
,
324 "dropped data frame to not associated station %pM\n",
327 I802_DEBUG_INC(tx
->local
->tx_handlers_drop_not_assoc
);
330 } else if (unlikely(ieee80211_is_data(hdr
->frame_control
) &&
331 ieee80211_vif_get_num_mcast_if(tx
->sdata
) == 0)) {
333 * No associated STAs - no need to send multicast
342 /* This function is called whenever the AP is about to exceed the maximum limit
343 * of buffered frames for power saving STAs. This situation should not really
344 * happen often during normal operation, so dropping the oldest buffered packet
345 * from each queue should be OK to make some room for new frames. */
346 static void purge_old_ps_buffers(struct ieee80211_local
*local
)
348 int total
= 0, purged
= 0;
350 struct ieee80211_sub_if_data
*sdata
;
351 struct sta_info
*sta
;
353 list_for_each_entry_rcu(sdata
, &local
->interfaces
, list
) {
356 if (sdata
->vif
.type
== NL80211_IFTYPE_AP
)
357 ps
= &sdata
->u
.ap
.ps
;
358 else if (ieee80211_vif_is_mesh(&sdata
->vif
))
359 ps
= &sdata
->u
.mesh
.ps
;
363 skb
= skb_dequeue(&ps
->bc_buf
);
366 ieee80211_free_txskb(&local
->hw
, skb
);
368 total
+= skb_queue_len(&ps
->bc_buf
);
372 * Drop one frame from each station from the lowest-priority
373 * AC that has frames at all.
375 list_for_each_entry_rcu(sta
, &local
->sta_list
, list
) {
378 for (ac
= IEEE80211_AC_BK
; ac
>= IEEE80211_AC_VO
; ac
--) {
379 skb
= skb_dequeue(&sta
->ps_tx_buf
[ac
]);
380 total
+= skb_queue_len(&sta
->ps_tx_buf
[ac
]);
383 ieee80211_free_txskb(&local
->hw
, skb
);
389 local
->total_ps_buffered
= total
;
390 ps_dbg_hw(&local
->hw
, "PS buffers full - purged %d frames\n", purged
);
393 static ieee80211_tx_result
394 ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data
*tx
)
396 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx
->skb
);
397 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)tx
->skb
->data
;
401 * broadcast/multicast frame
403 * If any of the associated/peer stations is in power save mode,
404 * the frame is buffered to be sent after DTIM beacon frame.
405 * This is done either by the hardware or us.
408 /* powersaving STAs currently only in AP/VLAN/mesh mode */
409 if (tx
->sdata
->vif
.type
== NL80211_IFTYPE_AP
||
410 tx
->sdata
->vif
.type
== NL80211_IFTYPE_AP_VLAN
) {
414 ps
= &tx
->sdata
->bss
->ps
;
415 } else if (ieee80211_vif_is_mesh(&tx
->sdata
->vif
)) {
416 ps
= &tx
->sdata
->u
.mesh
.ps
;
422 /* no buffering for ordered frames */
423 if (ieee80211_has_order(hdr
->frame_control
))
426 if (ieee80211_is_probe_req(hdr
->frame_control
))
429 if (ieee80211_hw_check(&tx
->local
->hw
, QUEUE_CONTROL
))
430 info
->hw_queue
= tx
->sdata
->vif
.cab_queue
;
432 /* no stations in PS mode and no buffered packets */
433 if (!atomic_read(&ps
->num_sta_ps
) && skb_queue_empty(&ps
->bc_buf
))
436 info
->flags
|= IEEE80211_TX_CTL_SEND_AFTER_DTIM
;
438 /* device releases frame after DTIM beacon */
439 if (!ieee80211_hw_check(&tx
->local
->hw
, HOST_BROADCAST_PS_BUFFERING
))
442 /* buffered in mac80211 */
443 if (tx
->local
->total_ps_buffered
>= TOTAL_MAX_TX_BUFFER
)
444 purge_old_ps_buffers(tx
->local
);
446 if (skb_queue_len(&ps
->bc_buf
) >= AP_MAX_BC_BUFFER
) {
448 "BC TX buffer full - dropping the oldest frame\n");
449 ieee80211_free_txskb(&tx
->local
->hw
, skb_dequeue(&ps
->bc_buf
));
451 tx
->local
->total_ps_buffered
++;
453 skb_queue_tail(&ps
->bc_buf
, tx
->skb
);
458 static int ieee80211_use_mfp(__le16 fc
, struct sta_info
*sta
,
461 if (!ieee80211_is_mgmt(fc
))
464 if (sta
== NULL
|| !test_sta_flag(sta
, WLAN_STA_MFP
))
467 if (!ieee80211_is_robust_mgmt_frame(skb
))
473 static ieee80211_tx_result
474 ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data
*tx
)
476 struct sta_info
*sta
= tx
->sta
;
477 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx
->skb
);
478 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)tx
->skb
->data
;
479 struct ieee80211_local
*local
= tx
->local
;
484 if (unlikely((test_sta_flag(sta
, WLAN_STA_PS_STA
) ||
485 test_sta_flag(sta
, WLAN_STA_PS_DRIVER
) ||
486 test_sta_flag(sta
, WLAN_STA_PS_DELIVER
)) &&
487 !(info
->flags
& IEEE80211_TX_CTL_NO_PS_BUFFER
))) {
488 int ac
= skb_get_queue_mapping(tx
->skb
);
490 if (ieee80211_is_mgmt(hdr
->frame_control
) &&
491 !ieee80211_is_bufferable_mmpdu(hdr
->frame_control
)) {
492 info
->flags
|= IEEE80211_TX_CTL_NO_PS_BUFFER
;
496 ps_dbg(sta
->sdata
, "STA %pM aid %d: PS buffer for AC %d\n",
497 sta
->sta
.addr
, sta
->sta
.aid
, ac
);
498 if (tx
->local
->total_ps_buffered
>= TOTAL_MAX_TX_BUFFER
)
499 purge_old_ps_buffers(tx
->local
);
501 /* sync with ieee80211_sta_ps_deliver_wakeup */
502 spin_lock(&sta
->ps_lock
);
504 * STA woke up the meantime and all the frames on ps_tx_buf have
505 * been queued to pending queue. No reordering can happen, go
506 * ahead and Tx the packet.
508 if (!test_sta_flag(sta
, WLAN_STA_PS_STA
) &&
509 !test_sta_flag(sta
, WLAN_STA_PS_DRIVER
) &&
510 !test_sta_flag(sta
, WLAN_STA_PS_DELIVER
)) {
511 spin_unlock(&sta
->ps_lock
);
515 if (skb_queue_len(&sta
->ps_tx_buf
[ac
]) >= STA_MAX_TX_BUFFER
) {
516 struct sk_buff
*old
= skb_dequeue(&sta
->ps_tx_buf
[ac
]);
518 "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
520 ieee80211_free_txskb(&local
->hw
, old
);
522 tx
->local
->total_ps_buffered
++;
524 info
->control
.jiffies
= jiffies
;
525 info
->control
.vif
= &tx
->sdata
->vif
;
526 info
->control
.flags
|= IEEE80211_TX_INTCFL_NEED_TXPROCESSING
;
527 info
->flags
&= ~IEEE80211_TX_TEMPORARY_FLAGS
;
528 skb_queue_tail(&sta
->ps_tx_buf
[ac
], tx
->skb
);
529 spin_unlock(&sta
->ps_lock
);
531 if (!timer_pending(&local
->sta_cleanup
))
532 mod_timer(&local
->sta_cleanup
,
533 round_jiffies(jiffies
+
534 STA_INFO_CLEANUP_INTERVAL
));
537 * We queued up some frames, so the TIM bit might
538 * need to be set, recalculate it.
540 sta_info_recalc_tim(sta
);
543 } else if (unlikely(test_sta_flag(sta
, WLAN_STA_PS_STA
))) {
545 "STA %pM in PS mode, but polling/in SP -> send frame\n",
552 static ieee80211_tx_result debug_noinline
553 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data
*tx
)
555 if (unlikely(tx
->flags
& IEEE80211_TX_PS_BUFFERED
))
558 if (tx
->flags
& IEEE80211_TX_UNICAST
)
559 return ieee80211_tx_h_unicast_ps_buf(tx
);
561 return ieee80211_tx_h_multicast_ps_buf(tx
);
564 static ieee80211_tx_result debug_noinline
565 ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data
*tx
)
567 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx
->skb
);
569 if (unlikely(tx
->sdata
->control_port_protocol
== tx
->skb
->protocol
)) {
570 if (tx
->sdata
->control_port_no_encrypt
)
571 info
->flags
|= IEEE80211_TX_INTFL_DONT_ENCRYPT
;
572 info
->control
.flags
|= IEEE80211_TX_CTRL_PORT_CTRL_PROTO
;
573 info
->flags
|= IEEE80211_TX_CTL_USE_MINRATE
;
579 static ieee80211_tx_result debug_noinline
580 ieee80211_tx_h_select_key(struct ieee80211_tx_data
*tx
)
582 struct ieee80211_key
*key
;
583 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx
->skb
);
584 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)tx
->skb
->data
;
586 if (unlikely(info
->flags
& IEEE80211_TX_INTFL_DONT_ENCRYPT
)) {
592 (key
= rcu_dereference(tx
->sta
->ptk
[tx
->sta
->ptk_idx
])))
594 else if (ieee80211_is_group_privacy_action(tx
->skb
) &&
595 (key
= rcu_dereference(tx
->sdata
->default_multicast_key
)))
597 else if (ieee80211_is_mgmt(hdr
->frame_control
) &&
598 is_multicast_ether_addr(hdr
->addr1
) &&
599 ieee80211_is_robust_mgmt_frame(tx
->skb
) &&
600 (key
= rcu_dereference(tx
->sdata
->default_mgmt_key
)))
602 else if (is_multicast_ether_addr(hdr
->addr1
) &&
603 (key
= rcu_dereference(tx
->sdata
->default_multicast_key
)))
605 else if (!is_multicast_ether_addr(hdr
->addr1
) &&
606 (key
= rcu_dereference(tx
->sdata
->default_unicast_key
)))
612 bool skip_hw
= false;
614 /* TODO: add threshold stuff again */
616 switch (tx
->key
->conf
.cipher
) {
617 case WLAN_CIPHER_SUITE_WEP40
:
618 case WLAN_CIPHER_SUITE_WEP104
:
619 case WLAN_CIPHER_SUITE_TKIP
:
620 if (!ieee80211_is_data_present(hdr
->frame_control
))
623 case WLAN_CIPHER_SUITE_CCMP
:
624 case WLAN_CIPHER_SUITE_CCMP_256
:
625 case WLAN_CIPHER_SUITE_GCMP
:
626 case WLAN_CIPHER_SUITE_GCMP_256
:
627 if (!ieee80211_is_data_present(hdr
->frame_control
) &&
628 !ieee80211_use_mfp(hdr
->frame_control
, tx
->sta
,
630 !ieee80211_is_group_privacy_action(tx
->skb
))
633 skip_hw
= (tx
->key
->conf
.flags
&
634 IEEE80211_KEY_FLAG_SW_MGMT_TX
) &&
635 ieee80211_is_mgmt(hdr
->frame_control
);
637 case WLAN_CIPHER_SUITE_AES_CMAC
:
638 case WLAN_CIPHER_SUITE_BIP_CMAC_256
:
639 case WLAN_CIPHER_SUITE_BIP_GMAC_128
:
640 case WLAN_CIPHER_SUITE_BIP_GMAC_256
:
641 if (!ieee80211_is_mgmt(hdr
->frame_control
))
646 if (unlikely(tx
->key
&& tx
->key
->flags
& KEY_FLAG_TAINTED
&&
647 !ieee80211_is_deauth(hdr
->frame_control
)))
650 if (!skip_hw
&& tx
->key
&&
651 tx
->key
->flags
& KEY_FLAG_UPLOADED_TO_HARDWARE
)
652 info
->control
.hw_key
= &tx
->key
->conf
;
653 } else if (ieee80211_is_data_present(hdr
->frame_control
) && tx
->sta
&&
654 test_sta_flag(tx
->sta
, WLAN_STA_USES_ENCRYPTION
)) {
661 static ieee80211_tx_result debug_noinline
662 ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data
*tx
)
664 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx
->skb
);
665 struct ieee80211_hdr
*hdr
= (void *)tx
->skb
->data
;
666 struct ieee80211_supported_band
*sband
;
668 struct ieee80211_tx_rate_control txrc
;
669 struct ieee80211_sta_rates
*ratetbl
= NULL
;
670 bool encap
= info
->flags
& IEEE80211_TX_CTL_HW_80211_ENCAP
;
673 memset(&txrc
, 0, sizeof(txrc
));
675 sband
= tx
->local
->hw
.wiphy
->bands
[info
->band
];
677 len
= min_t(u32
, tx
->skb
->len
+ FCS_LEN
,
678 tx
->local
->hw
.wiphy
->frag_threshold
);
680 /* set up the tx rate control struct we give the RC algo */
681 txrc
.hw
= &tx
->local
->hw
;
683 txrc
.bss_conf
= &tx
->sdata
->vif
.bss_conf
;
685 txrc
.reported_rate
.idx
= -1;
686 txrc
.rate_idx_mask
= tx
->sdata
->rc_rateidx_mask
[info
->band
];
688 if (tx
->sdata
->rc_has_mcs_mask
[info
->band
])
689 txrc
.rate_idx_mcs_mask
=
690 tx
->sdata
->rc_rateidx_mcs_mask
[info
->band
];
692 txrc
.bss
= (tx
->sdata
->vif
.type
== NL80211_IFTYPE_AP
||
693 tx
->sdata
->vif
.type
== NL80211_IFTYPE_MESH_POINT
||
694 tx
->sdata
->vif
.type
== NL80211_IFTYPE_ADHOC
||
695 tx
->sdata
->vif
.type
== NL80211_IFTYPE_OCB
);
697 /* set up RTS protection if desired */
698 if (len
> tx
->local
->hw
.wiphy
->rts_threshold
) {
702 info
->control
.use_rts
= txrc
.rts
;
703 info
->control
.use_cts_prot
= tx
->sdata
->vif
.bss_conf
.use_cts_prot
;
706 * Use short preamble if the BSS can handle it, but not for
707 * management frames unless we know the receiver can handle
708 * that -- the management frame might be to a station that
709 * just wants a probe response.
711 if (tx
->sdata
->vif
.bss_conf
.use_short_preamble
&&
712 (ieee80211_is_tx_data(tx
->skb
) ||
713 (tx
->sta
&& test_sta_flag(tx
->sta
, WLAN_STA_SHORT_PREAMBLE
))))
714 txrc
.short_preamble
= true;
716 info
->control
.short_preamble
= txrc
.short_preamble
;
718 /* don't ask rate control when rate already injected via radiotap */
719 if (info
->control
.flags
& IEEE80211_TX_CTRL_RATE_INJECT
)
723 assoc
= test_sta_flag(tx
->sta
, WLAN_STA_ASSOC
);
726 * Lets not bother rate control if we're associated and cannot
727 * talk to the sta. This should not happen.
729 if (WARN(test_bit(SCAN_SW_SCANNING
, &tx
->local
->scanning
) && assoc
&&
730 !rate_usable_index_exists(sband
, &tx
->sta
->sta
),
731 "%s: Dropped data frame as no usable bitrate found while "
732 "scanning and associated. Target station: "
733 "%pM on %d GHz band\n",
735 encap
? ((struct ethhdr
*)hdr
)->h_dest
: hdr
->addr1
,
740 * If we're associated with the sta at this point we know we can at
741 * least send the frame at the lowest bit rate.
743 rate_control_get_rate(tx
->sdata
, tx
->sta
, &txrc
);
745 if (tx
->sta
&& !info
->control
.skip_table
)
746 ratetbl
= rcu_dereference(tx
->sta
->sta
.rates
);
748 if (unlikely(info
->control
.rates
[0].idx
< 0)) {
750 struct ieee80211_tx_rate rate
= {
751 .idx
= ratetbl
->rate
[0].idx
,
752 .flags
= ratetbl
->rate
[0].flags
,
753 .count
= ratetbl
->rate
[0].count
756 if (ratetbl
->rate
[0].idx
< 0)
764 tx
->rate
= info
->control
.rates
[0];
767 if (txrc
.reported_rate
.idx
< 0) {
768 txrc
.reported_rate
= tx
->rate
;
769 if (tx
->sta
&& ieee80211_is_tx_data(tx
->skb
))
770 tx
->sta
->tx_stats
.last_rate
= txrc
.reported_rate
;
772 tx
->sta
->tx_stats
.last_rate
= txrc
.reported_rate
;
777 if (unlikely(!info
->control
.rates
[0].count
))
778 info
->control
.rates
[0].count
= 1;
780 if (WARN_ON_ONCE((info
->control
.rates
[0].count
> 1) &&
781 (info
->flags
& IEEE80211_TX_CTL_NO_ACK
)))
782 info
->control
.rates
[0].count
= 1;
787 static __le16
ieee80211_tx_next_seq(struct sta_info
*sta
, int tid
)
789 u16
*seq
= &sta
->tid_seq
[tid
];
790 __le16 ret
= cpu_to_le16(*seq
);
792 /* Increase the sequence number. */
793 *seq
= (*seq
+ 0x10) & IEEE80211_SCTL_SEQ
;
798 static ieee80211_tx_result debug_noinline
799 ieee80211_tx_h_sequence(struct ieee80211_tx_data
*tx
)
801 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(tx
->skb
);
802 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)tx
->skb
->data
;
806 * Packet injection may want to control the sequence
807 * number, if we have no matching interface then we
808 * neither assign one ourselves nor ask the driver to.
810 if (unlikely(info
->control
.vif
->type
== NL80211_IFTYPE_MONITOR
))
813 if (unlikely(ieee80211_is_ctl(hdr
->frame_control
)))
816 if (ieee80211_hdrlen(hdr
->frame_control
) < 24)
819 if (ieee80211_is_qos_nullfunc(hdr
->frame_control
))
822 if (info
->control
.flags
& IEEE80211_TX_CTRL_NO_SEQNO
)
826 * Anything but QoS data that has a sequence number field
827 * (is long enough) gets a sequence number from the global
828 * counter. QoS data frames with a multicast destination
829 * also use the global counter (802.11-2012 9.3.2.10).
831 if (!ieee80211_is_data_qos(hdr
->frame_control
) ||
832 is_multicast_ether_addr(hdr
->addr1
)) {
833 /* driver should assign sequence number */
834 info
->flags
|= IEEE80211_TX_CTL_ASSIGN_SEQ
;
835 /* for pure STA mode without beacons, we can do it */
836 hdr
->seq_ctrl
= cpu_to_le16(tx
->sdata
->sequence_number
);
837 tx
->sdata
->sequence_number
+= 0x10;
839 tx
->sta
->tx_stats
.msdu
[IEEE80211_NUM_TIDS
]++;
844 * This should be true for injected/management frames only, for
845 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
846 * above since they are not QoS-data frames.
851 /* include per-STA, per-TID sequence counter */
852 tid
= ieee80211_get_tid(hdr
);
853 tx
->sta
->tx_stats
.msdu
[tid
]++;
855 hdr
->seq_ctrl
= ieee80211_tx_next_seq(tx
->sta
, tid
);
860 static int ieee80211_fragment(struct ieee80211_tx_data
*tx
,
861 struct sk_buff
*skb
, int hdrlen
,
864 struct ieee80211_local
*local
= tx
->local
;
865 struct ieee80211_tx_info
*info
;
867 int per_fragm
= frag_threshold
- hdrlen
- FCS_LEN
;
868 int pos
= hdrlen
+ per_fragm
;
869 int rem
= skb
->len
- hdrlen
- per_fragm
;
871 if (WARN_ON(rem
< 0))
874 /* first fragment was already added to queue by caller */
877 int fraglen
= per_fragm
;
882 tmp
= dev_alloc_skb(local
->tx_headroom
+
884 tx
->sdata
->encrypt_headroom
+
885 IEEE80211_ENCRYPT_TAILROOM
);
889 __skb_queue_tail(&tx
->skbs
, tmp
);
892 local
->tx_headroom
+ tx
->sdata
->encrypt_headroom
);
894 /* copy control information */
895 memcpy(tmp
->cb
, skb
->cb
, sizeof(tmp
->cb
));
897 info
= IEEE80211_SKB_CB(tmp
);
898 info
->flags
&= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT
|
899 IEEE80211_TX_CTL_FIRST_FRAGMENT
);
902 info
->flags
|= IEEE80211_TX_CTL_MORE_FRAMES
;
904 skb_copy_queue_mapping(tmp
, skb
);
905 tmp
->priority
= skb
->priority
;
908 /* copy header and data */
909 skb_put_data(tmp
, skb
->data
, hdrlen
);
910 skb_put_data(tmp
, skb
->data
+ pos
, fraglen
);
915 /* adjust first fragment's length */
916 skb_trim(skb
, hdrlen
+ per_fragm
);
920 static ieee80211_tx_result debug_noinline
921 ieee80211_tx_h_fragment(struct ieee80211_tx_data
*tx
)
923 struct sk_buff
*skb
= tx
->skb
;
924 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
925 struct ieee80211_hdr
*hdr
= (void *)skb
->data
;
926 int frag_threshold
= tx
->local
->hw
.wiphy
->frag_threshold
;
930 /* no matter what happens, tx->skb moves to tx->skbs */
931 __skb_queue_tail(&tx
->skbs
, skb
);
934 if (info
->flags
& IEEE80211_TX_CTL_DONTFRAG
)
937 if (ieee80211_hw_check(&tx
->local
->hw
, SUPPORTS_TX_FRAG
))
941 * Warn when submitting a fragmented A-MPDU frame and drop it.
942 * This scenario is handled in ieee80211_tx_prepare but extra
943 * caution taken here as fragmented ampdu may cause Tx stop.
945 if (WARN_ON(info
->flags
& IEEE80211_TX_CTL_AMPDU
))
948 hdrlen
= ieee80211_hdrlen(hdr
->frame_control
);
950 /* internal error, why isn't DONTFRAG set? */
951 if (WARN_ON(skb
->len
+ FCS_LEN
<= frag_threshold
))
955 * Now fragment the frame. This will allocate all the fragments and
956 * chain them (using skb as the first fragment) to skb->next.
957 * During transmission, we will remove the successfully transmitted
958 * fragments from this list. When the low-level driver rejects one
959 * of the fragments then we will simply pretend to accept the skb
960 * but store it away as pending.
962 if (ieee80211_fragment(tx
, skb
, hdrlen
, frag_threshold
))
965 /* update duration/seq/flags of fragments */
968 skb_queue_walk(&tx
->skbs
, skb
) {
969 const __le16 morefrags
= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS
);
971 hdr
= (void *)skb
->data
;
972 info
= IEEE80211_SKB_CB(skb
);
974 if (!skb_queue_is_last(&tx
->skbs
, skb
)) {
975 hdr
->frame_control
|= morefrags
;
977 * No multi-rate retries for fragmented frames, that
978 * would completely throw off the NAV at other STAs.
980 info
->control
.rates
[1].idx
= -1;
981 info
->control
.rates
[2].idx
= -1;
982 info
->control
.rates
[3].idx
= -1;
983 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES
!= 4);
984 info
->flags
&= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE
;
986 hdr
->frame_control
&= ~morefrags
;
988 hdr
->seq_ctrl
|= cpu_to_le16(fragnum
& IEEE80211_SCTL_FRAG
);
995 static ieee80211_tx_result debug_noinline
996 ieee80211_tx_h_stats(struct ieee80211_tx_data
*tx
)
1004 skb_queue_walk(&tx
->skbs
, skb
) {
1005 ac
= skb_get_queue_mapping(skb
);
1006 tx
->sta
->tx_stats
.bytes
[ac
] += skb
->len
;
1009 tx
->sta
->tx_stats
.packets
[ac
]++;
1014 static ieee80211_tx_result debug_noinline
1015 ieee80211_tx_h_encrypt(struct ieee80211_tx_data
*tx
)
1020 switch (tx
->key
->conf
.cipher
) {
1021 case WLAN_CIPHER_SUITE_WEP40
:
1022 case WLAN_CIPHER_SUITE_WEP104
:
1023 return ieee80211_crypto_wep_encrypt(tx
);
1024 case WLAN_CIPHER_SUITE_TKIP
:
1025 return ieee80211_crypto_tkip_encrypt(tx
);
1026 case WLAN_CIPHER_SUITE_CCMP
:
1027 return ieee80211_crypto_ccmp_encrypt(
1028 tx
, IEEE80211_CCMP_MIC_LEN
);
1029 case WLAN_CIPHER_SUITE_CCMP_256
:
1030 return ieee80211_crypto_ccmp_encrypt(
1031 tx
, IEEE80211_CCMP_256_MIC_LEN
);
1032 case WLAN_CIPHER_SUITE_AES_CMAC
:
1033 return ieee80211_crypto_aes_cmac_encrypt(tx
);
1034 case WLAN_CIPHER_SUITE_BIP_CMAC_256
:
1035 return ieee80211_crypto_aes_cmac_256_encrypt(tx
);
1036 case WLAN_CIPHER_SUITE_BIP_GMAC_128
:
1037 case WLAN_CIPHER_SUITE_BIP_GMAC_256
:
1038 return ieee80211_crypto_aes_gmac_encrypt(tx
);
1039 case WLAN_CIPHER_SUITE_GCMP
:
1040 case WLAN_CIPHER_SUITE_GCMP_256
:
1041 return ieee80211_crypto_gcmp_encrypt(tx
);
1043 return ieee80211_crypto_hw_encrypt(tx
);
1049 static ieee80211_tx_result debug_noinline
1050 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data
*tx
)
1052 struct sk_buff
*skb
;
1053 struct ieee80211_hdr
*hdr
;
1057 skb_queue_walk(&tx
->skbs
, skb
) {
1058 hdr
= (void *) skb
->data
;
1059 if (unlikely(ieee80211_is_pspoll(hdr
->frame_control
)))
1060 break; /* must not overwrite AID */
1061 if (!skb_queue_is_last(&tx
->skbs
, skb
)) {
1062 struct sk_buff
*next
= skb_queue_next(&tx
->skbs
, skb
);
1063 next_len
= next
->len
;
1066 group_addr
= is_multicast_ether_addr(hdr
->addr1
);
1069 ieee80211_duration(tx
, skb
, group_addr
, next_len
);
1075 /* actual transmit path */
1077 static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data
*tx
,
1078 struct sk_buff
*skb
,
1079 struct ieee80211_tx_info
*info
,
1080 struct tid_ampdu_tx
*tid_tx
,
1083 bool queued
= false;
1084 bool reset_agg_timer
= false;
1085 struct sk_buff
*purge_skb
= NULL
;
1087 if (test_bit(HT_AGG_STATE_OPERATIONAL
, &tid_tx
->state
)) {
1088 info
->flags
|= IEEE80211_TX_CTL_AMPDU
;
1089 reset_agg_timer
= true;
1090 } else if (test_bit(HT_AGG_STATE_WANT_START
, &tid_tx
->state
)) {
1092 * nothing -- this aggregation session is being started
1093 * but that might still fail with the driver
1095 } else if (!tx
->sta
->sta
.txq
[tid
]) {
1096 spin_lock(&tx
->sta
->lock
);
1098 * Need to re-check now, because we may get here
1100 * 1) in the window during which the setup is actually
1101 * already done, but not marked yet because not all
1102 * packets are spliced over to the driver pending
1103 * queue yet -- if this happened we acquire the lock
1104 * either before or after the splice happens, but
1105 * need to recheck which of these cases happened.
1107 * 2) during session teardown, if the OPERATIONAL bit
1108 * was cleared due to the teardown but the pointer
1109 * hasn't been assigned NULL yet (or we loaded it
1110 * before it was assigned) -- in this case it may
1111 * now be NULL which means we should just let the
1112 * packet pass through because splicing the frames
1113 * back is already done.
1115 tid_tx
= rcu_dereference_protected_tid_tx(tx
->sta
, tid
);
1118 /* do nothing, let packet pass through */
1119 } else if (test_bit(HT_AGG_STATE_OPERATIONAL
, &tid_tx
->state
)) {
1120 info
->flags
|= IEEE80211_TX_CTL_AMPDU
;
1121 reset_agg_timer
= true;
1124 if (info
->flags
& IEEE80211_TX_CTL_NO_PS_BUFFER
) {
1125 clear_sta_flag(tx
->sta
, WLAN_STA_SP
);
1126 ps_dbg(tx
->sta
->sdata
,
1127 "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
1128 tx
->sta
->sta
.addr
, tx
->sta
->sta
.aid
);
1130 info
->control
.vif
= &tx
->sdata
->vif
;
1131 info
->control
.flags
|= IEEE80211_TX_INTCFL_NEED_TXPROCESSING
;
1132 info
->flags
&= ~IEEE80211_TX_TEMPORARY_FLAGS
;
1133 __skb_queue_tail(&tid_tx
->pending
, skb
);
1134 if (skb_queue_len(&tid_tx
->pending
) > STA_MAX_TX_BUFFER
)
1135 purge_skb
= __skb_dequeue(&tid_tx
->pending
);
1137 spin_unlock(&tx
->sta
->lock
);
1140 ieee80211_free_txskb(&tx
->local
->hw
, purge_skb
);
1143 /* reset session timer */
1144 if (reset_agg_timer
)
1145 tid_tx
->last_tx
= jiffies
;
/*
 * initialises @tx
 * pass %NULL for the station if unknown, a valid pointer if known
 * or an ERR_PTR() if the station is known not to exist
 */
static ieee80211_tx_result
ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
		     struct ieee80211_tx_data *tx,
		     struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* Fully reset the tx descriptor; callers pass stack memory. */
	memset(tx, 0, sizeof(*tx));
	tx->skb = skb;
	tx->local = local;
	tx->sdata = sdata;
	__skb_queue_head_init(&tx->skbs);

	/*
	 * If this flag is set to true anywhere, and we get here,
	 * we are doing the needed processing, so remove the flag
	 * now.
	 */
	info->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;

	hdr = (struct ieee80211_hdr *) skb->data;

	if (likely(sta)) {
		/* ERR_PTR() means "known not to exist" — leave tx->sta NULL */
		if (!IS_ERR(sta))
			tx->sta = sta;
	} else {
		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
			tx->sta = rcu_dereference(sdata->u.vlan.sta);
			/* 4-addr VLANs are useless without their station */
			if (!tx->sta && sdata->wdev.use_4addr)
				return TX_DROP;
		} else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
			tx->sta = sta_info_get_bss(sdata, hdr->addr1);
		}
		if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
			tx->sta = sta_info_get(sdata, hdr->addr1);
	}

	/*
	 * Kick off (or continue) A-MPDU session handling in software when
	 * the hardware supports aggregation but does not set sessions up
	 * by itself.
	 */
	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
	    ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
	    !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
		struct tid_ampdu_tx *tid_tx;
		int tid;

		tid = ieee80211_get_tid(hdr);

		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
		if (tid_tx) {
			bool queued;

			queued = ieee80211_tx_prep_agg(tx, skb, info,
						       tid_tx, tid);

			/* frame was taken onto the session's pending queue */
			if (unlikely(queued))
				return TX_QUEUED;
		}
	}

	if (is_multicast_ether_addr(hdr->addr1)) {
		tx->flags &= ~IEEE80211_TX_UNICAST;
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	} else
		tx->flags |= IEEE80211_TX_UNICAST;

	if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
		/* multicast, small frames and A-MPDUs are never fragmented */
		if (!(tx->flags & IEEE80211_TX_UNICAST) ||
		    skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
		    info->flags & IEEE80211_TX_CTL_AMPDU)
			info->flags |= IEEE80211_TX_CTL_DONTFRAG;
	}

	if (!tx->sta)
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		/* PS filter state changed — fast-xmit cache may be stale */
		ieee80211_check_fast_xmit(tx->sta);
	}

	info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;

	return TX_CONTINUE;
}
/*
 * Map a frame to the intermediate TX queue (TXQ) it should be put on,
 * or return NULL if the frame must bypass the TXQs (e.g. PS responses).
 */
static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
					  struct ieee80211_vif *vif,
					  struct sta_info *sta,
					  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_txq *txq = NULL;

	/* buffered-after-DTIM and PS-response frames take the direct path */
	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
		return NULL;

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
		/* management frames: only some go through a TXQ */
		if ((!ieee80211_is_mgmt(hdr->frame_control) ||
		     ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
		     vif->type == NL80211_IFTYPE_STATION) &&
		    sta && sta->uploaded) {
			/*
			 * This will be NULL if the driver didn't set the
			 * opt-in hardware flag.
			 */
			txq = sta->sta.txq[IEEE80211_NUM_TIDS];
		}
	} else if (sta) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (!sta->uploaded)
			return NULL;

		txq = sta->sta.txq[tid];
	} else if (vif) {
		/* no station: use the per-interface queue */
		txq = vif->txq;
	}

	if (!txq)
		return NULL;

	return to_txq_info(txq);
}
1282 static void ieee80211_set_skb_enqueue_time(struct sk_buff
*skb
)
1284 IEEE80211_SKB_CB(skb
)->control
.enqueue_time
= codel_get_time();
/* codel callback: length of a queued frame, in bytes. */
static u32 codel_skb_len_func(const struct sk_buff *skb)
{
	return skb->len;
}
1292 static codel_time_t
codel_skb_time_func(const struct sk_buff
*skb
)
1294 const struct ieee80211_tx_info
*info
;
1296 info
= (const struct ieee80211_tx_info
*)skb
->cb
;
1297 return info
->control
.enqueue_time
;
/*
 * codel callback: pull the next frame from the flow that the given
 * codel_vars belong to.  @ctx is the txq_info (opaque to codel).
 */
static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars,
					  void *ctx)
{
	struct ieee80211_local *local;
	struct txq_info *txqi;
	struct fq *fq;
	struct fq_flow *flow;

	txqi = ctx;
	local = vif_to_sdata(txqi->txq.vif)->local;
	fq = &local->fq;

	/*
	 * The default flow has per-txq codel state; all other flows
	 * index into the shared local->cvars array in lock-step with
	 * fq->flows, so pointer arithmetic recovers the flow.
	 */
	if (cvars == &txqi->def_cvars)
		flow = &txqi->tin.default_flow;
	else
		flow = &fq->flows[cvars - local->cvars];

	return fq_flow_dequeue(fq, flow);
}
/* codel callback: drop a frame codel decided to shed.  @ctx is the txq_info. */
static void codel_drop_func(struct sk_buff *skb,
			    void *ctx)
{
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	struct txq_info *txqi;

	txqi = ctx;
	local = vif_to_sdata(txqi->txq.vif)->local;
	hw = &local->hw;

	/* free through mac80211 so TX status handling still happens */
	ieee80211_free_txskb(hw, skb);
}
/*
 * fq callback: dequeue from a tin, running codel on the chosen flow.
 * Selects per-station codel parameters when the txq has a station,
 * otherwise the device-global ones.
 */
static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
					   struct fq_tin *tin,
					   struct fq_flow *flow)
{
	struct ieee80211_local *local;
	struct txq_info *txqi;
	struct codel_vars *cvars;
	struct codel_params *cparams;
	struct codel_stats *cstats;

	local = container_of(fq, struct ieee80211_local, fq);
	txqi = container_of(tin, struct txq_info, tin);
	cstats = &txqi->cstats;

	if (txqi->txq.sta) {
		struct sta_info *sta = container_of(txqi->txq.sta,
						    struct sta_info, sta);
		cparams = &sta->cparams;
	} else {
		cparams = &local->cparams;
	}

	/* mirror of codel_dequeue_func's cvars<->flow mapping */
	if (flow == &tin->default_flow)
		cvars = &txqi->def_cvars;
	else
		cvars = &local->cvars[flow - fq->flows];

	return codel_dequeue(txqi,
			     &flow->backlog,
			     cparams,
			     cvars,
			     cstats,
			     codel_skb_len_func,
			     codel_skb_time_func,
			     codel_drop_func,
			     codel_dequeue_func);
}
1372 static void fq_skb_free_func(struct fq
*fq
,
1374 struct fq_flow
*flow
,
1375 struct sk_buff
*skb
)
1377 struct ieee80211_local
*local
;
1379 local
= container_of(fq
, struct ieee80211_local
, fq
);
1380 ieee80211_free_txskb(&local
->hw
, skb
);
/* Enqueue a frame onto a TXQ, under the fq lock. */
static void ieee80211_txq_enqueue(struct ieee80211_local *local,
				  struct txq_info *txqi,
				  struct sk_buff *skb)
{
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;
	u32 flow_idx = fq_flow_idx(fq, skb);

	ieee80211_set_skb_enqueue_time(skb);

	spin_lock_bh(&fq->lock);
	/*
	 * For management frames, don't really apply codel etc.,
	 * we don't want to apply any shaping or anything we just
	 * want to simplify the driver API by having them on the
	 * TXQI.
	 */
	if (unlikely(txqi->txq.tid == IEEE80211_NUM_TIDS)) {
		/* mark for the late TX handlers to run at dequeue time */
		IEEE80211_SKB_CB(skb)->control.flags |=
			IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
		__skb_queue_tail(&txqi->frags, skb);
	} else {
		fq_tin_enqueue(fq, tin, flow_idx, skb,
			       fq_skb_free_func);
	}
	spin_unlock_bh(&fq->lock);
}
1411 static bool fq_vlan_filter_func(struct fq
*fq
, struct fq_tin
*tin
,
1412 struct fq_flow
*flow
, struct sk_buff
*skb
,
1415 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1417 return info
->control
.vif
== data
;
/*
 * Flush all frames belonging to a VLAN interface from its AP's
 * multicast TXQ (VLANs share the AP's vif.txq).
 */
void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata)
{
	struct fq *fq = &local->fq;
	struct txq_info *txqi;
	struct fq_tin *tin;
	struct ieee80211_sub_if_data *ap;

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
		return;

	ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);

	/* the AP may not have a TXQ (driver without wake_tx_queue) */
	if (!ap->vif.txq)
		return;

	txqi = to_txq_info(ap->vif.txq);
	tin = &txqi->tin;

	spin_lock_bh(&fq->lock);
	fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif,
		      fq_skb_free_func);
	spin_unlock_bh(&fq->lock);
}
/*
 * Initialise a txq_info and hook it up to its owner: either a station
 * (per-TID, or the management TXQ when tid == IEEE80211_NUM_TIDS) or,
 * when @sta is NULL, the interface itself.
 */
void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
			struct sta_info *sta,
			struct txq_info *txqi, int tid)
{
	fq_tin_init(&txqi->tin);
	codel_vars_init(&txqi->def_cvars);
	codel_stats_init(&txqi->cstats);
	__skb_queue_head_init(&txqi->frags);
	RB_CLEAR_NODE(&txqi->schedule_order);

	txqi->txq.vif = &sdata->vif;

	if (!sta) {
		/* interface TXQ (multicast/no-station traffic) */
		sdata->vif.txq = &txqi->txq;
		txqi->txq.tid = 0;
		txqi->txq.ac = IEEE80211_AC_BE;

		return;
	}

	if (tid == IEEE80211_NUM_TIDS) {
		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
			/* Drivers need to opt in to the management MPDU TXQ */
			if (!ieee80211_hw_check(&sdata->local->hw,
						STA_MMPDU_TXQ))
				return;
		} else if (!ieee80211_hw_check(&sdata->local->hw,
					       BUFF_MMPDU_TXQ)) {
			/* Drivers need to opt in to the bufferable MMPDU TXQ */
			return;
		}
		/* management frames go out at voice priority */
		txqi->txq.ac = IEEE80211_AC_VO;
	} else {
		txqi->txq.ac = ieee80211_ac_from_tid(tid);
	}

	txqi->txq.sta = &sta->sta;
	txqi->txq.tid = tid;
	sta->sta.txq[tid] = &txqi->txq;
}
/* Drop everything queued on a TXQ and take it off the scheduler. */
void ieee80211_txq_purge(struct ieee80211_local *local,
			 struct txq_info *txqi)
{
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;

	spin_lock_bh(&fq->lock);
	fq_tin_reset(fq, tin, fq_skb_free_func);
	/* frags holds frames bypassing fq (mgmt / leftover fragments) */
	ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
	spin_unlock_bh(&fq->lock);

	ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
}
1500 void ieee80211_txq_set_params(struct ieee80211_local
*local
)
1502 if (local
->hw
.wiphy
->txq_limit
)
1503 local
->fq
.limit
= local
->hw
.wiphy
->txq_limit
;
1505 local
->hw
.wiphy
->txq_limit
= local
->fq
.limit
;
1507 if (local
->hw
.wiphy
->txq_memory_limit
)
1508 local
->fq
.memory_limit
= local
->hw
.wiphy
->txq_memory_limit
;
1510 local
->hw
.wiphy
->txq_memory_limit
= local
->fq
.memory_limit
;
1512 if (local
->hw
.wiphy
->txq_quantum
)
1513 local
->fq
.quantum
= local
->hw
.wiphy
->txq_quantum
;
1515 local
->hw
.wiphy
->txq_quantum
= local
->fq
.quantum
;
/*
 * Allocate and initialise the fq-codel machinery used by the TXQ code.
 * A no-op (success) for drivers that don't use wake_tx_queue.
 * Returns 0 or a negative errno.
 */
int ieee80211_txq_setup_flows(struct ieee80211_local *local)
{
	struct fq *fq = &local->fq;
	int ret;
	int i;
	bool supp_vht = false;
	enum nl80211_band band;

	if (!local->ops->wake_tx_queue)
		return 0;

	ret = fq_init(fq, 4096);
	if (ret)
		return ret;

	/*
	 * If the hardware doesn't support VHT, it is safe to limit the
	 * maximum queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
	 */
	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[band];
		if (!sband)
			continue;

		supp_vht = supp_vht || sband->vht_cap.vht_supported;
	}

	if (!supp_vht)
		fq->memory_limit = 4 << 20; /* 4 Mbytes */

	codel_params_init(&local->cparams);
	local->cparams.interval = MS2TIME(100);
	local->cparams.target = MS2TIME(20);
	local->cparams.ecn = true;

	/* one codel_vars per fq flow, shared across all TXQs */
	local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
			       GFP_KERNEL);
	if (!local->cvars) {
		spin_lock_bh(&fq->lock);
		fq_reset(fq, fq_skb_free_func);
		spin_unlock_bh(&fq->lock);
		return -ENOMEM;
	}

	for (i = 0; i < fq->flows_cnt; i++)
		codel_vars_init(&local->cvars[i]);

	ieee80211_txq_set_params(local);

	return 0;
}
/* Undo ieee80211_txq_setup_flows(): free codel state and drain the fq. */
void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
{
	struct fq *fq = &local->fq;

	if (!local->ops->wake_tx_queue)
		return;

	kfree(local->cvars);
	local->cvars = NULL;

	spin_lock_bh(&fq->lock);
	fq_reset(fq, fq_skb_free_func);
	spin_unlock_bh(&fq->lock);
}
/*
 * Try to route a frame through the intermediate TX queues.
 * Returns true if the frame was queued (caller must not touch it again),
 * false if the caller should transmit it directly.
 */
static bool ieee80211_queue_skb(struct ieee80211_local *local,
				struct ieee80211_sub_if_data *sdata,
				struct sta_info *sta,
				struct sk_buff *skb)
{
	struct ieee80211_vif *vif;
	struct txq_info *txqi;

	/* no TXQs without wake_tx_queue; monitor frames bypass them */
	if (!local->ops->wake_tx_queue ||
	    sdata->vif.type == NL80211_IFTYPE_MONITOR)
		return false;

	/* VLANs share their AP's queues */
	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	vif = &sdata->vif;
	txqi = ieee80211_get_txq(local, vif, sta, skb);

	if (!txqi)
		return false;

	ieee80211_txq_enqueue(local, txqi, skb);

	schedule_and_wake_txq(local, txqi);

	return true;
}
/*
 * Hand a burst of frames (fragments) to the driver, honouring stopped
 * queues.  Returns false if the frames couldn't be sent and were put on
 * the pending queues instead; true otherwise (sent or purged).
 */
static bool ieee80211_tx_frags(struct ieee80211_local *local,
			       struct ieee80211_vif *vif,
			       struct sta_info *sta,
			       struct sk_buff_head *skbs,
			       bool txpending)
{
	struct ieee80211_tx_control control = {};
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	skb_queue_walk_safe(skbs, skb, tmp) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		int q = info->hw_queue;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		if (WARN_ON_ONCE(q >= local->hw.queues)) {
			__skb_unlink(skb, skbs);
			ieee80211_free_txskb(&local->hw, skb);
			continue;
		}
#endif

		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		if (local->queue_stop_reasons[q] ||
		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
			if (unlikely(info->flags &
				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
				if (local->queue_stop_reasons[q] &
				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
					/*
					 * Drop off-channel frames if queues
					 * are stopped for any reason other
					 * than off-channel operation. Never
					 * queue them.
					 */
					spin_unlock_irqrestore(
						&local->queue_stop_reason_lock,
						flags);
					ieee80211_purge_tx_queue(&local->hw,
								 skbs);
					return true;
				}
			} else {

				/*
				 * Since queue is stopped, queue up frames for
				 * later transmission from the tx-pending
				 * tasklet when the queue is woken again.
				 */
				if (txpending)
					skb_queue_splice_init(skbs,
							      &local->pending[q]);
				else
					skb_queue_splice_tail_init(skbs,
								   &local->pending[q]);

				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
						       flags);
				return false;
			}
		}
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

		info->control.vif = vif;
		control.sta = sta ? &sta->sta : NULL;

		__skb_unlink(skb, skbs);
		drv_tx(local, &control, skb);
	}

	return true;
}
/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool __ieee80211_tx(struct ieee80211_local *local,
			   struct sk_buff_head *skbs, int led_len,
			   struct sta_info *sta, bool txpending)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_vif *vif;
	struct sk_buff *skb;
	bool result;
	__le16 fc;

	if (WARN_ON(skb_queue_empty(skbs)))
		return true;

	skb = skb_peek(skbs);
	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
	info = IEEE80211_SKB_CB(skb);
	sdata = vif_to_sdata(info->control.vif);
	if (sta && !sta->uploaded)
		sta = NULL;

	/* resolve which vif (if any) the driver should see */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_MONITOR:
		if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
			vif = &sdata->vif;
			break;
		}
		sdata = rcu_dereference(local->monitor_sdata);
		if (sdata) {
			vif = &sdata->vif;
			info->hw_queue =
				vif->hw_queue[skb_get_queue_mapping(skb)];
		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
			/* no monitor vif to map the hw queue — drop */
			ieee80211_purge_tx_queue(&local->hw, skbs);
			return true;
		} else
			vif = NULL;
		break;
	case NL80211_IFTYPE_AP_VLAN:
		/* VLAN frames are transmitted through the AP interface */
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);
		fallthrough;
	default:
		vif = &sdata->vif;
		break;
	}

	result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);

	ieee80211_tpt_led_trig_tx(local, fc, led_len);

	WARN_ON_ONCE(!skb_queue_empty(skbs));

	return result;
}
/*
 * Invoke TX handlers, return 0 on success and non-zero if the
 * frame was dropped or queued.
 *
 * The handlers are split into an early and late part. The latter is everything
 * that can be sensitive to reordering, and will be deferred to after packets
 * are dequeued from the intermediate queues (when they are enabled).
 */
static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
{
	ieee80211_tx_result res = TX_DROP;

/* run one handler; bail out of the chain on anything but TX_CONTINUE */
#define CALL_TXH(txh) \
	do {				\
		res = txh(tx);		\
		if (res != TX_CONTINUE)	\
			goto txh_done;	\
	} while (0)

	CALL_TXH(ieee80211_tx_h_dynamic_ps);
	CALL_TXH(ieee80211_tx_h_check_assoc);
	CALL_TXH(ieee80211_tx_h_ps_buf);
	CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
	CALL_TXH(ieee80211_tx_h_select_key);

 txh_done:
	if (unlikely(res == TX_DROP)) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop);
		/* tx->skb is NULL once frames moved to the tx->skbs list */
		if (tx->skb)
			ieee80211_free_txskb(&tx->local->hw, tx->skb);
		else
			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}
/*
 * Late handlers can be called while the sta lock is held. Handlers that can
 * cause packets to be generated will cause deadlock!
 */
static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	ieee80211_tx_result res = TX_CONTINUE;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
		/* retransmissions were fully processed before — skip handlers */
		__skb_queue_tail(&tx->skbs, tx->skb);
		tx->skb = NULL;
		goto txh_done;
	}

	/* software rate control only when the hardware has none */
	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_rate_ctrl);

	CALL_TXH(ieee80211_tx_h_michael_mic_add);
	CALL_TXH(ieee80211_tx_h_sequence);
	CALL_TXH(ieee80211_tx_h_fragment);
	/* handlers after fragment must be aware of tx info fragmentation! */
	CALL_TXH(ieee80211_tx_h_stats);
	CALL_TXH(ieee80211_tx_h_encrypt);
	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH

 txh_done:
	if (unlikely(res == TX_DROP)) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop);
		if (tx->skb)
			ieee80211_free_txskb(&tx->local->hw, tx->skb);
		else
			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}
/* Run the early and late TX handler chains back to back. */
static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
	int err;

	err = invoke_tx_handlers_early(tx);
	if (err)
		return err;

	return invoke_tx_handlers_late(tx);
}
/*
 * Run the full TX-handler pipeline on a single, unfragmented frame on
 * behalf of a driver.  Returns true on success; on failure the skb has
 * been consumed.  Optionally reports the matched station via @sta.
 */
bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, struct sk_buff *skb,
			      int band, struct ieee80211_sta **sta)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_data tx;
	struct sk_buff *skb2;

	if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
		return false;

	info->band = band;
	info->control.vif = vif;
	info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];

	if (invoke_tx_handlers(&tx))
		return false;

	if (sta) {
		if (tx.sta)
			*sta = &tx.sta->sta;
		else
			*sta = NULL;
	}

	/* this function isn't suitable for fragmented data frames */
	skb2 = __skb_dequeue(&tx.skbs);
	if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
		ieee80211_free_txskb(hw, skb2);
		ieee80211_purge_tx_queue(hw, &tx.skbs);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
			 struct sta_info *sta, struct sk_buff *skb,
			 bool txpending)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result res_prepare;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool result = true;
	int led_len;

	/* too small to hold any valid 802.11 header */
	if (unlikely(skb->len < 10)) {
		dev_kfree_skb(skb);
		return true;
	}

	/* initialises tx */
	led_len = skb->len;
	res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);

	if (unlikely(res_prepare == TX_DROP)) {
		ieee80211_free_txskb(&local->hw, skb);
		return true;
	} else if (unlikely(res_prepare == TX_QUEUED)) {
		return true;
	}

	/* set up hw_queue value early */
	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
	    !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
		info->hw_queue =
			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	if (invoke_tx_handlers_early(&tx))
		return true;

	/* if the frame lands on a TXQ, late handlers run at dequeue time */
	if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
		return true;

	if (!invoke_tx_handlers_late(&tx))
		result = __ieee80211_tx(local, &tx.skbs, led_len,
					tx.sta, txpending);

	return result;
}
/* device xmit handlers */

/* How (whether) a frame will be encrypted — drives headroom/tailroom needs. */
enum ieee80211_encrypt {
	ENCRYPT_NO,
	ENCRYPT_MGMT,
	ENCRYPT_DATA,
};
/*
 * Make sure the skb has @head_need bytes of headroom and, when encryption
 * needs tailroom, enough tailroom as well; also un-clones where required.
 * Returns 0 on success or -ENOMEM.
 */
static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
				struct sk_buff *skb,
				int head_need,
				enum ieee80211_encrypt encrypt)
{
	struct ieee80211_local *local = sdata->local;
	bool enc_tailroom;
	int tail_need = 0;

	/* mgmt encryption, or data encryption with keys needing tailroom */
	enc_tailroom = encrypt == ENCRYPT_MGMT ||
		       (encrypt == ENCRYPT_DATA &&
			sdata->crypto_tx_tailroom_needed_cnt);

	if (enc_tailroom) {
		tail_need = IEEE80211_ENCRYPT_TAILROOM;
		tail_need -= skb_tailroom(skb);
		tail_need = max_t(int, tail_need, 0);
	}

	if (skb_cloned(skb) &&
	    (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
	     !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
	else if (head_need || tail_need)
		I802_DEBUG_INC(local->tx_expand_skb_head);
	else
		return 0;	/* nothing to do */

	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
		wiphy_debug(local->hw.wiphy,
			    "failed to reallocate TX buffer\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Final common transmit path for fully built 802.11 frames: grows the
 * skb for driver/crypto headroom, resolves mesh next hops, sets the QoS
 * header and hands off to ieee80211_tx().  Consumes the skb.
 */
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
		    struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int headroom;
	enum ieee80211_encrypt encrypt;

	if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
		encrypt = ENCRYPT_NO;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		encrypt = ENCRYPT_MGMT;
	else
		encrypt = ENCRYPT_DATA;

	headroom = local->tx_headroom;
	if (encrypt != ENCRYPT_NO)
		headroom += sdata->encrypt_headroom;
	headroom -= skb_headroom(skb);
	headroom = max_t(int, 0, headroom);

	if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
		ieee80211_free_txskb(&local->hw, skb);
		return;
	}

	/* reload after potential resize */
	hdr = (struct ieee80211_hdr *) skb->data;
	info->control.vif = &sdata->vif;

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		if (ieee80211_is_data(hdr->frame_control) &&
		    is_unicast_ether_addr(hdr->addr1)) {
			if (mesh_nexthop_resolve(sdata, skb))
				return; /* skb queued: don't free */
		} else {
			ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
		}
	}

	ieee80211_set_qos_hdr(sdata, skb);
	ieee80211_tx(sdata, sta, skb, false);
}
/* Validate that the skb starts with a usable (version 0) radiotap header. */
static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
{
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *)skb->data;

	/* check for not even having the fixed radiotap header part */
	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
		return false; /* too short to be possibly valid */

	/* is it a header version we can trust to find length from? */
	if (unlikely(rthdr->it_version))
		return false; /* only version 0 is supported */

	/* does the skb contain enough to deliver on the alleged length? */
	if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
		return false; /* skb too short for claimed rt header extent */

	return true;
}
/*
 * Parse the radiotap header of an injected frame and translate the
 * recognised fields (flags, TX flags, legacy/MCS/VHT rate, retries)
 * into tx_info flags and rate-control overrides.
 * Returns false if the radiotap data is malformed.
 */
bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_radiotap_iterator iterator;
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
						   NULL);
	u16 txflags;
	u16 rate = 0;
	bool rate_found = false;
	u8 rate_retries = 0;
	u16 rate_flags = 0;
	u8 mcs_known, mcs_flags, mcs_bw;
	u16 vht_known;
	u8 vht_mcs = 0, vht_nss = 0;
	int i;

	if (!ieee80211_validate_radiotap_len(skb))
		return false;

	/* injected frames default to no encryption and no fragmentation */
	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
		       IEEE80211_TX_CTL_DONTFRAG;

	/*
	 * for every radiotap entry that is present
	 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
	 * entries present, or -EINVAL on error)
	 */

	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&iterator);

		if (ret)
			continue;

		/* see if this argument is something we can use */
		switch (iterator.this_arg_index) {
		/*
		 * You must take care when dereferencing iterator.this_arg
		 * for multibyte types... the pointer is not aligned.  Use
		 * get_unaligned((type *)iterator.this_arg) to dereference
		 * iterator.this_arg for type "type" safely on all arches.
		*/
		case IEEE80211_RADIOTAP_FLAGS:
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
				/*
				 * this indicates that the skb we have been
				 * handed has the 32-bit FCS CRC at the end...
				 * we should react to that by snipping it off
				 * because it will be recomputed and added
				 * when the device transmits it
				 */
				if (skb->len < (iterator._max_length + FCS_LEN))
					return false;

				skb_trim(skb, skb->len - FCS_LEN);
			}
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
				info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
				info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
			break;

		case IEEE80211_RADIOTAP_TX_FLAGS:
			txflags = get_unaligned_le16(iterator.this_arg);
			if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
				info->flags |= IEEE80211_TX_CTL_NO_ACK;
			if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO)
				info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
			if (txflags & IEEE80211_RADIOTAP_F_TX_ORDER)
				info->control.flags |=
					IEEE80211_TX_CTRL_DONT_REORDER;
			break;

		case IEEE80211_RADIOTAP_RATE:
			/* legacy rate, in 500 kbps units */
			rate = *iterator.this_arg;
			rate_flags = 0;
			rate_found = true;
			break;

		case IEEE80211_RADIOTAP_DATA_RETRIES:
			rate_retries = *iterator.this_arg;
			break;

		case IEEE80211_RADIOTAP_MCS:
			mcs_known = iterator.this_arg[0];
			mcs_flags = iterator.this_arg[1];
			if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
				break;

			rate_found = true;
			rate = iterator.this_arg[2];
			rate_flags = IEEE80211_TX_RC_MCS;

			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
			    mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
				rate_flags |= IEEE80211_TX_RC_SHORT_GI;

			mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK;
			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
			    mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40)
				rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;

			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FEC &&
			    mcs_flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC)
				info->flags |= IEEE80211_TX_CTL_LDPC;

			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_STBC) {
				u8 stbc = u8_get_bits(mcs_flags,
						      IEEE80211_RADIOTAP_MCS_STBC_MASK);

				info->flags |=
					u32_encode_bits(stbc,
							IEEE80211_TX_CTL_STBC);
			}
			break;

		case IEEE80211_RADIOTAP_VHT:
			vht_known = get_unaligned_le16(iterator.this_arg);
			rate_found = true;

			rate_flags = IEEE80211_TX_RC_VHT_MCS;
			if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) &&
			    (iterator.this_arg[2] &
			     IEEE80211_RADIOTAP_VHT_FLAG_SGI))
				rate_flags |= IEEE80211_TX_RC_SHORT_GI;
			if (vht_known &
			    IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
				/* radiotap VHT bandwidth codes: 1=40, 4=80, 11=160 MHz */
				if (iterator.this_arg[3] == 1)
					rate_flags |=
						IEEE80211_TX_RC_40_MHZ_WIDTH;
				else if (iterator.this_arg[3] == 4)
					rate_flags |=
						IEEE80211_TX_RC_80_MHZ_WIDTH;
				else if (iterator.this_arg[3] == 11)
					rate_flags |=
						IEEE80211_TX_RC_160_MHZ_WIDTH;
			}

			vht_mcs = iterator.this_arg[4] >> 4;
			vht_nss = iterator.this_arg[4] & 0xF;
			break;

		/*
		 * Please update the file
		 * Documentation/networking/mac80211-injection.rst
		 * when parsing new fields here.
		 */

		default:
			break;
		}
	}

	if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
		return false;

	if (rate_found) {
		struct ieee80211_supported_band *sband =
			local->hw.wiphy->bands[info->band];

		info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;

		for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
			info->control.rates[i].idx = -1;
			info->control.rates[i].flags = 0;
			info->control.rates[i].count = 0;
		}

		if (rate_flags & IEEE80211_TX_RC_MCS) {
			info->control.rates[0].idx = rate;
		} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
			ieee80211_rate_set_vht(info->control.rates, vht_mcs,
					       vht_nss);
		} else if (sband) {
			/* legacy rate: look up the bitrate table index */
			for (i = 0; i < sband->n_bitrates; i++) {
				if (rate * 5 != sband->bitrates[i].bitrate)
					continue;

				info->control.rates[0].idx = i;
				break;
			}
		}

		/* no usable rate found — fall back to normal rate control */
		if (info->control.rates[0].idx < 0)
			info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT;

		info->control.rates[0].flags = rate_flags;
		info->control.rates[0].count = min_t(u8, rate_retries + 1,
						     local->hw.max_rate_tries);
	}

	return true;
}
/*
 * ndo_start_xmit for monitor interfaces: validate the radiotap header,
 * find the best interface/channel for the injected frame, apply the
 * radiotap overrides, strip the radiotap header and transmit.
 * Always returns NETDEV_TX_OK (the skb is consumed either way).
 */
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ieee80211_sub_if_data *tmp_sdata, *sdata;
	struct cfg80211_chan_def *chandef;
	u16 len_rthdr;
	int hdrlen;

	memset(info, 0, sizeof(*info));
	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
		      IEEE80211_TX_CTL_INJECTED;

	/* Sanity-check the length of the radiotap header */
	if (!ieee80211_validate_radiotap_len(skb))
		goto fail;

	/* we now know there is a radiotap header with a length we can use */
	len_rthdr = ieee80211_get_radiotap_len(skb->data);

	/*
	 * fix up the pointers accounting for the radiotap
	 * header still being in there.  We are being given
	 * a precooked IEEE80211 header so no need for
	 * normal processing
	 */
	skb_set_mac_header(skb, len_rthdr);
	/*
	 * these are just fixed to the end of the rt area since we
	 * don't have any better information and at this point, nobody cares
	 */
	skb_set_network_header(skb, len_rthdr);
	skb_set_transport_header(skb, len_rthdr);

	if (skb->len < len_rthdr + 2)
		goto fail;

	hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (skb->len < len_rthdr + hdrlen)
		goto fail;

	/*
	 * Initialize skb->protocol if the injected frame is a data frame
	 * carrying a rfc1042 header
	 */
	if (ieee80211_is_data(hdr->frame_control) &&
	    skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
		u8 *payload = (u8 *)hdr + hdrlen;

		if (ether_addr_equal(payload, rfc1042_header))
			skb->protocol = cpu_to_be16((payload[6] << 8) |
						    payload[7]);
	}

	rcu_read_lock();

	/*
	 * We process outgoing injected frames that have a local address
	 * we handle as though they are non-injected frames.
	 * This code here isn't entirely correct, the local MAC address
	 * isn't always enough to find the interface to use; for proper
	 * VLAN support we have an nl80211-based mechanism.
	 *
	 * This is necessary, for example, for old hostapd versions that
	 * don't use nl80211-based management TX/RX.
	 */
	sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(tmp_sdata))
			continue;
		if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			continue;
		if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
			sdata = tmp_sdata;
			break;
		}
	}

	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (!chanctx_conf) {
		/* fall back to the monitor interface's channel context */
		tmp_sdata = rcu_dereference(local->monitor_sdata);
		if (tmp_sdata)
			chanctx_conf =
				rcu_dereference(tmp_sdata->vif.chanctx_conf);
	}

	if (chanctx_conf)
		chandef = &chanctx_conf->def;
	else if (!local->use_chanctx)
		chandef = &local->_oper_chandef;
	else
		goto fail_rcu;

	/*
	 * Frame injection is not allowed if beaconing is not allowed
	 * or if we need radar detection. Beaconing is usually not allowed when
	 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
	 * Passive scan is also used in world regulatory domains where
	 * your country is not known and as such it should be treated as
	 * NO TX unless the channel is explicitly allowed in which case
	 * your current regulatory domain would not have the passive scan
	 * flag.
	 *
	 * Since AP mode uses monitor interfaces to inject/TX management
	 * frames we can make AP mode the exception to this rule once it
	 * supports radar detection as its implementation can deal with
	 * radar detection by itself. We can do that later by adding a
	 * monitor flag interfaces used for AP support.
	 */
	if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef,
				     sdata->vif.type))
		goto fail_rcu;

	info->band = chandef->chan->band;

	/* Initialize skb->priority according to frame type and TID class,
	 * with respect to the sub interface that the frame will actually
	 * be transmitted on. If the DONT_REORDER flag is set, the original
	 * skb-priority is preserved to assure frames injected with this
	 * flag are not reordered relative to each other.
	 */
	ieee80211_select_queue_80211(sdata, skb, hdr);
	skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));

	/*
	 * Process the radiotap header. This will now take into account the
	 * selected chandef above to accurately set injection rates and
	 * retransmissions.
	 */
	if (!ieee80211_parse_tx_radiotap(skb, dev))
		goto fail_rcu;

	/* remove the injection radiotap header */
	skb_pull(skb, len_rthdr);

	ieee80211_xmit(sdata, NULL, skb);
	rcu_read_unlock();

	return NETDEV_TX_OK;

fail_rcu:
	rcu_read_unlock();
fail:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK; /* meaning, we dealt with the skb */
}
/*
 * Is this (802.3-framed) skb a TDLS setup frame?  Checks the ethertype
 * and the TDLS payload type byte after the 14-byte Ethernet header.
 */
static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
{
	u16 ethertype = (skb->data[12] << 8) | skb->data[13];

	return ethertype == ETH_P_TDLS && skb->len > 14 &&
	       skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
}
/*
 * Resolve the receiver station for an outgoing 802.3 frame.
 * On success, *sta_out is a valid sta_info, NULL (mesh: determined
 * later), or ERR_PTR(-ENOENT) when the station is known not to exist.
 * Returns 0 or a negative errno when the frame must be dropped.
 */
int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
			    struct sk_buff *skb,
			    struct sta_info **sta_out)
{
	struct sta_info *sta;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		sta = rcu_dereference(sdata->u.vlan.sta);
		if (sta) {
			*sta_out = sta;
			return 0;
		} else if (sdata->wdev.use_4addr) {
			/* 4-addr VLAN without its station can't transmit */
			return -ENOLINK;
		}
		fallthrough;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_OCB:
	case NL80211_IFTYPE_ADHOC:
		if (is_multicast_ether_addr(skb->data)) {
			*sta_out = ERR_PTR(-ENOENT);
			return 0;
		}
		sta = sta_info_get_bss(sdata, skb->data);
		break;
#ifdef CONFIG_MAC80211_MESH
	case NL80211_IFTYPE_MESH_POINT:
		/* determined much later */
		*sta_out = NULL;
		return 0;
#endif
	case NL80211_IFTYPE_STATION:
		if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
			sta = sta_info_get(sdata, skb->data);
			if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
				if (test_sta_flag(sta,
						  WLAN_STA_TDLS_PEER_AUTH)) {
					*sta_out = sta;
					return 0;
				}

				/*
				 * TDLS link during setup - throw out frames to
				 * peer. Allow TDLS-setup frames to unauthorized
				 * peers for the special case of a link teardown
				 * after a TDLS sta is removed due to being
				 * unreachable.
				 */
				if (!ieee80211_is_tdls_setup(skb))
					return -EINVAL;
			}

		}

		/* otherwise everything goes via the AP */
		sta = sta_info_get(sdata, sdata->u.mgd.bssid);
		break;
	default:
		return -EINVAL;
	}

	*sta_out = sta ?: ERR_PTR(-ENOENT);
	return 0;
}
2466 static u16
ieee80211_store_ack_skb(struct ieee80211_local
*local
,
2467 struct sk_buff
*skb
,
2471 struct sk_buff
*ack_skb
;
2475 ack_skb
= skb_clone_sk(skb
);
2477 ack_skb
= skb_clone(skb
, GFP_ATOMIC
);
2480 unsigned long flags
;
2483 spin_lock_irqsave(&local
->ack_status_lock
, flags
);
2484 id
= idr_alloc(&local
->ack_status_frames
, ack_skb
,
2485 1, 0x2000, GFP_ATOMIC
);
2486 spin_unlock_irqrestore(&local
->ack_status_lock
, flags
);
2490 *info_flags
|= IEEE80211_TX_CTL_REQ_TX_STATUS
;
2492 *cookie
= ieee80211_mgmt_tx_cookie(local
);
2493 IEEE80211_SKB_CB(ack_skb
)->ack
.cookie
= *cookie
;
2504 * ieee80211_build_hdr - build 802.11 header in the given frame
2505 * @sdata: virtual interface to build the header for
2506 * @skb: the skb to build the header in
2507 * @info_flags: skb flags to set
2508 * @sta: the station pointer
2509 * @ctrl_flags: info control flags to set
2510 * @cookie: cookie pointer to fill (if not %NULL)
2512 * This function takes the skb with 802.3 header and reformats the header to
2513 * the appropriate IEEE 802.11 header based on which interface the packet is
2514 * being transmitted on.
2516 * Note that this function also takes care of the TX status request and
2517 * potential unsharing of the SKB - this needs to be interleaved with the
2520 * The function requires the read-side RCU lock held
2522 * Returns: the (possibly reallocated) skb or an ERR_PTR() code
2524 static struct sk_buff
*ieee80211_build_hdr(struct ieee80211_sub_if_data
*sdata
,
2525 struct sk_buff
*skb
, u32 info_flags
,
2526 struct sta_info
*sta
, u32 ctrl_flags
,
2529 struct ieee80211_local
*local
= sdata
->local
;
2530 struct ieee80211_tx_info
*info
;
2532 u16 ethertype
, hdrlen
, meshhdrlen
= 0;
2534 struct ieee80211_hdr hdr
;
2535 struct ieee80211s_hdr mesh_hdr __maybe_unused
;
2536 struct mesh_path __maybe_unused
*mppath
= NULL
, *mpath
= NULL
;
2537 const u8
*encaps_data
;
2538 int encaps_len
, skip_header_bytes
;
2539 bool wme_sta
= false, authorized
= false;
2543 struct ieee80211_chanctx_conf
*chanctx_conf
;
2544 struct ieee80211_sub_if_data
*ap_sdata
;
2545 enum nl80211_band band
;
2551 #ifdef CONFIG_MAC80211_DEBUGFS
2552 if (local
->force_tx_status
)
2553 info_flags
|= IEEE80211_TX_CTL_REQ_TX_STATUS
;
2556 /* convert Ethernet header to proper 802.11 header (based on
2557 * operation mode) */
2558 ethertype
= (skb
->data
[12] << 8) | skb
->data
[13];
2559 fc
= cpu_to_le16(IEEE80211_FTYPE_DATA
| IEEE80211_STYPE_DATA
);
2561 switch (sdata
->vif
.type
) {
2562 case NL80211_IFTYPE_AP_VLAN
:
2563 if (sdata
->wdev
.use_4addr
) {
2564 fc
|= cpu_to_le16(IEEE80211_FCTL_FROMDS
| IEEE80211_FCTL_TODS
);
2566 memcpy(hdr
.addr1
, sta
->sta
.addr
, ETH_ALEN
);
2567 memcpy(hdr
.addr2
, sdata
->vif
.addr
, ETH_ALEN
);
2568 memcpy(hdr
.addr3
, skb
->data
, ETH_ALEN
);
2569 memcpy(hdr
.addr4
, skb
->data
+ ETH_ALEN
, ETH_ALEN
);
2571 authorized
= test_sta_flag(sta
, WLAN_STA_AUTHORIZED
);
2572 wme_sta
= sta
->sta
.wme
;
2574 ap_sdata
= container_of(sdata
->bss
, struct ieee80211_sub_if_data
,
2576 chanctx_conf
= rcu_dereference(ap_sdata
->vif
.chanctx_conf
);
2577 if (!chanctx_conf
) {
2581 band
= chanctx_conf
->def
.chan
->band
;
2582 if (sdata
->wdev
.use_4addr
)
2585 case NL80211_IFTYPE_AP
:
2586 if (sdata
->vif
.type
== NL80211_IFTYPE_AP
)
2587 chanctx_conf
= rcu_dereference(sdata
->vif
.chanctx_conf
);
2588 if (!chanctx_conf
) {
2592 fc
|= cpu_to_le16(IEEE80211_FCTL_FROMDS
);
2594 memcpy(hdr
.addr1
, skb
->data
, ETH_ALEN
);
2595 memcpy(hdr
.addr2
, sdata
->vif
.addr
, ETH_ALEN
);
2596 memcpy(hdr
.addr3
, skb
->data
+ ETH_ALEN
, ETH_ALEN
);
2598 band
= chanctx_conf
->def
.chan
->band
;
2600 #ifdef CONFIG_MAC80211_MESH
2601 case NL80211_IFTYPE_MESH_POINT
:
2602 if (!is_multicast_ether_addr(skb
->data
)) {
2603 struct sta_info
*next_hop
;
2604 bool mpp_lookup
= true;
2606 mpath
= mesh_path_lookup(sdata
, skb
->data
);
2609 next_hop
= rcu_dereference(mpath
->next_hop
);
2611 !(mpath
->flags
& (MESH_PATH_ACTIVE
|
2612 MESH_PATH_RESOLVING
)))
2617 mppath
= mpp_path_lookup(sdata
, skb
->data
);
2619 mppath
->exp_time
= jiffies
;
2622 if (mppath
&& mpath
)
2623 mesh_path_del(sdata
, mpath
->dst
);
2627 * Use address extension if it is a packet from
2628 * another interface or if we know the destination
2629 * is being proxied by a portal (i.e. portal address
2630 * differs from proxied address)
2632 if (ether_addr_equal(sdata
->vif
.addr
, skb
->data
+ ETH_ALEN
) &&
2633 !(mppath
&& !ether_addr_equal(mppath
->mpp
, skb
->data
))) {
2634 hdrlen
= ieee80211_fill_mesh_addresses(&hdr
, &fc
,
2635 skb
->data
, skb
->data
+ ETH_ALEN
);
2636 meshhdrlen
= ieee80211_new_mesh_header(sdata
, &mesh_hdr
,
2639 /* DS -> MBSS (802.11-2012 13.11.3.3).
2640 * For unicast with unknown forwarding information,
2641 * destination might be in the MBSS or if that fails
2642 * forwarded to another mesh gate. In either case
2643 * resolution will be handled in ieee80211_xmit(), so
2644 * leave the original DA. This also works for mcast */
2645 const u8
*mesh_da
= skb
->data
;
2648 mesh_da
= mppath
->mpp
;
2650 mesh_da
= mpath
->dst
;
2652 hdrlen
= ieee80211_fill_mesh_addresses(&hdr
, &fc
,
2653 mesh_da
, sdata
->vif
.addr
);
2654 if (is_multicast_ether_addr(mesh_da
))
2655 /* DA TA mSA AE:SA */
2656 meshhdrlen
= ieee80211_new_mesh_header(
2658 skb
->data
+ ETH_ALEN
, NULL
);
2660 /* RA TA mDA mSA AE:DA SA */
2661 meshhdrlen
= ieee80211_new_mesh_header(
2662 sdata
, &mesh_hdr
, skb
->data
,
2663 skb
->data
+ ETH_ALEN
);
2666 chanctx_conf
= rcu_dereference(sdata
->vif
.chanctx_conf
);
2667 if (!chanctx_conf
) {
2671 band
= chanctx_conf
->def
.chan
->band
;
2673 /* For injected frames, fill RA right away as nexthop lookup
2676 if ((ctrl_flags
& IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP
) &&
2677 is_zero_ether_addr(hdr
.addr1
))
2678 memcpy(hdr
.addr1
, skb
->data
, ETH_ALEN
);
2681 case NL80211_IFTYPE_STATION
:
2682 /* we already did checks when looking up the RA STA */
2683 tdls_peer
= test_sta_flag(sta
, WLAN_STA_TDLS_PEER
);
2687 memcpy(hdr
.addr1
, skb
->data
, ETH_ALEN
);
2688 memcpy(hdr
.addr2
, skb
->data
+ ETH_ALEN
, ETH_ALEN
);
2689 memcpy(hdr
.addr3
, sdata
->u
.mgd
.bssid
, ETH_ALEN
);
2691 } else if (sdata
->u
.mgd
.use_4addr
&&
2692 cpu_to_be16(ethertype
) != sdata
->control_port_protocol
) {
2693 fc
|= cpu_to_le16(IEEE80211_FCTL_FROMDS
|
2694 IEEE80211_FCTL_TODS
);
2696 memcpy(hdr
.addr1
, sdata
->u
.mgd
.bssid
, ETH_ALEN
);
2697 memcpy(hdr
.addr2
, sdata
->vif
.addr
, ETH_ALEN
);
2698 memcpy(hdr
.addr3
, skb
->data
, ETH_ALEN
);
2699 memcpy(hdr
.addr4
, skb
->data
+ ETH_ALEN
, ETH_ALEN
);
2702 fc
|= cpu_to_le16(IEEE80211_FCTL_TODS
);
2704 memcpy(hdr
.addr1
, sdata
->u
.mgd
.bssid
, ETH_ALEN
);
2705 memcpy(hdr
.addr2
, skb
->data
+ ETH_ALEN
, ETH_ALEN
);
2706 memcpy(hdr
.addr3
, skb
->data
, ETH_ALEN
);
2709 chanctx_conf
= rcu_dereference(sdata
->vif
.chanctx_conf
);
2710 if (!chanctx_conf
) {
2714 band
= chanctx_conf
->def
.chan
->band
;
2716 case NL80211_IFTYPE_OCB
:
2718 memcpy(hdr
.addr1
, skb
->data
, ETH_ALEN
);
2719 memcpy(hdr
.addr2
, skb
->data
+ ETH_ALEN
, ETH_ALEN
);
2720 eth_broadcast_addr(hdr
.addr3
);
2722 chanctx_conf
= rcu_dereference(sdata
->vif
.chanctx_conf
);
2723 if (!chanctx_conf
) {
2727 band
= chanctx_conf
->def
.chan
->band
;
2729 case NL80211_IFTYPE_ADHOC
:
2731 memcpy(hdr
.addr1
, skb
->data
, ETH_ALEN
);
2732 memcpy(hdr
.addr2
, skb
->data
+ ETH_ALEN
, ETH_ALEN
);
2733 memcpy(hdr
.addr3
, sdata
->u
.ibss
.bssid
, ETH_ALEN
);
2735 chanctx_conf
= rcu_dereference(sdata
->vif
.chanctx_conf
);
2736 if (!chanctx_conf
) {
2740 band
= chanctx_conf
->def
.chan
->band
;
2747 multicast
= is_multicast_ether_addr(hdr
.addr1
);
2749 /* sta is always NULL for mesh */
2751 authorized
= test_sta_flag(sta
, WLAN_STA_AUTHORIZED
);
2752 wme_sta
= sta
->sta
.wme
;
2753 } else if (ieee80211_vif_is_mesh(&sdata
->vif
)) {
2754 /* For mesh, the use of the QoS header is mandatory */
2758 /* receiver does QoS (which also means we do) use it */
2760 fc
|= cpu_to_le16(IEEE80211_STYPE_QOS_DATA
);
2765 * Drop unicast frames to unauthorised stations unless they are
2766 * EAPOL frames from the local station.
2768 if (unlikely(!ieee80211_vif_is_mesh(&sdata
->vif
) &&
2769 (sdata
->vif
.type
!= NL80211_IFTYPE_OCB
) &&
2770 !multicast
&& !authorized
&&
2771 (cpu_to_be16(ethertype
) != sdata
->control_port_protocol
||
2772 !ether_addr_equal(sdata
->vif
.addr
, skb
->data
+ ETH_ALEN
)))) {
2773 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
2774 net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
2775 sdata
->name
, hdr
.addr1
);
2778 I802_DEBUG_INC(local
->tx_handlers_drop_unauth_port
);
2784 if (unlikely(!multicast
&& ((skb
->sk
&&
2785 skb_shinfo(skb
)->tx_flags
& SKBTX_WIFI_STATUS
) ||
2786 ctrl_flags
& IEEE80211_TX_CTL_REQ_TX_STATUS
)))
2787 info_id
= ieee80211_store_ack_skb(local
, skb
, &info_flags
,
2791 * If the skb is shared we need to obtain our own copy.
2793 if (skb_shared(skb
)) {
2794 struct sk_buff
*tmp_skb
= skb
;
2796 /* can't happen -- skb is a clone if info_id != 0 */
2799 skb
= skb_clone(skb
, GFP_ATOMIC
);
2808 hdr
.frame_control
= fc
;
2809 hdr
.duration_id
= 0;
2812 skip_header_bytes
= ETH_HLEN
;
2813 if (ethertype
== ETH_P_AARP
|| ethertype
== ETH_P_IPX
) {
2814 encaps_data
= bridge_tunnel_header
;
2815 encaps_len
= sizeof(bridge_tunnel_header
);
2816 skip_header_bytes
-= 2;
2817 } else if (ethertype
>= ETH_P_802_3_MIN
) {
2818 encaps_data
= rfc1042_header
;
2819 encaps_len
= sizeof(rfc1042_header
);
2820 skip_header_bytes
-= 2;
2826 skb_pull(skb
, skip_header_bytes
);
2827 head_need
= hdrlen
+ encaps_len
+ meshhdrlen
- skb_headroom(skb
);
2830 * So we need to modify the skb header and hence need a copy of
2831 * that. The head_need variable above doesn't, so far, include
2832 * the needed header space that we don't need right away. If we
2833 * can, then we don't reallocate right now but only after the
2834 * frame arrives at the master device (if it does...)
2836 * If we cannot, however, then we will reallocate to include all
2837 * the ever needed space. Also, if we need to reallocate it anyway,
2838 * make it big enough for everything we may ever need.
2841 if (head_need
> 0 || skb_cloned(skb
)) {
2842 head_need
+= sdata
->encrypt_headroom
;
2843 head_need
+= local
->tx_headroom
;
2844 head_need
= max_t(int, 0, head_need
);
2845 if (ieee80211_skb_resize(sdata
, skb
, head_need
, ENCRYPT_DATA
)) {
2846 ieee80211_free_txskb(&local
->hw
, skb
);
2848 return ERR_PTR(-ENOMEM
);
2853 memcpy(skb_push(skb
, encaps_len
), encaps_data
, encaps_len
);
2855 #ifdef CONFIG_MAC80211_MESH
2857 memcpy(skb_push(skb
, meshhdrlen
), &mesh_hdr
, meshhdrlen
);
2860 if (ieee80211_is_data_qos(fc
)) {
2861 __le16
*qos_control
;
2863 qos_control
= skb_push(skb
, 2);
2864 memcpy(skb_push(skb
, hdrlen
- 2), &hdr
, hdrlen
- 2);
2866 * Maybe we could actually set some fields here, for now just
2867 * initialise to zero to indicate no special operation.
2871 memcpy(skb_push(skb
, hdrlen
), &hdr
, hdrlen
);
2873 skb_reset_mac_header(skb
);
2875 info
= IEEE80211_SKB_CB(skb
);
2876 memset(info
, 0, sizeof(*info
));
2878 info
->flags
= info_flags
;
2879 info
->ack_frame_id
= info_id
;
2881 info
->control
.flags
= ctrl_flags
;
2886 return ERR_PTR(ret
);
2890 * fast-xmit overview
2892 * The core idea of this fast-xmit is to remove per-packet checks by checking
2893 * them out of band. ieee80211_check_fast_xmit() implements the out-of-band
2894 * checks that are needed to get the sta->fast_tx pointer assigned, after which
2895 * much less work can be done per packet. For example, fragmentation must be
2896 * disabled or the fast_tx pointer will not be set. All the conditions are seen
2899 * Once assigned, the fast_tx data structure also caches the per-packet 802.11
2900 * header and other data to aid packet processing in ieee80211_xmit_fast().
2902 * The most difficult part of this is that when any of these assumptions
2903 * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
2904 * ieee80211_check_fast_xmit() or friends) is required to reset the data,
2905 * since the per-packet code no longer checks the conditions. This is reflected
2906 * by the calls to these functions throughout the rest of the code, and must be
2907 * maintained if any of the TX path checks change.
2910 void ieee80211_check_fast_xmit(struct sta_info
*sta
)
2912 struct ieee80211_fast_tx build
= {}, *fast_tx
= NULL
, *old
;
2913 struct ieee80211_local
*local
= sta
->local
;
2914 struct ieee80211_sub_if_data
*sdata
= sta
->sdata
;
2915 struct ieee80211_hdr
*hdr
= (void *)build
.hdr
;
2916 struct ieee80211_chanctx_conf
*chanctx_conf
;
2919 if (!ieee80211_hw_check(&local
->hw
, SUPPORT_FAST_XMIT
))
2922 /* Locking here protects both the pointer itself, and against concurrent
2923 * invocations winning data access races to, e.g., the key pointer that
2925 * Without it, the invocation of this function right after the key
2926 * pointer changes wouldn't be sufficient, as another CPU could access
2927 * the pointer, then stall, and then do the cache update after the CPU
2928 * that invalidated the key.
2929 * With the locking, such scenarios cannot happen as the check for the
2930 * key and the fast-tx assignment are done atomically, so the CPU that
2931 * modifies the key will either wait or other one will see the key
2932 * cleared/changed already.
2934 spin_lock_bh(&sta
->lock
);
2935 if (ieee80211_hw_check(&local
->hw
, SUPPORTS_PS
) &&
2936 !ieee80211_hw_check(&local
->hw
, SUPPORTS_DYNAMIC_PS
) &&
2937 sdata
->vif
.type
== NL80211_IFTYPE_STATION
)
2940 if (!test_sta_flag(sta
, WLAN_STA_AUTHORIZED
))
2943 if (test_sta_flag(sta
, WLAN_STA_PS_STA
) ||
2944 test_sta_flag(sta
, WLAN_STA_PS_DRIVER
) ||
2945 test_sta_flag(sta
, WLAN_STA_PS_DELIVER
) ||
2946 test_sta_flag(sta
, WLAN_STA_CLEAR_PS_FILT
))
2949 if (sdata
->noack_map
)
2952 /* fast-xmit doesn't handle fragmentation at all */
2953 if (local
->hw
.wiphy
->frag_threshold
!= (u32
)-1 &&
2954 !ieee80211_hw_check(&local
->hw
, SUPPORTS_TX_FRAG
))
2958 chanctx_conf
= rcu_dereference(sdata
->vif
.chanctx_conf
);
2959 if (!chanctx_conf
) {
2963 build
.band
= chanctx_conf
->def
.chan
->band
;
2966 fc
= cpu_to_le16(IEEE80211_FTYPE_DATA
| IEEE80211_STYPE_DATA
);
2968 switch (sdata
->vif
.type
) {
2969 case NL80211_IFTYPE_ADHOC
:
2971 build
.da_offs
= offsetof(struct ieee80211_hdr
, addr1
);
2972 build
.sa_offs
= offsetof(struct ieee80211_hdr
, addr2
);
2973 memcpy(hdr
->addr3
, sdata
->u
.ibss
.bssid
, ETH_ALEN
);
2976 case NL80211_IFTYPE_STATION
:
2977 if (test_sta_flag(sta
, WLAN_STA_TDLS_PEER
)) {
2979 build
.da_offs
= offsetof(struct ieee80211_hdr
, addr1
);
2980 build
.sa_offs
= offsetof(struct ieee80211_hdr
, addr2
);
2981 memcpy(hdr
->addr3
, sdata
->u
.mgd
.bssid
, ETH_ALEN
);
2986 if (sdata
->u
.mgd
.use_4addr
) {
2987 /* non-regular ethertype cannot use the fastpath */
2988 fc
|= cpu_to_le16(IEEE80211_FCTL_FROMDS
|
2989 IEEE80211_FCTL_TODS
);
2991 memcpy(hdr
->addr1
, sdata
->u
.mgd
.bssid
, ETH_ALEN
);
2992 memcpy(hdr
->addr2
, sdata
->vif
.addr
, ETH_ALEN
);
2993 build
.da_offs
= offsetof(struct ieee80211_hdr
, addr3
);
2994 build
.sa_offs
= offsetof(struct ieee80211_hdr
, addr4
);
2998 fc
|= cpu_to_le16(IEEE80211_FCTL_TODS
);
3000 memcpy(hdr
->addr1
, sdata
->u
.mgd
.bssid
, ETH_ALEN
);
3001 build
.da_offs
= offsetof(struct ieee80211_hdr
, addr3
);
3002 build
.sa_offs
= offsetof(struct ieee80211_hdr
, addr2
);
3005 case NL80211_IFTYPE_AP_VLAN
:
3006 if (sdata
->wdev
.use_4addr
) {
3007 fc
|= cpu_to_le16(IEEE80211_FCTL_FROMDS
|
3008 IEEE80211_FCTL_TODS
);
3010 memcpy(hdr
->addr1
, sta
->sta
.addr
, ETH_ALEN
);
3011 memcpy(hdr
->addr2
, sdata
->vif
.addr
, ETH_ALEN
);
3012 build
.da_offs
= offsetof(struct ieee80211_hdr
, addr3
);
3013 build
.sa_offs
= offsetof(struct ieee80211_hdr
, addr4
);
3018 case NL80211_IFTYPE_AP
:
3019 fc
|= cpu_to_le16(IEEE80211_FCTL_FROMDS
);
3021 build
.da_offs
= offsetof(struct ieee80211_hdr
, addr1
);
3022 memcpy(hdr
->addr2
, sdata
->vif
.addr
, ETH_ALEN
);
3023 build
.sa_offs
= offsetof(struct ieee80211_hdr
, addr3
);
3027 /* not handled on fast-xmit */
3033 fc
|= cpu_to_le16(IEEE80211_STYPE_QOS_DATA
);
3036 /* We store the key here so there's no point in using rcu_dereference()
3037 * but that's fine because the code that changes the pointers will call
3038 * this function after doing so. For a single CPU that would be enough,
3039 * for multiple see the comment above.
3041 build
.key
= rcu_access_pointer(sta
->ptk
[sta
->ptk_idx
]);
3043 build
.key
= rcu_access_pointer(sdata
->default_unicast_key
);
3045 bool gen_iv
, iv_spc
, mmic
;
3047 gen_iv
= build
.key
->conf
.flags
& IEEE80211_KEY_FLAG_GENERATE_IV
;
3048 iv_spc
= build
.key
->conf
.flags
& IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3049 mmic
= build
.key
->conf
.flags
&
3050 (IEEE80211_KEY_FLAG_GENERATE_MMIC
|
3051 IEEE80211_KEY_FLAG_PUT_MIC_SPACE
);
3053 /* don't handle software crypto */
3054 if (!(build
.key
->flags
& KEY_FLAG_UPLOADED_TO_HARDWARE
))
3057 /* Key is being removed */
3058 if (build
.key
->flags
& KEY_FLAG_TAINTED
)
3061 switch (build
.key
->conf
.cipher
) {
3062 case WLAN_CIPHER_SUITE_CCMP
:
3063 case WLAN_CIPHER_SUITE_CCMP_256
:
3065 build
.pn_offs
= build
.hdr_len
;
3066 if (gen_iv
|| iv_spc
)
3067 build
.hdr_len
+= IEEE80211_CCMP_HDR_LEN
;
3069 case WLAN_CIPHER_SUITE_GCMP
:
3070 case WLAN_CIPHER_SUITE_GCMP_256
:
3072 build
.pn_offs
= build
.hdr_len
;
3073 if (gen_iv
|| iv_spc
)
3074 build
.hdr_len
+= IEEE80211_GCMP_HDR_LEN
;
3076 case WLAN_CIPHER_SUITE_TKIP
:
3077 /* cannot handle MMIC or IV generation in xmit-fast */
3081 build
.hdr_len
+= IEEE80211_TKIP_IV_LEN
;
3083 case WLAN_CIPHER_SUITE_WEP40
:
3084 case WLAN_CIPHER_SUITE_WEP104
:
3085 /* cannot handle IV generation in fast-xmit */
3089 build
.hdr_len
+= IEEE80211_WEP_IV_LEN
;
3091 case WLAN_CIPHER_SUITE_AES_CMAC
:
3092 case WLAN_CIPHER_SUITE_BIP_CMAC_256
:
3093 case WLAN_CIPHER_SUITE_BIP_GMAC_128
:
3094 case WLAN_CIPHER_SUITE_BIP_GMAC_256
:
3096 "management cipher suite 0x%x enabled for data\n",
3097 build
.key
->conf
.cipher
);
3100 /* we don't know how to generate IVs for this at all */
3101 if (WARN_ON(gen_iv
))
3103 /* pure hardware keys are OK, of course */
3104 if (!(build
.key
->flags
& KEY_FLAG_CIPHER_SCHEME
))
3106 /* cipher scheme might require space allocation */
3108 build
.key
->conf
.iv_len
> IEEE80211_FAST_XMIT_MAX_IV
)
3111 build
.hdr_len
+= build
.key
->conf
.iv_len
;
3114 fc
|= cpu_to_le16(IEEE80211_FCTL_PROTECTED
);
3117 hdr
->frame_control
= fc
;
3119 memcpy(build
.hdr
+ build
.hdr_len
,
3120 rfc1042_header
, sizeof(rfc1042_header
));
3121 build
.hdr_len
+= sizeof(rfc1042_header
);
3123 fast_tx
= kmemdup(&build
, sizeof(build
), GFP_ATOMIC
);
3124 /* if the kmemdup fails, continue w/o fast_tx */
3129 /* we might have raced against another call to this function */
3130 old
= rcu_dereference_protected(sta
->fast_tx
,
3131 lockdep_is_held(&sta
->lock
));
3132 rcu_assign_pointer(sta
->fast_tx
, fast_tx
);
3134 kfree_rcu(old
, rcu_head
);
3135 spin_unlock_bh(&sta
->lock
);
3138 void ieee80211_check_fast_xmit_all(struct ieee80211_local
*local
)
3140 struct sta_info
*sta
;
3143 list_for_each_entry_rcu(sta
, &local
->sta_list
, list
)
3144 ieee80211_check_fast_xmit(sta
);
3148 void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data
*sdata
)
3150 struct ieee80211_local
*local
= sdata
->local
;
3151 struct sta_info
*sta
;
3155 list_for_each_entry_rcu(sta
, &local
->sta_list
, list
) {
3156 if (sdata
!= sta
->sdata
&&
3157 (!sta
->sdata
->bss
|| sta
->sdata
->bss
!= sdata
->bss
))
3159 ieee80211_check_fast_xmit(sta
);
3165 void ieee80211_clear_fast_xmit(struct sta_info
*sta
)
3167 struct ieee80211_fast_tx
*fast_tx
;
3169 spin_lock_bh(&sta
->lock
);
3170 fast_tx
= rcu_dereference_protected(sta
->fast_tx
,
3171 lockdep_is_held(&sta
->lock
));
3172 RCU_INIT_POINTER(sta
->fast_tx
, NULL
);
3173 spin_unlock_bh(&sta
->lock
);
3176 kfree_rcu(fast_tx
, rcu_head
);
3179 static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local
*local
,
3180 struct sk_buff
*skb
, int headroom
)
3182 if (skb_headroom(skb
) < headroom
) {
3183 I802_DEBUG_INC(local
->tx_expand_skb_head
);
3185 if (pskb_expand_head(skb
, headroom
, 0, GFP_ATOMIC
)) {
3186 wiphy_debug(local
->hw
.wiphy
,
3187 "failed to reallocate TX buffer\n");
3195 static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data
*sdata
,
3196 struct ieee80211_fast_tx
*fast_tx
,
3197 struct sk_buff
*skb
)
3199 struct ieee80211_local
*local
= sdata
->local
;
3200 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
3201 struct ieee80211_hdr
*hdr
;
3202 struct ethhdr
*amsdu_hdr
;
3203 int hdr_len
= fast_tx
->hdr_len
- sizeof(rfc1042_header
);
3204 int subframe_len
= skb
->len
- hdr_len
;
3206 u8
*qc
, *h_80211_src
, *h_80211_dst
;
3209 if (info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
)
3212 if (info
->control
.flags
& IEEE80211_TX_CTRL_AMSDU
)
3215 if (!ieee80211_amsdu_realloc_pad(local
, skb
, sizeof(*amsdu_hdr
)))
3218 data
= skb_push(skb
, sizeof(*amsdu_hdr
));
3219 memmove(data
, data
+ sizeof(*amsdu_hdr
), hdr_len
);
3221 amsdu_hdr
= data
+ hdr_len
;
3222 /* h_80211_src/dst is addr* field within hdr */
3223 h_80211_src
= data
+ fast_tx
->sa_offs
;
3224 h_80211_dst
= data
+ fast_tx
->da_offs
;
3226 amsdu_hdr
->h_proto
= cpu_to_be16(subframe_len
);
3227 ether_addr_copy(amsdu_hdr
->h_source
, h_80211_src
);
3228 ether_addr_copy(amsdu_hdr
->h_dest
, h_80211_dst
);
3230 /* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA
3231 * fields needs to be changed to BSSID for A-MSDU frames depending
3232 * on FromDS/ToDS values.
3234 switch (sdata
->vif
.type
) {
3235 case NL80211_IFTYPE_STATION
:
3236 bssid
= sdata
->u
.mgd
.bssid
;
3238 case NL80211_IFTYPE_AP
:
3239 case NL80211_IFTYPE_AP_VLAN
:
3240 bssid
= sdata
->vif
.addr
;
3246 if (bssid
&& ieee80211_has_fromds(hdr
->frame_control
))
3247 ether_addr_copy(h_80211_src
, bssid
);
3249 if (bssid
&& ieee80211_has_tods(hdr
->frame_control
))
3250 ether_addr_copy(h_80211_dst
, bssid
);
3252 qc
= ieee80211_get_qos_ctl(hdr
);
3253 *qc
|= IEEE80211_QOS_CTL_A_MSDU_PRESENT
;
3255 info
->control
.flags
|= IEEE80211_TX_CTRL_AMSDU
;
3260 static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data
*sdata
,
3261 struct sta_info
*sta
,
3262 struct ieee80211_fast_tx
*fast_tx
,
3263 struct sk_buff
*skb
)
3265 struct ieee80211_local
*local
= sdata
->local
;
3266 struct fq
*fq
= &local
->fq
;
3268 struct fq_flow
*flow
;
3269 u8 tid
= skb
->priority
& IEEE80211_QOS_CTL_TAG1D_MASK
;
3270 struct ieee80211_txq
*txq
= sta
->sta
.txq
[tid
];
3271 struct txq_info
*txqi
;
3272 struct sk_buff
**frag_tail
, *head
;
3273 int subframe_len
= skb
->len
- ETH_ALEN
;
3274 u8 max_subframes
= sta
->sta
.max_amsdu_subframes
;
3275 int max_frags
= local
->hw
.max_tx_fragments
;
3276 int max_amsdu_len
= sta
->sta
.max_amsdu_len
;
3282 unsigned int orig_len
;
3283 int n
= 2, nfrags
, pad
= 0;
3286 if (!ieee80211_hw_check(&local
->hw
, TX_AMSDU
))
3289 if (sdata
->vif
.offload_flags
& IEEE80211_OFFLOAD_ENCAP_ENABLED
)
3292 if (skb_is_gso(skb
))
3298 txqi
= to_txq_info(txq
);
3299 if (test_bit(IEEE80211_TXQ_NO_AMSDU
, &txqi
->flags
))
3302 if (sta
->sta
.max_rc_amsdu_len
)
3303 max_amsdu_len
= min_t(int, max_amsdu_len
,
3304 sta
->sta
.max_rc_amsdu_len
);
3306 if (sta
->sta
.max_tid_amsdu_len
[tid
])
3307 max_amsdu_len
= min_t(int, max_amsdu_len
,
3308 sta
->sta
.max_tid_amsdu_len
[tid
]);
3310 flow_idx
= fq_flow_idx(fq
, skb
);
3312 spin_lock_bh(&fq
->lock
);
3314 /* TODO: Ideally aggregation should be done on dequeue to remain
3315 * responsive to environment changes.
3319 flow
= fq_flow_classify(fq
, tin
, flow_idx
, skb
);
3320 head
= skb_peek_tail(&flow
->queue
);
3321 if (!head
|| skb_is_gso(head
))
3324 orig_truesize
= head
->truesize
;
3325 orig_len
= head
->len
;
3327 if (skb
->len
+ head
->len
> max_amsdu_len
)
3330 nfrags
= 1 + skb_shinfo(skb
)->nr_frags
;
3331 nfrags
+= 1 + skb_shinfo(head
)->nr_frags
;
3332 frag_tail
= &skb_shinfo(head
)->frag_list
;
3333 while (*frag_tail
) {
3334 nfrags
+= 1 + skb_shinfo(*frag_tail
)->nr_frags
;
3335 frag_tail
= &(*frag_tail
)->next
;
3339 if (max_subframes
&& n
> max_subframes
)
3342 if (max_frags
&& nfrags
> max_frags
)
3345 if (!drv_can_aggregate_in_amsdu(local
, head
, skb
))
3348 if (!ieee80211_amsdu_prepare_head(sdata
, fast_tx
, head
))
3352 * Pad out the previous subframe to a multiple of 4 by adding the
3353 * padding to the next one, that's being added. Note that head->len
3354 * is the length of the full A-MSDU, but that works since each time
3355 * we add a new subframe we pad out the previous one to a multiple
3356 * of 4 and thus it no longer matters in the next round.
3358 hdrlen
= fast_tx
->hdr_len
- sizeof(rfc1042_header
);
3359 if ((head
->len
- hdrlen
) & 3)
3360 pad
= 4 - ((head
->len
- hdrlen
) & 3);
3362 if (!ieee80211_amsdu_realloc_pad(local
, skb
, sizeof(rfc1042_header
) +
3367 data
= skb_push(skb
, ETH_ALEN
+ 2);
3368 memmove(data
, data
+ ETH_ALEN
+ 2, 2 * ETH_ALEN
);
3370 data
+= 2 * ETH_ALEN
;
3371 len
= cpu_to_be16(subframe_len
);
3372 memcpy(data
, &len
, 2);
3373 memcpy(data
+ 2, rfc1042_header
, sizeof(rfc1042_header
));
3375 memset(skb_push(skb
, pad
), 0, pad
);
3377 head
->len
+= skb
->len
;
3378 head
->data_len
+= skb
->len
;
3382 fq
->memory_usage
+= head
->truesize
- orig_truesize
;
3383 if (head
->len
!= orig_len
) {
3384 flow
->backlog
+= head
->len
- orig_len
;
3385 tin
->backlog_bytes
+= head
->len
- orig_len
;
3388 spin_unlock_bh(&fq
->lock
);
3394 * Can be called while the sta lock is held. Anything that can cause packets to
3395 * be generated will cause deadlock!
3397 static ieee80211_tx_result
3398 ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data
*sdata
,
3399 struct sta_info
*sta
, u8 pn_offs
,
3400 struct ieee80211_key
*key
,
3401 struct ieee80211_tx_data
*tx
)
3403 struct sk_buff
*skb
= tx
->skb
;
3404 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
3405 struct ieee80211_hdr
*hdr
= (void *)skb
->data
;
3406 u8 tid
= IEEE80211_NUM_TIDS
;
3408 if (!ieee80211_hw_check(&tx
->local
->hw
, HAS_RATE_CONTROL
) &&
3409 ieee80211_tx_h_rate_ctrl(tx
) != TX_CONTINUE
)
3413 info
->control
.hw_key
= &key
->conf
;
3415 dev_sw_netstats_tx_add(skb
->dev
, 1, skb
->len
);
3417 if (hdr
->frame_control
& cpu_to_le16(IEEE80211_STYPE_QOS_DATA
)) {
3418 tid
= skb
->priority
& IEEE80211_QOS_CTL_TAG1D_MASK
;
3419 hdr
->seq_ctrl
= ieee80211_tx_next_seq(sta
, tid
);
3421 info
->flags
|= IEEE80211_TX_CTL_ASSIGN_SEQ
;
3422 hdr
->seq_ctrl
= cpu_to_le16(sdata
->sequence_number
);
3423 sdata
->sequence_number
+= 0x10;
3426 if (skb_shinfo(skb
)->gso_size
)
3427 sta
->tx_stats
.msdu
[tid
] +=
3428 DIV_ROUND_UP(skb
->len
, skb_shinfo(skb
)->gso_size
);
3430 sta
->tx_stats
.msdu
[tid
]++;
3432 info
->hw_queue
= sdata
->vif
.hw_queue
[skb_get_queue_mapping(skb
)];
3434 /* statistics normally done by ieee80211_tx_h_stats (but that
3435 * has to consider fragmentation, so is more complex)
3437 sta
->tx_stats
.bytes
[skb_get_queue_mapping(skb
)] += skb
->len
;
3438 sta
->tx_stats
.packets
[skb_get_queue_mapping(skb
)]++;
3442 u8
*crypto_hdr
= skb
->data
+ pn_offs
;
3444 switch (key
->conf
.cipher
) {
3445 case WLAN_CIPHER_SUITE_CCMP
:
3446 case WLAN_CIPHER_SUITE_CCMP_256
:
3447 case WLAN_CIPHER_SUITE_GCMP
:
3448 case WLAN_CIPHER_SUITE_GCMP_256
:
3449 pn
= atomic64_inc_return(&key
->conf
.tx_pn
);
3451 crypto_hdr
[1] = pn
>> 8;
3452 crypto_hdr
[3] = 0x20 | (key
->conf
.keyidx
<< 6);
3453 crypto_hdr
[4] = pn
>> 16;
3454 crypto_hdr
[5] = pn
>> 24;
3455 crypto_hdr
[6] = pn
>> 32;
3456 crypto_hdr
[7] = pn
>> 40;
3464 static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data
*sdata
,
3465 struct sta_info
*sta
,
3466 struct ieee80211_fast_tx
*fast_tx
,
3467 struct sk_buff
*skb
)
3469 struct ieee80211_local
*local
= sdata
->local
;
3470 u16 ethertype
= (skb
->data
[12] << 8) | skb
->data
[13];
3471 int extra_head
= fast_tx
->hdr_len
- (ETH_HLEN
- 2);
3472 int hw_headroom
= sdata
->local
->hw
.extra_tx_headroom
;
3474 struct ieee80211_tx_info
*info
;
3475 struct ieee80211_hdr
*hdr
= (void *)fast_tx
->hdr
;
3476 struct ieee80211_tx_data tx
;
3477 ieee80211_tx_result r
;
3478 struct tid_ampdu_tx
*tid_tx
= NULL
;
3479 u8 tid
= IEEE80211_NUM_TIDS
;
3481 /* control port protocol needs a lot of special handling */
3482 if (cpu_to_be16(ethertype
) == sdata
->control_port_protocol
)
3485 /* only RFC 1042 SNAP */
3486 if (ethertype
< ETH_P_802_3_MIN
)
3489 /* don't handle TX status request here either */
3490 if (skb
->sk
&& skb_shinfo(skb
)->tx_flags
& SKBTX_WIFI_STATUS
)
3493 if (hdr
->frame_control
& cpu_to_le16(IEEE80211_STYPE_QOS_DATA
)) {
3494 tid
= skb
->priority
& IEEE80211_QOS_CTL_TAG1D_MASK
;
3495 tid_tx
= rcu_dereference(sta
->ampdu_mlme
.tid_tx
[tid
]);
3497 if (!test_bit(HT_AGG_STATE_OPERATIONAL
, &tid_tx
->state
))
3499 if (tid_tx
->timeout
)
3500 tid_tx
->last_tx
= jiffies
;
3504 /* after this point (skb is modified) we cannot return false */
3506 if (skb_shared(skb
)) {
3507 struct sk_buff
*tmp_skb
= skb
;
3509 skb
= skb_clone(skb
, GFP_ATOMIC
);
3516 if ((hdr
->frame_control
& cpu_to_le16(IEEE80211_STYPE_QOS_DATA
)) &&
3517 ieee80211_amsdu_aggregate(sdata
, sta
, fast_tx
, skb
))
3520 /* will not be crypto-handled beyond what we do here, so use false
3521 * as the may-encrypt argument for the resize to not account for
3522 * more room than we already have in 'extra_head'
3524 if (unlikely(ieee80211_skb_resize(sdata
, skb
,
3525 max_t(int, extra_head
+ hw_headroom
-
3526 skb_headroom(skb
), 0),
3532 memcpy(ð
, skb
->data
, ETH_HLEN
- 2);
3533 hdr
= skb_push(skb
, extra_head
);
3534 memcpy(skb
->data
, fast_tx
->hdr
, fast_tx
->hdr_len
);
3535 memcpy(skb
->data
+ fast_tx
->da_offs
, eth
.h_dest
, ETH_ALEN
);
3536 memcpy(skb
->data
+ fast_tx
->sa_offs
, eth
.h_source
, ETH_ALEN
);
3538 info
= IEEE80211_SKB_CB(skb
);
3539 memset(info
, 0, sizeof(*info
));
3540 info
->band
= fast_tx
->band
;
3541 info
->control
.vif
= &sdata
->vif
;
3542 info
->flags
= IEEE80211_TX_CTL_FIRST_FRAGMENT
|
3543 IEEE80211_TX_CTL_DONTFRAG
|
3544 (tid_tx
? IEEE80211_TX_CTL_AMPDU
: 0);
3545 info
->control
.flags
= IEEE80211_TX_CTRL_FAST_XMIT
;
3547 #ifdef CONFIG_MAC80211_DEBUGFS
3548 if (local
->force_tx_status
)
3549 info
->flags
|= IEEE80211_TX_CTL_REQ_TX_STATUS
;
3552 if (hdr
->frame_control
& cpu_to_le16(IEEE80211_STYPE_QOS_DATA
)) {
3553 tid
= skb
->priority
& IEEE80211_QOS_CTL_TAG1D_MASK
;
3554 *ieee80211_get_qos_ctl(hdr
) = tid
;
3557 __skb_queue_head_init(&tx
.skbs
);
3559 tx
.flags
= IEEE80211_TX_UNICAST
;
3563 tx
.key
= fast_tx
->key
;
3565 if (ieee80211_queue_skb(local
, sdata
, sta
, skb
))
3569 r
= ieee80211_xmit_fast_finish(sdata
, sta
, fast_tx
->pn_offs
,
3577 if (sdata
->vif
.type
== NL80211_IFTYPE_AP_VLAN
)
3578 sdata
= container_of(sdata
->bss
,
3579 struct ieee80211_sub_if_data
, u
.ap
);
3581 __skb_queue_tail(&tx
.skbs
, skb
);
3582 ieee80211_tx_frags(local
, &sdata
->vif
, sta
, &tx
.skbs
, false);
/* Dequeue the next frame from a driver-visible TXQ and run the late TX
 * handlers on it (key selection, rate control, fast-xmit finish), tracking
 * AQL airtime if the feature is enabled.  Frames that fail a handler are
 * freed and the next one is tried ("goto begin").  Returns NULL when the
 * queue is stopped or empty.
 *
 * NOTE(review): reconstructed from a garbled extraction; labels and a few
 * dropped lines were restored from context — verify against upstream tx.c.
 */
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
				     struct ieee80211_txq *txq)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = container_of(txq, struct txq_info, txq);
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;
	struct ieee80211_tx_info *info;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result r;
	struct ieee80211_vif *vif = txq->vif;

	/* must run in BH/softirq context: fq lock is taken with _bh */
	WARN_ON_ONCE(softirq_count() == 0);

	/* AQL: don't dequeue if this station already has too much
	 * airtime pending in the hardware queues */
	if (!ieee80211_txq_airtime_check(hw, txq))
		return NULL;

begin:
	spin_lock_bh(&fq->lock);

	if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
	    test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
		goto out;

	if (vif->txqs_stopped[txq->ac]) {
		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
		goto out;
	}

	/* Make sure fragments stay together. */
	skb = __skb_dequeue(&txqi->frags);
	if (unlikely(skb)) {
		if (!(IEEE80211_SKB_CB(skb)->control.flags &
				IEEE80211_TX_INTCFL_NEED_TXPROCESSING))
			goto out;
		/* fragment was re-queued mid-processing; finish it now */
		IEEE80211_SKB_CB(skb)->control.flags &=
			~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
	} else {
		skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
	}

	if (!skb)
		goto out;

	spin_unlock_bh(&fq->lock);

	hdr = (struct ieee80211_hdr *)skb->data;
	info = IEEE80211_SKB_CB(skb);

	memset(&tx, 0, sizeof(tx));
	__skb_queue_head_init(&tx.skbs);
	tx.local = local;
	tx.skb = skb;
	tx.sdata = vif_to_sdata(info->control.vif);

	if (txq->sta) {
		tx.sta = container_of(txq->sta, struct sta_info, sta);
		/*
		 * Drop unicast frames to unauthorised stations unless they are
		 * injected frames or EAPOL frames from the local station.
		 */
		if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
			     ieee80211_is_data(hdr->frame_control) &&
			     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
			     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
			     !is_multicast_ether_addr(hdr->addr1) &&
			     !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
			     (!(info->control.flags &
				IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
			      !ether_addr_equal(tx.sdata->vif.addr,
						hdr->addr2)))) {
			I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		}
	}

	/*
	 * The key can be removed while the packet was queued, so need to call
	 * this here to get the current key.
	 */
	r = ieee80211_tx_h_select_key(&tx);
	if (r != TX_CONTINUE) {
		ieee80211_free_txskb(&local->hw, skb);
		goto begin;
	}

	if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
		info->flags |= IEEE80211_TX_CTL_AMPDU;
	else
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
		/* 802.3 offload path: only rate control may be needed */
		if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
			r = ieee80211_tx_h_rate_ctrl(&tx);
			if (r != TX_CONTINUE) {
				ieee80211_free_txskb(&local->hw, skb);
				goto begin;
			}
		}
		goto encap_out;
	}

	if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
		struct sta_info *sta = container_of(txq->sta, struct sta_info,
						    sta);
		u8 pn_offs = 0;

		if (tx.key &&
		    (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
			pn_offs = ieee80211_hdrlen(hdr->frame_control);

		r = ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
					       tx.key, &tx);
		if (r != TX_CONTINUE) {
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		}
	} else {
		if (invoke_tx_handlers_late(&tx))
			goto begin;

		skb = __skb_dequeue(&tx.skbs);

		/* fragmentation produced extra skbs; park them so they are
		 * dequeued ahead of everything else next time */
		if (!skb_queue_empty(&tx.skbs)) {
			spin_lock_bh(&fq->lock);
			skb_queue_splice_tail(&tx.skbs, &txqi->frags);
			spin_unlock_bh(&fq->lock);
		}
	}

	if (skb_has_frag_list(skb) &&
	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
		if (skb_linearize(skb)) {
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		}
	}

	switch (tx.sdata->vif.type) {
	case NL80211_IFTYPE_MONITOR:
		if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
			vif = &tx.sdata->vif;
			break;
		}
		tx.sdata = rcu_dereference(local->monitor_sdata);
		if (tx.sdata) {
			vif = &tx.sdata->vif;
			info->hw_queue =
				vif->hw_queue[skb_get_queue_mapping(skb)];
		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		} else {
			vif = NULL;
		}
		break;
	case NL80211_IFTYPE_AP_VLAN:
		/* VLANs transmit through the owning AP interface */
		tx.sdata = container_of(tx.sdata->bss,
					struct ieee80211_sub_if_data, u.ap);
		fallthrough;
	default:
		vif = &tx.sdata->vif;
		break;
	}

encap_out:
	IEEE80211_SKB_CB(skb)->control.vif = vif;

	if (vif &&
	    wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
		bool ampdu = txq->ac != IEEE80211_AC_VO;
		u32 airtime;

		airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
							     skb->len, ampdu);
		if (airtime) {
			/* round/clamp estimate and record it in the skb */
			airtime = ieee80211_info_set_tx_time_est(info, airtime);
			ieee80211_sta_update_pending_airtime(local, tx.sta,
							     txq->ac,
							     airtime,
							     false);
		}
	}

	return skb;

out:
	spin_unlock_bh(&fq->lock);

	return skb;
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);
/* Pick the next TXQ the driver should service for access category @ac,
 * in virtual-airtime (v_t) order from the rb-tree of active queues.
 * Iteration state is kept in air_sched->schedule_pos between calls within
 * one scheduling round (reset by ieee80211_txq_schedule_start()).
 *
 * NOTE(review): loop/labels restored from context during reconstruction.
 */
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct airtime_sched_info *air_sched;
	u64 now = ktime_get_boottime_ns();
	struct ieee80211_txq *ret = NULL;
	struct airtime_info *air_info;
	struct txq_info *txqi = NULL;
	struct rb_node *node;
	bool first = false;

	air_sched = &local->airtime[ac];
	spin_lock_bh(&air_sched->lock);

	node = air_sched->schedule_pos;

begin:
	if (!node) {
		/* fresh round: start from the lowest v_t */
		node = rb_first_cached(&air_sched->active_txqs);
		first = true;
	} else {
		node = rb_next(node);
	}

	if (!node)
		goto out;

	txqi = container_of(node, struct txq_info, schedule_order);
	air_info = to_airtime_info(&txqi->txq);

	/* only serve queues that are not ahead of the scheduler's v_t;
	 * the first queue may pull the scheduler forward (catch-up) */
	if (air_info->v_t > air_sched->v_t &&
	    (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
		goto out;

	if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
		/* blocked by AQL — skip and try the next queue */
		first = false;
		goto begin;
	}

	air_sched->schedule_pos = node;
	air_sched->last_schedule_activity = now;
	ret = &txqi->txq;
out:
	spin_unlock_bh(&air_sched->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_next_txq);
/* Insert @txqi into the rb-tree @root ordered by its airtime v_t (ties go
 * left, i.e. FIFO among equals).  Caller holds the scheduler lock. */
static void __ieee80211_insert_txq(struct rb_root_cached *root,
				   struct txq_info *txqi)
{
	struct rb_node **new = &root->rb_root.rb_node;
	struct airtime_info *old_air, *new_air;
	struct rb_node *parent = NULL;
	struct txq_info *__txqi;
	bool leftmost = true;

	while (*new) {
		parent = *new;
		__txqi = rb_entry(parent, struct txq_info, schedule_order);
		old_air = to_airtime_info(&__txqi->txq);
		new_air = to_airtime_info(&txqi->txq);

		if (new_air->v_t <= old_air->v_t) {
			new = &parent->rb_left;
		} else {
			new = &parent->rb_right;
			/* went right at least once: not the new minimum */
			leftmost = false;
		}
	}

	rb_link_node(&txqi->schedule_order, parent, new);
	rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
}
/* Re-sort @txq within the active rb-tree after its v_t changed.  Cheap
 * no-op when the node is already between its neighbours' v_t values.
 * Caller must hold the per-AC scheduler lock (asserted below). */
void ieee80211_resort_txq(struct ieee80211_hw *hw,
			  struct ieee80211_txq *txq)
{
	struct airtime_info *air_info = to_airtime_info(txq);
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);
	struct airtime_sched_info *air_sched;

	air_sched = &local->airtime[txq->ac];

	lockdep_assert_held(&air_sched->lock);

	if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
		struct airtime_info *a_prev = NULL, *a_next = NULL;
		struct txq_info *t_prev, *t_next;
		struct rb_node *n_prev, *n_next;

		/* Erasing a node can cause an expensive rebalancing operation,
		 * so we check the previous and next nodes first and only remove
		 * and re-insert if the current node is not already in the
		 * correct position.
		 */
		if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
			t_prev = container_of(n_prev, struct txq_info,
					      schedule_order);
			a_prev = to_airtime_info(&t_prev->txq);
		}

		if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
			t_next = container_of(n_next, struct txq_info,
					      schedule_order);
			a_next = to_airtime_info(&t_next->txq);
		}

		/* already in order between neighbours: nothing to do */
		if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
		    (!a_next || a_next->v_t > air_info->v_t))
			return;

		/* keep the round-robin cursor valid across the erase */
		if (air_sched->schedule_pos == &txqi->schedule_order)
			air_sched->schedule_pos = n_prev;

		rb_erase_cached(&txqi->schedule_order,
				&air_sched->active_txqs);
		RB_CLEAR_NODE(&txqi->schedule_order);
		__ieee80211_insert_txq(&air_sched->active_txqs, txqi);
	}
}
/* Recompute the sum of airtime weights over stations that are still
 * "active" (transmitted recently); inactive entries are dropped from the
 * active list.  @now may be 0 (fetched here); @force skips rate limiting.
 * Caller must hold the scheduler lock. */
void ieee80211_update_airtime_weight(struct ieee80211_local *local,
				     struct airtime_sched_info *air_sched,
				     u64 now, bool force)
{
	struct airtime_info *air_info, *tmp;
	u64 weight_sum = 0;

	if (unlikely(!now))
		now = ktime_get_boottime_ns();

	lockdep_assert_held(&air_sched->lock);

	/* rate-limit full recalculation unless forced */
	if (!force && (air_sched->last_weight_update <
		       now - AIRTIME_ACTIVE_DURATION))
		return;

	list_for_each_entry_safe(air_info, tmp,
				 &air_sched->active_list, list) {
		if (airtime_is_active(air_info, now))
			weight_sum += air_info->weight;
		else
			list_del_init(&air_info->list);
	}

	airtime_weight_sum_set(air_sched, weight_sum);
	air_sched->last_weight_update = now;
}
/* Mark @txq active and insert it into the per-AC airtime scheduler tree
 * (if it is not already scheduled).  Stations returning from inactivity
 * have their v_t caught up so they cannot monopolise the medium. */
void ieee80211_schedule_txq(struct ieee80211_hw *hw,
			    struct ieee80211_txq *txq)
	__acquires(txq_lock) __releases(txq_lock)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);
	struct airtime_sched_info *air_sched;
	u64 now = ktime_get_boottime_ns();
	struct airtime_info *air_info;
	u8 ac = txq->ac;
	bool was_active;

	air_sched = &local->airtime[ac];
	air_info = to_airtime_info(txq);

	spin_lock_bh(&air_sched->lock);
	was_active = airtime_is_active(air_info, now);
	airtime_set_active(air_sched, air_info, now);

	/* already in the tree: nothing more to do */
	if (!RB_EMPTY_NODE(&txqi->schedule_order))
		goto out;

	/* If the station has been inactive for a while, catch up its v_t so it
	 * doesn't get indefinite priority; see comment above the definition of
	 * AIRTIME_MAX_BEHIND.
	 */
	if ((!was_active && air_info->v_t < air_sched->v_t) ||
	    air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
		air_info->v_t = air_sched->v_t;

	ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
	__ieee80211_insert_txq(&air_sched->active_txqs, txqi);

out:
	spin_unlock_bh(&air_sched->lock);
}
EXPORT_SYMBOL(ieee80211_schedule_txq);
/* Remove @txq from the airtime scheduler tree.  With @purge, also drop it
 * from the active list and force a weight recalculation (used on station
 * removal); otherwise refresh its activity stamp so it stays "active".
 * Caller must hold the per-AC scheduler lock (asserted below). */
static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq,
				       bool purge)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);
	struct airtime_sched_info *air_sched;
	struct airtime_info *air_info;

	air_sched = &local->airtime[txq->ac];
	air_info = to_airtime_info(&txqi->txq);

	lockdep_assert_held(&air_sched->lock);

	if (purge) {
		list_del_init(&air_info->list);
		ieee80211_update_airtime_weight(local, air_sched, 0, true);
	}

	if (RB_EMPTY_NODE(&txqi->schedule_order))
		return;

	/* keep the round-robin cursor valid across the erase */
	if (air_sched->schedule_pos == &txqi->schedule_order)
		air_sched->schedule_pos = rb_prev(&txqi->schedule_order);

	if (!purge)
		airtime_set_active(air_sched, air_info,
				   ktime_get_boottime_ns());

	rb_erase_cached(&txqi->schedule_order,
			&air_sched->active_txqs);
	RB_CLEAR_NODE(&txqi->schedule_order);
}
/* Locked wrapper around __ieee80211_unschedule_txq(). */
void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq,
			      bool purge)
	__acquires(txq_lock) __releases(txq_lock)
{
	struct ieee80211_local *local = hw_to_local(hw);

	spin_lock_bh(&local->airtime[txq->ac].lock);
	__ieee80211_unschedule_txq(hw, txq, purge);
	spin_unlock_bh(&local->airtime[txq->ac].lock);
}
/* Hand a TXQ back from the driver after servicing: if it is scheduled,
 * has no more queued frames and the driver is not keeping it (@force),
 * drop it from the scheduler (non-purging). */
void ieee80211_return_txq(struct ieee80211_hw *hw,
			  struct ieee80211_txq *txq, bool force)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);

	spin_lock_bh(&local->airtime[txq->ac].lock);

	if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
	    !txq_has_queue(txq))
		__ieee80211_unschedule_txq(hw, txq, false);

	spin_unlock_bh(&local->airtime[txq->ac].lock);
}
EXPORT_SYMBOL(ieee80211_return_txq);
/* debugfs-togglable kill switch for AQL (airtime queue limits) */
DEFINE_STATIC_KEY_FALSE(aql_disable);

/* AQL admission check: may more data for @txq be released to the driver?
 * Always true when AQL is unsupported/disabled, for non-station and
 * management TXQs, or while the station's pending airtime is below its
 * low limit; between low and high limits, only while total pending
 * airtime is under the device threshold. */
bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
				 struct ieee80211_txq *txq)
{
	struct airtime_info *air_info = to_airtime_info(txq);
	struct ieee80211_local *local = hw_to_local(hw);

	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
		return true;

	if (static_branch_unlikely(&aql_disable))
		return true;

	if (!txq->sta)
		return true;

	/* management TID is exempt from AQL */
	if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
		return true;

	if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
		return true;

	if (atomic_read(&local->aql_total_pending_airtime) <
	    local->aql_threshold &&
	    atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
		return true;

	return false;
}
EXPORT_SYMBOL(ieee80211_txq_airtime_check);
/* For drivers doing their own TXQ selection: return true if @txq is
 * currently allowed to transmit under the airtime scheduler (its v_t is
 * not ahead of the scheduler's), after an AQL check and after letting the
 * head-of-tree station pull the scheduler's v_t forward to avoid
 * starvation. */
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
				struct ieee80211_txq *txq)
{
	struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
	struct ieee80211_local *local = hw_to_local(hw);
	struct airtime_sched_info *air_sched;
	struct airtime_info *air_info;
	struct rb_node *node = NULL;
	bool ret = false;
	u64 now;

	if (!ieee80211_txq_airtime_check(hw, txq))
		return false;

	air_sched = &local->airtime[txq->ac];
	spin_lock_bh(&air_sched->lock);

	/* not scheduled at all: not eligible */
	if (RB_EMPTY_NODE(&txqi->schedule_order))
		goto out;

	now = ktime_get_boottime_ns();

	/* Like in ieee80211_next_txq(), make sure the first station in the
	 * scheduling order is eligible for transmission to avoid starvation.
	 */
	node = rb_first_cached(&air_sched->active_txqs);
	if (node) {
		first_txqi = container_of(node, struct txq_info,
					  schedule_order);
		air_info = to_airtime_info(&first_txqi->txq);

		if (air_sched->v_t < air_info->v_t)
			airtime_catchup_v_t(air_sched, air_info->v_t, now);
	}

	air_info = to_airtime_info(&txqi->txq);
	if (air_info->v_t <= air_sched->v_t) {
		air_sched->last_schedule_activity = now;
		ret = true;
	}

out:
	spin_unlock_bh(&air_sched->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_txq_may_transmit);
/* Begin a new scheduling round for @ac: reset the round-robin cursor so
 * the next ieee80211_next_txq() starts from the lowest-v_t queue. */
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct airtime_sched_info *air_sched = &local->airtime[ac];

	spin_lock_bh(&air_sched->lock);
	air_sched->schedule_pos = NULL;
	spin_unlock_bh(&air_sched->lock);
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
/* Opportunistically kick off a TX BlockAck (A-MPDU) session for the TID
 * of @skb when the rate-control module wants mac80211 to trigger it and
 * the peer/frame are eligible (HT + WME, not voice AC, not the EAPOL
 * control port protocol, no session already set up). */
static void
ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
		     struct sta_info *sta,
		     struct sk_buff *skb)
{
	struct rate_control_ref *ref = sdata->local->rate_ctrl;
	u16 tid;

	if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
		return;

	if (!sta || !sta->sta.ht_cap.ht_supported ||
	    !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
	    skb->protocol == sdata->control_port_protocol)
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	/* session (or attempt) already exists for this TID */
	if (likely(sta->ampdu_mlme.tid_tx[tid]))
		return;

	ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
}
/* Core 802.3 transmit path for data interfaces: resolve the RA station,
 * try the fast-xmit path, otherwise segment/linearize and fix up checksum
 * in software, build 802.11 headers per segment and hand off to
 * ieee80211_xmit().  Frees the skb on any failure.
 *
 * NOTE(review): reconstructed from a garbled extraction; error labels and
 * some dropped statements restored from context — verify against upstream.
 */
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
				  struct net_device *dev,
				  u32 info_flags,
				  u32 ctrl_flags,
				  u64 *cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct sk_buff *next;

	if (unlikely(skb->len < ETH_HLEN)) {
		kfree_skb(skb);
		return;
	}

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
		goto out_free;

	/* lookup may return ERR_PTR; treat as "no station" */
	if (IS_ERR(sta))
		sta = NULL;

	if (local->ops->wake_tx_queue) {
		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
		skb_set_queue_mapping(skb, queue);
		skb_get_hash(skb);
	}

	ieee80211_aggr_check(sdata, sta, skb);

	if (sta) {
		struct ieee80211_fast_tx *fast_tx;

		sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);

		fast_tx = rcu_dereference(sta->fast_tx);

		if (fast_tx &&
		    ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
			goto out;
	}

	if (skb_is_gso(skb)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, 0);
		if (IS_ERR(segs)) {
			goto out_free;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		/* we cannot process non-linear frames on this path */
		if (skb_linearize(skb))
			goto out_free;

		/* the frame could be fragmented, software-encrypted, and other
		 * things so we cannot really handle checksum offload with it -
		 * fix it up in software before we handle anything else.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_set_transport_header(skb,
						 skb_checksum_start_offset(skb));
			if (skb_checksum_help(skb))
				goto out_free;
		}
	}

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		if (skb->protocol == sdata->control_port_protocol)
			ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;

		skb = ieee80211_build_hdr(sdata, skb, info_flags,
					  sta, ctrl_flags, cookie);
		if (IS_ERR(skb)) {
			kfree_skb_list(next);
			goto out;
		}

		dev_sw_netstats_tx_add(dev, 1, skb->len);

		ieee80211_xmit(sdata, sta, skb);
	}
	goto out;
 out_free:
	kfree_skb(skb);
 out:
	rcu_read_unlock();
}
/* Rewrite the Ethernet destination of @skb to @sta's MAC address (for
 * multicast-to-unicast conversion).  Returns 0 or a negative errno from
 * skb_ensure_writable(). */
static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
{
	struct ethhdr *eth;
	int err;

	/* header may be shared/cloned — make it writable first */
	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	eth = (void *)skb->data;
	ether_addr_copy(eth->h_dest, sta->sta.addr);

	return 0;
}
/* Decide whether a multicast frame should be converted to per-station
 * unicast copies: only on AP/AP_VLAN interfaces with the per-BSS toggle
 * enabled, and only for ARP/IPv4/IPv6 payloads (also looking through one
 * 802.1Q VLAN tag). */
static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	const struct ethhdr *eth = (void *)skb->data;
	const struct vlan_ethhdr *ethvlan = (void *)skb->data;
	__be16 ethertype;

	if (likely(!is_multicast_ether_addr(eth->h_dest)))
		return false;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		/* 4-addr/WDS VLANs keep multicast as-is */
		if (sdata->u.vlan.sta)
			return false;
		if (sdata->wdev.use_4addr)
			return false;
		fallthrough;
	case NL80211_IFTYPE_AP:
		/* check runtime toggle for this bss */
		if (!sdata->bss->multicast_to_unicast)
			return false;
		break;
	default:
		return false;
	}

	/* multicast to unicast conversion only for some payload */
	ethertype = eth->h_proto;
	if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
		ethertype = ethvlan->h_vlan_encapsulated_proto;
	switch (ethertype) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		break;
	default:
		return false;
	}

	return true;
}
/* Expand a multicast @skb into one unicast copy per associated station on
 * this (sub-)interface and queue them on @queue.  The original skb is
 * reused for the first station; on clone/rewrite failure we fall back to
 * queueing the original multicast frame unchanged; with no stations the
 * frame is dropped. */
static void
ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
			     struct sk_buff_head *queue)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	const struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct sta_info *sta, *first = NULL;
	struct sk_buff *cloned_skb;

	rcu_read_lock();

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata)
			/* AP-VLAN mismatch */
			continue;
		if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr)))
			/* do not send back to source */
			continue;
		if (!first) {
			/* remember first station; original skb goes to it */
			first = sta;
			continue;
		}
		cloned_skb = skb_clone(skb, GFP_ATOMIC);
		if (!cloned_skb)
			goto multicast;
		if (unlikely(ieee80211_change_da(cloned_skb, sta))) {
			dev_kfree_skb(cloned_skb);
			goto multicast;
		}
		__skb_queue_tail(queue, cloned_skb);
	}

	if (likely(first)) {
		if (unlikely(ieee80211_change_da(skb, first)))
			goto multicast;
		__skb_queue_tail(queue, skb);
	} else {
		/* no STA connected, drop */
		kfree_skb(skb);
		skb = NULL;
	}

	goto out;
multicast:
	/* conversion failed part-way: fall back to plain multicast */
	__skb_queue_purge(queue);
	__skb_queue_tail(queue, skb);
out:
	rcu_read_unlock();
}
/**
 * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
 * @skb: packet to be sent
 * @dev: incoming interface
 *
 * On failure skb will be freed.
 */
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
		struct sk_buff_head queue;

		/* fan the frame out into per-station unicast copies */
		__skb_queue_head_init(&queue);
		ieee80211_convert_to_unicast(skb, dev, &queue);
		while ((skb = __skb_dequeue(&queue)))
			__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
	} else {
		__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
	}

	return NETDEV_TX_OK;
}
/* Push an 802.3-offloaded frame to the driver, or park it on the pending
 * queue when the hardware queue is stopped (head of queue if this is a
 * retried pending frame, tail otherwise).  Returns false when the frame
 * was (re-)queued — the caller should stop sending more. */
static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
			      struct sk_buff *skb, int led_len,
			      struct sta_info *sta,
			      bool txpending)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_control control = {};
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *pubsta = NULL;
	unsigned long flags;
	int q = info->hw_queue;

	if (sta)
		sk_pacing_shift_update(skb->sk, local->hw.tx_sk_pacing_shift);

	/* wake_tx_queue drivers take the frame via their TXQs instead */
	if (ieee80211_queue_skb(local, sdata, sta, skb))
		return true;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);

	if (local->queue_stop_reasons[q] ||
	    (!txpending && !skb_queue_empty(&local->pending[q]))) {
		if (txpending)
			skb_queue_head(&local->pending[q], skb);
		else
			skb_queue_tail(&local->pending[q], skb);

		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

		return false;
	}

	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	if (sta && sta->uploaded)
		pubsta = &sta->sta;

	control.sta = pubsta;

	drv_tx(local, &control, skb);

	return true;
}
/* Transmit path for 802.3 (encap-offloaded) frames: set up tx_info,
 * handle A-MPDU session state (falling back to the normal 802.11 path
 * while a session is being set up), account stats, and hand off to
 * ieee80211_tx_8023().
 *
 * NOTE(review): reconstructed from a garbled extraction; dropped branch
 * lines restored from context — verify against upstream tx.c. */
static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
				struct net_device *dev, struct sta_info *sta,
				struct ieee80211_key *key, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	u8 tid;

	if (local->ops->wake_tx_queue) {
		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
		skb_set_queue_mapping(skb, queue);
		skb_get_hash(skb);
	}

	/* don't transmit while off-channel during a software scan */
	if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
	    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
		goto out_free;

	memset(info, 0, sizeof(*info));

	ieee80211_aggr_check(sdata, sta, skb);

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (tid_tx) {
		if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
			/* fall back to non-offload slow path */
			__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
			return;
		}

		info->flags |= IEEE80211_TX_CTL_AMPDU;
		if (tid_tx->timeout)
			tid_tx->last_tx = jiffies;
	}

	if (unlikely(skb->sk &&
		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
		info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
							     &info->flags, NULL);

	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
	sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;

	/* VLANs transmit through the owning AP interface */
	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP;
	info->control.vif = &sdata->vif;

	if (key)
		info->control.hw_key = &key->conf;

	ieee80211_tx_8023(sdata, skb, skb->len, sta, false);

	return;

out_free:
	kfree_skb(skb);
}
/* start_xmit for interfaces using 802.11 encapsulation offload: use the
 * 8023 fast path only for authorized, uploaded stations with a
 * hardware-capable (non-TKIP, uploaded) key; anything else falls back to
 * the regular ieee80211_subif_start_xmit() path. */
netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
	struct ieee80211_key *key;
	struct sta_info *sta;

	if (unlikely(skb->len < ETH_HLEN)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
		kfree_skb(skb);
		goto out;
	}

	if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
	    !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
	    sdata->control_port_protocol == ehdr->h_proto))
		goto skip_offload;

	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
	if (!key)
		key = rcu_dereference(sdata->default_unicast_key);

	/* TKIP and software-only keys can't use the offload path */
	if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
		    key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
		goto skip_offload;

	ieee80211_8023_xmit(sdata, dev, sta, key, skb);
	goto out;

skip_offload:
	ieee80211_subif_start_xmit(skb, dev);
out:
	rcu_read_unlock();

	return NETDEV_TX_OK;
}
/* Build a fully 802.11-encapsulated data frame template (header + key
 * selection) without transmitting it.  Returns the new skb or an
 * ERR_PTR() on failure; the input skb is consumed either way.
 *
 * NOTE(review): tail of this function was lost in extraction and has been
 * restored from context — verify against upstream tx.c. */
struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
			      struct sk_buff *skb, u32 info_flags)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_data tx = {
		.local = sdata->local,
		.sdata = sdata,
	};
	struct sta_info *sta;

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
		kfree_skb(skb);
		skb = ERR_PTR(-EINVAL);
		goto out;
	}

	skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, 0, NULL);
	if (IS_ERR(skb))
		goto out;

	hdr = (void *)skb->data;
	tx.sta = sta_info_get(sdata, hdr->addr1);
	tx.skb = skb;

	if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) {
		rcu_read_unlock();
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

out:
	rcu_read_unlock();
	return skb;
}
/*
 * ieee80211_clear_tx_pending may not be called in a context where
 * it is possible that it packets could come in again.
 */
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
	struct sk_buff *skb;
	int i;

	/* drain and free every per-hw-queue pending list */
	for (i = 0; i < local->hw.queues; i++) {
		while ((skb = skb_dequeue(&local->pending[i])) != NULL)
			ieee80211_free_txskb(&local->hw, skb);
	}
}
/*
 * Returns false if the frame couldn't be transmitted but was queued instead,
 * which in this case means re-queued -- take as an indication to stop sending
 * more pending frames.
 */
static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
				     struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;
	struct ieee80211_hdr *hdr;
	bool result;
	struct ieee80211_chanctx_conf *chanctx_conf;

	sdata = vif_to_sdata(info->control.vif);

	if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) {
		/* frame still needs full TX processing */
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (unlikely(!chanctx_conf)) {
			dev_kfree_skb(skb);
			return true;
		}
		info->band = chanctx_conf->def.chan->band;
		result = ieee80211_tx(sdata, NULL, skb, true);
	} else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
		/* 802.3 offloaded frame: retry via the 8023 path */
		if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
			dev_kfree_skb(skb);
			return true;
		}

		if (IS_ERR(sta) || (sta && !sta->uploaded))
			sta = NULL;

		result = ieee80211_tx_8023(sdata, skb, skb->len, sta, true);
	} else {
		/* already fully processed: just hand back to the driver */
		struct sk_buff_head skbs;

		__skb_queue_head_init(&skbs);
		__skb_queue_tail(&skbs, skb);

		hdr = (struct ieee80211_hdr *)skb->data;
		sta = sta_info_get(sdata, hdr->addr1);

		result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
	}

	return result;
}
/*
 * Transmit all pending packets. Called from tasklet.
 */
void ieee80211_tx_pending(struct tasklet_struct *t)
{
	struct ieee80211_local *local = from_tasklet(local, t,
						     tx_pending_tasklet);
	unsigned long flags;
	int i;
	bool txok;

	rcu_read_lock();

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (i = 0; i < local->hw.queues; i++) {
		/*
		 * If queue is stopped by something other than due to pending
		 * frames, or we have no pending frames, proceed to next queue.
		 */
		if (local->queue_stop_reasons[i] ||
		    skb_queue_empty(&local->pending[i]))
			continue;

		while (!skb_queue_empty(&local->pending[i])) {
			struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

			if (WARN_ON(!info->control.vif)) {
				ieee80211_free_txskb(&local->hw, skb);
				continue;
			}

			/* drop the lock around the actual transmit attempt */
			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
					       flags);

			txok = ieee80211_tx_pending_skb(local, skb);
			spin_lock_irqsave(&local->queue_stop_reason_lock,
					  flags);
			/* frame was re-queued: stop draining this queue */
			if (!txok)
				break;
		}

		if (skb_queue_empty(&local->pending[i]))
			ieee80211_propagate_queue_wake(local, i);
	}
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	rcu_read_unlock();
}
4684 /* functions for drivers to get certain frames */
/* Append the TIM (traffic indication map) element to a beacon @skb from
 * the power-save state in @ps, updating the DTIM count (unless building
 * a template).  Caller must hold tim_lock or otherwise serialize against
 * TIM bitmap updates (see ieee80211_beacon_add_tim()).
 *
 * NOTE(review): several dropped lines restored from context — verify the
 * n1/n2 search loops against upstream tx.c. */
static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
				       struct ps_data *ps, struct sk_buff *skb,
				       bool is_template)
{
	u8 *pos, *tim;
	int aid0 = 0;
	int i, have_bits = 0, n1, n2;

	/* Generate bitmap for TIM only if there are any STAs in power save
	 * mode. */
	if (atomic_read(&ps->num_sta_ps) > 0)
		/* in the hope that this is faster than
		 * checking byte-for-byte */
		have_bits = !bitmap_empty((unsigned long *)ps->tim,
					  IEEE80211_MAX_AID+1);
	if (!is_template) {
		if (ps->dtim_count == 0)
			ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
		else
			ps->dtim_count--;
	}

	tim = pos = skb_put(skb, 6);
	*pos++ = WLAN_EID_TIM;
	*pos++ = 4;
	*pos++ = ps->dtim_count;
	*pos++ = sdata->vif.bss_conf.dtim_period;

	/* AID 0 bit signals buffered broadcast/multicast at DTIM */
	if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
		aid0 = 1;

	ps->dtim_bc_mc = aid0 == 1;

	if (have_bits) {
		/* Find largest even number N1 so that bits numbered 1 through
		 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
		 * (N2 + 1) x 8 through 2007 are 0. */
		n1 = 0;
		for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
			if (ps->tim[i]) {
				n1 = i & 0xfe;
				break;
			}
		}
		n2 = n1;
		for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
			if (ps->tim[i]) {
				n2 = i;
				break;
			}
		}

		/* Bitmap control */
		*pos++ = n1 | aid0;
		/* Part Virt Bitmap */
		skb_put(skb, n2 - n1);
		memcpy(pos, ps->tim + n1, n2 - n1 + 1);

		tim[1] = n2 - n1 + 4;
	} else {
		*pos++ = aid0; /* Bitmap control */
		*pos++ = 0; /* Part Virt Bitmap */
	}
}
/* Locking wrapper for __ieee80211_beacon_add_tim(); takes tim_lock unless
 * the caller already holds it (driver calling back from set_tim()). */
static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
				    struct ps_data *ps, struct sk_buff *skb,
				    bool is_template)
{
	struct ieee80211_local *local = sdata->local;

	/*
	 * Not very nice, but we want to allow the driver to call
	 * ieee80211_beacon_get() as a response to the set_tim()
	 * callback. That, however, is already invoked under the
	 * sta_lock to guarantee consistent and race-free update
	 * of the tim bitmap in mac80211 and the driver.
	 */
	if (local->tim_in_locked_section) {
		__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
	} else {
		spin_lock_bh(&local->tim_lock);
		__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
		spin_unlock_bh(&local->tim_lock);
	}

	return 0;
}
/* Patch the current countdown counter (CSA/color change) into the beacon
 * data at every recorded counter offset, and mirror it into the AP's
 * probe response template if one exists. */
static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
					struct beacon_data *beacon)
{
	struct probe_resp *resp;
	u8 *beacon_data;
	size_t beacon_data_len;
	int i;
	u8 count = beacon->cntdwn_current_counter;

	/* counter location differs per interface type */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		beacon_data = beacon->tail;
		beacon_data_len = beacon->tail_len;
		break;
	case NL80211_IFTYPE_ADHOC:
		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
		break;
	case NL80211_IFTYPE_MESH_POINT:
		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
		break;
	default:
		return;
	}

	rcu_read_lock();
	for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; ++i) {
		resp = rcu_dereference(sdata->u.ap.probe_resp);

		if (beacon->cntdwn_counter_offsets[i]) {
			if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[i] >=
					 beacon_data_len)) {
				rcu_read_unlock();
				return;
			}

			beacon_data[beacon->cntdwn_counter_offsets[i]] = count;
		}

		if (sdata->vif.type == NL80211_IFTYPE_AP && resp)
			resp->data[resp->cntdwn_counter_offsets[i]] = count;
	}
	rcu_read_unlock();
}
/* Decrement and return the beacon countdown counter. */
static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
{
	beacon->cntdwn_current_counter--;

	/* the counter should never reach 0 */
	WARN_ON_ONCE(!beacon->cntdwn_current_counter);

	return beacon->cntdwn_current_counter;
}
/* Driver API: decrement the beacon countdown counter for @vif (AP, IBSS
 * or mesh) and return the new value; 0 if there is no beacon. */
u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct beacon_data *beacon = NULL;
	u8 count = 0;

	rcu_read_lock();

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		beacon = rcu_dereference(sdata->u.ap.beacon);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		beacon = rcu_dereference(sdata->u.ibss.presp);
	else if (ieee80211_vif_is_mesh(&sdata->vif))
		beacon = rcu_dereference(sdata->u.mesh.beacon);

	if (!beacon)
		goto unlock;

	count = __ieee80211_beacon_update_cntdwn(beacon);

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL(ieee80211_beacon_update_cntdwn);
/* Driver API: lower the beacon countdown counter to @counter (only ever
 * moves it downward) for @vif's beacon, if one exists. */
void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct beacon_data *beacon = NULL;

	rcu_read_lock();

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		beacon = rcu_dereference(sdata->u.ap.beacon);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		beacon = rcu_dereference(sdata->u.ibss.presp);
	else if (ieee80211_vif_is_mesh(&sdata->vif))
		beacon = rcu_dereference(sdata->u.mesh.beacon);

	if (!beacon)
		goto unlock;

	/* never move the counter upward */
	if (counter < beacon->cntdwn_current_counter)
		beacon->cntdwn_current_counter = counter;

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
/* Driver API: true when the first countdown counter embedded in @vif's
 * beacon has reached 1, i.e. the channel switch / color change should
 * complete at the next beacon. */
bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct beacon_data *beacon = NULL;
	u8 *beacon_data;
	size_t beacon_data_len;
	int ret = false;

	if (!ieee80211_sdata_running(sdata))
		return false;

	rcu_read_lock();
	if (vif->type == NL80211_IFTYPE_AP) {
		struct ieee80211_if_ap *ap = &sdata->u.ap;

		beacon = rcu_dereference(ap->beacon);
		if (WARN_ON(!beacon || !beacon->tail))
			goto out;
		/* AP keeps the counter in the beacon tail */
		beacon_data = beacon->tail;
		beacon_data_len = beacon->tail_len;
	} else if (vif->type == NL80211_IFTYPE_ADHOC) {
		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;

		beacon = rcu_dereference(ifibss->presp);
		if (!beacon)
			goto out;

		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
	} else if (vif->type == NL80211_IFTYPE_MESH_POINT) {
		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

		beacon = rcu_dereference(ifmsh->beacon);
		if (!beacon)
			goto out;

		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
	} else {
		WARN_ON(1);
		goto out;
	}

	if (!beacon->cntdwn_counter_offsets[0])
		goto out;

	if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[0] > beacon_data_len))
		goto out;

	if (beacon_data[beacon->cntdwn_counter_offsets[0]] == 1)
		ret = true;

 out:
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(ieee80211_beacon_cntdwn_is_complete);
4941 static int ieee80211_beacon_protect(struct sk_buff
*skb
,
4942 struct ieee80211_local
*local
,
4943 struct ieee80211_sub_if_data
*sdata
)
4945 ieee80211_tx_result res
;
4946 struct ieee80211_tx_data tx
;
4947 struct sk_buff
*check_skb
;
4949 memset(&tx
, 0, sizeof(tx
));
4950 tx
.key
= rcu_dereference(sdata
->default_beacon_key
);
4955 __skb_queue_head_init(&tx
.skbs
);
4956 __skb_queue_tail(&tx
.skbs
, skb
);
4957 res
= ieee80211_tx_h_encrypt(&tx
);
4958 check_skb
= __skb_dequeue(&tx
.skbs
);
4959 /* we may crash after this, but it'd be a bug in crypto */
4960 WARN_ON(check_skb
!= skb
);
4961 if (WARN_ON_ONCE(res
!= TX_CONTINUE
))
4967 static struct sk_buff
*
4968 __ieee80211_beacon_get(struct ieee80211_hw
*hw
,
4969 struct ieee80211_vif
*vif
,
4970 struct ieee80211_mutable_offsets
*offs
,
4973 struct ieee80211_local
*local
= hw_to_local(hw
);
4974 struct beacon_data
*beacon
= NULL
;
4975 struct sk_buff
*skb
= NULL
;
4976 struct ieee80211_tx_info
*info
;
4977 struct ieee80211_sub_if_data
*sdata
= NULL
;
4978 enum nl80211_band band
;
4979 struct ieee80211_tx_rate_control txrc
;
4980 struct ieee80211_chanctx_conf
*chanctx_conf
;
4981 int csa_off_base
= 0;
4985 sdata
= vif_to_sdata(vif
);
4986 chanctx_conf
= rcu_dereference(sdata
->vif
.chanctx_conf
);
4988 if (!ieee80211_sdata_running(sdata
) || !chanctx_conf
)
4992 memset(offs
, 0, sizeof(*offs
));
4994 if (sdata
->vif
.type
== NL80211_IFTYPE_AP
) {
4995 struct ieee80211_if_ap
*ap
= &sdata
->u
.ap
;
4997 beacon
= rcu_dereference(ap
->beacon
);
4999 if (beacon
->cntdwn_counter_offsets
[0]) {
5001 ieee80211_beacon_update_cntdwn(vif
);
5003 ieee80211_set_beacon_cntdwn(sdata
, beacon
);
5007 * headroom, head length,
5008 * tail length and maximum TIM length
5010 skb
= dev_alloc_skb(local
->tx_headroom
+
5012 beacon
->tail_len
+ 256 +
5013 local
->hw
.extra_beacon_tailroom
);
5017 skb_reserve(skb
, local
->tx_headroom
);
5018 skb_put_data(skb
, beacon
->head
, beacon
->head_len
);
5020 ieee80211_beacon_add_tim(sdata
, &ap
->ps
, skb
,
5024 offs
->tim_offset
= beacon
->head_len
;
5025 offs
->tim_length
= skb
->len
- beacon
->head_len
;
5027 /* for AP the csa offsets are from tail */
5028 csa_off_base
= skb
->len
;
5032 skb_put_data(skb
, beacon
->tail
,
5035 if (ieee80211_beacon_protect(skb
, local
, sdata
) < 0)
5039 } else if (sdata
->vif
.type
== NL80211_IFTYPE_ADHOC
) {
5040 struct ieee80211_if_ibss
*ifibss
= &sdata
->u
.ibss
;
5041 struct ieee80211_hdr
*hdr
;
5043 beacon
= rcu_dereference(ifibss
->presp
);
5047 if (beacon
->cntdwn_counter_offsets
[0]) {
5049 __ieee80211_beacon_update_cntdwn(beacon
);
5051 ieee80211_set_beacon_cntdwn(sdata
, beacon
);
5054 skb
= dev_alloc_skb(local
->tx_headroom
+ beacon
->head_len
+
5055 local
->hw
.extra_beacon_tailroom
);
5058 skb_reserve(skb
, local
->tx_headroom
);
5059 skb_put_data(skb
, beacon
->head
, beacon
->head_len
);
5061 hdr
= (struct ieee80211_hdr
*) skb
->data
;
5062 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
5063 IEEE80211_STYPE_BEACON
);
5064 } else if (ieee80211_vif_is_mesh(&sdata
->vif
)) {
5065 struct ieee80211_if_mesh
*ifmsh
= &sdata
->u
.mesh
;
5067 beacon
= rcu_dereference(ifmsh
->beacon
);
5071 if (beacon
->cntdwn_counter_offsets
[0]) {
5073 /* TODO: For mesh csa_counter is in TU, so
5074 * decrementing it by one isn't correct, but
5075 * for now we leave it consistent with overall
5076 * mac80211's behavior.
5078 __ieee80211_beacon_update_cntdwn(beacon
);
5080 ieee80211_set_beacon_cntdwn(sdata
, beacon
);
5083 if (ifmsh
->sync_ops
)
5084 ifmsh
->sync_ops
->adjust_tsf(sdata
, beacon
);
5086 skb
= dev_alloc_skb(local
->tx_headroom
+
5090 local
->hw
.extra_beacon_tailroom
);
5093 skb_reserve(skb
, local
->tx_headroom
);
5094 skb_put_data(skb
, beacon
->head
, beacon
->head_len
);
5095 ieee80211_beacon_add_tim(sdata
, &ifmsh
->ps
, skb
, is_template
);
5098 offs
->tim_offset
= beacon
->head_len
;
5099 offs
->tim_length
= skb
->len
- beacon
->head_len
;
5102 skb_put_data(skb
, beacon
->tail
, beacon
->tail_len
);
5109 if (offs
&& beacon
) {
5112 for (i
= 0; i
< IEEE80211_MAX_CNTDWN_COUNTERS_NUM
; i
++) {
5113 u16 csa_off
= beacon
->cntdwn_counter_offsets
[i
];
5118 offs
->cntdwn_counter_offs
[i
] = csa_off_base
+ csa_off
;
5122 band
= chanctx_conf
->def
.chan
->band
;
5124 info
= IEEE80211_SKB_CB(skb
);
5126 info
->flags
|= IEEE80211_TX_INTFL_DONT_ENCRYPT
;
5127 info
->flags
|= IEEE80211_TX_CTL_NO_ACK
;
5130 memset(&txrc
, 0, sizeof(txrc
));
5132 txrc
.sband
= local
->hw
.wiphy
->bands
[band
];
5133 txrc
.bss_conf
= &sdata
->vif
.bss_conf
;
5135 txrc
.reported_rate
.idx
= -1;
5136 if (sdata
->beacon_rate_set
&& sdata
->beacon_rateidx_mask
[band
])
5137 txrc
.rate_idx_mask
= sdata
->beacon_rateidx_mask
[band
];
5139 txrc
.rate_idx_mask
= sdata
->rc_rateidx_mask
[band
];
5141 rate_control_get_rate(sdata
, NULL
, &txrc
);
5143 info
->control
.vif
= vif
;
5145 info
->flags
|= IEEE80211_TX_CTL_CLEAR_PS_FILT
|
5146 IEEE80211_TX_CTL_ASSIGN_SEQ
|
5147 IEEE80211_TX_CTL_FIRST_FRAGMENT
;
5155 ieee80211_beacon_get_template(struct ieee80211_hw
*hw
,
5156 struct ieee80211_vif
*vif
,
5157 struct ieee80211_mutable_offsets
*offs
)
5159 return __ieee80211_beacon_get(hw
, vif
, offs
, true);
5161 EXPORT_SYMBOL(ieee80211_beacon_get_template
);
5163 struct sk_buff
*ieee80211_beacon_get_tim(struct ieee80211_hw
*hw
,
5164 struct ieee80211_vif
*vif
,
5165 u16
*tim_offset
, u16
*tim_length
)
5167 struct ieee80211_mutable_offsets offs
= {};
5168 struct sk_buff
*bcn
= __ieee80211_beacon_get(hw
, vif
, &offs
, false);
5169 struct sk_buff
*copy
;
5170 struct ieee80211_supported_band
*sband
;
5177 *tim_offset
= offs
.tim_offset
;
5180 *tim_length
= offs
.tim_length
;
5182 if (ieee80211_hw_check(hw
, BEACON_TX_STATUS
) ||
5183 !hw_to_local(hw
)->monitors
)
5186 /* send a copy to monitor interfaces */
5187 copy
= skb_copy(bcn
, GFP_ATOMIC
);
5191 shift
= ieee80211_vif_get_shift(vif
);
5192 sband
= ieee80211_get_sband(vif_to_sdata(vif
));
5196 ieee80211_tx_monitor(hw_to_local(hw
), copy
, sband
, 1, shift
, false,
5201 EXPORT_SYMBOL(ieee80211_beacon_get_tim
);
5203 struct sk_buff
*ieee80211_proberesp_get(struct ieee80211_hw
*hw
,
5204 struct ieee80211_vif
*vif
)
5206 struct ieee80211_if_ap
*ap
= NULL
;
5207 struct sk_buff
*skb
= NULL
;
5208 struct probe_resp
*presp
= NULL
;
5209 struct ieee80211_hdr
*hdr
;
5210 struct ieee80211_sub_if_data
*sdata
= vif_to_sdata(vif
);
5212 if (sdata
->vif
.type
!= NL80211_IFTYPE_AP
)
5218 presp
= rcu_dereference(ap
->probe_resp
);
5222 skb
= dev_alloc_skb(presp
->len
);
5226 skb_put_data(skb
, presp
->data
, presp
->len
);
5228 hdr
= (struct ieee80211_hdr
*) skb
->data
;
5229 memset(hdr
->addr1
, 0, sizeof(hdr
->addr1
));
5235 EXPORT_SYMBOL(ieee80211_proberesp_get
);
5237 struct sk_buff
*ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw
*hw
,
5238 struct ieee80211_vif
*vif
)
5240 struct sk_buff
*skb
= NULL
;
5241 struct fils_discovery_data
*tmpl
= NULL
;
5242 struct ieee80211_sub_if_data
*sdata
= vif_to_sdata(vif
);
5244 if (sdata
->vif
.type
!= NL80211_IFTYPE_AP
)
5248 tmpl
= rcu_dereference(sdata
->u
.ap
.fils_discovery
);
5254 skb
= dev_alloc_skb(sdata
->local
->hw
.extra_tx_headroom
+ tmpl
->len
);
5256 skb_reserve(skb
, sdata
->local
->hw
.extra_tx_headroom
);
5257 skb_put_data(skb
, tmpl
->data
, tmpl
->len
);
5263 EXPORT_SYMBOL(ieee80211_get_fils_discovery_tmpl
);
5266 ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw
*hw
,
5267 struct ieee80211_vif
*vif
)
5269 struct sk_buff
*skb
= NULL
;
5270 struct unsol_bcast_probe_resp_data
*tmpl
= NULL
;
5271 struct ieee80211_sub_if_data
*sdata
= vif_to_sdata(vif
);
5273 if (sdata
->vif
.type
!= NL80211_IFTYPE_AP
)
5277 tmpl
= rcu_dereference(sdata
->u
.ap
.unsol_bcast_probe_resp
);
5283 skb
= dev_alloc_skb(sdata
->local
->hw
.extra_tx_headroom
+ tmpl
->len
);
5285 skb_reserve(skb
, sdata
->local
->hw
.extra_tx_headroom
);
5286 skb_put_data(skb
, tmpl
->data
, tmpl
->len
);
5292 EXPORT_SYMBOL(ieee80211_get_unsol_bcast_probe_resp_tmpl
);
5294 struct sk_buff
*ieee80211_pspoll_get(struct ieee80211_hw
*hw
,
5295 struct ieee80211_vif
*vif
)
5297 struct ieee80211_sub_if_data
*sdata
;
5298 struct ieee80211_if_managed
*ifmgd
;
5299 struct ieee80211_pspoll
*pspoll
;
5300 struct ieee80211_local
*local
;
5301 struct sk_buff
*skb
;
5303 if (WARN_ON(vif
->type
!= NL80211_IFTYPE_STATION
))
5306 sdata
= vif_to_sdata(vif
);
5307 ifmgd
= &sdata
->u
.mgd
;
5308 local
= sdata
->local
;
5310 skb
= dev_alloc_skb(local
->hw
.extra_tx_headroom
+ sizeof(*pspoll
));
5314 skb_reserve(skb
, local
->hw
.extra_tx_headroom
);
5316 pspoll
= skb_put_zero(skb
, sizeof(*pspoll
));
5317 pspoll
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_CTL
|
5318 IEEE80211_STYPE_PSPOLL
);
5319 pspoll
->aid
= cpu_to_le16(sdata
->vif
.bss_conf
.aid
);
5321 /* aid in PS-Poll has its two MSBs each set to 1 */
5322 pspoll
->aid
|= cpu_to_le16(1 << 15 | 1 << 14);
5324 memcpy(pspoll
->bssid
, ifmgd
->bssid
, ETH_ALEN
);
5325 memcpy(pspoll
->ta
, vif
->addr
, ETH_ALEN
);
5329 EXPORT_SYMBOL(ieee80211_pspoll_get
);
5331 struct sk_buff
*ieee80211_nullfunc_get(struct ieee80211_hw
*hw
,
5332 struct ieee80211_vif
*vif
,
5335 struct ieee80211_hdr_3addr
*nullfunc
;
5336 struct ieee80211_sub_if_data
*sdata
;
5337 struct ieee80211_if_managed
*ifmgd
;
5338 struct ieee80211_local
*local
;
5339 struct sk_buff
*skb
;
5342 if (WARN_ON(vif
->type
!= NL80211_IFTYPE_STATION
))
5345 sdata
= vif_to_sdata(vif
);
5346 ifmgd
= &sdata
->u
.mgd
;
5347 local
= sdata
->local
;
5350 struct sta_info
*sta
;
5353 sta
= sta_info_get(sdata
, ifmgd
->bssid
);
5354 qos
= sta
&& sta
->sta
.wme
;
5358 skb
= dev_alloc_skb(local
->hw
.extra_tx_headroom
+
5359 sizeof(*nullfunc
) + 2);
5363 skb_reserve(skb
, local
->hw
.extra_tx_headroom
);
5365 nullfunc
= skb_put_zero(skb
, sizeof(*nullfunc
));
5366 nullfunc
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
5367 IEEE80211_STYPE_NULLFUNC
|
5368 IEEE80211_FCTL_TODS
);
5370 __le16 qoshdr
= cpu_to_le16(7);
5372 BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC
|
5373 IEEE80211_STYPE_NULLFUNC
) !=
5374 IEEE80211_STYPE_QOS_NULLFUNC
);
5375 nullfunc
->frame_control
|=
5376 cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC
);
5378 skb_set_queue_mapping(skb
, IEEE80211_AC_VO
);
5379 skb_put_data(skb
, &qoshdr
, sizeof(qoshdr
));
5382 memcpy(nullfunc
->addr1
, ifmgd
->bssid
, ETH_ALEN
);
5383 memcpy(nullfunc
->addr2
, vif
->addr
, ETH_ALEN
);
5384 memcpy(nullfunc
->addr3
, ifmgd
->bssid
, ETH_ALEN
);
5388 EXPORT_SYMBOL(ieee80211_nullfunc_get
);
5390 struct sk_buff
*ieee80211_probereq_get(struct ieee80211_hw
*hw
,
5392 const u8
*ssid
, size_t ssid_len
,
5395 struct ieee80211_local
*local
= hw_to_local(hw
);
5396 struct ieee80211_hdr_3addr
*hdr
;
5397 struct sk_buff
*skb
;
5401 ie_ssid_len
= 2 + ssid_len
;
5403 skb
= dev_alloc_skb(local
->hw
.extra_tx_headroom
+ sizeof(*hdr
) +
5404 ie_ssid_len
+ tailroom
);
5408 skb_reserve(skb
, local
->hw
.extra_tx_headroom
);
5410 hdr
= skb_put_zero(skb
, sizeof(*hdr
));
5411 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
5412 IEEE80211_STYPE_PROBE_REQ
);
5413 eth_broadcast_addr(hdr
->addr1
);
5414 memcpy(hdr
->addr2
, src_addr
, ETH_ALEN
);
5415 eth_broadcast_addr(hdr
->addr3
);
5417 pos
= skb_put(skb
, ie_ssid_len
);
5418 *pos
++ = WLAN_EID_SSID
;
5421 memcpy(pos
, ssid
, ssid_len
);
5426 EXPORT_SYMBOL(ieee80211_probereq_get
);
5428 void ieee80211_rts_get(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
,
5429 const void *frame
, size_t frame_len
,
5430 const struct ieee80211_tx_info
*frame_txctl
,
5431 struct ieee80211_rts
*rts
)
5433 const struct ieee80211_hdr
*hdr
= frame
;
5435 rts
->frame_control
=
5436 cpu_to_le16(IEEE80211_FTYPE_CTL
| IEEE80211_STYPE_RTS
);
5437 rts
->duration
= ieee80211_rts_duration(hw
, vif
, frame_len
,
5439 memcpy(rts
->ra
, hdr
->addr1
, sizeof(rts
->ra
));
5440 memcpy(rts
->ta
, hdr
->addr2
, sizeof(rts
->ta
));
5442 EXPORT_SYMBOL(ieee80211_rts_get
);
5444 void ieee80211_ctstoself_get(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
,
5445 const void *frame
, size_t frame_len
,
5446 const struct ieee80211_tx_info
*frame_txctl
,
5447 struct ieee80211_cts
*cts
)
5449 const struct ieee80211_hdr
*hdr
= frame
;
5451 cts
->frame_control
=
5452 cpu_to_le16(IEEE80211_FTYPE_CTL
| IEEE80211_STYPE_CTS
);
5453 cts
->duration
= ieee80211_ctstoself_duration(hw
, vif
,
5454 frame_len
, frame_txctl
);
5455 memcpy(cts
->ra
, hdr
->addr1
, sizeof(cts
->ra
));
5457 EXPORT_SYMBOL(ieee80211_ctstoself_get
);
5460 ieee80211_get_buffered_bc(struct ieee80211_hw
*hw
,
5461 struct ieee80211_vif
*vif
)
5463 struct ieee80211_local
*local
= hw_to_local(hw
);
5464 struct sk_buff
*skb
= NULL
;
5465 struct ieee80211_tx_data tx
;
5466 struct ieee80211_sub_if_data
*sdata
;
5468 struct ieee80211_tx_info
*info
;
5469 struct ieee80211_chanctx_conf
*chanctx_conf
;
5471 sdata
= vif_to_sdata(vif
);
5474 chanctx_conf
= rcu_dereference(sdata
->vif
.chanctx_conf
);
5479 if (sdata
->vif
.type
== NL80211_IFTYPE_AP
) {
5480 struct beacon_data
*beacon
=
5481 rcu_dereference(sdata
->u
.ap
.beacon
);
5483 if (!beacon
|| !beacon
->head
)
5486 ps
= &sdata
->u
.ap
.ps
;
5487 } else if (ieee80211_vif_is_mesh(&sdata
->vif
)) {
5488 ps
= &sdata
->u
.mesh
.ps
;
5493 if (ps
->dtim_count
!= 0 || !ps
->dtim_bc_mc
)
5494 goto out
; /* send buffered bc/mc only after DTIM beacon */
5497 skb
= skb_dequeue(&ps
->bc_buf
);
5500 local
->total_ps_buffered
--;
5502 if (!skb_queue_empty(&ps
->bc_buf
) && skb
->len
>= 2) {
5503 struct ieee80211_hdr
*hdr
=
5504 (struct ieee80211_hdr
*) skb
->data
;
5505 /* more buffered multicast/broadcast frames ==> set
5506 * MoreData flag in IEEE 802.11 header to inform PS
5508 hdr
->frame_control
|=
5509 cpu_to_le16(IEEE80211_FCTL_MOREDATA
);
5512 if (sdata
->vif
.type
== NL80211_IFTYPE_AP
)
5513 sdata
= IEEE80211_DEV_TO_SUB_IF(skb
->dev
);
5514 if (!ieee80211_tx_prepare(sdata
, &tx
, NULL
, skb
))
5516 ieee80211_free_txskb(hw
, skb
);
5519 info
= IEEE80211_SKB_CB(skb
);
5521 tx
.flags
|= IEEE80211_TX_PS_BUFFERED
;
5522 info
->band
= chanctx_conf
->def
.chan
->band
;
5524 if (invoke_tx_handlers(&tx
))
5531 EXPORT_SYMBOL(ieee80211_get_buffered_bc
);
5533 int ieee80211_reserve_tid(struct ieee80211_sta
*pubsta
, u8 tid
)
5535 struct sta_info
*sta
= container_of(pubsta
, struct sta_info
, sta
);
5536 struct ieee80211_sub_if_data
*sdata
= sta
->sdata
;
5537 struct ieee80211_local
*local
= sdata
->local
;
5541 lockdep_assert_held(&local
->sta_mtx
);
5543 /* only some cases are supported right now */
5544 switch (sdata
->vif
.type
) {
5545 case NL80211_IFTYPE_STATION
:
5546 case NL80211_IFTYPE_AP
:
5547 case NL80211_IFTYPE_AP_VLAN
:
5554 if (WARN_ON(tid
>= IEEE80211_NUM_UPS
))
5557 if (sta
->reserved_tid
== tid
) {
5562 if (sta
->reserved_tid
!= IEEE80211_TID_UNRESERVED
) {
5563 sdata_err(sdata
, "TID reservation already active\n");
5568 ieee80211_stop_vif_queues(sdata
->local
, sdata
,
5569 IEEE80211_QUEUE_STOP_REASON_RESERVE_TID
);
5573 /* Tear down BA sessions so we stop aggregating on this TID */
5574 if (ieee80211_hw_check(&local
->hw
, AMPDU_AGGREGATION
)) {
5575 set_sta_flag(sta
, WLAN_STA_BLOCK_BA
);
5576 __ieee80211_stop_tx_ba_session(sta
, tid
,
5577 AGG_STOP_LOCAL_REQUEST
);
5580 queues
= BIT(sdata
->vif
.hw_queue
[ieee802_1d_to_ac
[tid
]]);
5581 __ieee80211_flush_queues(local
, sdata
, queues
, false);
5583 sta
->reserved_tid
= tid
;
5585 ieee80211_wake_vif_queues(local
, sdata
,
5586 IEEE80211_QUEUE_STOP_REASON_RESERVE_TID
);
5588 if (ieee80211_hw_check(&local
->hw
, AMPDU_AGGREGATION
))
5589 clear_sta_flag(sta
, WLAN_STA_BLOCK_BA
);
5595 EXPORT_SYMBOL(ieee80211_reserve_tid
);
5597 void ieee80211_unreserve_tid(struct ieee80211_sta
*pubsta
, u8 tid
)
5599 struct sta_info
*sta
= container_of(pubsta
, struct sta_info
, sta
);
5600 struct ieee80211_sub_if_data
*sdata
= sta
->sdata
;
5602 lockdep_assert_held(&sdata
->local
->sta_mtx
);
5604 /* only some cases are supported right now */
5605 switch (sdata
->vif
.type
) {
5606 case NL80211_IFTYPE_STATION
:
5607 case NL80211_IFTYPE_AP
:
5608 case NL80211_IFTYPE_AP_VLAN
:
5615 if (tid
!= sta
->reserved_tid
) {
5616 sdata_err(sdata
, "TID to unreserve (%d) isn't reserved\n", tid
);
5620 sta
->reserved_tid
= IEEE80211_TID_UNRESERVED
;
5622 EXPORT_SYMBOL(ieee80211_unreserve_tid
);
5624 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data
*sdata
,
5625 struct sk_buff
*skb
, int tid
,
5626 enum nl80211_band band
)
5628 int ac
= ieee80211_ac_from_tid(tid
);
5630 skb_reset_mac_header(skb
);
5631 skb_set_queue_mapping(skb
, ac
);
5632 skb
->priority
= tid
;
5634 skb
->dev
= sdata
->dev
;
5637 * The other path calling ieee80211_xmit is from the tasklet,
5638 * and while we can handle concurrent transmissions locking
5639 * requirements are that we do not come into tx with bhs on.
5642 IEEE80211_SKB_CB(skb
)->band
= band
;
5643 ieee80211_xmit(sdata
, NULL
, skb
);
5647 int ieee80211_tx_control_port(struct wiphy
*wiphy
, struct net_device
*dev
,
5648 const u8
*buf
, size_t len
,
5649 const u8
*dest
, __be16 proto
, bool unencrypted
,
5652 struct ieee80211_sub_if_data
*sdata
= IEEE80211_DEV_TO_SUB_IF(dev
);
5653 struct ieee80211_local
*local
= sdata
->local
;
5654 struct sta_info
*sta
;
5655 struct sk_buff
*skb
;
5656 struct ethhdr
*ehdr
;
5660 /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
5661 * or Pre-Authentication
5663 if (proto
!= sdata
->control_port_protocol
&&
5664 proto
!= cpu_to_be16(ETH_P_PREAUTH
))
5667 if (proto
== sdata
->control_port_protocol
)
5668 ctrl_flags
|= IEEE80211_TX_CTRL_PORT_CTRL_PROTO
|
5669 IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP
;
5672 flags
|= IEEE80211_TX_INTFL_DONT_ENCRYPT
;
5675 ctrl_flags
|= IEEE80211_TX_CTL_REQ_TX_STATUS
;
5677 flags
|= IEEE80211_TX_INTFL_NL80211_FRAME_TX
;
5679 skb
= dev_alloc_skb(local
->hw
.extra_tx_headroom
+
5680 sizeof(struct ethhdr
) + len
);
5684 skb_reserve(skb
, local
->hw
.extra_tx_headroom
+ sizeof(struct ethhdr
));
5686 skb_put_data(skb
, buf
, len
);
5688 ehdr
= skb_push(skb
, sizeof(struct ethhdr
));
5689 memcpy(ehdr
->h_dest
, dest
, ETH_ALEN
);
5690 memcpy(ehdr
->h_source
, sdata
->vif
.addr
, ETH_ALEN
);
5691 ehdr
->h_proto
= proto
;
5694 skb
->protocol
= proto
;
5695 skb_reset_network_header(skb
);
5696 skb_reset_mac_header(skb
);
5698 /* update QoS header to prioritize control port frames if possible,
5699 * priorization also happens for control port frames send over
5704 if (ieee80211_lookup_ra_sta(sdata
, skb
, &sta
) == 0 && !IS_ERR(sta
)) {
5705 u16 queue
= __ieee80211_select_queue(sdata
, sta
, skb
);
5707 skb_set_queue_mapping(skb
, queue
);
5713 /* mutex lock is only needed for incrementing the cookie counter */
5714 mutex_lock(&local
->mtx
);
5717 __ieee80211_subif_start_xmit(skb
, skb
->dev
, flags
, ctrl_flags
, cookie
);
5720 mutex_unlock(&local
->mtx
);
5725 int ieee80211_probe_mesh_link(struct wiphy
*wiphy
, struct net_device
*dev
,
5726 const u8
*buf
, size_t len
)
5728 struct ieee80211_sub_if_data
*sdata
= IEEE80211_DEV_TO_SUB_IF(dev
);
5729 struct ieee80211_local
*local
= sdata
->local
;
5730 struct sk_buff
*skb
;
5732 skb
= dev_alloc_skb(local
->hw
.extra_tx_headroom
+ len
+
5733 30 + /* header size */
5734 18); /* 11s header size */
5738 skb_reserve(skb
, local
->hw
.extra_tx_headroom
);
5739 skb_put_data(skb
, buf
, len
);
5742 skb
->protocol
= htons(ETH_P_802_3
);
5743 skb_reset_network_header(skb
);
5744 skb_reset_mac_header(skb
);
5747 __ieee80211_subif_start_xmit(skb
, skb
->dev
, 0,
5748 IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP
,