]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/wireless/iwlwifi/mvm/tdls.c
Merge tag 'media/v3.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab...
[mirror_ubuntu-bionic-kernel.git] / drivers / net / wireless / iwlwifi / mvm / tdls.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2014 Intel Mobile Communications GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2014 Intel Mobile Communications GmbH
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64 #include <linux/etherdevice.h>
65 #include "mvm.h"
66 #include "time-event.h"
67
/* Convert IEEE 802.11 time units (1 TU = 1024 us) to us/ms.
 * Arguments are parenthesized so compound expressions expand correctly
 * (the previous `x * 1024` mis-expanded e.g. TU_TO_US(a + b)). */
#define TU_TO_US(x) ((x) * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
70
71 void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
72 {
73 struct ieee80211_sta *sta;
74 struct iwl_mvm_sta *mvmsta;
75 int i;
76
77 lockdep_assert_held(&mvm->mutex);
78
79 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
80 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
81 lockdep_is_held(&mvm->mutex));
82 if (!sta || IS_ERR(sta) || !sta->tdls)
83 continue;
84
85 mvmsta = iwl_mvm_sta_from_mac80211(sta);
86 ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
87 NL80211_TDLS_TEARDOWN,
88 WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
89 GFP_KERNEL);
90 }
91 }
92
93 int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
94 {
95 struct ieee80211_sta *sta;
96 struct iwl_mvm_sta *mvmsta;
97 int count = 0;
98 int i;
99
100 lockdep_assert_held(&mvm->mutex);
101
102 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
103 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
104 lockdep_is_held(&mvm->mutex));
105 if (!sta || IS_ERR(sta) || !sta->tdls)
106 continue;
107
108 if (vif) {
109 mvmsta = iwl_mvm_sta_from_mac80211(sta);
110 if (mvmsta->vif != vif)
111 continue;
112 }
113
114 count++;
115 }
116
117 return count;
118 }
119
/*
 * Send the current TDLS peer list to the firmware.
 *
 * Builds a TDLS_CONFIG_CMD for @vif's MAC context, listing every TDLS
 * station currently in the FW station table, and sends it synchronously.
 * The response packet is sanity-checked but its contents are otherwise
 * ignored for now.
 *
 * Requires mvm->mutex (station table protection).
 */
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB, /* we want to inspect the response */
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		/* one sta_info entry per TDLS peer, FW indexes by sta_id */
		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
							IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	/* a send failure means there is no response to free */
	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
			pkt->hdr.flags);
		goto exit;
	}

	/* sanity-check the response size against the expected struct */
	if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
		goto exit;

	/* we don't really care about the response at this point */

exit:
	iwl_free_resp(&cmd);
}
184
185 void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
186 bool sta_added)
187 {
188 int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
189
190 /* when the first peer joins, send a power update first */
191 if (tdls_sta_cnt == 1 && sta_added)
192 iwl_mvm_power_update_mac(mvm);
193
194 /* configure the FW with TDLS peer info */
195 iwl_mvm_tdls_config(mvm, vif);
196
197 /* when the last peer leaves, send a power update last */
198 if (tdls_sta_cnt == 0 && !sta_added)
199 iwl_mvm_power_update_mac(mvm);
200 }
201
202 void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
203 struct ieee80211_vif *vif)
204 {
205 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
206 u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
207
208 /*
209 * iwl_mvm_protect_session() reads directly from the device
210 * (the system time), so make sure it is available.
211 */
212 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
213 return;
214
215 mutex_lock(&mvm->mutex);
216 /* Protect the session to hear the TDLS setup response on the channel */
217 iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
218 mutex_unlock(&mvm->mutex);
219
220 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
221 }
222
223 static const char *
224 iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
225 {
226 switch (state) {
227 case IWL_MVM_TDLS_SW_IDLE:
228 return "IDLE";
229 case IWL_MVM_TDLS_SW_REQ_SENT:
230 return "REQ SENT";
231 case IWL_MVM_TDLS_SW_REQ_RCVD:
232 return "REQ RECEIVED";
233 case IWL_MVM_TDLS_SW_ACTIVE:
234 return "ACTIVE";
235 }
236
237 return NULL;
238 }
239
/*
 * Move the TDLS channel-switch state machine to @state.
 *
 * No-op when the state is unchanged. Transitions are logged, and
 * entering IDLE also invalidates the id of the station we were
 * switching with (cur_sta_id).
 */
static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* IDLE means no station is mid-switch anymore */
	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}
254
/*
 * Handle a TDLS channel-switch notification from the firmware.
 *
 * A zero status means the switch failed - fall back to IDLE and let the
 * delayed work retry later. On success, mark the switch ACTIVE and
 * (re)arm the delayed work to continue after roughly one DTIM interval.
 *
 * Always returns 0. Requires mvm->mutex (FW station table and the
 * tdls_cs state machine are protected by it).
 */
int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
			  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		goto out;
	}

	/* sta_id comes from the firmware - validate before indexing */
	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
		goto out;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);

out:
	return 0;
}
299
/*
 * Validate a requested channel-switch action against the current
 * channel-switch state machine.
 *
 * @type: the action being attempted (send request, respond-and-move,
 *	  or move back to base channel)
 * @peer: MAC address of the peer the action concerns
 * @peer_initiator: true if the *peer* is the TDLS link initiator
 *
 * Returns 0 if the action is allowed, -EBUSY if another switch (or the
 * same one in an incompatible role) is in progress, -EINVAL if the
 * action makes no sense in the current state.
 * Requires mvm->mutex (cur_sta_id / station table access).
 */
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
			mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
			lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/*
		 * We received a ch-switch request while an outgoing one is
		 * pending. Allow it to proceed if the other peer is the same
		 * one we sent to, and we are not the link initiator.
		 */
		if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH) {
			if (!same_peer)
				ret = -EBUSY;
			else if (!peer_initiator) /* we are the initiator */
				ret = -EBUSY;
		}
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/* we don't allow initiations during active channel switch */
		if (type == TDLS_SEND_CHAN_SW_REQ)
			ret = -EINVAL;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}
366
367 static int
368 iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
369 struct ieee80211_vif *vif,
370 enum iwl_tdls_channel_switch_type type,
371 const u8 *peer, bool peer_initiator,
372 u8 oper_class,
373 struct cfg80211_chan_def *chandef,
374 u32 timestamp, u16 switch_time,
375 u16 switch_timeout, struct sk_buff *skb,
376 u32 ch_sw_tm_ie)
377 {
378 struct ieee80211_sta *sta;
379 struct iwl_mvm_sta *mvmsta;
380 struct ieee80211_tx_info *info;
381 struct ieee80211_hdr *hdr;
382 struct iwl_tdls_channel_switch_cmd cmd = {0};
383 int ret;
384
385 lockdep_assert_held(&mvm->mutex);
386
387 ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator);
388 if (ret)
389 return ret;
390
391 if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
392 ret = -EINVAL;
393 goto out;
394 }
395
396 cmd.switch_type = type;
397 cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
398 cmd.timing.switch_time = cpu_to_le32(switch_time);
399 cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
400
401 rcu_read_lock();
402 sta = ieee80211_find_sta(vif, peer);
403 if (!sta) {
404 rcu_read_unlock();
405 ret = -ENOENT;
406 goto out;
407 }
408 mvmsta = iwl_mvm_sta_from_mac80211(sta);
409 cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
410
411 if (!chandef) {
412 if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
413 mvm->tdls_cs.peer.chandef.chan) {
414 /* actually moving to the channel */
415 chandef = &mvm->tdls_cs.peer.chandef;
416 } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
417 type == TDLS_MOVE_CH) {
418 /* we need to return to base channel */
419 struct ieee80211_chanctx_conf *chanctx =
420 rcu_dereference(vif->chanctx_conf);
421
422 if (WARN_ON_ONCE(!chanctx)) {
423 rcu_read_unlock();
424 goto out;
425 }
426
427 chandef = &chanctx->def;
428 }
429 }
430
431 if (chandef) {
432 cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
433 PHY_BAND_24 : PHY_BAND_5);
434 cmd.ci.channel = chandef->chan->hw_value;
435 cmd.ci.width = iwl_mvm_get_channel_width(chandef);
436 cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
437 }
438
439 /* keep quota calculation simple for now - 50% of DTIM for TDLS */
440 cmd.timing.max_offchan_duration =
441 cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
442 vif->bss_conf.beacon_int) / 2);
443
444 /* Switch time is the first element in the switch-timing IE. */
445 cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
446
447 info = IEEE80211_SKB_CB(skb);
448 if (info->control.hw_key)
449 iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
450
451 iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
452 mvmsta->sta_id);
453
454 hdr = (void *)skb->data;
455 iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
456 hdr->frame_control);
457 rcu_read_unlock();
458
459 memcpy(cmd.frame.data, skb->data, skb->len);
460
461 ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
462 sizeof(cmd), &cmd);
463 if (ret) {
464 IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
465 ret);
466 goto out;
467 }
468
469 /* channel switch has started, update state */
470 if (type != TDLS_MOVE_CH) {
471 mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
472 iwl_mvm_tdls_update_cs_state(mvm,
473 type == TDLS_SEND_CHAN_SW_REQ ?
474 IWL_MVM_TDLS_SW_REQ_SENT :
475 IWL_MVM_TDLS_SW_REQ_RCVD);
476 }
477
478 out:
479
480 /* channel switch failed - we are idle */
481 if (ret)
482 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
483
484 return ret;
485 }
486
/*
 * Delayed work driving periodic TDLS channel-switch attempts.
 *
 * Runs after an active channel switch has finished or timed out. It
 * resets the state machine to IDLE and, if a switching peer is still
 * registered, sends a fresh channel-switch request for it. The work
 * re-arms itself one DTIM interval later; the comment below frames this
 * as a retry on failure, but NOTE(review): the requeue is unconditional,
 * so the next run fires either way - presumably to keep the switch
 * cycle going; confirm against the notification handler.
 */
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	/* re-issue the switch request with the saved peer parameters */
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			   msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}
534
535 int
536 iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
537 struct ieee80211_vif *vif,
538 struct ieee80211_sta *sta, u8 oper_class,
539 struct cfg80211_chan_def *chandef,
540 struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
541 {
542 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
543 struct iwl_mvm_sta *mvmsta;
544 unsigned int delay;
545 int ret;
546
547 mutex_lock(&mvm->mutex);
548
549 IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
550 sta->addr, chandef->chan->center_freq, chandef->width);
551
552 /* we only support a single peer for channel switching */
553 if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
554 IWL_DEBUG_TDLS(mvm,
555 "Existing peer. Can't start switch with %pM\n",
556 sta->addr);
557 ret = -EBUSY;
558 goto out;
559 }
560
561 ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
562 TDLS_SEND_CHAN_SW_REQ,
563 sta->addr, sta->tdls_initiator,
564 oper_class, chandef, 0, 0, 0,
565 tmpl_skb, ch_sw_tm_ie);
566 if (ret)
567 goto out;
568
569 /*
570 * Mark the peer as "in tdls switch" for this vif. We only allow a
571 * single such peer per vif.
572 */
573 mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
574 if (!mvm->tdls_cs.peer.skb) {
575 ret = -ENOMEM;
576 goto out;
577 }
578
579 mvmsta = iwl_mvm_sta_from_mac80211(sta);
580 mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
581 mvm->tdls_cs.peer.chandef = *chandef;
582 mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
583 mvm->tdls_cs.peer.op_class = oper_class;
584 mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
585
586 /*
587 * Wait for 2 DTIM periods before attempting the next switch. The next
588 * switch will be made sooner if the current one completes before that.
589 */
590 delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
591 vif->bss_conf.beacon_int);
592 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
593 msecs_to_jiffies(delay));
594
595 out:
596 mutex_unlock(&mvm->mutex);
597 return ret;
598 }
599
/*
 * Cancel an ongoing/pending TDLS channel switch with @sta.
 *
 * Clears the registered switching peer and frees the saved template
 * frame. If the phy is currently off-channel because of this peer, we
 * sleep one DTIM interval - deliberately *outside* the mutex - to let
 * it return to the base channel, since we cannot force it back.
 * Finally the delayed work is flushed so no stale switch attempt runs
 * after this returns.
 */
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	/* forget the peer and release its saved template frame */
	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}
651
652 void
653 iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
654 struct ieee80211_vif *vif,
655 struct ieee80211_tdls_ch_sw_params *params)
656 {
657 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
658 enum iwl_tdls_channel_switch_type type;
659 unsigned int delay;
660
661 mutex_lock(&mvm->mutex);
662
663 IWL_DEBUG_TDLS(mvm,
664 "Received TDLS ch switch action %d from %pM status %d\n",
665 params->action_code, params->sta->addr, params->status);
666
667 /*
668 * we got a non-zero status from a peer we were switching to - move to
669 * the idle state and retry again later
670 */
671 if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
672 params->status != 0 &&
673 mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
674 mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
675 struct ieee80211_sta *cur_sta;
676
677 /* make sure it's the same peer */
678 cur_sta = rcu_dereference_protected(
679 mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
680 lockdep_is_held(&mvm->mutex));
681 if (cur_sta == params->sta) {
682 iwl_mvm_tdls_update_cs_state(mvm,
683 IWL_MVM_TDLS_SW_IDLE);
684 goto retry;
685 }
686 }
687
688 type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
689 TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
690
691 iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
692 params->sta->tdls_initiator, 0,
693 params->chandef, params->timestamp,
694 params->switch_time,
695 params->switch_timeout,
696 params->tmpl_skb,
697 params->ch_sw_tm_ie);
698
699 retry:
700 /* register a timeout in case we don't succeed in switching */
701 delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
702 1024 / 1000;
703 mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
704 msecs_to_jiffies(delay));
705 mutex_unlock(&mvm->mutex);
706 }