1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 *
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 *****************************************************************************/
67
68 #include <linux/etherdevice.h>
69 #include <linux/ip.h>
70 #include <linux/fs.h>
71 #include <net/cfg80211.h>
72 #include <net/ipv6.h>
73 #include <net/tcp.h>
74 #include <net/addrconf.h>
75 #include "iwl-modparams.h"
76 #include "fw-api.h"
77 #include "mvm.h"
78
79 void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
80 struct ieee80211_vif *vif,
81 struct cfg80211_gtk_rekey_data *data)
82 {
83 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
84 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
85
86 if (iwlwifi_mod_params.swcrypto)
87 return;
88
89 mutex_lock(&mvm->mutex);
90
91 memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
92 memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
93 mvmvif->rekey_data.replay_ctr =
94 cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
95 mvmvif->rekey_data.valid = true;
96
97 mutex_unlock(&mvm->mutex);
98 }
99
100 #if IS_ENABLED(CONFIG_IPV6)
101 void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
102 struct ieee80211_vif *vif,
103 struct inet6_dev *idev)
104 {
105 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
106 struct inet6_ifaddr *ifa;
107 int idx = 0;
108
109 memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
110
111 read_lock_bh(&idev->lock);
112 list_for_each_entry(ifa, &idev->addr_list, if_list) {
113 mvmvif->target_ipv6_addrs[idx] = ifa->addr;
114 if (ifa->flags & IFA_F_TENTATIVE)
115 __set_bit(idx, mvmvif->tentative_addrs);
116 idx++;
117 if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
118 break;
119 }
120 read_unlock_bh(&idev->lock);
121
122 mvmvif->num_target_ipv6_addrs = idx;
123 }
124 #endif
125
126 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
127 struct ieee80211_vif *vif, int idx)
128 {
129 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
130
131 mvmvif->tx_key_idx = idx;
132 }
133
134 static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
135 {
136 int i;
137
138 for (i = 0; i < IWL_P1K_SIZE; i++)
139 out[i] = cpu_to_le16(p1k[i]);
140 }
141
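/*
 * Return the largest CCMP PN seen for this key/TID: start from the PN
 * mac80211 tracks for the default queue and compare it against the
 * per-queue PNs the driver keeps for the other RX queues.  The result
 * is what gets programmed into the D3 firmware as the replay counter.
 */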
142 static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
143 struct iwl_mvm_key_pn *ptk_pn,
144 struct ieee80211_key_seq *seq,
145 int tid, int queues)
146 {
147 const u8 *ret = seq->ccmp.pn;
148 int i;
149
150 /* get the PN from mac80211, used on the default queue */
151 ieee80211_get_key_rx_seq(key, tid, seq);
152
153 /* and use the internal data for the other queues */
154 for (i = 1; i < queues; i++) {
155 const u8 *tmp = ptk_pn->q[i].pn[tid];
156
157 if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
158 ret = tmp;
159 }
160
161 return ret;
162 }
163
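/*
 * Scratch data for the key iterator below: the TSC/RSC and TKIP
 * firmware commands being filled in, flags recording which of them
 * were actually used, whether keys should also be programmed into
 * the firmware, the next WEP key offset and an error flag.
 */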
164 struct wowlan_key_data {
165 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
166 struct iwl_wowlan_tkip_params_cmd *tkip;
167 bool error, use_rsc_tsc, use_tkip, configure_keys;
168 int wep_key_idx;
169 };
170
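/*
 * Key iterator callback (see iwl_mvm_wowlan_config_key_params()):
 * translate each mac80211 key into the material the WoWLAN firmware
 * needs -- WEP keys are sent directly in a WEP_KEY command, TKIP keys
 * get their TSC/RSC, phase-1 and MIC keys extracted, CCMP keys get
 * their TX/RX packet numbers extracted -- and, if configure_keys is
 * set, also upload the key itself (offset 0 is reserved for the
 * pairwise key).
 */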
171 static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
172 struct ieee80211_vif *vif,
173 struct ieee80211_sta *sta,
174 struct ieee80211_key_conf *key,
175 void *_data)
176 {
177 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
178 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
179 struct wowlan_key_data *data = _data;
180 struct aes_sc *aes_sc, *aes_tx_sc = NULL;
181 struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
182 struct iwl_p1k_cache *rx_p1ks;
183 u8 *rx_mic_key;
184 struct ieee80211_key_seq seq;
185 u32 cur_rx_iv32 = 0;
186 u16 p1k[IWL_P1K_SIZE];
187 int ret, i;
188
189 switch (key->cipher) {
190 case WLAN_CIPHER_SUITE_WEP40:
191 case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
192 struct {
193 struct iwl_mvm_wep_key_cmd wep_key_cmd;
194 struct iwl_mvm_wep_key wep_key;
195 } __packed wkc = {
196 .wep_key_cmd.mac_id_n_color =
197 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
198 mvmvif->color)),
199 .wep_key_cmd.num_keys = 1,
200 /* firmware sets STA_KEY_FLG_WEP_13BYTES */
201 .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
202 .wep_key.key_index = key->keyidx,
203 .wep_key.key_size = key->keylen,
204 };
205
206 /*
207 * This will fail -- the key functions don't support setting
208 * pairwise WEP keys. However, that's better than silently
209 * failing WoWLAN. Or maybe not?
210 */
211 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
212 break;
213
214 memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
215 if (key->keyidx == mvmvif->tx_key_idx) {
216 /* TX key must be at offset 0 */
217 wkc.wep_key.key_offset = 0;
218 } else {
219 /* others start at 1 */
220 data->wep_key_idx++;
221 wkc.wep_key.key_offset = data->wep_key_idx;
222 }
223
224 if (data->configure_keys) {
225 mutex_lock(&mvm->mutex);
226 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
227 sizeof(wkc), &wkc);
228 data->error = ret != 0;
229
230 mvm->ptk_ivlen = key->iv_len;
231 mvm->ptk_icvlen = key->icv_len;
232 mvm->gtk_ivlen = key->iv_len;
233 mvm->gtk_icvlen = key->icv_len;
234 mutex_unlock(&mvm->mutex);
235 }
236
237 /* don't upload key again */
238 return;
239 }
240 default:
241 data->error = true;
242 return;
243 case WLAN_CIPHER_SUITE_AES_CMAC:
244 /*
245 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
246 * but we also shouldn't abort suspend due to that. It does have
247 * support for the IGTK key renewal, but doesn't really use the
248 * IGTK for anything. This means we could spuriously wake up or
249 * be deauthenticated, but that was considered acceptable.
250 */
251 return;
252 case WLAN_CIPHER_SUITE_TKIP:
253 if (sta) {
254 u64 pn64;
255
256 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
257 tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
258
259 rx_p1ks = data->tkip->rx_uni;
260
261 pn64 = atomic64_read(&key->tx_pn);
262 tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
263 tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
264
265 ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
266 p1k);
267 iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
268
269 memcpy(data->tkip->mic_keys.tx,
270 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
271 IWL_MIC_KEY_SIZE);
272
273 rx_mic_key = data->tkip->mic_keys.rx_unicast;
274 } else {
275 tkip_sc =
276 data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
277 rx_p1ks = data->tkip->rx_multi;
278 rx_mic_key = data->tkip->mic_keys.rx_mcast;
279 }
280
281 /*
282 * For non-QoS this relies on the fact that both the uCode and
283 * mac80211 use TID 0 (as they must, to avoid replay attacks)
284 * for checking the IV in the frames.
285 */
286 for (i = 0; i < IWL_NUM_RSC; i++) {
287 ieee80211_get_key_rx_seq(key, i, &seq);
288 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
289 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
290 /* wrapping isn't allowed, AP must rekey */
291 if (seq.tkip.iv32 > cur_rx_iv32)
292 cur_rx_iv32 = seq.tkip.iv32;
293 }
294
295 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
296 cur_rx_iv32, p1k);
297 iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
298 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
299 cur_rx_iv32 + 1, p1k);
300 iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
301
302 memcpy(rx_mic_key,
303 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
304 IWL_MIC_KEY_SIZE);
305
306 data->use_tkip = true;
307 data->use_rsc_tsc = true;
308 break;
309 case WLAN_CIPHER_SUITE_CCMP:
310 if (sta) {
311 u64 pn64;
312
313 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
314 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
315
316 pn64 = atomic64_read(&key->tx_pn);
317 aes_tx_sc->pn = cpu_to_le64(pn64);
318 } else {
319 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
320 }
321
322 /*
323 * For non-QoS this relies on the fact that both the uCode and
324 * mac80211/our RX code use TID 0 for checking the PN.
325 */
326 if (sta && iwl_mvm_has_new_rx_api(mvm)) {
327 struct iwl_mvm_sta *mvmsta;
328 struct iwl_mvm_key_pn *ptk_pn;
329 const u8 *pn;
330
331 mvmsta = iwl_mvm_sta_from_mac80211(sta);
332 ptk_pn = rcu_dereference_protected(
333 mvmsta->ptk_pn[key->keyidx],
334 lockdep_is_held(&mvm->mutex));
335 if (WARN_ON(!ptk_pn))
336 break;
337
338 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
339 pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
340 mvm->trans->num_rx_queues);
341 aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
342 ((u64)pn[4] << 8) |
343 ((u64)pn[3] << 16) |
344 ((u64)pn[2] << 24) |
345 ((u64)pn[1] << 32) |
346 ((u64)pn[0] << 40));
347 }
348 } else {
349 for (i = 0; i < IWL_NUM_RSC; i++) {
350 u8 *pn = seq.ccmp.pn;
351
352 ieee80211_get_key_rx_seq(key, i, &seq);
353 aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
354 ((u64)pn[4] << 8) |
355 ((u64)pn[3] << 16) |
356 ((u64)pn[2] << 24) |
357 ((u64)pn[1] << 32) |
358 ((u64)pn[0] << 40));
359 }
360 }
361 data->use_rsc_tsc = true;
362 break;
363 }
364
365 if (data->configure_keys) {
366 mutex_lock(&mvm->mutex);
367 /*
368 * The D3 firmware hardcodes the key offset 0 as the key it
369 * uses to transmit packets to the AP, i.e. the PTK.
370 */
371 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
372 mvm->ptk_ivlen = key->iv_len;
373 mvm->ptk_icvlen = key->icv_len;
374 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
375 } else {
376 /*
377 * firmware only supports TSC/RSC for a single key,
378 * so if there are multiple, keep overwriting them
379 * with new ones -- this relies on mac80211 doing
380 * list_add_tail().
381 */
382 mvm->gtk_ivlen = key->iv_len;
383 mvm->gtk_icvlen = key->icv_len;
384 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
385 }
386 mutex_unlock(&mvm->mutex);
387 data->error = ret != 0;
388 }
389 }
390
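/*
 * Upload the user-configured wakeup patterns (mask + pattern pairs
 * from cfg80211) to the firmware as a single WOWLAN_PATTERNS command.
 */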
391 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
392 struct cfg80211_wowlan *wowlan)
393 {
394 struct iwl_wowlan_patterns_cmd *pattern_cmd;
395 struct iwl_host_cmd cmd = {
396 .id = WOWLAN_PATTERNS,
397 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
398 };
399 int i, err;
400
401 if (!wowlan->n_patterns)
402 return 0;
403
404 cmd.len[0] = sizeof(*pattern_cmd) +
405 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
406
407 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
408 if (!pattern_cmd)
409 return -ENOMEM;
410
411 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
412
413 for (i = 0; i < wowlan->n_patterns; i++) {
414 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
415
416 memcpy(&pattern_cmd->patterns[i].mask,
417 wowlan->patterns[i].mask, mask_len);
418 memcpy(&pattern_cmd->patterns[i].pattern,
419 wowlan->patterns[i].pattern,
420 wowlan->patterns[i].pattern_len);
421 pattern_cmd->patterns[i].mask_size = mask_len;
422 pattern_cmd->patterns[i].pattern_size =
423 wowlan->patterns[i].pattern_len;
424 }
425
426 cmd.data[0] = pattern_cmd;
427 err = iwl_mvm_send_cmd(mvm, &cmd);
428 kfree(pattern_cmd);
429 return err;
430 }
431
432 enum iwl_mvm_tcp_packet_type {
433 MVM_TCP_TX_SYN,
434 MVM_TCP_RX_SYNACK,
435 MVM_TCP_TX_DATA,
436 MVM_TCP_RX_ACK,
437 MVM_TCP_RX_WAKE,
438 MVM_TCP_TX_FIN,
439 };
440
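/*
 * Compute the TCP pseudo-header checksum (over length, source and
 * destination address) in the little-endian form the firmware expects.
 */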
441 static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
442 {
443 __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
444 return cpu_to_le16(be16_to_cpu((__force __be16)check));
445 }
446
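/*
 * Build one of the canned Ethernet/IP/TCP frames used for the TCP
 * remote wakeup feature.  TX packet types get full headers and a
 * valid IP checksum; RX packet types additionally get a match mask
 * (one bit per packet byte) so the firmware only compares the
 * relevant header fields and, for the wakeup packet, the wake data.
 */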
447 static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
448 struct cfg80211_wowlan_tcp *tcp,
449 void *_pkt, u8 *mask,
450 __le16 *pseudo_hdr_csum,
451 enum iwl_mvm_tcp_packet_type ptype)
452 {
453 struct {
454 struct ethhdr eth;
455 struct iphdr ip;
456 struct tcphdr tcp;
457 u8 data[];
458 } __packed *pkt = _pkt;
459 u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
460 int i;
461
462 pkt->eth.h_proto = cpu_to_be16(ETH_P_IP);
463 pkt->ip.version = 4;
464 pkt->ip.ihl = 5;
465 pkt->ip.protocol = IPPROTO_TCP;
466
467 switch (ptype) {
468 case MVM_TCP_TX_SYN:
469 case MVM_TCP_TX_DATA:
470 case MVM_TCP_TX_FIN:
471 memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
472 memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
473 pkt->ip.ttl = 128;
474 pkt->ip.saddr = tcp->src;
475 pkt->ip.daddr = tcp->dst;
476 pkt->tcp.source = cpu_to_be16(tcp->src_port);
477 pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
478 /* overwritten for TX SYN later */
479 pkt->tcp.doff = sizeof(struct tcphdr) / 4;
480 pkt->tcp.window = cpu_to_be16(65000);
481 break;
482 case MVM_TCP_RX_SYNACK:
483 case MVM_TCP_RX_ACK:
484 case MVM_TCP_RX_WAKE:
485 memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
486 memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
487 pkt->ip.saddr = tcp->dst;
488 pkt->ip.daddr = tcp->src;
489 pkt->tcp.source = cpu_to_be16(tcp->dst_port);
490 pkt->tcp.dest = cpu_to_be16(tcp->src_port);
491 break;
492 default:
493 WARN_ON(1);
494 return;
495 }
496
497 switch (ptype) {
498 case MVM_TCP_TX_SYN:
499 /* firmware assumes 8 option bytes - 8 NOPs for now */
500 memset(pkt->data, 0x01, 8);
501 ip_tot_len += 8;
502 pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
503 pkt->tcp.syn = 1;
504 break;
505 case MVM_TCP_TX_DATA:
506 ip_tot_len += tcp->payload_len;
507 memcpy(pkt->data, tcp->payload, tcp->payload_len);
508 pkt->tcp.psh = 1;
509 pkt->tcp.ack = 1;
510 break;
511 case MVM_TCP_TX_FIN:
512 pkt->tcp.fin = 1;
513 pkt->tcp.ack = 1;
514 break;
515 case MVM_TCP_RX_SYNACK:
516 pkt->tcp.syn = 1;
517 pkt->tcp.ack = 1;
518 break;
519 case MVM_TCP_RX_ACK:
520 pkt->tcp.ack = 1;
521 break;
522 case MVM_TCP_RX_WAKE:
523 ip_tot_len += tcp->wake_len;
524 pkt->tcp.psh = 1;
525 pkt->tcp.ack = 1;
526 memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
527 break;
528 }
529
530 switch (ptype) {
531 case MVM_TCP_TX_SYN:
532 case MVM_TCP_TX_DATA:
533 case MVM_TCP_TX_FIN:
534 pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
535 pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
536 break;
537 case MVM_TCP_RX_WAKE:
538 for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
539 u8 tmp = tcp->wake_mask[i];
540 mask[i + 6] |= tmp << 6;
541 if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
542 mask[i + 7] = tmp >> 2;
543 }
544 /* fall through for ethernet/IP/TCP headers mask */
545 case MVM_TCP_RX_SYNACK:
546 case MVM_TCP_RX_ACK:
547 mask[0] = 0xff; /* match ethernet */
548 /*
549 * match ethernet, ip.version, ip.ihl
550 * the ip.ihl half byte is really masked out by firmware
551 */
552 mask[1] = 0x7f;
553 mask[2] = 0x80; /* match ip.protocol */
554 mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
555 mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
556 mask[5] = 0x80; /* match tcp flags */
557 /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
558 break;
559 }
560
561 *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
562 pkt->ip.saddr, pkt->ip.daddr);
563 }
564
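/*
 * Program the firmware's TCP remote wakeup engine: build the SYN,
 * SYN/ACK, keepalive, ACK, wakeup and FIN packet templates, configure
 * the sequence number / token handling and send everything in one
 * REMOTE_WAKE_CONFIG_CMD.  Does nothing if no TCP wakeup was requested.
 */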
565 static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
566 struct ieee80211_vif *vif,
567 struct cfg80211_wowlan_tcp *tcp)
568 {
569 struct iwl_wowlan_remote_wake_config *cfg;
570 struct iwl_host_cmd cmd = {
571 .id = REMOTE_WAKE_CONFIG_CMD,
572 .len = { sizeof(*cfg), },
573 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
574 };
575 int ret;
576
577 if (!tcp)
578 return 0;
579
580 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
581 if (!cfg)
582 return -ENOMEM;
583 cmd.data[0] = cfg;
584
585 cfg->max_syn_retries = 10;
586 cfg->max_data_retries = 10;
587 cfg->tcp_syn_ack_timeout = 1; /* seconds */
588 cfg->tcp_ack_timeout = 1; /* seconds */
589
590 /* SYN (TX) */
591 iwl_mvm_build_tcp_packet(
592 vif, tcp, cfg->syn_tx.data, NULL,
593 &cfg->syn_tx.info.tcp_pseudo_header_checksum,
594 MVM_TCP_TX_SYN);
595 cfg->syn_tx.info.tcp_payload_length = 0;
596
597 /* SYN/ACK (RX) */
598 iwl_mvm_build_tcp_packet(
599 vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
600 &cfg->synack_rx.info.tcp_pseudo_header_checksum,
601 MVM_TCP_RX_SYNACK);
602 cfg->synack_rx.info.tcp_payload_length = 0;
603
604 /* KEEPALIVE/ACK (TX) */
605 iwl_mvm_build_tcp_packet(
606 vif, tcp, cfg->keepalive_tx.data, NULL,
607 &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
608 MVM_TCP_TX_DATA);
609 cfg->keepalive_tx.info.tcp_payload_length =
610 cpu_to_le16(tcp->payload_len);
611 cfg->sequence_number_offset = tcp->payload_seq.offset;
612 /* length must be 0..4, the field is little endian */
613 cfg->sequence_number_length = tcp->payload_seq.len;
614 cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
615 cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
616 if (tcp->payload_tok.len) {
617 cfg->token_offset = tcp->payload_tok.offset;
618 cfg->token_length = tcp->payload_tok.len;
619 cfg->num_tokens =
620 cpu_to_le16(tcp->tokens_size / tcp->payload_tok.len);
621 memcpy(cfg->tokens, tcp->payload_tok.token_stream,
622 tcp->tokens_size);
623 } else {
624 /* set tokens to max value to almost never run out */
625 cfg->num_tokens = cpu_to_le16(65535);
626 }
627
628 /* ACK (RX) */
629 iwl_mvm_build_tcp_packet(
630 vif, tcp, cfg->keepalive_ack_rx.data,
631 cfg->keepalive_ack_rx.rx_mask,
632 &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
633 MVM_TCP_RX_ACK);
634 cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
635
636 /* WAKEUP (RX) */
637 iwl_mvm_build_tcp_packet(
638 vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
639 &cfg->wake_rx.info.tcp_pseudo_header_checksum,
640 MVM_TCP_RX_WAKE);
641 cfg->wake_rx.info.tcp_payload_length =
642 cpu_to_le16(tcp->wake_len);
643
644 /* FIN */
645 iwl_mvm_build_tcp_packet(
646 vif, tcp, cfg->fin_tx.data, NULL,
647 &cfg->fin_tx.info.tcp_pseudo_header_checksum,
648 MVM_TCP_TX_FIN);
649 cfg->fin_tx.info.tcp_payload_length = 0;
650
651 ret = iwl_mvm_send_cmd(mvm, &cmd);
652 kfree(cfg);
653
654 return ret;
655 }
656
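/*
 * After switching to the (non-unified) D3 firmware image all context
 * is lost, so manually re-add the minimum needed to keep the
 * association alive: the PHY context, the MAC context, the binding,
 * the AP station and a quota allocation.
 */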
657 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
658 struct ieee80211_sta *ap_sta)
659 {
660 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
661 struct ieee80211_chanctx_conf *ctx;
662 u8 chains_static, chains_dynamic;
663 struct cfg80211_chan_def chandef;
664 int ret, i;
665 struct iwl_binding_cmd binding_cmd = {};
666 struct iwl_time_quota_cmd quota_cmd = {};
667 u32 status;
668 int size;
669
670 if (fw_has_capa(&mvm->fw->ucode_capa,
671 IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
672 size = sizeof(binding_cmd);
673 if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
674 !iwl_mvm_is_cdb_supported(mvm))
675 binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
676 else
677 binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
678 } else {
679 size = IWL_BINDING_CMD_SIZE_V1;
680 }
681
682 /* add back the PHY */
683 if (WARN_ON(!mvmvif->phy_ctxt))
684 return -EINVAL;
685
686 rcu_read_lock();
687 ctx = rcu_dereference(vif->chanctx_conf);
688 if (WARN_ON(!ctx)) {
689 rcu_read_unlock();
690 return -EINVAL;
691 }
692 chandef = ctx->def;
693 chains_static = ctx->rx_chains_static;
694 chains_dynamic = ctx->rx_chains_dynamic;
695 rcu_read_unlock();
696
697 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
698 chains_static, chains_dynamic);
699 if (ret)
700 return ret;
701
702 /* add back the MAC */
703 mvmvif->uploaded = false;
704
705 if (WARN_ON(!vif->bss_conf.assoc))
706 return -EINVAL;
707
708 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
709 if (ret)
710 return ret;
711
712 /* add back binding - XXX refactor? */
713 binding_cmd.id_and_color =
714 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
715 mvmvif->phy_ctxt->color));
716 binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
717 binding_cmd.phy =
718 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
719 mvmvif->phy_ctxt->color));
720 binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
721 mvmvif->color));
722 for (i = 1; i < MAX_MACS_IN_BINDING; i++)
723 binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
724
725 status = 0;
726 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
727 size, &binding_cmd, &status);
728 if (ret) {
729 IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
730 return ret;
731 }
732
733 if (status) {
734 IWL_ERR(mvm, "Binding command failed: %u\n", status);
735 return -EIO;
736 }
737
738 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
739 if (ret)
740 return ret;
741 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
742
743 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
744 if (ret)
745 return ret;
746
747 /* and some quota */
748 quota_cmd.quotas[0].id_and_color =
749 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
750 mvmvif->phy_ctxt->color));
751 quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
752 quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
753
754 for (i = 1; i < MAX_BINDINGS; i++)
755 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
756
757 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
758 sizeof(quota_cmd), &quota_cmd);
759 if (ret)
760 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
761
762 if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
763 IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
764
765 return 0;
766 }
767
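/*
 * Ask the firmware for the non-QoS TX sequence counter of this MAC.
 * The firmware reports the next value it would use, so convert it to
 * the last-used value before returning it (or a negative error code).
 */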
768 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
769 struct ieee80211_vif *vif)
770 {
771 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
772 struct iwl_nonqos_seq_query_cmd query_cmd = {
773 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
774 .mac_id_n_color =
775 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
776 mvmvif->color)),
777 };
778 struct iwl_host_cmd cmd = {
779 .id = NON_QOS_TX_COUNTER_CMD,
780 .flags = CMD_WANT_SKB,
781 };
782 int err;
783 u32 size;
784
785 cmd.data[0] = &query_cmd;
786 cmd.len[0] = sizeof(query_cmd);
787
788 err = iwl_mvm_send_cmd(mvm, &cmd);
789 if (err)
790 return err;
791
792 size = iwl_rx_packet_payload_len(cmd.resp_pkt);
793 if (size < sizeof(__le16)) {
794 err = -EINVAL;
795 } else {
796 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
797 /* firmware returns next, not last-used seqno */
798 err = (u16) (err - 0x10);
799 }
800
801 iwl_free_resp(&cmd);
802 return err;
803 }
804
805 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
806 {
807 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
808 struct iwl_nonqos_seq_query_cmd query_cmd = {
809 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
810 .mac_id_n_color =
811 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
812 mvmvif->color)),
813 .value = cpu_to_le16(mvmvif->seqno),
814 };
815
816 /* return if called during restart, not resume from D3 */
817 if (!mvmvif->seqno_valid)
818 return;
819
820 mvmvif->seqno_valid = false;
821
822 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
823 sizeof(query_cmd), &query_cmd))
824 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
825 }
826
827 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
828 {
829 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
830
831 iwl_mvm_stop_device(mvm);
832 /*
833 * Set the HW restart bit -- this is mostly true as we're
834 * going to load new firmware and reprogram that, though
835 * the reprogramming is going to be manual to avoid adding
836 * all the MACs that aren't supported.
837 * We don't have to clear up everything though because the
838 * reprogramming is manual. When we resume, we'll actually
839 * go through a proper restart sequence again to switch
840 * back to the runtime firmware image.
841 */
842 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
843
844 /* the fw is reset, so all the keys are cleared */
845 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
846
847 mvm->ptk_ivlen = 0;
848 mvm->ptk_icvlen = 0;
849 mvm->gtk_ivlen = 0;
850 mvm->gtk_icvlen = 0;
851
852 return iwl_mvm_load_d3_fw(mvm);
853 }
854
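/*
 * Fill the WOWLAN_CONFIGURATION command: carry over the current
 * non-QoS and QoS sequence counters and translate the cfg80211
 * wakeup triggers (disconnect, magic packet, rekey failure, patterns,
 * ...) into the firmware's wakeup filter bits.
 */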
855 static int
856 iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
857 struct cfg80211_wowlan *wowlan,
858 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
859 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
860 struct ieee80211_sta *ap_sta)
861 {
862 int ret;
863 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
864
865 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
866
867 wowlan_config_cmd->is_11n_connection =
868 ap_sta->ht_cap.ht_supported;
869 wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
870 ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
871
872 /* Query the last used seqno and set it */
873 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
874 if (ret < 0)
875 return ret;
876
877 wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
878
879 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
880
881 if (wowlan->disconnect)
882 wowlan_config_cmd->wakeup_filter |=
883 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
884 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
885 if (wowlan->magic_pkt)
886 wowlan_config_cmd->wakeup_filter |=
887 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
888 if (wowlan->gtk_rekey_failure)
889 wowlan_config_cmd->wakeup_filter |=
890 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
891 if (wowlan->eap_identity_req)
892 wowlan_config_cmd->wakeup_filter |=
893 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
894 if (wowlan->four_way_handshake)
895 wowlan_config_cmd->wakeup_filter |=
896 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
897 if (wowlan->n_patterns)
898 wowlan_config_cmd->wakeup_filter |=
899 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
900
901 if (wowlan->rfkill_release)
902 wowlan_config_cmd->wakeup_filter |=
903 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
904
905 if (wowlan->tcp) {
906 /*
907 * Set the "link change" (really "link lost") flag as well
908 * since that implies losing the TCP connection.
909 */
910 wowlan_config_cmd->wakeup_filter |=
911 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
912 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
913 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
914 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
915 }
916
917 return 0;
918 }
919
920 static void
921 iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm,
922 struct ieee80211_vif *vif,
923 void (*iter)(struct ieee80211_hw *hw,
924 struct ieee80211_vif *vif,
925 struct ieee80211_sta *sta,
926 struct ieee80211_key_conf *key,
927 void *data),
928 void *data)
929 {
930 struct ieee80211_sta *ap_sta;
931
932 rcu_read_lock();
933
934 ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
935 if (IS_ERR_OR_NULL(ap_sta))
936 goto out;
937
938 ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
939 out:
940 rcu_read_unlock();
941 }
942
943 int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
944 struct ieee80211_vif *vif,
945 bool d0i3,
946 u32 cmd_flags)
947 {
948 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
949 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
950 struct wowlan_key_data key_data = {
951 .configure_keys = !d0i3,
952 .use_rsc_tsc = false,
953 .tkip = &tkip_cmd,
954 .use_tkip = false,
955 };
956 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
957 int ret;
958
959 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
960 if (!key_data.rsc_tsc)
961 return -ENOMEM;
962
963 /*
964 * if we have to configure keys, call ieee80211_iter_keys(),
965 * as we need non-atomic context in order to take the
966 * required locks.
967 * for the d0i3 we can't use ieee80211_iter_keys(), as
968 * taking (almost) any mutex might result in deadlock.
969 */
970 if (!d0i3) {
971 /*
972 * Note that currently we don't propagate cmd_flags
973 * to the iterator. In case of key_data.configure_keys,
974 * all the configured commands are SYNC, and
975 * iwl_mvm_wowlan_program_keys() will take care of
976 * locking/unlocking mvm->mutex.
977 */
978 ieee80211_iter_keys(mvm->hw, vif,
979 iwl_mvm_wowlan_program_keys,
980 &key_data);
981 } else {
982 iwl_mvm_iter_d0i3_ap_keys(mvm, vif,
983 iwl_mvm_wowlan_program_keys,
984 &key_data);
985 }
986
987 if (key_data.error) {
988 ret = -EIO;
989 goto out;
990 }
991
992 if (key_data.use_rsc_tsc) {
993 ret = iwl_mvm_send_cmd_pdu(mvm,
994 WOWLAN_TSC_RSC_PARAM, cmd_flags,
995 sizeof(*key_data.rsc_tsc),
996 key_data.rsc_tsc);
997 if (ret)
998 goto out;
999 }
1000
1001 if (key_data.use_tkip &&
1002 !fw_has_api(&mvm->fw->ucode_capa,
1003 IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
1004 ret = iwl_mvm_send_cmd_pdu(mvm,
1005 WOWLAN_TKIP_PARAM,
1006 cmd_flags, sizeof(tkip_cmd),
1007 &tkip_cmd);
1008 if (ret)
1009 goto out;
1010 }
1011
1012 /* configure rekey data only if offloaded rekey is supported (d3) */
1013 if (mvmvif->rekey_data.valid && !d0i3) {
1014 memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
1015 memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
1016 NL80211_KCK_LEN);
1017 kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
1018 memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
1019 NL80211_KEK_LEN);
1020 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
1021 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
1022
1023 ret = iwl_mvm_send_cmd_pdu(mvm,
1024 WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
1025 sizeof(kek_kck_cmd),
1026 &kek_kck_cmd);
1027 if (ret)
1028 goto out;
1029 }
1030 ret = 0;
1031 out:
1032 kfree(key_data.rsc_tsc);
1033 return ret;
1034 }
1035
1036 static int
1037 iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
1038 struct cfg80211_wowlan *wowlan,
1039 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
1040 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
1041 struct ieee80211_sta *ap_sta)
1042 {
1043 int ret;
1044 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1045 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1046
1047 if (!unified_image) {
1048 ret = iwl_mvm_switch_to_d3(mvm);
1049 if (ret)
1050 return ret;
1051
1052 ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
1053 if (ret)
1054 return ret;
1055 }
1056
1057 if (!iwlwifi_mod_params.swcrypto) {
1058 /*
1059 * This needs to be unlocked due to lock ordering
1060 * constraints. Since we're in the suspend path
1061 * that isn't really a problem though.
1062 */
1063 mutex_unlock(&mvm->mutex);
1064 ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false,
1065 CMD_ASYNC);
1066 mutex_lock(&mvm->mutex);
1067 if (ret)
1068 return ret;
1069 }
1070
1071 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
1072 sizeof(*wowlan_config_cmd),
1073 wowlan_config_cmd);
1074 if (ret)
1075 return ret;
1076
1077 ret = iwl_mvm_send_patterns(mvm, wowlan);
1078 if (ret)
1079 return ret;
1080
1081 ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
1082 if (ret)
1083 return ret;
1084
1085 ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
1086 return ret;
1087 }
1088
1089 static int
1090 iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
1091 struct cfg80211_wowlan *wowlan,
1092 struct cfg80211_sched_scan_request *nd_config,
1093 struct ieee80211_vif *vif)
1094 {
1095 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
1096 int ret;
1097 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1098 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1099
1100 if (!unified_image) {
1101 ret = iwl_mvm_switch_to_d3(mvm);
1102 if (ret)
1103 return ret;
1104 } else {
1105 /* In theory, we wouldn't have to stop a running sched
1106 * scan in order to start another one (for
1107 * net-detect). But in practice this doesn't seem to
1108 * work properly, so stop any running sched_scan now.
1109 */
1110 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1111 if (ret)
1112 return ret;
1113 }
1114
1115 /* rfkill release can be either for wowlan or netdetect */
1116 if (wowlan->rfkill_release)
1117 wowlan_config_cmd.wakeup_filter |=
1118 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
1119
1120 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
1121 sizeof(wowlan_config_cmd),
1122 &wowlan_config_cmd);
1123 if (ret)
1124 return ret;
1125
1126 ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
1127 IWL_MVM_SCAN_NETDETECT);
1128 if (ret)
1129 return ret;
1130
1131 if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
1132 return -EBUSY;
1133
1134 /* save the sched scan matchsets... */
1135 if (nd_config->n_match_sets) {
1136 mvm->nd_match_sets = kmemdup(nd_config->match_sets,
1137 sizeof(*nd_config->match_sets) *
1138 nd_config->n_match_sets,
1139 GFP_KERNEL);
1140 if (mvm->nd_match_sets)
1141 mvm->n_nd_match_sets = nd_config->n_match_sets;
1142 }
1143
1144 /* ...and the sched scan channels for later reporting */
1145 mvm->nd_channels = kmemdup(nd_config->channels,
1146 sizeof(*nd_config->channels) *
1147 nd_config->n_channels,
1148 GFP_KERNEL);
1149 if (mvm->nd_channels)
1150 mvm->n_nd_channels = nd_config->n_channels;
1151
1152 return 0;
1153 }
1154
1155 static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
1156 {
1157 kfree(mvm->nd_match_sets);
1158 mvm->nd_match_sets = NULL;
1159 mvm->n_nd_match_sets = 0;
1160 kfree(mvm->nd_channels);
1161 mvm->nd_channels = NULL;
1162 mvm->n_nd_channels = 0;
1163 }
1164
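/*
 * Core of the WoWLAN suspend flow: find the associated station
 * interface, program either the net-detect or the regular wowlan
 * configuration, update power settings and finally send
 * D3_CONFIG_CMD, which switches the firmware into its D3 state.
 * Must be called with wowlan triggers set (mac80211 guarantees that,
 * except in the D3 test path).
 */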
1165 static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1166 struct cfg80211_wowlan *wowlan,
1167 bool test)
1168 {
1169 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1170 struct ieee80211_vif *vif = NULL;
1171 struct iwl_mvm_vif *mvmvif = NULL;
1172 struct ieee80211_sta *ap_sta = NULL;
1173 struct iwl_d3_manager_config d3_cfg_cmd_data = {
1174 /*
1175 * Program the minimum sleep time to 10 seconds, as many
1176 * platforms have issues processing a wakeup signal while
1177 * still being in the process of suspending.
1178 */
1179 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
1180 };
1181 struct iwl_host_cmd d3_cfg_cmd = {
1182 .id = D3_CONFIG_CMD,
1183 .flags = CMD_WANT_SKB,
1184 .data[0] = &d3_cfg_cmd_data,
1185 .len[0] = sizeof(d3_cfg_cmd_data),
1186 };
1187 int ret;
1188 int len __maybe_unused;
1189 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1190 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1191
1192 if (!wowlan) {
1193 /*
1194 * mac80211 shouldn't get here, but for D3 test
1195 * it doesn't warrant a warning
1196 */
1197 WARN_ON(!test);
1198 return -EINVAL;
1199 }
1200
1201 mutex_lock(&mvm->mutex);
1202
1203 vif = iwl_mvm_get_bss_vif(mvm);
1204 if (IS_ERR_OR_NULL(vif)) {
1205 ret = 1;
1206 goto out_noreset;
1207 }
1208
1209 mvmvif = iwl_mvm_vif_from_mac80211(vif);
1210
1211 if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
1212 /* if we're not associated, this must be netdetect */
1213 if (!wowlan->nd_config) {
1214 ret = 1;
1215 goto out_noreset;
1216 }
1217
1218 ret = iwl_mvm_netdetect_config(
1219 mvm, wowlan, wowlan->nd_config, vif);
1220 if (ret)
1221 goto out;
1222
1223 mvm->net_detect = true;
1224 } else {
1225 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
1226
1227 ap_sta = rcu_dereference_protected(
1228 mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
1229 lockdep_is_held(&mvm->mutex));
1230 if (IS_ERR_OR_NULL(ap_sta)) {
1231 ret = -EINVAL;
1232 goto out_noreset;
1233 }
1234
1235 ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1236 vif, mvmvif, ap_sta);
1237 if (ret)
1238 goto out_noreset;
1239 ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1240 vif, mvmvif, ap_sta);
1241 if (ret)
1242 goto out;
1243
1244 mvm->net_detect = false;
1245 }
1246
1247 ret = iwl_mvm_power_update_device(mvm);
1248 if (ret)
1249 goto out;
1250
1251 ret = iwl_mvm_power_update_mac(mvm);
1252 if (ret)
1253 goto out;
1254
1255 #ifdef CONFIG_IWLWIFI_DEBUGFS
1256 if (mvm->d3_wake_sysassert)
1257 d3_cfg_cmd_data.wakeup_flags |=
1258 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
1259 #endif
1260
1261 /* must be last -- this switches firmware state */
1262 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
1263 if (ret)
1264 goto out;
1265 #ifdef CONFIG_IWLWIFI_DEBUGFS
1266 len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
1267 if (len >= sizeof(u32)) {
1268 mvm->d3_test_pme_ptr =
1269 le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
1270 }
1271 #endif
1272 iwl_free_resp(&d3_cfg_cmd);
1273
1274 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1275
1276 iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
1277 out:
1278 if (ret < 0) {
1279 iwl_mvm_free_nd(mvm);
1280
1281 if (!unified_image) {
1282 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1283 if (mvm->fw_restart > 0) {
1284 mvm->fw_restart--;
1285 ieee80211_restart_hw(mvm->hw);
1286 }
1287 }
1288 }
1289 out_noreset:
1290 mutex_unlock(&mvm->mutex);
1291
1292 return ret;
1293 }
1294
1295 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
1296 {
1297 struct iwl_notification_wait wait_d3;
1298 static const u16 d3_notif[] = { D3_CONFIG_CMD };
1299 int ret;
1300
1301 iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
1302 d3_notif, ARRAY_SIZE(d3_notif),
1303 NULL, NULL);
1304
1305 ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
1306 if (ret)
1307 goto remove_notif;
1308
1309 ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
1310 WARN_ON_ONCE(ret);
1311 return ret;
1312
1313 remove_notif:
1314 iwl_remove_notification(&mvm->notif_wait, &wait_d3);
1315 return ret;
1316 }
1317
1318 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1319 {
1320 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1321 struct iwl_trans *trans = mvm->trans;
1322 int ret;
1323
1324 /* make sure the d0i3 exit work is not pending */
1325 flush_work(&mvm->d0i3_exit_work);
1326
1327 ret = iwl_trans_suspend(trans);
1328 if (ret)
1329 return ret;
1330
1331 if (wowlan->any) {
1332 trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
1333
1334 if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
1335 ret = iwl_mvm_enter_d0i3_sync(mvm);
1336
1337 if (ret)
1338 return ret;
1339 }
1340
1341 mutex_lock(&mvm->d0i3_suspend_mutex);
1342 __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1343 mutex_unlock(&mvm->d0i3_suspend_mutex);
1344
1345 iwl_trans_d3_suspend(trans, false, false);
1346
1347 return 0;
1348 }
1349
1350 trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
1351
1352 return __iwl_mvm_suspend(hw, wowlan, false);
1353 }
1354
1355 /* converted data from the different status responses */
1356 struct iwl_wowlan_status_data {
1357 u16 pattern_number;
1358 u16 qos_seq_ctr[8];
1359 u32 wakeup_reasons;
1360 u32 wake_packet_length;
1361 u32 wake_packet_bufsize;
1362 const u8 *wake_packet;
1363 };
1364
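/*
 * Translate the firmware wakeup reasons into a cfg80211_wowlan_wakeup
 * report.  If a wake packet was retained, reconstruct it: for data
 * frames strip IV/ICV/FCS and convert to 802.3, otherwise report the
 * (possibly truncated) 802.11 frame as-is.
 */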
1365 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1366 struct ieee80211_vif *vif,
1367 struct iwl_wowlan_status_data *status)
1368 {
1369 struct sk_buff *pkt = NULL;
1370 struct cfg80211_wowlan_wakeup wakeup = {
1371 .pattern_idx = -1,
1372 };
1373 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1374 u32 reasons = status->wakeup_reasons;
1375
1376 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1377 wakeup_report = NULL;
1378 goto report;
1379 }
1380
1381 pm_wakeup_event(mvm->dev, 0);
1382
1383 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
1384 wakeup.magic_pkt = true;
1385
1386 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
1387 wakeup.pattern_idx =
1388 status->pattern_number;
1389
1390 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1391 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
1392 wakeup.disconnect = true;
1393
1394 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
1395 wakeup.gtk_rekey_failure = true;
1396
1397 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1398 wakeup.rfkill_release = true;
1399
1400 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
1401 wakeup.eap_identity_req = true;
1402
1403 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
1404 wakeup.four_way_handshake = true;
1405
1406 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1407 wakeup.tcp_connlost = true;
1408
1409 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1410 wakeup.tcp_nomoretokens = true;
1411
1412 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1413 wakeup.tcp_match = true;
1414
1415 if (status->wake_packet_bufsize) {
1416 int pktsize = status->wake_packet_bufsize;
1417 int pktlen = status->wake_packet_length;
1418 const u8 *pktdata = status->wake_packet;
1419 struct ieee80211_hdr *hdr = (void *)pktdata;
1420 int truncated = pktlen - pktsize;
1421
1422 /* this would be a firmware bug */
1423 if (WARN_ON_ONCE(truncated < 0))
1424 truncated = 0;
1425
1426 if (ieee80211_is_data(hdr->frame_control)) {
1427 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
1428 int ivlen = 0, icvlen = 4; /* also FCS */
1429
1430 pkt = alloc_skb(pktsize, GFP_KERNEL);
1431 if (!pkt)
1432 goto report;
1433
1434 skb_put_data(pkt, pktdata, hdrlen);
1435 pktdata += hdrlen;
1436 pktsize -= hdrlen;
1437
1438 if (ieee80211_has_protected(hdr->frame_control)) {
1439 /*
1440 * This is unlocked and using gtk_i(c)vlen,
1441 * but since everything is under RTNL still
1442 * that's not really a problem - changing
1443 * it would be difficult.
1444 */
1445 if (is_multicast_ether_addr(hdr->addr1)) {
1446 ivlen = mvm->gtk_ivlen;
1447 icvlen += mvm->gtk_icvlen;
1448 } else {
1449 ivlen = mvm->ptk_ivlen;
1450 icvlen += mvm->ptk_icvlen;
1451 }
1452 }
1453
1454 /* if truncated, FCS/ICV is (partially) gone */
1455 if (truncated >= icvlen) {
1456 truncated -= icvlen;
1457 icvlen = 0;
1458 } else {
1459 icvlen -= truncated;
1460 truncated = 0;
1461 }
1462
1463 pktsize -= ivlen + icvlen;
1464 pktdata += ivlen;
1465
1466 skb_put_data(pkt, pktdata, pktsize);
1467
1468 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
1469 goto report;
1470 wakeup.packet = pkt->data;
1471 wakeup.packet_present_len = pkt->len;
1472 wakeup.packet_len = pkt->len - truncated;
1473 wakeup.packet_80211 = false;
1474 } else {
1475 int fcslen = 4;
1476
1477 if (truncated >= 4) {
1478 truncated -= 4;
1479 fcslen = 0;
1480 } else {
1481 fcslen -= truncated;
1482 truncated = 0;
1483 }
1484 pktsize -= fcslen;
1485 wakeup.packet = status->wake_packet;
1486 wakeup.packet_present_len = pktsize;
1487 wakeup.packet_len = pktlen - truncated;
1488 wakeup.packet_80211 = true;
1489 }
1490 }
1491
1492 report:
1493 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1494 kfree_skb(pkt);
1495 }
1496
1497 static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
1498 struct ieee80211_key_seq *seq)
1499 {
1500 u64 pn;
1501
1502 pn = le64_to_cpu(sc->pn);
1503 seq->ccmp.pn[0] = pn >> 40;
1504 seq->ccmp.pn[1] = pn >> 32;
1505 seq->ccmp.pn[2] = pn >> 24;
1506 seq->ccmp.pn[3] = pn >> 16;
1507 seq->ccmp.pn[4] = pn >> 8;
1508 seq->ccmp.pn[5] = pn;
1509 }
1510
1511 static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
1512 struct ieee80211_key_seq *seq)
1513 {
1514 seq->tkip.iv32 = le32_to_cpu(sc->iv32);
1515 seq->tkip.iv16 = le16_to_cpu(sc->iv16);
1516 }
1517
1518 static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
1519 struct ieee80211_sta *sta,
1520 struct ieee80211_key_conf *key)
1521 {
1522 int tid;
1523
1524 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1525
1526 if (sta && iwl_mvm_has_new_rx_api(mvm)) {
1527 struct iwl_mvm_sta *mvmsta;
1528 struct iwl_mvm_key_pn *ptk_pn;
1529
1530 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1531
1532 ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
1533 lockdep_is_held(&mvm->mutex));
1534 if (WARN_ON(!ptk_pn))
1535 return;
1536
1537 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1538 struct ieee80211_key_seq seq = {};
1539 int i;
1540
1541 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1542 ieee80211_set_key_rx_seq(key, tid, &seq);
1543 for (i = 1; i < mvm->trans->num_rx_queues; i++)
1544 memcpy(ptk_pn->q[i].pn[tid],
1545 seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
1546 }
1547 } else {
1548 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1549 struct ieee80211_key_seq seq = {};
1550
1551 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1552 ieee80211_set_key_rx_seq(key, tid, &seq);
1553 }
1554 }
1555 }
1556
1557 static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1558 struct ieee80211_key_conf *key)
1559 {
1560 int tid;
1561
1562 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1563
1564 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1565 struct ieee80211_key_seq seq = {};
1566
1567 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
1568 ieee80211_set_key_rx_seq(key, tid, &seq);
1569 }
1570 }
1571
1572 static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
1573 struct ieee80211_key_conf *key,
1574 struct iwl_wowlan_status *status)
1575 {
1576 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
1577
1578 switch (key->cipher) {
1579 case WLAN_CIPHER_SUITE_CCMP:
1580 iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
1581 break;
1582 case WLAN_CIPHER_SUITE_TKIP:
1583 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
1584 break;
1585 default:
1586 WARN_ON(1);
1587 }
1588 }
1589
1590 struct iwl_mvm_d3_gtk_iter_data {
1591 struct iwl_mvm *mvm;
1592 struct iwl_wowlan_status *status;
1593 void *last_gtk;
1594 u32 cipher;
1595 bool find_phase, unhandled_cipher;
1596 int num_keys;
1597 };
1598
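/*
 * Key iterator used after resume, run in two phases (see
 * iwl_mvm_setup_connection_keep()): the first pass (find_phase) only
 * records the last configured GTK and checks for ciphers the D3
 * firmware can't handle; the second pass updates the TX/RX counters
 * of the pairwise key and of the GTK that was in use, and removes
 * stale GTKs if the firmware reported a rekey.
 */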
1599 static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
1600 struct ieee80211_vif *vif,
1601 struct ieee80211_sta *sta,
1602 struct ieee80211_key_conf *key,
1603 void *_data)
1604 {
1605 struct iwl_mvm_d3_gtk_iter_data *data = _data;
1606
1607 if (data->unhandled_cipher)
1608 return;
1609
1610 switch (key->cipher) {
1611 case WLAN_CIPHER_SUITE_WEP40:
1612 case WLAN_CIPHER_SUITE_WEP104:
1613 /* ignore WEP completely, nothing to do */
1614 return;
1615 case WLAN_CIPHER_SUITE_CCMP:
1616 case WLAN_CIPHER_SUITE_TKIP:
1617 /* we support these */
1618 break;
1619 default:
1620 /* everything else (even CMAC for MFP) - disconnect from AP */
1621 data->unhandled_cipher = true;
1622 return;
1623 }
1624
1625 data->num_keys++;
1626
1627 /*
1628 * pairwise key - update sequence counters only;
1629 * note that this assumes no TDLS sessions are active
1630 */
1631 if (sta) {
1632 struct ieee80211_key_seq seq = {};
1633 union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
1634
1635 if (data->find_phase)
1636 return;
1637
1638 switch (key->cipher) {
1639 case WLAN_CIPHER_SUITE_CCMP:
1640 iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
1641 sta, key);
1642 atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
1643 break;
1644 case WLAN_CIPHER_SUITE_TKIP:
1645 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
1646 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
1647 atomic64_set(&key->tx_pn,
1648 (u64)seq.tkip.iv16 |
1649 ((u64)seq.tkip.iv32 << 16));
1650 break;
1651 }
1652
1653 /* that's it for this key */
1654 return;
1655 }
1656
1657 if (data->find_phase) {
1658 data->last_gtk = key;
1659 data->cipher = key->cipher;
1660 return;
1661 }
1662
1663 if (data->status->num_of_gtk_rekeys)
1664 ieee80211_remove_key(key);
1665 else if (data->last_gtk == key)
1666 iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
1667 }
1668
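/*
 * Decide whether the connection can be kept after resume and bring
 * the key state back in sync with what the firmware did while we
 * were asleep: update replay counters, install a rekeyed GTK if one
 * was negotiated and pass the new replay counter to userspace via
 * ieee80211_gtk_rekey_notify().  Returns false if we should
 * disconnect instead.
 */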
1669 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1670 struct ieee80211_vif *vif,
1671 struct iwl_wowlan_status *status)
1672 {
1673 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1674 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1675 .mvm = mvm,
1676 .status = status,
1677 };
1678 u32 disconnection_reasons =
1679 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1680 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
1681
1682 if (!status || !vif->bss_conf.bssid)
1683 return false;
1684
1685 if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
1686 return false;
1687
1688 /* find last GTK that we used initially, if any */
1689 gtkdata.find_phase = true;
1690 ieee80211_iter_keys(mvm->hw, vif,
1691 iwl_mvm_d3_update_keys, &gtkdata);
1692 /* not trying to keep connections with MFP/unhandled ciphers */
1693 if (gtkdata.unhandled_cipher)
1694 return false;
1695 if (!gtkdata.num_keys)
1696 goto out;
1697 if (!gtkdata.last_gtk)
1698 return false;
1699
1700 /*
1701 * invalidate all other GTKs that might still exist and update
1702 * the one that we used
1703 */
1704 gtkdata.find_phase = false;
1705 ieee80211_iter_keys(mvm->hw, vif,
1706 iwl_mvm_d3_update_keys, &gtkdata);
1707
1708 if (status->num_of_gtk_rekeys) {
1709 struct ieee80211_key_conf *key;
1710 struct {
1711 struct ieee80211_key_conf conf;
1712 u8 key[32];
1713 } conf = {
1714 .conf.cipher = gtkdata.cipher,
1715 .conf.keyidx = status->gtk.key_index,
1716 };
1717
1718 switch (gtkdata.cipher) {
1719 case WLAN_CIPHER_SUITE_CCMP:
1720 conf.conf.keylen = WLAN_KEY_LEN_CCMP;
1721 memcpy(conf.conf.key, status->gtk.decrypt_key,
1722 WLAN_KEY_LEN_CCMP);
1723 break;
1724 case WLAN_CIPHER_SUITE_TKIP:
1725 conf.conf.keylen = WLAN_KEY_LEN_TKIP;
1726 memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
1727 /* leave TX MIC key zeroed, we don't use it anyway */
1728 memcpy(conf.conf.key +
1729 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
1730 status->gtk.tkip_mic_key, 8);
1731 break;
1732 }
1733
1734 key = ieee80211_gtk_rekey_add(vif, &conf.conf);
1735 if (IS_ERR(key))
1736 return false;
1737 iwl_mvm_set_key_rx_seq(mvm, key, status);
1738 }
1739
1740 if (status->num_of_gtk_rekeys) {
1741 __be64 replay_ctr =
1742 cpu_to_be64(le64_to_cpu(status->replay_ctr));
1743 ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
1744 (void *)&replay_ctr, GFP_KERNEL);
1745 }
1746
1747 out:
1748 mvmvif->seqno_valid = true;
1749 /* +0x10 because the set API expects next-to-use, not last-used */
1750 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
1751
1752 return true;
1753 }
1754
1755 static struct iwl_wowlan_status *
1756 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1757 {
1758 u32 base = mvm->error_event_table[0];
1759 struct error_table_start {
1760 /* cf. struct iwl_error_event_table */
1761 u32 valid;
1762 u32 error_id;
1763 } err_info;
1764 struct iwl_host_cmd cmd = {
1765 .id = WOWLAN_GET_STATUSES,
1766 .flags = CMD_WANT_SKB,
1767 };
1768 struct iwl_wowlan_status *status, *fw_status;
1769 int ret, len, status_size;
1770
1771 iwl_trans_read_mem_bytes(mvm->trans, base,
1772 &err_info, sizeof(err_info));
1773
1774 if (err_info.valid) {
1775 IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
1776 err_info.valid, err_info.error_id);
1777 if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1778 struct cfg80211_wowlan_wakeup wakeup = {
1779 .rfkill_release = true,
1780 };
1781 ieee80211_report_wowlan_wakeup(vif, &wakeup,
1782 GFP_KERNEL);
1783 }
1784 return ERR_PTR(-EIO);
1785 }
1786
1787 /* only for tracing for now */
1788 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1789 if (ret)
1790 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1791
1792 ret = iwl_mvm_send_cmd(mvm, &cmd);
1793 if (ret) {
1794 IWL_ERR(mvm, "failed to query status (%d)\n", ret);
1795 return ERR_PTR(ret);
1796 }
1797
1798 status_size = sizeof(*fw_status);
1799
1800 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1801 if (len < status_size) {
1802 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1803 fw_status = ERR_PTR(-EIO);
1804 goto out_free_resp;
1805 }
1806
1807 status = (void *)cmd.resp_pkt->data;
1808 if (len != (status_size +
1809 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
1810 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1811 fw_status = ERR_PTR(-EIO);
1812 goto out_free_resp;
1813 }
1814
1815 fw_status = kmemdup(status, len, GFP_KERNEL);
1816
1817 out_free_resp:
1818 iwl_free_resp(&cmd);
1819 return fw_status;
1820 }
1821
1822 /* releases the MVM mutex */
1823 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1824 struct ieee80211_vif *vif)
1825 {
1826 struct iwl_wowlan_status_data status;
1827 struct iwl_wowlan_status *fw_status;
1828 int i;
1829 bool keep;
1830 struct iwl_mvm_sta *mvm_ap_sta;
1831
1832 fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
1833 if (IS_ERR_OR_NULL(fw_status))
1834 goto out_unlock;
1835
1836 status.pattern_number = le16_to_cpu(fw_status->pattern_number);
1837 for (i = 0; i < 8; i++)
1838 status.qos_seq_ctr[i] =
1839 le16_to_cpu(fw_status->qos_seq_ctr[i]);
1840 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
1841 status.wake_packet_length =
1842 le32_to_cpu(fw_status->wake_packet_length);
1843 status.wake_packet_bufsize =
1844 le32_to_cpu(fw_status->wake_packet_bufsize);
1845 status.wake_packet = fw_status->wake_packet;
1846
1847 /* still at hard-coded place 0 for D3 image */
1848 mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
1849 if (!mvm_ap_sta)
1850 goto out_free;
1851
1852 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1853 u16 seq = status.qos_seq_ctr[i];
1854 /* firmware stores last-used value, we store next value */
1855 seq += 0x10;
1856 mvm_ap_sta->tid_data[i].seq_number = seq;
1857 }
1858
1859 /* now we have all the data we need, unlock to avoid mac80211 issues */
1860 mutex_unlock(&mvm->mutex);
1861
1862 iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
1863
1864 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
1865
1866 kfree(fw_status);
1867 return keep;
1868
1869 out_free:
1870 kfree(fw_status);
1871 out_unlock:
1872 mutex_unlock(&mvm->mutex);
1873 return false;
1874 }
1875
1876 void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
1877 struct ieee80211_vif *vif,
1878 struct iwl_wowlan_status *status)
1879 {
1880 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1881 .mvm = mvm,
1882 .status = status,
1883 };
1884
1885 /*
1886 * rekey handling requires taking locks that can't be taken now.
1887 * however, d0i3 doesn't offload rekey, so we're fine.
1888 */
1889 if (WARN_ON_ONCE(status->num_of_gtk_rekeys))
1890 return;
1891
1892 /* find last GTK that we used initially, if any */
1893 gtkdata.find_phase = true;
1894 iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
1895
1896 gtkdata.find_phase = false;
1897 iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
1898 }
1899
1900 struct iwl_mvm_nd_query_results {
1901 u32 matched_profiles;
1902 struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
1903 };
1904
1905 static int
1906 iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
1907 struct iwl_mvm_nd_query_results *results)
1908 {
1909 struct iwl_scan_offload_profiles_query *query;
1910 struct iwl_host_cmd cmd = {
1911 .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
1912 .flags = CMD_WANT_SKB,
1913 };
1914 int ret, len;
1915
1916 ret = iwl_mvm_send_cmd(mvm, &cmd);
1917 if (ret) {
1918 IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
1919 return ret;
1920 }
1921
1922 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1923 if (len < sizeof(*query)) {
1924 IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
1925 ret = -EIO;
1926 goto out_free_resp;
1927 }
1928
1929 query = (void *)cmd.resp_pkt->data;
1930
1931 results->matched_profiles = le32_to_cpu(query->matched_profiles);
1932 memcpy(results->matches, query->matches, sizeof(results->matches));
1933
1934 #ifdef CONFIG_IWLWIFI_DEBUGFS
1935 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
1936 #endif
1937
1938 out_free_resp:
1939 iwl_free_resp(&cmd);
1940 return ret;
1941 }
1942
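/*
 * Build the net-detect wakeup information and report it to mac80211.
 * Releases the MVM mutex.
 */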
1943 static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
1944 struct ieee80211_vif *vif)
1945 {
1946 struct cfg80211_wowlan_nd_info *net_detect = NULL;
1947 struct cfg80211_wowlan_wakeup wakeup = {
1948 .pattern_idx = -1,
1949 };
1950 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1951 struct iwl_mvm_nd_query_results query;
1952 struct iwl_wowlan_status *fw_status;
1953 unsigned long matched_profiles;
1954 u32 reasons = 0;
1955 int i, j, n_matches, ret;
1956
1957 fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
1958 if (!IS_ERR_OR_NULL(fw_status)) {
1959 reasons = le32_to_cpu(fw_status->wakeup_reasons);
1960 kfree(fw_status);
1961 }
1962
1963 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1964 wakeup.rfkill_release = true;
1965
1966 if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
1967 goto out;
1968
1969 ret = iwl_mvm_netdetect_query_results(mvm, &query);
1970 if (ret || !query.matched_profiles) {
1971 wakeup_report = NULL;
1972 goto out;
1973 }
1974
1975 matched_profiles = query.matched_profiles;
1976 if (mvm->n_nd_match_sets) {
1977 n_matches = hweight_long(matched_profiles);
1978 } else {
1979 IWL_ERR(mvm, "no net detect match information available\n");
1980 n_matches = 0;
1981 }
1982
1983 net_detect = kzalloc(sizeof(*net_detect) +
1984 (n_matches * sizeof(net_detect->matches[0])),
1985 GFP_KERNEL);
1986 if (!net_detect || !n_matches)
1987 goto out_report_nd;
1988
1989 for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
1990 struct iwl_scan_offload_profile_match *fw_match;
1991 struct cfg80211_wowlan_nd_match *match;
1992 int idx, n_channels = 0;
1993
1994 fw_match = &query.matches[i];
1995
1996 for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
1997 n_channels += hweight8(fw_match->matching_channels[j]);
1998
1999 match = kzalloc(sizeof(*match) +
2000 (n_channels * sizeof(*match->channels)),
2001 GFP_KERNEL);
2002 if (!match)
2003 goto out_report_nd;
2004
2005 net_detect->matches[net_detect->n_matches++] = match;
2006
2007 /* We inverted the order of the SSIDs in the scan
2008 * request, so invert the index here.
2009 */
2010 idx = mvm->n_nd_match_sets - i - 1;
2011 match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
2012 memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
2013 match->ssid.ssid_len);
2014
2015 if (mvm->n_nd_channels < n_channels)
2016 continue;
2017
2018 for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
2019 if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
2020 match->channels[match->n_channels++] =
2021 mvm->nd_channels[j]->center_freq;
2022 }
2023
2024 out_report_nd:
2025 wakeup.net_detect = net_detect;
2026 out:
2027 iwl_mvm_free_nd(mvm);
2028
2029 mutex_unlock(&mvm->mutex);
2030 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
2031
2032 if (net_detect) {
2033 for (i = 0; i < net_detect->n_matches; i++)
2034 kfree(net_detect->matches[i]);
2035 kfree(net_detect);
2036 }
2037 }
2038
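/*
 * Snapshot the D3 firmware data section from device SRAM so it can be
 * inspected later; debugfs builds only, and only when the
 * store_d3_resume_sram knob is set.
 */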
2039 static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
2040 {
2041 #ifdef CONFIG_IWLWIFI_DEBUGFS
2042 const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
2043 u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
2044 u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
2045
2046 if (!mvm->store_d3_resume_sram)
2047 return;
2048
2049 if (!mvm->d3_resume_sram) {
2050 mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
2051 if (!mvm->d3_resume_sram)
2052 return;
2053 }
2054
2055 iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
2056 #endif
2057 }
2058
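/* interface iterator: disconnect every station vif except the one we keep */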
2059 static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
2060 struct ieee80211_vif *vif)
2061 {
2062 /* skip the vif we keep the connection on */
2063 if (data == vif)
2064 return;
2065
2066 if (vif->type == NL80211_IFTYPE_STATION)
2067 ieee80211_resume_disconnect(vif);
2068 }
2069
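/*
 * Common resume path, used both for real D3 resume and for the debugfs
 * d3_test file.  Returns 1 when mac80211 must restart the HW to reload
 * the D0 firmware (non-unified image, or an error while exiting D3),
 * and 0 otherwise.
 */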
2070 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
2071 {
2072 struct ieee80211_vif *vif = NULL;
2073 int ret = 1;
2074 enum iwl_d3_status d3_status;
2075 bool keep = false;
2076 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2077 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2078 bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
2079 IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
2080
2081 mutex_lock(&mvm->mutex);
2082
2083 /* get the BSS vif pointer again */
2084 vif = iwl_mvm_get_bss_vif(mvm);
2085 if (IS_ERR_OR_NULL(vif))
2086 goto err;
2087
2088 ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
2089 if (ret)
2090 goto err;
2091
2092 if (d3_status != IWL_D3_STATUS_ALIVE) {
2093 IWL_INFO(mvm, "Device was reset during suspend\n");
2094 goto err;
2095 }
2096
2097 /* query SRAM first in case we want event logging */
2098 iwl_mvm_read_d3_sram(mvm);
2099
2100 if (d0i3_first) {
2101 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
2102 if (ret < 0) {
2103 IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
2104 ret);
2105 goto err;
2106 }
2107 }
2108
2109 /*
2110 * Query the current location and source from the D3 firmware so we
2111 * can play it back when we re-initialize the D0 firmware
2112 */
2113 iwl_mvm_update_changed_regdom(mvm);
2114
2115 if (!unified_image)
2116 /* Re-configure default SAR profile */
2117 iwl_mvm_sar_select_profile(mvm, 1, 1);
2118
2119 if (mvm->net_detect) {
2120 /* If this is a non-unified image, we restart the FW anyway,
2121 * so there is no need to stop the netdetect scan.  For a
2122 * unified image, stop it; if stopping fails, continue and
2123 * try to get the wake-up reasons, but trigger a HW restart
2124 * by keeping a failure code in ret.
2125 */
2126 if (unified_image)
2127 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
2128 false);
2129
2130 iwl_mvm_query_netdetect_reasons(mvm, vif);
2131 /* the call above unlocked the mutex, so skip unlocking here */
2132 goto out;
2133 } else {
2134 keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
2135 #ifdef CONFIG_IWLWIFI_DEBUGFS
2136 if (keep)
2137 mvm->keep_vif = vif;
2138 #endif
2139 /* the call above unlocked the mutex, so skip unlocking here */
2140 goto out_iterate;
2141 }
2142
2143 err:
2144 iwl_mvm_free_nd(mvm);
2145 mutex_unlock(&mvm->mutex);
2146
2147 out_iterate:
2148 if (!test)
2149 ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
2150 IEEE80211_IFACE_ITER_NORMAL,
2151 iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
2152
2153 out:
2154 /* no need to reset the device in unified images, if successful */
2155 if (unified_image && !ret) {
2156 /* nothing else to do if we already sent D0I3_END_CMD */
2157 if (d0i3_first)
2158 return 0;
2159
2160 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
2161 if (!ret)
2162 return 0;
2163 }
2164
2165 /*
2166 * Reconfigure the device in one of the following cases:
2167 * 1. We are not using a unified image
2168 * 2. We are using a unified image but had an error while exiting D3
2169 */
2170 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
2171 set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
2172 /*
2173 * When switching images we return 1, which causes mac80211
2174 * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
2175 * This type of reconfig calls iwl_mvm_restart_complete(),
2176 * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
2177 * to take the reference here.
2178 */
2179 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
2180
2181 return 1;
2182 }
2183
2184 static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
2185 {
2186 iwl_trans_resume(mvm->trans);
2187
2188 return __iwl_mvm_resume(mvm, false);
2189 }
2190
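/* resume counterpart used when the platform suspend kept the device in d0i3 */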
2191 static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
2192 {
2193 bool exit_now;
2194 enum iwl_d3_status d3_status;
2195 struct iwl_trans *trans = mvm->trans;
2196
2197 iwl_trans_d3_resume(trans, &d3_status, false, false);
2198
2199 /*
2200 * make sure to clear D0I3_DEFER_WAKEUP before
2201 * calling iwl_trans_resume(), which might wait
2202 * for d0i3 exit completion.
2203 */
2204 mutex_lock(&mvm->d0i3_suspend_mutex);
2205 __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
2206 exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
2207 &mvm->d0i3_suspend_flags);
2208 mutex_unlock(&mvm->d0i3_suspend_mutex);
2209 if (exit_now) {
2210 IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
2211 _iwl_mvm_exit_d0i3(mvm);
2212 }
2213
2214 iwl_trans_resume(trans);
2215
2216 if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
2217 int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
2218
2219 if (ret)
2220 return ret;
2221 /*
2222 * d0i3 exit will be deferred until reconfig_complete;
2223 * make sure we are out of d0i3 there.
2224 */
2225 }
2226 return 0;
2227 }
2228
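/* mac80211 resume callback: dispatch to the D3 or d0i3 resume path */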
2229 int iwl_mvm_resume(struct ieee80211_hw *hw)
2230 {
2231 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2232 int ret;
2233
2234 if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
2235 ret = iwl_mvm_resume_d0i3(mvm);
2236 else
2237 ret = iwl_mvm_resume_d3(mvm);
2238
2239 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2240
2241 return ret;
2242 }
2243
2244 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
2245 {
2246 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2247
2248 device_set_wakeup_enable(mvm->trans->dev, enabled);
2249 }
2250
2251 #ifdef CONFIG_IWLWIFI_DEBUGFS
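/*
 * debugfs d3_test support: opening the file puts the device into
 * pseudo-D3 (suspend without the platform actually sleeping), reading
 * it waits for a wakeup, and closing it resumes the device.
 */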
2252 static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
2253 {
2254 struct iwl_mvm *mvm = inode->i_private;
2255 int err;
2256
2257 if (mvm->d3_test_active)
2258 return -EBUSY;
2259
2260 file->private_data = inode->i_private;
2261
2262 ieee80211_stop_queues(mvm->hw);
2263 synchronize_net();
2264
2265 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
2266
2267 /* start pseudo D3 */
2268 rtnl_lock();
2269 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
2270 rtnl_unlock();
2271 if (err > 0)
2272 err = -EINVAL;
2273 if (err) {
2274 ieee80211_wake_queues(mvm->hw);
2275 return err;
2276 }
2277 mvm->d3_test_active = true;
2278 mvm->keep_vif = NULL;
2279 return 0;
2280 }
2281
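/* block until the D3 firmware asserts PME (wakeup) or the sleep is interrupted */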
2282 static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
2283 size_t count, loff_t *ppos)
2284 {
2285 struct iwl_mvm *mvm = file->private_data;
2286 u32 pme_asserted;
2287
2288 while (true) {
2289 /* read pme_ptr if available */
2290 if (mvm->d3_test_pme_ptr) {
2291 pme_asserted = iwl_trans_read_mem32(mvm->trans,
2292 mvm->d3_test_pme_ptr);
2293 if (pme_asserted)
2294 break;
2295 }
2296
2297 if (msleep_interruptible(100))
2298 break;
2299 }
2300
2301 return 0;
2302 }
2303
2304 static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
2305 struct ieee80211_vif *vif)
2306 {
2307 /* skip the vif we keep the connection on */
2308 if (_data == vif)
2309 return;
2310
2311 if (vif->type == NL80211_IFTYPE_STATION)
2312 ieee80211_connection_loss(vif);
2313 }
2314
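/*
 * Leave pseudo-D3: resume the device, restart the HW for non-unified
 * images, and disconnect every vif except the one the wakeup kept.
 */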
2315 static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2316 {
2317 struct iwl_mvm *mvm = inode->i_private;
2318 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2319 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2320
2321 mvm->d3_test_active = false;
2322
2323 rtnl_lock();
2324 __iwl_mvm_resume(mvm, true);
2325 rtnl_unlock();
2326
2327 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2328
2329 iwl_abort_notification_waits(&mvm->notif_wait);
2330 if (!unified_image) {
2331 int remaining_time = 10;
2332
2333 ieee80211_restart_hw(mvm->hw);
2334
2335 /* wait for restart and disconnect all interfaces */
2336 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2337 remaining_time > 0) {
2338 remaining_time--;
2339 msleep(1000);
2340 }
2341
2342 if (remaining_time == 0)
2343 IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
2344 }
2345
2346 ieee80211_iterate_active_interfaces_atomic(
2347 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2348 iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
2349
2350 ieee80211_wake_queues(mvm->hw);
2351
2352 return 0;
2353 }
2354
2355 const struct file_operations iwl_dbgfs_d3_test_ops = {
2356 .llseek = no_llseek,
2357 .open = iwl_mvm_d3_test_open,
2358 .read = iwl_mvm_d3_test_read,
2359 .release = iwl_mvm_d3_test_release,
2360 };
2361 #endif