1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
38 * All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 *****************************************************************************/
67 #include <linux/firmware.h>
68 #include <linux/rtnetlink.h>
69 #include "iwl-trans.h"
72 #include "iwl-eeprom-parse.h"
73 #include "iwl-eeprom-read.h"
74 #include "iwl-nvm-parse.h"
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE	0x1b58
#define IWL_MAX_EXT_NVM_SECTION_SIZE	0x1ffc

#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	READ_NVM_CHUNK_SUCCEED = 0,
	READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
92 * prepare the NVM host command w/ the pointers to the nvm buffer
95 static int iwl_nvm_write_chunk(struct iwl_mvm
*mvm
, u16 section
,
96 u16 offset
, u16 length
, const u8
*data
)
98 struct iwl_nvm_access_cmd nvm_access_cmd
= {
99 .offset
= cpu_to_le16(offset
),
100 .length
= cpu_to_le16(length
),
101 .type
= cpu_to_le16(section
),
102 .op_code
= NVM_WRITE_OPCODE
,
104 struct iwl_host_cmd cmd
= {
105 .id
= NVM_ACCESS_CMD
,
106 .len
= { sizeof(struct iwl_nvm_access_cmd
), length
},
107 .flags
= CMD_WANT_SKB
| CMD_SEND_IN_RFKILL
,
108 .data
= { &nvm_access_cmd
, data
},
109 /* data may come from vmalloc, so use _DUP */
110 .dataflags
= { 0, IWL_HCMD_DFL_DUP
},
112 struct iwl_rx_packet
*pkt
;
113 struct iwl_nvm_access_resp
*nvm_resp
;
116 ret
= iwl_mvm_send_cmd(mvm
, &cmd
);
121 /* Extract & check NVM write response */
122 nvm_resp
= (void *)pkt
->data
;
123 if (le16_to_cpu(nvm_resp
->status
) != READ_NVM_CHUNK_SUCCEED
) {
125 "NVM access write command failed for section %u (status = 0x%x)\n",
126 section
, le16_to_cpu(nvm_resp
->status
));
134 static int iwl_nvm_read_chunk(struct iwl_mvm
*mvm
, u16 section
,
135 u16 offset
, u16 length
, u8
*data
)
137 struct iwl_nvm_access_cmd nvm_access_cmd
= {
138 .offset
= cpu_to_le16(offset
),
139 .length
= cpu_to_le16(length
),
140 .type
= cpu_to_le16(section
),
141 .op_code
= NVM_READ_OPCODE
,
143 struct iwl_nvm_access_resp
*nvm_resp
;
144 struct iwl_rx_packet
*pkt
;
145 struct iwl_host_cmd cmd
= {
146 .id
= NVM_ACCESS_CMD
,
147 .flags
= CMD_WANT_SKB
| CMD_SEND_IN_RFKILL
,
148 .data
= { &nvm_access_cmd
, },
150 int ret
, bytes_read
, offset_read
;
153 cmd
.len
[0] = sizeof(struct iwl_nvm_access_cmd
);
155 ret
= iwl_mvm_send_cmd(mvm
, &cmd
);
161 /* Extract NVM response */
162 nvm_resp
= (void *)pkt
->data
;
163 ret
= le16_to_cpu(nvm_resp
->status
);
164 bytes_read
= le16_to_cpu(nvm_resp
->length
);
165 offset_read
= le16_to_cpu(nvm_resp
->offset
);
166 resp_data
= nvm_resp
->data
;
169 (ret
== READ_NVM_CHUNK_NOT_VALID_ADDRESS
)) {
171 * meaning of NOT_VALID_ADDRESS:
172 * driver try to read chunk from address that is
173 * multiple of 2K and got an error since addr is empty.
174 * meaning of (offset != 0): driver already
175 * read valid data from another chunk so this case
178 IWL_DEBUG_EEPROM(mvm
->trans
->dev
,
179 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
183 IWL_DEBUG_EEPROM(mvm
->trans
->dev
,
184 "NVM access command failed with status %d (device: %s)\n",
185 ret
, mvm
->cfg
->name
);
191 if (offset_read
!= offset
) {
192 IWL_ERR(mvm
, "NVM ACCESS response with invalid offset %d\n",
198 /* Write data to NVM */
199 memcpy(data
+ offset
, resp_data
, bytes_read
);
207 static int iwl_nvm_write_section(struct iwl_mvm
*mvm
, u16 section
,
208 const u8
*data
, u16 length
)
212 /* copy data in chunks of 2k (and remainder if any) */
214 while (offset
< length
) {
217 chunk_size
= min(IWL_NVM_DEFAULT_CHUNK_SIZE
,
220 ret
= iwl_nvm_write_chunk(mvm
, section
, offset
,
221 chunk_size
, data
+ offset
);
225 offset
+= chunk_size
;
231 static void iwl_mvm_nvm_fixups(struct iwl_mvm
*mvm
, unsigned int section
,
232 u8
*data
, unsigned int len
)
234 #define IWL_4165_DEVICE_ID 0x5501
235 #define NVM_SKU_CAP_MIMO_DISABLE BIT(5)
237 if (section
== NVM_SECTION_TYPE_PHY_SKU
&&
238 mvm
->trans
->hw_id
== IWL_4165_DEVICE_ID
&& data
&& len
>= 5 &&
239 (data
[4] & NVM_SKU_CAP_MIMO_DISABLE
))
240 /* OTP 0x52 bug work around: it's a 1x1 device */
241 data
[3] = ANT_B
| (ANT_B
<< 4);
245 * Reads an NVM section completely.
246 * NICs prior to 7000 family doesn't have a real NVM, but just read
247 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
248 * by uCode, we need to manually check in this case that we don't
249 * overflow and try to read more than the EEPROM size.
250 * For 7000 family NICs, we supply the maximal size we can read, and
251 * the uCode fills the response with as much data as we can,
252 * without overflowing, so no check is needed.
254 static int iwl_nvm_read_section(struct iwl_mvm
*mvm
, u16 section
,
255 u8
*data
, u32 size_read
)
257 u16 length
, offset
= 0;
260 /* Set nvm section read length */
261 length
= IWL_NVM_DEFAULT_CHUNK_SIZE
;
265 /* Read the NVM until exhausted (reading less than requested) */
266 while (ret
== length
) {
267 /* Check no memory assumptions fail and cause an overflow */
268 if ((size_read
+ offset
+ length
) >
269 mvm
->cfg
->base_params
->eeprom_size
) {
270 IWL_ERR(mvm
, "EEPROM size is too small for NVM\n");
274 ret
= iwl_nvm_read_chunk(mvm
, section
, offset
, length
, data
);
276 IWL_DEBUG_EEPROM(mvm
->trans
->dev
,
277 "Cannot read NVM from section %d offset %d, length %d\n",
278 section
, offset
, length
);
284 iwl_mvm_nvm_fixups(mvm
, section
, data
, offset
);
286 IWL_DEBUG_EEPROM(mvm
->trans
->dev
,
287 "NVM section %d read completed\n", section
);
291 static struct iwl_nvm_data
*
292 iwl_parse_nvm_sections(struct iwl_mvm
*mvm
)
294 struct iwl_nvm_section
*sections
= mvm
->nvm_sections
;
296 const __le16
*sw
, *calib
, *regulatory
, *mac_override
, *phy_sku
;
300 /* Checking for required sections */
301 if (mvm
->trans
->cfg
->nvm_type
!= IWL_NVM_EXT
) {
302 if (!mvm
->nvm_sections
[NVM_SECTION_TYPE_SW
].data
||
303 !mvm
->nvm_sections
[mvm
->cfg
->nvm_hw_section_num
].data
) {
304 IWL_ERR(mvm
, "Can't parse empty OTP/NVM sections\n");
308 if (mvm
->trans
->cfg
->nvm_type
== IWL_NVM_SDP
)
309 regulatory_type
= NVM_SECTION_TYPE_REGULATORY_SDP
;
311 regulatory_type
= NVM_SECTION_TYPE_REGULATORY
;
313 /* SW and REGULATORY sections are mandatory */
314 if (!mvm
->nvm_sections
[NVM_SECTION_TYPE_SW
].data
||
315 !mvm
->nvm_sections
[regulatory_type
].data
) {
317 "Can't parse empty family 8000 OTP/NVM sections\n");
320 /* MAC_OVERRIDE or at least HW section must exist */
321 if (!mvm
->nvm_sections
[mvm
->cfg
->nvm_hw_section_num
].data
&&
322 !mvm
->nvm_sections
[NVM_SECTION_TYPE_MAC_OVERRIDE
].data
) {
324 "Can't parse mac_address, empty sections\n");
328 /* PHY_SKU section is mandatory in B0 */
329 if (!mvm
->nvm_sections
[NVM_SECTION_TYPE_PHY_SKU
].data
) {
331 "Can't parse phy_sku in B0, empty sections\n");
336 hw
= (const __be16
*)sections
[mvm
->cfg
->nvm_hw_section_num
].data
;
337 sw
= (const __le16
*)sections
[NVM_SECTION_TYPE_SW
].data
;
338 calib
= (const __le16
*)sections
[NVM_SECTION_TYPE_CALIBRATION
].data
;
340 (const __le16
*)sections
[NVM_SECTION_TYPE_MAC_OVERRIDE
].data
;
341 phy_sku
= (const __le16
*)sections
[NVM_SECTION_TYPE_PHY_SKU
].data
;
343 regulatory
= mvm
->trans
->cfg
->nvm_type
== IWL_NVM_SDP
?
344 (const __le16
*)sections
[NVM_SECTION_TYPE_REGULATORY_SDP
].data
:
345 (const __le16
*)sections
[NVM_SECTION_TYPE_REGULATORY
].data
;
347 lar_enabled
= !iwlwifi_mod_params
.lar_disable
&&
348 fw_has_capa(&mvm
->fw
->ucode_capa
,
349 IWL_UCODE_TLV_CAPA_LAR_SUPPORT
);
351 return iwl_parse_nvm_data(mvm
->trans
, mvm
->cfg
, hw
, sw
, calib
,
352 regulatory
, mac_override
, phy_sku
,
353 mvm
->fw
->valid_tx_ant
, mvm
->fw
->valid_rx_ant
,
357 #define MAX_NVM_FILE_LEN 16384
360 * Reads external NVM from a file into mvm->nvm_sections
362 * HOW TO CREATE THE NVM FILE FORMAT:
363 * ------------------------------
364 * 1. create hex file, format:
369 * rev - 6 bit (word1)
370 * len - 10 bit (word1)
372 * rsv - 12 bit (word2)
374 * 2. flip 8bits with 8 bits per line to get the right NVM file format
376 * 3. create binary file from the hex file
378 * 4. save as "iNVM_xxx.bin" under /lib/firmware
380 int iwl_mvm_read_external_nvm(struct iwl_mvm
*mvm
)
382 int ret
, section_size
;
384 const struct firmware
*fw_entry
;
392 int max_section_size
;
393 const __le32
*dword_buff
;
395 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
396 #define NVM_WORD2_ID(x) (x >> 12)
397 #define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
398 #define EXT_NVM_WORD1_ID(x) ((x) >> 4)
399 #define NVM_HEADER_0 (0x2A504C54)
400 #define NVM_HEADER_1 (0x4E564D2A)
401 #define NVM_HEADER_SIZE (4 * sizeof(u32))
403 IWL_DEBUG_EEPROM(mvm
->trans
->dev
, "Read from external NVM\n");
405 /* Maximal size depends on NVM version */
406 if (mvm
->trans
->cfg
->nvm_type
!= IWL_NVM_EXT
)
407 max_section_size
= IWL_MAX_NVM_SECTION_SIZE
;
409 max_section_size
= IWL_MAX_EXT_NVM_SECTION_SIZE
;
412 * Obtain NVM image via request_firmware. Since we already used
413 * request_firmware_nowait() for the firmware binary load and only
414 * get here after that we assume the NVM request can be satisfied
417 ret
= request_firmware(&fw_entry
, mvm
->nvm_file_name
,
420 IWL_ERR(mvm
, "ERROR: %s isn't available %d\n",
421 mvm
->nvm_file_name
, ret
);
425 IWL_INFO(mvm
, "Loaded NVM file %s (%zu bytes)\n",
426 mvm
->nvm_file_name
, fw_entry
->size
);
428 if (fw_entry
->size
> MAX_NVM_FILE_LEN
) {
429 IWL_ERR(mvm
, "NVM file too large\n");
434 eof
= fw_entry
->data
+ fw_entry
->size
;
435 dword_buff
= (__le32
*)fw_entry
->data
;
437 /* some NVM file will contain a header.
438 * The header is identified by 2 dwords header as follow:
439 * dword[0] = 0x2A504C54
440 * dword[1] = 0x4E564D2A
442 * This header must be skipped when providing the NVM data to the FW.
444 if (fw_entry
->size
> NVM_HEADER_SIZE
&&
445 dword_buff
[0] == cpu_to_le32(NVM_HEADER_0
) &&
446 dword_buff
[1] == cpu_to_le32(NVM_HEADER_1
)) {
447 file_sec
= (void *)(fw_entry
->data
+ NVM_HEADER_SIZE
);
448 IWL_INFO(mvm
, "NVM Version %08X\n", le32_to_cpu(dword_buff
[2]));
449 IWL_INFO(mvm
, "NVM Manufacturing date %08X\n",
450 le32_to_cpu(dword_buff
[3]));
452 /* nvm file validation, dword_buff[2] holds the file version */
453 if (mvm
->trans
->cfg
->device_family
== IWL_DEVICE_FAMILY_8000
&&
454 CSR_HW_REV_STEP(mvm
->trans
->hw_rev
) == SILICON_C_STEP
&&
455 le32_to_cpu(dword_buff
[2]) < 0xE4A) {
460 file_sec
= (void *)fw_entry
->data
;
464 if (file_sec
->data
> eof
) {
466 "ERROR - NVM file too short for section header\n");
471 /* check for EOF marker */
472 if (!file_sec
->word1
&& !file_sec
->word2
) {
477 if (mvm
->trans
->cfg
->nvm_type
!= IWL_NVM_EXT
) {
479 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec
->word1
));
480 section_id
= NVM_WORD2_ID(le16_to_cpu(file_sec
->word2
));
482 section_size
= 2 * EXT_NVM_WORD2_LEN(
483 le16_to_cpu(file_sec
->word2
));
484 section_id
= EXT_NVM_WORD1_ID(
485 le16_to_cpu(file_sec
->word1
));
488 if (section_size
> max_section_size
) {
489 IWL_ERR(mvm
, "ERROR - section too large (%d)\n",
496 IWL_ERR(mvm
, "ERROR - section empty\n");
501 if (file_sec
->data
+ section_size
> eof
) {
503 "ERROR - NVM file too short for section (%d bytes)\n",
509 if (WARN(section_id
>= NVM_MAX_NUM_SECTIONS
,
510 "Invalid NVM section ID %d\n", section_id
)) {
515 temp
= kmemdup(file_sec
->data
, section_size
, GFP_KERNEL
);
521 iwl_mvm_nvm_fixups(mvm
, section_id
, temp
, section_size
);
523 kfree(mvm
->nvm_sections
[section_id
].data
);
524 mvm
->nvm_sections
[section_id
].data
= temp
;
525 mvm
->nvm_sections
[section_id
].length
= section_size
;
527 /* advance to the next section */
528 file_sec
= (void *)(file_sec
->data
+ section_size
);
531 release_firmware(fw_entry
);
535 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
536 int iwl_mvm_load_nvm_to_nic(struct iwl_mvm
*mvm
)
539 struct iwl_nvm_section
*sections
= mvm
->nvm_sections
;
541 IWL_DEBUG_EEPROM(mvm
->trans
->dev
, "'Write to NVM\n");
543 for (i
= 0; i
< ARRAY_SIZE(mvm
->nvm_sections
); i
++) {
544 if (!mvm
->nvm_sections
[i
].data
|| !mvm
->nvm_sections
[i
].length
)
546 ret
= iwl_nvm_write_section(mvm
, i
, sections
[i
].data
,
549 IWL_ERR(mvm
, "iwl_mvm_send_cmd failed: %d\n", ret
);
556 int iwl_nvm_init(struct iwl_mvm
*mvm
)
560 u8
*nvm_buffer
, *temp
;
561 const char *nvm_file_C
= mvm
->cfg
->default_nvm_file_C_step
;
563 if (WARN_ON_ONCE(mvm
->cfg
->nvm_hw_section_num
>= NVM_MAX_NUM_SECTIONS
))
566 /* load NVM values from nic */
567 /* Read From FW NVM */
568 IWL_DEBUG_EEPROM(mvm
->trans
->dev
, "Read from NVM\n");
570 nvm_buffer
= kmalloc(mvm
->cfg
->base_params
->eeprom_size
,
574 for (section
= 0; section
< NVM_MAX_NUM_SECTIONS
; section
++) {
575 /* we override the constness for initial read */
576 ret
= iwl_nvm_read_section(mvm
, section
, nvm_buffer
,
581 temp
= kmemdup(nvm_buffer
, ret
, GFP_KERNEL
);
587 iwl_mvm_nvm_fixups(mvm
, section
, temp
, ret
);
589 mvm
->nvm_sections
[section
].data
= temp
;
590 mvm
->nvm_sections
[section
].length
= ret
;
592 #ifdef CONFIG_IWLWIFI_DEBUGFS
594 case NVM_SECTION_TYPE_SW
:
595 mvm
->nvm_sw_blob
.data
= temp
;
596 mvm
->nvm_sw_blob
.size
= ret
;
598 case NVM_SECTION_TYPE_CALIBRATION
:
599 mvm
->nvm_calib_blob
.data
= temp
;
600 mvm
->nvm_calib_blob
.size
= ret
;
602 case NVM_SECTION_TYPE_PRODUCTION
:
603 mvm
->nvm_prod_blob
.data
= temp
;
604 mvm
->nvm_prod_blob
.size
= ret
;
606 case NVM_SECTION_TYPE_PHY_SKU
:
607 mvm
->nvm_phy_sku_blob
.data
= temp
;
608 mvm
->nvm_phy_sku_blob
.size
= ret
;
611 if (section
== mvm
->cfg
->nvm_hw_section_num
) {
612 mvm
->nvm_hw_blob
.data
= temp
;
613 mvm
->nvm_hw_blob
.size
= ret
;
620 IWL_ERR(mvm
, "OTP is blank\n");
623 /* Only if PNVM selected in the mod param - load external NVM */
624 if (mvm
->nvm_file_name
) {
625 /* read External NVM file from the mod param */
626 ret
= iwl_mvm_read_external_nvm(mvm
);
628 mvm
->nvm_file_name
= nvm_file_C
;
630 if ((ret
== -EFAULT
|| ret
== -ENOENT
) &&
631 mvm
->nvm_file_name
) {
632 /* in case nvm file was failed try again */
633 ret
= iwl_mvm_read_external_nvm(mvm
);
642 /* parse the relevant nvm sections */
643 mvm
->nvm_data
= iwl_parse_nvm_sections(mvm
);
646 IWL_DEBUG_EEPROM(mvm
->trans
->dev
, "nvm version = %x\n",
647 mvm
->nvm_data
->nvm_version
);
652 struct iwl_mcc_update_resp
*
653 iwl_mvm_update_mcc(struct iwl_mvm
*mvm
, const char *alpha2
,
654 enum iwl_mcc_source src_id
)
656 struct iwl_mcc_update_cmd mcc_update_cmd
= {
657 .mcc
= cpu_to_le16(alpha2
[0] << 8 | alpha2
[1]),
658 .source_id
= (u8
)src_id
,
660 struct iwl_mcc_update_resp
*resp_cp
;
661 struct iwl_rx_packet
*pkt
;
662 struct iwl_host_cmd cmd
= {
663 .id
= MCC_UPDATE_CMD
,
664 .flags
= CMD_WANT_SKB
,
665 .data
= { &mcc_update_cmd
},
670 int resp_len
, n_channels
;
672 bool resp_v2
= fw_has_capa(&mvm
->fw
->ucode_capa
,
673 IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2
);
675 if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm
)))
676 return ERR_PTR(-EOPNOTSUPP
);
678 cmd
.len
[0] = sizeof(struct iwl_mcc_update_cmd
);
680 cmd
.len
[0] = sizeof(struct iwl_mcc_update_cmd_v1
);
682 IWL_DEBUG_LAR(mvm
, "send MCC update to FW with '%c%c' src = %d\n",
683 alpha2
[0], alpha2
[1], src_id
);
685 ret
= iwl_mvm_send_cmd(mvm
, &cmd
);
691 /* Extract MCC response */
693 struct iwl_mcc_update_resp
*mcc_resp
= (void *)pkt
->data
;
695 n_channels
= __le32_to_cpu(mcc_resp
->n_channels
);
696 resp_len
= sizeof(struct iwl_mcc_update_resp
) +
697 n_channels
* sizeof(__le32
);
698 resp_cp
= kmemdup(mcc_resp
, resp_len
, GFP_KERNEL
);
700 resp_cp
= ERR_PTR(-ENOMEM
);
704 struct iwl_mcc_update_resp_v1
*mcc_resp_v1
= (void *)pkt
->data
;
706 n_channels
= __le32_to_cpu(mcc_resp_v1
->n_channels
);
707 resp_len
= sizeof(struct iwl_mcc_update_resp
) +
708 n_channels
* sizeof(__le32
);
709 resp_cp
= kzalloc(resp_len
, GFP_KERNEL
);
711 resp_cp
= ERR_PTR(-ENOMEM
);
715 resp_cp
->status
= mcc_resp_v1
->status
;
716 resp_cp
->mcc
= mcc_resp_v1
->mcc
;
717 resp_cp
->cap
= mcc_resp_v1
->cap
;
718 resp_cp
->source_id
= mcc_resp_v1
->source_id
;
719 resp_cp
->n_channels
= mcc_resp_v1
->n_channels
;
720 memcpy(resp_cp
->channels
, mcc_resp_v1
->channels
,
721 n_channels
* sizeof(__le32
));
724 status
= le32_to_cpu(resp_cp
->status
);
726 mcc
= le16_to_cpu(resp_cp
->mcc
);
728 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
730 mcc
= 0x3030; /* "00" - world */
731 resp_cp
->mcc
= cpu_to_le16(mcc
);
735 "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
736 status
, mcc
, mcc
>> 8, mcc
& 0xff,
737 !!(status
== MCC_RESP_NEW_CHAN_PROFILE
), n_channels
);
744 int iwl_mvm_init_mcc(struct iwl_mvm
*mvm
)
749 struct ieee80211_regdomain
*regd
;
752 if (mvm
->cfg
->nvm_type
== IWL_NVM_EXT
) {
753 tlv_lar
= fw_has_capa(&mvm
->fw
->ucode_capa
,
754 IWL_UCODE_TLV_CAPA_LAR_SUPPORT
);
755 nvm_lar
= mvm
->nvm_data
->lar_enabled
;
756 if (tlv_lar
!= nvm_lar
)
758 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
759 tlv_lar
? "enabled" : "disabled",
760 nvm_lar
? "enabled" : "disabled");
763 if (!iwl_mvm_is_lar_supported(mvm
))
767 * try to replay the last set MCC to FW. If it doesn't exist,
768 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
770 retval
= iwl_mvm_init_fw_regd(mvm
);
771 if (retval
!= -ENOENT
)
775 * Driver regulatory hint for initial update, this also informs the
776 * firmware we support wifi location updates.
777 * Disallow scans that might crash the FW while the LAR regdomain
780 mvm
->lar_regdom_set
= false;
782 regd
= iwl_mvm_get_current_regdomain(mvm
, NULL
);
783 if (IS_ERR_OR_NULL(regd
))
786 if (iwl_mvm_is_wifi_mcc_supported(mvm
) &&
787 !iwl_get_bios_mcc(mvm
->dev
, mcc
)) {
789 regd
= iwl_mvm_get_regdomain(mvm
->hw
->wiphy
, mcc
,
790 MCC_SOURCE_BIOS
, NULL
);
791 if (IS_ERR_OR_NULL(regd
))
795 retval
= regulatory_set_wiphy_regd_sync_rtnl(mvm
->hw
->wiphy
, regd
);
800 void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm
*mvm
,
801 struct iwl_rx_cmd_buffer
*rxb
)
803 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
804 struct iwl_mcc_chub_notif
*notif
= (void *)pkt
->data
;
805 enum iwl_mcc_source src
;
807 struct ieee80211_regdomain
*regd
;
809 lockdep_assert_held(&mvm
->mutex
);
811 if (iwl_mvm_is_vif_assoc(mvm
) && notif
->source_id
== MCC_SOURCE_WIFI
) {
812 IWL_DEBUG_LAR(mvm
, "Ignore mcc update while associated\n");
816 if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm
)))
819 mcc
[0] = le16_to_cpu(notif
->mcc
) >> 8;
820 mcc
[1] = le16_to_cpu(notif
->mcc
) & 0xff;
822 src
= notif
->source_id
;
825 "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
827 regd
= iwl_mvm_get_regdomain(mvm
->hw
->wiphy
, mcc
, src
, NULL
);
828 if (IS_ERR_OR_NULL(regd
))
831 regulatory_set_wiphy_regd(mvm
->hw
->wiphy
, regd
);