1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
38 * All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 *****************************************************************************/
67 #include <net/mac80211.h>
68 #include <linux/netdevice.h>
69 #include <linux/acpi.h>
71 #include "iwl-trans.h"
72 #include "iwl-op-mode.h"
74 #include "iwl-debug.h"
75 #include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
76 #include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
78 #include "iwl-eeprom-parse.h"
82 #include "iwl-phy-db.h"
84 #define MVM_UCODE_ALIVE_TIMEOUT HZ
85 #define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
87 #define UCODE_VALID_OK cpu_to_le32(0x1)
89 struct iwl_mvm_alive_data
{
94 static int iwl_send_tx_ant_cfg(struct iwl_mvm
*mvm
, u8 valid_tx_ant
)
96 struct iwl_tx_ant_cfg_cmd tx_ant_cmd
= {
97 .valid
= cpu_to_le32(valid_tx_ant
),
100 IWL_DEBUG_FW(mvm
, "select valid tx ant: %u\n", valid_tx_ant
);
101 return iwl_mvm_send_cmd_pdu(mvm
, TX_ANT_CONFIGURATION_CMD
, 0,
102 sizeof(tx_ant_cmd
), &tx_ant_cmd
);
105 static int iwl_send_rss_cfg_cmd(struct iwl_mvm
*mvm
)
108 struct iwl_rss_config_cmd cmd
= {
109 .flags
= cpu_to_le32(IWL_RSS_ENABLE
),
110 .hash_mask
= IWL_RSS_HASH_TYPE_IPV4_TCP
|
111 IWL_RSS_HASH_TYPE_IPV4_UDP
|
112 IWL_RSS_HASH_TYPE_IPV4_PAYLOAD
|
113 IWL_RSS_HASH_TYPE_IPV6_TCP
|
114 IWL_RSS_HASH_TYPE_IPV6_UDP
|
115 IWL_RSS_HASH_TYPE_IPV6_PAYLOAD
,
118 if (mvm
->trans
->num_rx_queues
== 1)
121 /* Do not direct RSS traffic to Q 0 which is our fallback queue */
122 for (i
= 0; i
< ARRAY_SIZE(cmd
.indirection_table
); i
++)
123 cmd
.indirection_table
[i
] =
124 1 + (i
% (mvm
->trans
->num_rx_queues
- 1));
125 netdev_rss_key_fill(cmd
.secret_key
, sizeof(cmd
.secret_key
));
127 return iwl_mvm_send_cmd_pdu(mvm
, RSS_CONFIG_CMD
, 0, sizeof(cmd
), &cmd
);
130 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm
*mvm
)
132 struct iwl_dqa_enable_cmd dqa_cmd
= {
133 .cmd_queue
= cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE
),
135 u32 cmd_id
= iwl_cmd_id(DQA_ENABLE_CMD
, DATA_PATH_GROUP
, 0);
138 ret
= iwl_mvm_send_cmd_pdu(mvm
, cmd_id
, 0, sizeof(dqa_cmd
), &dqa_cmd
);
140 IWL_ERR(mvm
, "Failed to send DQA enabling command: %d\n", ret
);
142 IWL_DEBUG_FW(mvm
, "Working in DQA mode\n");
147 void iwl_free_fw_paging(struct iwl_mvm
*mvm
)
151 if (!mvm
->fw_paging_db
[0].fw_paging_block
)
154 for (i
= 0; i
< NUM_OF_FW_PAGING_BLOCKS
; i
++) {
155 struct iwl_fw_paging
*paging
= &mvm
->fw_paging_db
[i
];
157 if (!paging
->fw_paging_block
) {
159 "Paging: block %d already freed, continue to next page\n",
164 dma_unmap_page(mvm
->trans
->dev
, paging
->fw_paging_phys
,
165 paging
->fw_paging_size
, DMA_BIDIRECTIONAL
);
167 __free_pages(paging
->fw_paging_block
,
168 get_order(paging
->fw_paging_size
));
169 paging
->fw_paging_block
= NULL
;
171 kfree(mvm
->trans
->paging_download_buf
);
172 mvm
->trans
->paging_download_buf
= NULL
;
173 mvm
->trans
->paging_db
= NULL
;
175 memset(mvm
->fw_paging_db
, 0, sizeof(mvm
->fw_paging_db
));
178 static int iwl_fill_paging_mem(struct iwl_mvm
*mvm
, const struct fw_img
*image
)
184 * find where is the paging image start point:
185 * if CPU2 exist and it's in paging format, then the image looks like:
186 * CPU1 sections (2 or more)
187 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
188 * CPU2 sections (not paged)
189 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
190 * non paged to CPU2 paging sec
192 * CPU2 paging image (including instruction and data)
194 for (sec_idx
= 0; sec_idx
< image
->num_sec
; sec_idx
++) {
195 if (image
->sec
[sec_idx
].offset
== PAGING_SEPARATOR_SECTION
) {
202 * If paging is enabled there should be at least 2 more sections left
203 * (one for CSS and one for Paging data)
205 if (sec_idx
>= image
->num_sec
- 1) {
206 IWL_ERR(mvm
, "Paging: Missing CSS and/or paging sections\n");
207 iwl_free_fw_paging(mvm
);
211 /* copy the CSS block to the dram */
212 IWL_DEBUG_FW(mvm
, "Paging: load paging CSS to FW, sec = %d\n",
215 memcpy(page_address(mvm
->fw_paging_db
[0].fw_paging_block
),
216 image
->sec
[sec_idx
].data
,
217 mvm
->fw_paging_db
[0].fw_paging_size
);
218 dma_sync_single_for_device(mvm
->trans
->dev
,
219 mvm
->fw_paging_db
[0].fw_paging_phys
,
220 mvm
->fw_paging_db
[0].fw_paging_size
,
224 "Paging: copied %d CSS bytes to first block\n",
225 mvm
->fw_paging_db
[0].fw_paging_size
);
230 * copy the paging blocks to the dram
231 * loop index start from 1 since that CSS block already copied to dram
232 * and CSS index is 0.
233 * loop stop at num_of_paging_blk since that last block is not full.
235 for (idx
= 1; idx
< mvm
->num_of_paging_blk
; idx
++) {
236 struct iwl_fw_paging
*block
= &mvm
->fw_paging_db
[idx
];
238 memcpy(page_address(block
->fw_paging_block
),
239 image
->sec
[sec_idx
].data
+ offset
,
240 block
->fw_paging_size
);
241 dma_sync_single_for_device(mvm
->trans
->dev
,
242 block
->fw_paging_phys
,
243 block
->fw_paging_size
,
248 "Paging: copied %d paging bytes to block %d\n",
249 mvm
->fw_paging_db
[idx
].fw_paging_size
,
252 offset
+= mvm
->fw_paging_db
[idx
].fw_paging_size
;
255 /* copy the last paging block */
256 if (mvm
->num_of_pages_in_last_blk
> 0) {
257 struct iwl_fw_paging
*block
= &mvm
->fw_paging_db
[idx
];
259 memcpy(page_address(block
->fw_paging_block
),
260 image
->sec
[sec_idx
].data
+ offset
,
261 FW_PAGING_SIZE
* mvm
->num_of_pages_in_last_blk
);
262 dma_sync_single_for_device(mvm
->trans
->dev
,
263 block
->fw_paging_phys
,
264 block
->fw_paging_size
,
268 "Paging: copied %d pages in the last block %d\n",
269 mvm
->num_of_pages_in_last_blk
, idx
);
275 void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm
*mvm
,
276 struct iwl_rx_cmd_buffer
*rxb
)
278 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
279 struct iwl_mfu_assert_dump_notif
*mfu_dump_notif
= (void *)pkt
->data
;
280 __le32
*dump_data
= mfu_dump_notif
->data
;
281 int n_words
= le32_to_cpu(mfu_dump_notif
->data_size
) / sizeof(__le32
);
284 if (mfu_dump_notif
->index_num
== 0)
285 IWL_INFO(mvm
, "MFUART assert id 0x%x occurred\n",
286 le32_to_cpu(mfu_dump_notif
->assert_id
));
288 for (i
= 0; i
< n_words
; i
++)
290 "MFUART assert dump, dword %u: 0x%08x\n",
291 le16_to_cpu(mfu_dump_notif
->index_num
) *
293 le32_to_cpu(dump_data
[i
]));
296 static int iwl_alloc_fw_paging_mem(struct iwl_mvm
*mvm
,
297 const struct fw_img
*image
)
301 int blk_idx
, order
, num_of_pages
, size
, dma_enabled
;
303 if (mvm
->fw_paging_db
[0].fw_paging_block
)
306 dma_enabled
= is_device_dma_capable(mvm
->trans
->dev
);
308 /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
309 BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE
) != PAGING_BLOCK_SIZE
);
311 num_of_pages
= image
->paging_mem_size
/ FW_PAGING_SIZE
;
312 mvm
->num_of_paging_blk
=
313 DIV_ROUND_UP(num_of_pages
, NUM_OF_PAGE_PER_GROUP
);
314 mvm
->num_of_pages_in_last_blk
=
316 NUM_OF_PAGE_PER_GROUP
* (mvm
->num_of_paging_blk
- 1);
319 "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
320 mvm
->num_of_paging_blk
,
321 mvm
->num_of_pages_in_last_blk
);
324 * Allocate CSS and paging blocks in dram.
326 for (blk_idx
= 0; blk_idx
< mvm
->num_of_paging_blk
+ 1; blk_idx
++) {
327 /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
328 size
= blk_idx
? PAGING_BLOCK_SIZE
: FW_PAGING_SIZE
;
329 order
= get_order(size
);
330 block
= alloc_pages(GFP_KERNEL
, order
);
332 /* free all the previous pages since we failed */
333 iwl_free_fw_paging(mvm
);
337 mvm
->fw_paging_db
[blk_idx
].fw_paging_block
= block
;
338 mvm
->fw_paging_db
[blk_idx
].fw_paging_size
= size
;
341 phys
= dma_map_page(mvm
->trans
->dev
, block
, 0,
344 if (dma_mapping_error(mvm
->trans
->dev
, phys
)) {
346 * free the previous pages and the current one
347 * since we failed to map_page.
349 iwl_free_fw_paging(mvm
);
352 mvm
->fw_paging_db
[blk_idx
].fw_paging_phys
= phys
;
354 mvm
->fw_paging_db
[blk_idx
].fw_paging_phys
=
356 blk_idx
<< BLOCK_2_EXP_SIZE
;
361 "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
365 "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
/*
 * Allocate the paging DRAM blocks and fill them with the CPU2 paging
 * image.  Returns 0 on success, a negative errno otherwise.
 */
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}
384 /* send paging cmd to FW in case CPU2 has paging image */
385 static int iwl_send_paging_cmd(struct iwl_mvm
*mvm
, const struct fw_img
*fw
)
387 struct iwl_fw_paging_cmd paging_cmd
= {
388 .flags
= cpu_to_le32(PAGING_CMD_IS_SECURED
|
389 PAGING_CMD_IS_ENABLED
|
390 (mvm
->num_of_pages_in_last_blk
<<
391 PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS
)),
392 .block_size
= cpu_to_le32(BLOCK_2_EXP_SIZE
),
393 .block_num
= cpu_to_le32(mvm
->num_of_paging_blk
),
397 /* loop for for all paging blocks + CSS block */
398 for (blk_idx
= 0; blk_idx
< mvm
->num_of_paging_blk
+ 1; blk_idx
++) {
399 dma_addr_t addr
= mvm
->fw_paging_db
[blk_idx
].fw_paging_phys
;
402 addr
= addr
>> PAGE_2_EXP_SIZE
;
403 phy_addr
= cpu_to_le32(addr
);
404 paging_cmd
.device_phy_addr
[blk_idx
] = phy_addr
;
407 return iwl_mvm_send_cmd_pdu(mvm
, iwl_cmd_id(FW_PAGING_BLOCK_CMD
,
408 IWL_ALWAYS_LONG_GROUP
, 0),
409 0, sizeof(paging_cmd
), &paging_cmd
);
413 * Send paging item cmd to FW in case CPU2 has paging image
415 static int iwl_trans_get_paging_item(struct iwl_mvm
*mvm
)
418 struct iwl_fw_get_item_cmd fw_get_item_cmd
= {
419 .item_id
= cpu_to_le32(IWL_FW_ITEM_ID_PAGING
),
422 struct iwl_fw_get_item_resp
*item_resp
;
423 struct iwl_host_cmd cmd
= {
424 .id
= iwl_cmd_id(FW_GET_ITEM_CMD
, IWL_ALWAYS_LONG_GROUP
, 0),
425 .flags
= CMD_WANT_SKB
| CMD_SEND_IN_RFKILL
,
426 .data
= { &fw_get_item_cmd
, },
429 cmd
.len
[0] = sizeof(struct iwl_fw_get_item_cmd
);
431 ret
= iwl_mvm_send_cmd(mvm
, &cmd
);
434 "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
439 item_resp
= (void *)((struct iwl_rx_packet
*)cmd
.resp_pkt
)->data
;
440 if (item_resp
->item_id
!= cpu_to_le32(IWL_FW_ITEM_ID_PAGING
)) {
442 "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
443 le32_to_cpu(item_resp
->item_id
));
448 /* Add an extra page for headers */
449 mvm
->trans
->paging_download_buf
= kzalloc(PAGING_BLOCK_SIZE
+
452 if (!mvm
->trans
->paging_download_buf
) {
456 mvm
->trans
->paging_req_addr
= le32_to_cpu(item_resp
->item_val
);
457 mvm
->trans
->paging_db
= mvm
->fw_paging_db
;
459 "Paging: got paging request address (paging_req_addr 0x%08x)\n",
460 mvm
->trans
->paging_req_addr
);
468 static bool iwl_alive_fn(struct iwl_notif_wait_data
*notif_wait
,
469 struct iwl_rx_packet
*pkt
, void *data
)
471 struct iwl_mvm
*mvm
=
472 container_of(notif_wait
, struct iwl_mvm
, notif_wait
);
473 struct iwl_mvm_alive_data
*alive_data
= data
;
474 struct mvm_alive_resp_v3
*palive3
;
475 struct mvm_alive_resp
*palive
;
476 struct iwl_umac_alive
*umac
;
477 struct iwl_lmac_alive
*lmac1
;
478 struct iwl_lmac_alive
*lmac2
= NULL
;
481 if (iwl_rx_packet_payload_len(pkt
) == sizeof(*palive
)) {
482 palive
= (void *)pkt
->data
;
483 umac
= &palive
->umac_data
;
484 lmac1
= &palive
->lmac_data
[0];
485 lmac2
= &palive
->lmac_data
[1];
486 status
= le16_to_cpu(palive
->status
);
488 palive3
= (void *)pkt
->data
;
489 umac
= &palive3
->umac_data
;
490 lmac1
= &palive3
->lmac_data
;
491 status
= le16_to_cpu(palive3
->status
);
494 mvm
->error_event_table
[0] = le32_to_cpu(lmac1
->error_event_table_ptr
);
496 mvm
->error_event_table
[1] =
497 le32_to_cpu(lmac2
->error_event_table_ptr
);
498 mvm
->log_event_table
= le32_to_cpu(lmac1
->log_event_table_ptr
);
499 mvm
->sf_space
.addr
= le32_to_cpu(lmac1
->st_fwrd_addr
);
500 mvm
->sf_space
.size
= le32_to_cpu(lmac1
->st_fwrd_size
);
502 mvm
->umac_error_event_table
= le32_to_cpu(umac
->error_info_addr
);
504 alive_data
->scd_base_addr
= le32_to_cpu(lmac1
->scd_base_ptr
);
505 alive_data
->valid
= status
== IWL_ALIVE_STATUS_OK
;
506 if (mvm
->umac_error_event_table
)
507 mvm
->support_umac_log
= true;
510 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
511 status
, lmac1
->ver_type
, lmac1
->ver_subtype
);
514 IWL_DEBUG_FW(mvm
, "Alive ucode CDB\n");
517 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
518 le32_to_cpu(umac
->umac_major
),
519 le32_to_cpu(umac
->umac_minor
));
524 static bool iwl_wait_init_complete(struct iwl_notif_wait_data
*notif_wait
,
525 struct iwl_rx_packet
*pkt
, void *data
)
527 WARN_ON(pkt
->hdr
.cmd
!= INIT_COMPLETE_NOTIF
);
532 static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data
*notif_wait
,
533 struct iwl_rx_packet
*pkt
, void *data
)
535 struct iwl_phy_db
*phy_db
= data
;
537 if (pkt
->hdr
.cmd
!= CALIB_RES_NOTIF_PHY_DB
) {
538 WARN_ON(pkt
->hdr
.cmd
!= INIT_COMPLETE_NOTIF
);
542 WARN_ON(iwl_phy_db_set_section(phy_db
, pkt
));
547 static int iwl_mvm_init_paging(struct iwl_mvm
*mvm
)
549 const struct fw_img
*fw
= &mvm
->fw
->img
[mvm
->cur_ucode
];
553 * Configure and operate fw paging mechanism.
554 * The driver configures the paging flow only once.
555 * The CPU2 paging image is included in the IWL_UCODE_INIT image.
557 if (!fw
->paging_mem_size
)
561 * When dma is not enabled, the driver needs to copy / write
562 * the downloaded / uploaded page to / from the smem.
563 * This gets the location of the place were the pages are
566 if (!is_device_dma_capable(mvm
->trans
->dev
)) {
567 ret
= iwl_trans_get_paging_item(mvm
);
569 IWL_ERR(mvm
, "failed to get FW paging item\n");
574 ret
= iwl_save_fw_paging(mvm
, fw
);
576 IWL_ERR(mvm
, "failed to save the FW paging image\n");
580 ret
= iwl_send_paging_cmd(mvm
, fw
);
582 IWL_ERR(mvm
, "failed to send the paging cmd\n");
583 iwl_free_fw_paging(mvm
);
589 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm
*mvm
,
590 enum iwl_ucode_type ucode_type
)
592 struct iwl_notification_wait alive_wait
;
593 struct iwl_mvm_alive_data alive_data
;
594 const struct fw_img
*fw
;
596 enum iwl_ucode_type old_type
= mvm
->cur_ucode
;
597 static const u16 alive_cmd
[] = { MVM_ALIVE
};
598 struct iwl_sf_region st_fwrd_space
;
600 if (ucode_type
== IWL_UCODE_REGULAR
&&
601 iwl_fw_dbg_conf_usniffer(mvm
->fw
, FW_DBG_START_FROM_ALIVE
) &&
602 !(fw_has_capa(&mvm
->fw
->ucode_capa
,
603 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED
)))
604 fw
= iwl_get_ucode_image(mvm
->fw
, IWL_UCODE_REGULAR_USNIFFER
);
606 fw
= iwl_get_ucode_image(mvm
->fw
, ucode_type
);
609 mvm
->cur_ucode
= ucode_type
;
610 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING
, &mvm
->status
);
612 iwl_init_notification_wait(&mvm
->notif_wait
, &alive_wait
,
613 alive_cmd
, ARRAY_SIZE(alive_cmd
),
614 iwl_alive_fn
, &alive_data
);
616 ret
= iwl_trans_start_fw(mvm
->trans
, fw
, ucode_type
== IWL_UCODE_INIT
);
618 mvm
->cur_ucode
= old_type
;
619 iwl_remove_notification(&mvm
->notif_wait
, &alive_wait
);
624 * Some things may run in the background now, but we
625 * just wait for the ALIVE notification here.
627 ret
= iwl_wait_notification(&mvm
->notif_wait
, &alive_wait
,
628 MVM_UCODE_ALIVE_TIMEOUT
);
630 struct iwl_trans
*trans
= mvm
->trans
;
632 if (trans
->cfg
->device_family
== IWL_DEVICE_FAMILY_A000
)
634 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
635 iwl_read_prph(trans
, UMAG_SB_CPU_1_STATUS
),
636 iwl_read_prph(trans
, UMAG_SB_CPU_2_STATUS
));
637 else if (trans
->cfg
->device_family
>= IWL_DEVICE_FAMILY_8000
)
639 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
640 iwl_read_prph(trans
, SB_CPU_1_STATUS
),
641 iwl_read_prph(trans
, SB_CPU_2_STATUS
));
642 mvm
->cur_ucode
= old_type
;
646 if (!alive_data
.valid
) {
647 IWL_ERR(mvm
, "Loaded ucode is not valid!\n");
648 mvm
->cur_ucode
= old_type
;
653 * update the sdio allocation according to the pointer we get in the
654 * alive notification.
656 st_fwrd_space
.addr
= mvm
->sf_space
.addr
;
657 st_fwrd_space
.size
= mvm
->sf_space
.size
;
658 ret
= iwl_trans_update_sf(mvm
->trans
, &st_fwrd_space
);
660 IWL_ERR(mvm
, "Failed to update SF size. ret %d\n", ret
);
664 iwl_trans_fw_alive(mvm
->trans
, alive_data
.scd_base_addr
);
667 * Note: all the queues are enabled as part of the interface
668 * initialization, but in firmware restart scenarios they
669 * could be stopped, so wake them up. In firmware restart,
670 * mac80211 will have the queues stopped as well until the
671 * reconfiguration completes. During normal startup, they
675 memset(&mvm
->queue_info
, 0, sizeof(mvm
->queue_info
));
676 if (iwl_mvm_is_dqa_supported(mvm
))
677 mvm
->queue_info
[IWL_MVM_DQA_CMD_QUEUE
].hw_queue_refcount
= 1;
679 mvm
->queue_info
[IWL_MVM_CMD_QUEUE
].hw_queue_refcount
= 1;
681 for (i
= 0; i
< IEEE80211_MAX_QUEUES
; i
++)
682 atomic_set(&mvm
->mac80211_queue_stop_count
[i
], 0);
684 set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING
, &mvm
->status
);
689 static int iwl_run_unified_mvm_ucode(struct iwl_mvm
*mvm
, bool read_nvm
)
691 struct iwl_notification_wait init_wait
;
692 struct iwl_nvm_access_complete_cmd nvm_complete
= {};
693 struct iwl_init_extended_cfg_cmd init_cfg
= {
694 .init_flags
= cpu_to_le32(BIT(IWL_INIT_NVM
)),
696 static const u16 init_complete
[] = {
701 lockdep_assert_held(&mvm
->mutex
);
703 iwl_init_notification_wait(&mvm
->notif_wait
,
706 ARRAY_SIZE(init_complete
),
707 iwl_wait_init_complete
,
710 /* Will also start the device */
711 ret
= iwl_mvm_load_ucode_wait_alive(mvm
, IWL_UCODE_REGULAR
);
713 IWL_ERR(mvm
, "Failed to start RT ucode: %d\n", ret
);
717 /* Send init config command to mark that we are sending NVM access
720 ret
= iwl_mvm_send_cmd_pdu(mvm
, WIDE_ID(SYSTEM_GROUP
,
721 INIT_EXTENDED_CFG_CMD
), 0,
722 sizeof(init_cfg
), &init_cfg
);
724 IWL_ERR(mvm
, "Failed to run init config command: %d\n",
729 /* Load NVM to NIC if needed */
730 if (mvm
->nvm_file_name
) {
731 iwl_mvm_read_external_nvm(mvm
);
732 iwl_mvm_load_nvm_to_nic(mvm
);
735 if (IWL_MVM_PARSE_NVM
&& read_nvm
) {
736 ret
= iwl_nvm_init(mvm
, true);
738 IWL_ERR(mvm
, "Failed to read NVM: %d\n", ret
);
743 ret
= iwl_mvm_send_cmd_pdu(mvm
, WIDE_ID(REGULATORY_AND_NVM_GROUP
,
744 NVM_ACCESS_COMPLETE
), 0,
745 sizeof(nvm_complete
), &nvm_complete
);
747 IWL_ERR(mvm
, "Failed to run complete NVM access: %d\n",
752 /* We wait for the INIT complete notification */
753 ret
= iwl_wait_notification(&mvm
->notif_wait
, &init_wait
,
754 MVM_UCODE_ALIVE_TIMEOUT
);
758 /* Read the NVM only at driver load time, no need to do this twice */
759 if (!IWL_MVM_PARSE_NVM
&& read_nvm
) {
760 ret
= iwl_mvm_nvm_get_from_fw(mvm
);
762 IWL_ERR(mvm
, "Failed to read NVM: %d\n", ret
);
770 iwl_remove_notification(&mvm
->notif_wait
, &init_wait
);
774 static int iwl_send_phy_cfg_cmd(struct iwl_mvm
*mvm
)
776 struct iwl_phy_cfg_cmd phy_cfg_cmd
;
777 enum iwl_ucode_type ucode_type
= mvm
->cur_ucode
;
780 phy_cfg_cmd
.phy_cfg
= cpu_to_le32(iwl_mvm_get_phy_config(mvm
));
781 phy_cfg_cmd
.calib_control
.event_trigger
=
782 mvm
->fw
->default_calib
[ucode_type
].event_trigger
;
783 phy_cfg_cmd
.calib_control
.flow_trigger
=
784 mvm
->fw
->default_calib
[ucode_type
].flow_trigger
;
786 IWL_DEBUG_INFO(mvm
, "Sending Phy CFG command: 0x%x\n",
787 phy_cfg_cmd
.phy_cfg
);
789 return iwl_mvm_send_cmd_pdu(mvm
, PHY_CONFIGURATION_CMD
, 0,
790 sizeof(phy_cfg_cmd
), &phy_cfg_cmd
);
793 int iwl_run_init_mvm_ucode(struct iwl_mvm
*mvm
, bool read_nvm
)
795 struct iwl_notification_wait calib_wait
;
796 static const u16 init_complete
[] = {
798 CALIB_RES_NOTIF_PHY_DB
802 if (iwl_mvm_has_new_tx_api(mvm
))
803 return iwl_run_unified_mvm_ucode(mvm
, true);
805 lockdep_assert_held(&mvm
->mutex
);
807 if (WARN_ON_ONCE(mvm
->calibrating
))
810 iwl_init_notification_wait(&mvm
->notif_wait
,
813 ARRAY_SIZE(init_complete
),
814 iwl_wait_phy_db_entry
,
817 /* Will also start the device */
818 ret
= iwl_mvm_load_ucode_wait_alive(mvm
, IWL_UCODE_INIT
);
820 IWL_ERR(mvm
, "Failed to start INIT ucode: %d\n", ret
);
824 if (mvm
->cfg
->device_family
< IWL_DEVICE_FAMILY_8000
) {
825 ret
= iwl_mvm_send_bt_init_conf(mvm
);
830 /* Read the NVM only at driver load time, no need to do this twice */
833 ret
= iwl_nvm_init(mvm
, true);
835 IWL_ERR(mvm
, "Failed to read NVM: %d\n", ret
);
840 /* In case we read the NVM from external file, load it to the NIC */
841 if (mvm
->nvm_file_name
)
842 iwl_mvm_load_nvm_to_nic(mvm
);
844 ret
= iwl_nvm_check_version(mvm
->nvm_data
, mvm
->trans
);
848 * abort after reading the nvm in case RF Kill is on, we will complete
849 * the init seq later when RF kill will switch to off
851 if (iwl_mvm_is_radio_hw_killed(mvm
)) {
852 IWL_DEBUG_RF_KILL(mvm
,
853 "jump over all phy activities due to RF kill\n");
854 iwl_remove_notification(&mvm
->notif_wait
, &calib_wait
);
859 mvm
->calibrating
= true;
861 /* Send TX valid antennas before triggering calibrations */
862 ret
= iwl_send_tx_ant_cfg(mvm
, iwl_mvm_get_valid_tx_ant(mvm
));
867 * Send phy configurations command to init uCode
868 * to start the 16.0 uCode init image internal calibrations.
870 ret
= iwl_send_phy_cfg_cmd(mvm
);
872 IWL_ERR(mvm
, "Failed to run INIT calibrations: %d\n",
878 * Some things may run in the background now, but we
879 * just wait for the calibration complete notification.
881 ret
= iwl_wait_notification(&mvm
->notif_wait
, &calib_wait
,
882 MVM_UCODE_CALIB_TIMEOUT
);
884 if (ret
&& iwl_mvm_is_radio_hw_killed(mvm
)) {
885 IWL_DEBUG_RF_KILL(mvm
, "RFKILL while calibrating.\n");
891 iwl_remove_notification(&mvm
->notif_wait
, &calib_wait
);
893 mvm
->calibrating
= false;
894 if (iwlmvm_mod_params
.init_dbg
&& !mvm
->nvm_data
) {
895 /* we want to debug INIT and we have no NVM - fake */
896 mvm
->nvm_data
= kzalloc(sizeof(struct iwl_nvm_data
) +
897 sizeof(struct ieee80211_channel
) +
898 sizeof(struct ieee80211_rate
),
902 mvm
->nvm_data
->bands
[0].channels
= mvm
->nvm_data
->channels
;
903 mvm
->nvm_data
->bands
[0].n_channels
= 1;
904 mvm
->nvm_data
->bands
[0].n_bitrates
= 1;
905 mvm
->nvm_data
->bands
[0].bitrates
=
906 (void *)mvm
->nvm_data
->channels
+ 1;
907 mvm
->nvm_data
->bands
[0].bitrates
->hw_value
= 10;
913 static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm
*mvm
,
914 struct iwl_rx_packet
*pkt
)
916 struct iwl_shared_mem_cfg
*mem_cfg
= (void *)pkt
->data
;
918 int lmac_num
= le32_to_cpu(mem_cfg
->lmac_num
);
920 if (WARN_ON(lmac_num
> ARRAY_SIZE(mem_cfg
->lmac_smem
)))
923 mvm
->smem_cfg
.num_lmacs
= lmac_num
;
924 mvm
->smem_cfg
.num_txfifo_entries
=
925 ARRAY_SIZE(mem_cfg
->lmac_smem
[0].txfifo_size
);
926 mvm
->smem_cfg
.rxfifo2_size
= le32_to_cpu(mem_cfg
->rxfifo2_size
);
928 for (lmac
= 0; lmac
< lmac_num
; lmac
++) {
929 struct iwl_shared_mem_lmac_cfg
*lmac_cfg
=
930 &mem_cfg
->lmac_smem
[lmac
];
932 for (i
= 0; i
< ARRAY_SIZE(lmac_cfg
->txfifo_size
); i
++)
933 mvm
->smem_cfg
.lmac
[lmac
].txfifo_size
[i
] =
934 le32_to_cpu(lmac_cfg
->txfifo_size
[i
]);
935 mvm
->smem_cfg
.lmac
[lmac
].rxfifo1_size
=
936 le32_to_cpu(lmac_cfg
->rxfifo1_size
);
940 static void iwl_mvm_parse_shared_mem(struct iwl_mvm
*mvm
,
941 struct iwl_rx_packet
*pkt
)
943 struct iwl_shared_mem_cfg_v2
*mem_cfg
= (void *)pkt
->data
;
946 mvm
->smem_cfg
.num_lmacs
= 1;
948 mvm
->smem_cfg
.num_txfifo_entries
= ARRAY_SIZE(mem_cfg
->txfifo_size
);
949 for (i
= 0; i
< ARRAY_SIZE(mem_cfg
->txfifo_size
); i
++)
950 mvm
->smem_cfg
.lmac
[0].txfifo_size
[i
] =
951 le32_to_cpu(mem_cfg
->txfifo_size
[i
]);
953 mvm
->smem_cfg
.lmac
[0].rxfifo1_size
=
954 le32_to_cpu(mem_cfg
->rxfifo_size
[0]);
955 mvm
->smem_cfg
.rxfifo2_size
= le32_to_cpu(mem_cfg
->rxfifo_size
[1]);
957 /* new API has more data, from rxfifo_addr field and on */
958 if (fw_has_capa(&mvm
->fw
->ucode_capa
,
959 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
)) {
960 BUILD_BUG_ON(sizeof(mvm
->smem_cfg
.internal_txfifo_size
) !=
961 sizeof(mem_cfg
->internal_txfifo_size
));
964 i
< ARRAY_SIZE(mvm
->smem_cfg
.internal_txfifo_size
);
966 mvm
->smem_cfg
.internal_txfifo_size
[i
] =
967 le32_to_cpu(mem_cfg
->internal_txfifo_size
[i
]);
971 static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm
*mvm
)
973 struct iwl_host_cmd cmd
= {
974 .flags
= CMD_WANT_SKB
,
978 struct iwl_rx_packet
*pkt
;
980 lockdep_assert_held(&mvm
->mutex
);
982 if (fw_has_capa(&mvm
->fw
->ucode_capa
,
983 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
))
984 cmd
.id
= iwl_cmd_id(SHARED_MEM_CFG_CMD
, SYSTEM_GROUP
, 0);
986 cmd
.id
= SHARED_MEM_CFG
;
988 if (WARN_ON(iwl_mvm_send_cmd(mvm
, &cmd
)))
992 if (iwl_mvm_has_new_tx_api(mvm
))
993 iwl_mvm_parse_shared_mem_a000(mvm
, pkt
);
995 iwl_mvm_parse_shared_mem(mvm
, pkt
);
997 IWL_DEBUG_INFO(mvm
, "SHARED MEM CFG: got memory offsets/sizes\n");
1002 static int iwl_mvm_config_ltr(struct iwl_mvm
*mvm
)
1004 struct iwl_ltr_config_cmd cmd
= {
1005 .flags
= cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE
),
1008 if (!mvm
->trans
->ltr_enabled
)
1011 return iwl_mvm_send_cmd_pdu(mvm
, LTR_CONFIG
, 0,
1016 #define ACPI_WRDS_METHOD "WRDS"
1017 #define ACPI_EWRD_METHOD "EWRD"
1018 #define ACPI_WGDS_METHOD "WGDS"
1019 #define ACPI_WIFI_DOMAIN (0x07)
1020 #define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2)
1021 #define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
1022 IWL_MVM_SAR_TABLE_SIZE + 3)
1023 #define ACPI_WGDS_WIFI_DATA_SIZE 18
1024 #define ACPI_WGDS_NUM_BANDS 2
1025 #define ACPI_WGDS_TABLE_SIZE 3
1027 static int iwl_mvm_sar_set_profile(struct iwl_mvm
*mvm
,
1028 union acpi_object
*table
,
1029 struct iwl_mvm_sar_profile
*profile
,
1034 profile
->enabled
= enabled
;
1036 for (i
= 0; i
< IWL_MVM_SAR_TABLE_SIZE
; i
++) {
1037 if ((table
[i
].type
!= ACPI_TYPE_INTEGER
) ||
1038 (table
[i
].integer
.value
> U8_MAX
))
1041 profile
->table
[i
] = table
[i
].integer
.value
;
1047 static union acpi_object
*iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm
*mvm
,
1048 union acpi_object
*data
,
1052 union acpi_object
*wifi_pkg
;
1055 * We need at least two packages, one for the revision and one
1056 * for the data itself. Also check that the revision is valid
1057 * (i.e. it is an integer set to 0).
1059 if (data
->type
!= ACPI_TYPE_PACKAGE
||
1060 data
->package
.count
< 2 ||
1061 data
->package
.elements
[0].type
!= ACPI_TYPE_INTEGER
||
1062 data
->package
.elements
[0].integer
.value
!= 0) {
1063 IWL_DEBUG_RADIO(mvm
, "Unsupported packages structure\n");
1064 return ERR_PTR(-EINVAL
);
1067 /* loop through all the packages to find the one for WiFi */
1068 for (i
= 1; i
< data
->package
.count
; i
++) {
1069 union acpi_object
*domain
;
1071 wifi_pkg
= &data
->package
.elements
[i
];
1073 /* Skip anything that is not a package with the right
1074 * amount of elements (i.e. domain_type,
1075 * enabled/disabled plus the actual data size.
1077 if (wifi_pkg
->type
!= ACPI_TYPE_PACKAGE
||
1078 wifi_pkg
->package
.count
!= data_size
)
1081 domain
= &wifi_pkg
->package
.elements
[0];
1082 if (domain
->type
== ACPI_TYPE_INTEGER
&&
1083 domain
->integer
.value
== ACPI_WIFI_DOMAIN
)
1090 return ERR_PTR(-ENOENT
);
1095 static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm
*mvm
)
1097 union acpi_object
*wifi_pkg
, *table
;
1098 acpi_handle root_handle
;
1100 struct acpi_buffer wrds
= {ACPI_ALLOCATE_BUFFER
, NULL
};
1105 root_handle
= ACPI_HANDLE(mvm
->dev
);
1107 IWL_DEBUG_RADIO(mvm
,
1108 "Could not retrieve root port ACPI handle\n");
1112 /* Get the method's handle */
1113 status
= acpi_get_handle(root_handle
, (acpi_string
)ACPI_WRDS_METHOD
,
1115 if (ACPI_FAILURE(status
)) {
1116 IWL_DEBUG_RADIO(mvm
, "WRDS method not found\n");
1120 /* Call WRDS with no arguments */
1121 status
= acpi_evaluate_object(handle
, NULL
, NULL
, &wrds
);
1122 if (ACPI_FAILURE(status
)) {
1123 IWL_DEBUG_RADIO(mvm
, "WRDS invocation failed (0x%x)\n", status
);
1127 wifi_pkg
= iwl_mvm_sar_find_wifi_pkg(mvm
, wrds
.pointer
,
1128 ACPI_WRDS_WIFI_DATA_SIZE
);
1129 if (IS_ERR(wifi_pkg
)) {
1130 ret
= PTR_ERR(wifi_pkg
);
1134 if (wifi_pkg
->package
.elements
[1].type
!= ACPI_TYPE_INTEGER
) {
1139 enabled
= !!(wifi_pkg
->package
.elements
[1].integer
.value
);
1141 /* position of the actual table */
1142 table
= &wifi_pkg
->package
.elements
[2];
1144 /* The profile from WRDS is officially profile 1, but goes
1145 * into sar_profiles[0] (because we don't have a profile 0).
1147 ret
= iwl_mvm_sar_set_profile(mvm
, table
, &mvm
->sar_profiles
[0],
1151 kfree(wrds
.pointer
);
/*
 * iwl_mvm_sar_get_ewrd_table() - read the ACPI EWRD method and load the
 * extra SAR (specific absorption rate) profiles it carries into
 * mvm->sar_profiles[1..].
 *
 * NOTE(review): this chunk is a garbled extraction — statements are split
 * across lines and several original lines (declarations of 'status'/'handle',
 * error returns, goto labels, closing braces) are missing from view.
 * Comments describe only the logic that is visible here.
 */
1155 static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm
*mvm
)
1157 union acpi_object
*wifi_pkg
;
1158 acpi_handle root_handle
;
/* output buffer: ACPICA allocates it; freed with kfree() at the end */
1160 struct acpi_buffer ewrd
= {ACPI_ALLOCATE_BUFFER
, NULL
};
1163 int i
, n_profiles
, ret
;
/* the SAR ACPI methods hang off the device's ACPI companion node */
1165 root_handle
= ACPI_HANDLE(mvm
->dev
);
1167 IWL_DEBUG_RADIO(mvm
,
1168 "Could not retrieve root port ACPI handle\n");
1172 /* Get the method's handle */
1173 status
= acpi_get_handle(root_handle
, (acpi_string
)ACPI_EWRD_METHOD
,
1175 if (ACPI_FAILURE(status
)) {
/* EWRD is optional in BIOS; absence is only a debug-level event */
1176 IWL_DEBUG_RADIO(mvm
, "EWRD method not found\n");
1180 /* Call EWRD with no arguments */
1181 status
= acpi_evaluate_object(handle
, NULL
, NULL
, &ewrd
);
1182 if (ACPI_FAILURE(status
)) {
1183 IWL_DEBUG_RADIO(mvm
, "EWRD invocation failed (0x%x)\n", status
);
/* locate the wifi-specific package inside the returned ACPI object */
1187 wifi_pkg
= iwl_mvm_sar_find_wifi_pkg(mvm
, ewrd
.pointer
,
1188 ACPI_EWRD_WIFI_DATA_SIZE
);
1189 if (IS_ERR(wifi_pkg
)) {
1190 ret
= PTR_ERR(wifi_pkg
);
/* element [1] is the "enabled" flag, element [2] the profile count;
 * both must be plain integers or the table is rejected
 */
1194 if ((wifi_pkg
->package
.elements
[1].type
!= ACPI_TYPE_INTEGER
) ||
1195 (wifi_pkg
->package
.elements
[2].type
!= ACPI_TYPE_INTEGER
)) {
1200 enabled
= !!(wifi_pkg
->package
.elements
[1].integer
.value
);
1201 n_profiles
= wifi_pkg
->package
.elements
[2].integer
.value
;
1203 /* in case of BIOS bug */
1204 if (n_profiles
<= 0) {
/* copy each table the BIOS provided into the driver's profile array */
1209 for (i
= 0; i
< n_profiles
; i
++) {
1210 /* the tables start at element 3 */
1213 /* The EWRD profiles officially go from 2 to 4, but we
1214 * save them in sar_profiles[1-3] (because we don't
1215 * have profile 0). So in the array we start from 1.
1217 ret
= iwl_mvm_sar_set_profile(mvm
,
1218 &wifi_pkg
->package
.elements
[pos
],
1219 &mvm
->sar_profiles
[i
+ 1],
1224 /* go to the next table */
1225 pos
+= IWL_MVM_SAR_TABLE_SIZE
;
/* ACPI_ALLOCATE_BUFFER output must always be freed by the caller */
1229 kfree(ewrd
.pointer
);
/*
 * iwl_mvm_sar_get_wgds_table() - read the ACPI WGDS method and fill the
 * per-geography TX power offset tables in mvm->geo_profiles[].
 *
 * NOTE(review): garbled extraction — declarations of 'status'/'handle'/
 * 'ret'/'i'/'j'/'idx', error returns and closing braces are missing from
 * view; comments cover only the visible logic.
 */
1233 static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm
*mvm
)
1235 union acpi_object
*wifi_pkg
;
1236 acpi_handle root_handle
;
/* ACPICA-allocated output buffer, released with kfree() at the end */
1238 struct acpi_buffer wgds
= {ACPI_ALLOCATE_BUFFER
, NULL
};
1243 root_handle
= ACPI_HANDLE(mvm
->dev
);
1245 IWL_DEBUG_RADIO(mvm
,
1246 "Could not retrieve root port ACPI handle\n");
1250 /* Get the method's handle */
1251 status
= acpi_get_handle(root_handle
, (acpi_string
)ACPI_WGDS_METHOD
,
1253 if (ACPI_FAILURE(status
)) {
/* WGDS is optional; its absence is not an error worth shouting about */
1254 IWL_DEBUG_RADIO(mvm
, "WGDS method not found\n");
1258 /* Call WGDS with no arguments */
1259 status
= acpi_evaluate_object(handle
, NULL
, NULL
, &wgds
);
1260 if (ACPI_FAILURE(status
)) {
1261 IWL_DEBUG_RADIO(mvm
, "WGDS invocation failed (0x%x)\n", status
);
1265 wifi_pkg
= iwl_mvm_sar_find_wifi_pkg(mvm
, wgds
.pointer
,
1266 ACPI_WGDS_WIFI_DATA_SIZE
);
1267 if (IS_ERR(wifi_pkg
)) {
1268 ret
= PTR_ERR(wifi_pkg
);
/* walk every geo profile x table entry; 'idx' advances linearly
 * through the flat ACPI package (presumably starting past the
 * header elements — the initializer is not visible here)
 */
1272 for (i
= 0; i
< IWL_NUM_GEO_PROFILES
; i
++) {
1273 for (j
= 0; j
< IWL_MVM_GEO_TABLE_SIZE
; j
++) {
1274 union acpi_object
*entry
;
1276 entry
= &wifi_pkg
->package
.elements
[idx
++];
/* each value must be an integer that fits in a u8 (the firmware
 * table stores bytes); otherwise reject the whole table
 */
1277 if ((entry
->type
!= ACPI_TYPE_INTEGER
) ||
1278 (entry
->integer
.value
> U8_MAX
)) {
1283 mvm
->geo_profiles
[i
].values
[j
] = entry
->integer
.value
;
1288 kfree(wgds
.pointer
);
/*
 * iwl_mvm_sar_select_profile() - program the firmware with the SAR power
 * limits of two previously-loaded BIOS profiles, one per TX chain.
 *
 * @prof_a/@prof_b: 1-based profile numbers (profile 0 means "disable SAR"
 * and is rejected — see the comment at original line 1311).
 *
 * NOTE(review): garbled extraction — declarations of 'i'/'j'/'idx', the
 * range-check error returns and closing braces are missing from view.
 */
1292 int iwl_mvm_sar_select_profile(struct iwl_mvm
*mvm
, int prof_a
, int prof_b
)
1294 struct iwl_dev_tx_power_cmd cmd
= {
1295 .v3
.set_mode
= cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS
),
/* map chain index -> requested profile number */
1298 int profs
[IWL_NUM_CHAIN_LIMITS
] = { prof_a
, prof_b
};
1299 int len
= sizeof(cmd
);
/* compile-time sanity: the BIOS table layout must match the cmd layout */
1301 BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS
< 2);
1302 BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS
* IWL_NUM_SUB_BANDS
!=
1303 IWL_MVM_SAR_TABLE_SIZE
);
/* older firmware only understands the v3 payload; shrink the cmd */
1305 if (!fw_has_capa(&mvm
->fw
->ucode_capa
, IWL_UCODE_TLV_CAPA_TX_POWER_ACK
))
1306 len
= sizeof(cmd
.v3
);
1308 for (i
= 0; i
< IWL_NUM_CHAIN_LIMITS
; i
++) {
1309 struct iwl_mvm_sar_profile
*prof
;
1311 /* don't allow SAR to be disabled (profile 0 means disable) */
1315 /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
1316 if (profs
[i
] > IWL_MVM_SAR_PROFILE_NUM
)
1319 /* profiles go from 1 to 4, so decrement to access the array */
1320 prof
= &mvm
->sar_profiles
[profs
[i
] - 1];
1322 /* if the profile is disabled, do nothing */
1323 if (!prof
->enabled
) {
1324 IWL_DEBUG_RADIO(mvm
, "SAR profile %d is disabled.\n",
1326 /* if one of the profiles is disabled, we fail all */
1330 IWL_DEBUG_RADIO(mvm
, " Chain[%d]:\n", i
);
/* flatten (chain, sub-band) into the linear profile table index */
1331 for (j
= 0; j
< IWL_NUM_SUB_BANDS
; j
++) {
1332 idx
= (i
* IWL_NUM_SUB_BANDS
) + j
;
1333 cmd
.v3
.per_chain_restriction
[i
][j
] =
1334 cpu_to_le16(prof
->table
[idx
]);
/* table values are in 1/8 dBm units, hence the .125dBm in the log */
1335 IWL_DEBUG_RADIO(mvm
, " Band[%d] = %d * .125dBm\n",
1336 j
, prof
->table
[idx
]);
1340 IWL_DEBUG_RADIO(mvm
, "Sending REDUCE_TX_POWER_CMD per chain\n");
/* 'len' selects full vs. v3-only payload as decided above */
1342 return iwl_mvm_send_cmd_pdu(mvm
, REDUCE_TX_POWER_CMD
, 0, len
, &cmd
);
/*
 * iwl_mvm_get_sar_geo_profile() - query the firmware for the geographic
 * SAR profile currently in use and return its (1-based) index, or a
 * negative error.
 *
 * NOTE(review): garbled extraction — the 'ret' declaration, early return
 * on send failure and the error path inside the WARN_ON branch are
 * missing from view.
 */
1345 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm
*mvm
)
1347 struct iwl_geo_tx_power_profiles_resp
*resp
;
/* GET op: ask the firmware which geo table it currently applies */
1350 struct iwl_geo_tx_power_profiles_cmd geo_cmd
= {
1351 .ops
= cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE
),
/* CMD_WANT_SKB keeps the response packet so we can parse it; it must
 * then be released with iwl_free_resp() below
 */
1353 struct iwl_host_cmd cmd
= {
1354 .id
= WIDE_ID(PHY_OPS_GROUP
, GEO_TX_POWER_LIMIT
),
1355 .len
= { sizeof(geo_cmd
), },
1356 .flags
= CMD_WANT_SKB
,
1357 .data
= { &geo_cmd
},
1360 ret
= iwl_mvm_send_cmd(mvm
, &cmd
);
1362 IWL_ERR(mvm
, "Failed to get geographic profile info %d\n", ret
);
1366 resp
= (void *)cmd
.resp_pkt
->data
;
/* on success 'ret' carries the profile index, not an error code */
1367 ret
= le32_to_cpu(resp
->profile_idx
);
/* firmware should never report an index beyond the table count */
1368 if (WARN_ON(ret
> IWL_NUM_GEO_PROFILES
)) {
1370 IWL_WARN(mvm
, "Invalid geographic profile idx (%d)\n", ret
);
1373 iwl_free_resp(&cmd
);
/*
 * iwl_mvm_sar_geo_init() - read the BIOS WGDS geo-SAR table and, if
 * present, send it to the firmware via GEO_TX_POWER_LIMIT (SET op).
 * A missing/invalid BIOS table is not fatal (see original line 1390).
 *
 * NOTE(review): garbled extraction — declarations of 'ret'/'i'/'j',
 * the early 'return 0' after the debug message and closing braces are
 * missing from view.
 */
1377 static int iwl_mvm_sar_geo_init(struct iwl_mvm
*mvm
)
1379 struct iwl_geo_tx_power_profiles_cmd cmd
= {
1380 .ops
= cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES
),
1383 u16 cmd_wide_id
= WIDE_ID(PHY_OPS_GROUP
, GEO_TX_POWER_LIMIT
);
/* pull the table from ACPI into mvm->geo_profiles first */
1385 ret
= iwl_mvm_sar_get_wgds_table(mvm
);
1387 IWL_DEBUG_RADIO(mvm
,
1388 "Geo SAR BIOS table invalid or unavailable. (%d)\n",
1390 /* we don't fail if the table is not available */
1394 IWL_DEBUG_RADIO(mvm
, "Sending GEO_TX_POWER_LIMIT\n");
/* compile-time check that the ACPI layout fills the cmd exactly */
1396 BUILD_BUG_ON(IWL_NUM_GEO_PROFILES
* ACPI_WGDS_NUM_BANDS
*
1397 ACPI_WGDS_TABLE_SIZE
!= ACPI_WGDS_WIFI_DATA_SIZE
);
1399 for (i
= 0; i
< IWL_NUM_GEO_PROFILES
; i
++) {
/* view each profile's raw table as an array of per-band offsets */
1400 struct iwl_per_chain_offset
*chain
=
1401 (struct iwl_per_chain_offset
*)&cmd
.table
[i
];
1403 for (j
= 0; j
< ACPI_WGDS_NUM_BANDS
; j
++) {
/* each band's triplet: [0]=max_tx_power, [1]=chain A, [2]=chain B */
1406 value
= &mvm
->geo_profiles
[i
].values
[j
*
1407 IWL_GEO_PER_CHAIN_SIZE
];
1408 chain
[j
].max_tx_power
= cpu_to_le16(value
[0]);
1409 chain
[j
].chain_a
= value
[1];
1410 chain
[j
].chain_b
= value
[2];
1411 IWL_DEBUG_RADIO(mvm
,
1412 "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
1413 i
, j
, value
[1], value
[2], value
[0]);
1416 return iwl_mvm_send_cmd_pdu(mvm
, cmd_wide_id
, 0, sizeof(cmd
), &cmd
);
/*
 * Non-ACPI builds: stub out the SAR/geo table readers so the callers in
 * iwl_mvm_sar_init()/iwl_mvm_up() compile unchanged.  The stub bodies
 * are not visible in this extraction (presumably trivial returns —
 * TODO confirm against the full file).
 */
1419 #else /* CONFIG_ACPI */
1420 static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm
*mvm
)
1425 static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm
*mvm
)
1430 static int iwl_mvm_sar_geo_init(struct iwl_mvm
*mvm
)
1434 #endif /* CONFIG_ACPI */
/*
 * iwl_mvm_sar_init() - top-level SAR bring-up: load the WRDS table
 * (profile 1), then the optional EWRD extras, then select profile 1 for
 * both chains.  Missing BIOS tables are tolerated, not fatal.
 *
 * NOTE(review): garbled extraction — the 'ret' declaration, the early
 * 'return 0' paths and closing braces are missing from view.
 */
1436 static int iwl_mvm_sar_init(struct iwl_mvm
*mvm
)
1440 ret
= iwl_mvm_sar_get_wrds_table(mvm
);
1442 IWL_DEBUG_RADIO(mvm
,
1443 "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
1445 /* if not available, don't fail and don't bother with EWRD */
1449 ret
= iwl_mvm_sar_get_ewrd_table(mvm
);
1450 /* if EWRD is not available, we can still use WRDS, so don't fail */
1452 IWL_DEBUG_RADIO(mvm
,
1453 "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
1456 /* choose profile 1 (WRDS) as default for both chains */
1457 ret
= iwl_mvm_sar_select_profile(mvm
, 1, 1);
1459 /* if we don't have profile 0 from BIOS, just skip it */
/*
 * iwl_mvm_load_rt_fw() - bring the runtime firmware up.  New-TX-API
 * devices use the unified ucode flow; older ones run the INIT ucode
 * first, cycle the transport, then load the regular image and set up
 * paging.
 *
 * NOTE(review): garbled extraction — the 'ret' declaration, several
 * error-return lines and closing braces are missing from view.
 */
1466 static int iwl_mvm_load_rt_fw(struct iwl_mvm
*mvm
)
/* unified image: one ucode does both init and runtime — nothing else
 * to do here
 */
1470 if (iwl_mvm_has_new_tx_api(mvm
))
1471 return iwl_run_unified_mvm_ucode(mvm
, false);
1473 ret
= iwl_run_init_mvm_ucode(mvm
, false);
/* in init-debug mode we deliberately stop after the INIT image */
1475 if (iwlmvm_mod_params
.init_dbg
)
1479 IWL_ERR(mvm
, "Failed to run INIT ucode: %d\n", ret
);
1480 /* this can't happen */
1481 if (WARN_ON(ret
> 0))
1487 * Stop and start the transport without entering low power
1488 * mode. This will save the state of other components on the
1489 * device that are triggered by the INIT firwmare (MFUART).
1491 _iwl_trans_stop_device(mvm
->trans
, false);
1492 ret
= _iwl_trans_start_hw(mvm
->trans
, false);
1496 ret
= iwl_mvm_load_ucode_wait_alive(mvm
, IWL_UCODE_REGULAR
);
/* firmware paging must be configured after the regular image is alive */
1500 return iwl_mvm_init_paging(mvm
);
/*
 * iwl_mvm_up() - full device bring-up sequence: start the transport,
 * load the runtime firmware, then configure every firmware subsystem
 * (Smart Fifo, debug, antennas, PHY db, BT coex, RSS, stations, PHY
 * contexts, thermal, LTR, power, MCC, scan, SAR) in order.
 * Caller must hold mvm->mutex (asserted below).
 *
 * NOTE(review): garbled extraction — 'ret'/'i' declarations, most
 * 'goto error' error paths, the error label and closing braces are
 * missing from view; comments cover only the visible steps.
 */
1503 int iwl_mvm_up(struct iwl_mvm
*mvm
)
1506 struct ieee80211_channel
*chan
;
1507 struct cfg80211_chan_def chandef
;
1509 lockdep_assert_held(&mvm
->mutex
);
1511 ret
= iwl_trans_start_hw(mvm
->trans
);
1515 ret
= iwl_mvm_load_rt_fw(mvm
);
1517 IWL_ERR(mvm
, "Failed to start RT ucode: %d\n", ret
);
/* query shared-memory layout now that the runtime image is alive */
1521 iwl_mvm_get_shared_mem_conf(mvm
);
1523 ret
= iwl_mvm_sf_update(mvm
, NULL
, false);
1525 IWL_ERR(mvm
, "Failed to initialize Smart Fifo\n");
/* firmware debug collection setup */
1527 mvm
->fw_dbg_conf
= FW_DBG_INVALID
;
1528 /* if we have a destination, assume EARLY START */
1529 if (mvm
->fw
->dbg_dest_tlv
)
1530 mvm
->fw_dbg_conf
= FW_DBG_START_FROM_ALIVE
;
1531 iwl_mvm_start_fw_dbg_conf(mvm
, FW_DBG_START_FROM_ALIVE
);
1533 ret
= iwl_send_tx_ant_cfg(mvm
, iwl_mvm_get_valid_tx_ant(mvm
));
1537 /* Send phy db control command and then phy db calibration*/
/* only pre-new-TX-API devices need the PHY db download */
1538 if (!iwl_mvm_has_new_tx_api(mvm
)) {
1539 ret
= iwl_send_phy_db_data(mvm
->phy_db
);
1543 ret
= iwl_send_phy_cfg_cmd(mvm
);
1548 ret
= iwl_mvm_send_bt_init_conf(mvm
);
1552 /* Init RSS configuration */
1553 /* TODO - remove a000 disablement when we have RXQ config API */
1554 if (iwl_mvm_has_new_rx_api(mvm
) && !iwl_mvm_has_new_tx_api(mvm
)) {
1555 ret
= iwl_send_rss_cfg_cmd(mvm
);
1557 IWL_ERR(mvm
, "Failed to configure RSS queues: %d\n",
1563 /* init the fw <-> mac80211 STA mapping */
1564 for (i
= 0; i
< ARRAY_SIZE(mvm
->fw_id_to_mac_id
); i
++)
1565 RCU_INIT_POINTER(mvm
->fw_id_to_mac_id
[i
], NULL
);
/* no TDLS channel-switch peer yet */
1567 mvm
->tdls_cs
.peer
.sta_id
= IWL_MVM_INVALID_STA
;
1569 /* reset quota debouncing buffer - 0xff will yield invalid data */
1570 memset(&mvm
->last_quota_cmd
, 0xff, sizeof(mvm
->last_quota_cmd
));
1572 /* Enable DQA-mode if required */
1573 if (iwl_mvm_is_dqa_supported(mvm
)) {
1574 ret
= iwl_mvm_send_dqa_cmd(mvm
);
1578 IWL_DEBUG_FW(mvm
, "Working in non-DQA mode\n");
1581 /* Add auxiliary station for scanning */
1582 ret
= iwl_mvm_add_aux_sta(mvm
);
1586 /* Add all the PHY contexts */
1587 chan
= &mvm
->hw
->wiphy
->bands
[NL80211_BAND_2GHZ
]->channels
[0];
1588 cfg80211_chandef_create(&chandef
, chan
, NL80211_CHAN_NO_HT
);
1589 for (i
= 0; i
< NUM_PHY_CTX
; i
++) {
1591 * The channel used here isn't relevant as it's
1592 * going to be overwritten in the other flows.
1593 * For now use the first channel we have.
1595 ret
= iwl_mvm_phy_ctxt_add(mvm
, &mvm
->phy_ctxts
[i
],
1601 #ifdef CONFIG_THERMAL
/* firmware-managed thermal: hand over ct-kill/backoff responsibility */
1602 if (iwl_mvm_is_tt_in_fw(mvm
)) {
1603 /* in order to give the responsibility of ct-kill and
1604 * TX backoff to FW we need to send empty temperature reporting
1605 * cmd during init time
1607 iwl_mvm_send_temp_report_ths_cmd(mvm
);
1609 /* Initialize tx backoffs to the minimal possible */
1610 iwl_mvm_tt_tx_backoff(mvm
, 0);
1613 /* TODO: read the budget from BIOS / Platform NVM */
/* restore cooling state if the cTDP device was already throttled */
1614 if (iwl_mvm_is_ctdp_supported(mvm
) && mvm
->cooling_dev
.cur_state
> 0) {
1615 ret
= iwl_mvm_ctdp_command(mvm
, CTDP_CMD_OPERATION_START
,
1616 mvm
->cooling_dev
.cur_state
);
/* non-CONFIG_THERMAL path (presumably — the #else is not visible) */
1621 /* Initialize tx backoffs to the minimal possible */
1622 iwl_mvm_tt_tx_backoff(mvm
, 0);
1625 WARN_ON(iwl_mvm_config_ltr(mvm
));
1627 ret
= iwl_mvm_power_update_device(mvm
);
1632 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
1633 * anyway, so don't init MCC.
1635 if (!test_bit(IWL_MVM_STATUS_HW_CTKILL
, &mvm
->status
)) {
1636 ret
= iwl_mvm_init_mcc(mvm
);
1641 if (fw_has_capa(&mvm
->fw
->ucode_capa
, IWL_UCODE_TLV_CAPA_UMAC_SCAN
)) {
1642 mvm
->scan_type
= IWL_SCAN_TYPE_NOT_SET
;
1643 ret
= iwl_mvm_config_scan(mvm
);
1648 /* allow FW/transport low power modes if not during restart */
1649 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART
, &mvm
->status
))
1650 iwl_mvm_unref(mvm
, IWL_MVM_REF_UCODE_DOWN
);
1652 ret
= iwl_mvm_sar_init(mvm
);
1656 ret
= iwl_mvm_sar_geo_init(mvm
);
1660 IWL_DEBUG_INFO(mvm
, "RT uCode started.\n");
/* error path: keep the device up in init-debug mode for inspection */
1663 if (!iwlmvm_mod_params
.init_dbg
)
1664 iwl_mvm_stop_device(mvm
);
/*
 * iwl_mvm_load_d3_fw() - load and configure the WoWLAN (D3/suspend)
 * firmware image: start the transport, wait for the WoWLAN ucode to
 * come alive, then redo the minimal config (antennas, PHY db, PHY cfg,
 * STA mapping, aux station).  Caller must hold mvm->mutex.
 *
 * NOTE(review): garbled extraction — 'ret'/'i' declarations, error
 * returns, the error label and closing braces are missing from view.
 */
1668 int iwl_mvm_load_d3_fw(struct iwl_mvm
*mvm
)
1672 lockdep_assert_held(&mvm
->mutex
);
1674 ret
= iwl_trans_start_hw(mvm
->trans
);
1678 ret
= iwl_mvm_load_ucode_wait_alive(mvm
, IWL_UCODE_WOWLAN
);
1680 IWL_ERR(mvm
, "Failed to start WoWLAN firmware: %d\n", ret
);
1684 ret
= iwl_send_tx_ant_cfg(mvm
, iwl_mvm_get_valid_tx_ant(mvm
));
1688 /* Send phy db control command and then phy db calibration*/
1689 ret
= iwl_send_phy_db_data(mvm
->phy_db
);
1693 ret
= iwl_send_phy_cfg_cmd(mvm
);
1697 /* init the fw <-> mac80211 STA mapping */
1698 for (i
= 0; i
< ARRAY_SIZE(mvm
->fw_id_to_mac_id
); i
++)
1699 RCU_INIT_POINTER(mvm
->fw_id_to_mac_id
[i
], NULL
);
1701 /* Add auxiliary station for scanning */
1702 ret
= iwl_mvm_add_aux_sta(mvm
);
/* error path: tear the device back down (presumably under a label
 * not visible in this extraction)
 */
1708 iwl_mvm_stop_device(mvm
);
/*
 * iwl_mvm_rx_card_state_notif() - handle a CARD_STATE notification from
 * the firmware.  The visible body only decodes the rfkill flags (HW
 * switch, SW switch, CT-kill) and logs them; no state is changed here.
 */
1712 void iwl_mvm_rx_card_state_notif(struct iwl_mvm
*mvm
,
1713 struct iwl_rx_cmd_buffer
*rxb
)
1715 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
/* notification payload lives directly after the packet header */
1716 struct iwl_card_state_notif
*card_state_notif
= (void *)pkt
->data
;
/* flags arrive little-endian from the firmware */
1717 u32 flags
= le32_to_cpu(card_state_notif
->flags
);
1719 IWL_DEBUG_RF_KILL(mvm
, "Card state received: HW:%s SW:%s CT:%s\n",
1720 (flags
& HW_CARD_DISABLED
) ? "Kill" : "On",
1721 (flags
& SW_CARD_DISABLED
) ? "Kill" : "On",
1722 (flags
& CT_KILL_CARD_DISABLED
) ?
1723 "Reached" : "Not reached");
1726 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm
*mvm
,
1727 struct iwl_rx_cmd_buffer
*rxb
)
1729 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
1730 struct iwl_mfuart_load_notif
*mfuart_notif
= (void *)pkt
->data
;
1733 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1734 le32_to_cpu(mfuart_notif
->installed_ver
),
1735 le32_to_cpu(mfuart_notif
->external_ver
),
1736 le32_to_cpu(mfuart_notif
->status
),
1737 le32_to_cpu(mfuart_notif
->duration
));
1739 if (iwl_rx_packet_payload_len(pkt
) == sizeof(*mfuart_notif
))
1741 "MFUART: image size: 0x%08x\n",
1742 le32_to_cpu(mfuart_notif
->image_size
));