/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#include "iwl-trans.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/api/tx.h"
#include "iwl-context-info-gen3.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF
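/*
 * Dump the PCI config space, the memory-mapped registers and the AER
 * capability of the device, its parent bridge and the root port to the
 * kernel log. Used as a post-mortem aid when a transaction with the
 * device fails; runs at most once per device lifetime
 * (see pcie_dbg_dumped_once below).
 */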
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(5000, 6000);
}
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}
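/*
 * Allocate a physically contiguous, DMA-coherent buffer for the firmware
 * monitor: start at 2^max_power bytes and halve the size down to
 * 2^min_power bytes until an allocation succeeds.
 */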
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans->dbg.fw_mon.size)
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}
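/*
 * Indirect access to the shared (SHR) register space goes through the
 * HEEP control/data register pair: the low 16 bits select the register,
 * while the top nibble encodes the operation (2 = read, 3 = write).
 */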
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}
static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
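/*
 * Select the APMG power source: VAUX if requested and the device can
 * assert PME# from D3cold, otherwise VMAIN.
 */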
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041
void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	iwl_trans_pcie_sw_reset(trans);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	iwl_trans_pcie_sw_reset(trans);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
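/*
 * Stop the device's busmaster DMA activity and wait (up to 100 usec)
 * for the master-disabled bit to latch.
 */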
void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		t = 0;
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}
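/*
 * Queue one firmware chunk on the FH service channel and sleep until the
 * FH_TX interrupt signals DMA completion (ucode_write_complete), with a
 * 5 second timeout.
 */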
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans, &flags);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}
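/*
 * Apply an ini-style (TLV-configured) debug destination: either route
 * the monitor to internal SMEM, or program the first DRAM fragment's
 * base/end addresses into the monitor buffer registers.
 */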
static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
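/*
 * Family 8000+ variant of the uCode loader: the debug destination is
 * applied before loading, and the CPU is released through the
 * RELEASE_CPU_RESET periphery register instead of CSR_RESET.
 */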
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}
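/*
 * Table of non-RX interrupt causes: for each cause, the IVAR table
 * offset it is programmed through and the mask register used to
 * unmask it.
 */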
struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	int i, arr_size = ARRAY_SIZE(causes_list);
	struct iwl_causes_list *causes = causes_list;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      causes[i].cause_num);
	}
}
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}
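/*
 * Program the MSI-X hardware configuration and cache the initial FH/HW
 * interrupt mask values so they can be restored when causes are
 * re-enabled later.
 */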
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt.
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}
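/*
 * On AX210 and later, suspend is coordinated with the firmware through
 * the ISR6 doorbell: ring it with the SUSPEND payload and wait (up to
 * 2 seconds) for the firmware to acknowledge via sx_waitq.
 */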
static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				     bool reset)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!reset)
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_SUSPEND);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate it toward resume.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout entering D3\n");
			return -ETIMEDOUT;
		}
	}
	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test,  bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		goto out;
	}

	iwl_set_bit(trans, CSR_GP_CNTRL,
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE &&
	    trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		trans_pcie->sx_complete = false;
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_RESUME);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate it toward next suspend.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D3\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}
static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}
	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
	}
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}
static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}
static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	iwl_trans_pcie_sw_reset(trans);

	return 0;
}
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	iwl_trans_pcie_sw_reset(trans);

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->trans_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 0x00FFFFFF;
	else
		return 0x000FFFFF;
}
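/*
 * Periphery (prph) registers are reached indirectly through the
 * HBUS_TARG_PRPH address/data register pairs. The address is masked to
 * the family-specific prph range; the (3 << 24) control bits request a
 * dword-sized access.
 */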
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & mask) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & mask) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
	trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
	trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	trans->txqs.page_offs = trans_cfg->cb_data_offs;
	trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);

	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
	trans_pcie->rx_buf_bytes =
		iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);

	trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/*
	 * Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	if (trans->trans_cfg->gen2)
		iwl_txq_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->rba.alloc_wq) {
		destroy_workqueue(trans_pcie->rba.alloc_wq);
		trans_pcie->rba.alloc_wq = NULL;
	}

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_fw_monitor(trans);

	if (trans_pcie->pnvm_dram.size)
		dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
				  trans_pcie->pnvm_dram.block,
				  trans_pcie->pnvm_dram.physical);

	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
struct iwl_trans_pcie_removal {
	struct pci_dev *pdev;
	struct work_struct work;
};
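/*
 * If the device stops responding (all CSR reads return ~0), it is torn
 * down from a work item: emit an "INACCESSIBLE" uevent and detach the
 * PCI device under the rescan/remove lock.
 */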
static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
{
	struct iwl_trans_pcie_removal *removal =
		container_of(wk, struct iwl_trans_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	static char *prop[] = {"EVENT=INACCESSIBLE", NULL};

	dev_err(&pdev->dev, "Device gone - attempting removal\n");
	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
	pci_lock_rescan_remove();
	pci_dev_put(pdev);
	pci_stop_and_remove_bus_device(pdev);
	pci_unlock_rescan_remove();

	kfree(removal);
	module_put(THIS_MODULE);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * HW with volatile SRAM must save/restore contents to/from
	 * host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP
	 * check is a good idea before accessing the SRAM of HW with
	 * volatile SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);

		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  cntrl);

		iwl_trans_pcie_dump_regs(trans);

		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
			struct iwl_trans_pcie_removal *removal;

			if (test_bit(STATUS_TRANS_DEAD, &trans->status))
				goto err;

			IWL_ERR(trans, "Device gone - scheduling removal!\n");

			/*
			 * get a module reference to avoid doing this
			 * while unloading anyway and to avoid
			 * scheduling a work with code that's being
			 * removed.
			 */
			if (!try_module_get(THIS_MODULE)) {
				IWL_ERR(trans,
					"Module is being unloaded - abort\n");
				goto err;
			}

			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
			if (!removal) {
				module_put(THIS_MODULE);
				goto err;
			}
			/*
			 * we don't need to clear this flag, because
			 * the trans will be freed and reallocated.
			 */
			set_bit(STATUS_TRANS_DEAD, &trans->status);

			removal->pdev = to_pci_dev(trans->dev);
			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
			pci_dev_get(removal->pdev);
			schedule_work(&removal->work);
		} else {
			iwl_write32(trans, CSR_RESET,
				    CSR_RESET_REG_FLAG_FORCE_NMI);
		}

err:
		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
		return false;
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking that we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
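/*
 * Editor's sketch of the intended calling pattern (illustrative only,
 * not part of the original driver): callers burst several register
 * accesses under one grab/release pair so the NIC is woken only once.
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
 *		u32 a = iwl_read32(trans, CSR_GP_CNTRL);
 *		u32 b = iwl_read32(trans, CSR_HW_REV);
 *
 *		iwl_trans_release_nic_access(trans, &flags);
 *		(use a and b here)
 *	}
 */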
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs = 0;
	u32 *vals = buf;

	while (offs < dwords) {
		/* limit the time we spin here under lock to 1/2s */
		ktime_t timeout = ktime_add_us(ktime_get(),
					       500 * USEC_PER_MSEC);

		if (iwl_trans_grab_nic_access(trans, &flags)) {
			iwl_write32(trans, HBUS_TARG_MEM_RADDR,
				    addr + 4 * offs);

			while (offs < dwords) {
				vals[offs] = iwl_read32(trans,
							HBUS_TARG_MEM_RDAT);
				offs++;

				/* calling ktime_get is expensive so
				 * do it once in 128 reads
				 */
				if (offs % 128 == 0 &&
				    ktime_after(ktime_get(), timeout))
					break;
			}
			iwl_trans_release_nic_access(trans, &flags);
		} else {
			return -EBUSY;
		}
	}

	return 0;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
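/*
 * Editor's sketch (illustrative, not part of the original file): both
 * helpers address device memory in 4-byte units, so callers pass a
 * dword count, not a byte count. E.g. to snapshot 64 bytes of the
 * extended SRAM range:
 *
 *	u32 buf[16];
 *
 *	if (!iwl_trans_read_mem(trans, IWL_FW_MEM_EXTENDED_START,
 *				buf, ARRAY_SIZE(buf)))
 *		(buf[] now holds 64 bytes of SRAM)
 */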
static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
					u32 *val)
{
	return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
				     ofs, val);
}
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (i == trans->txqs.cmd.q_id)
			continue;

		spin_lock_bh(&txq->lock);

		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}
#define IWL_FLUSH_WAIT_MS	2000
static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
				       struct iwl_trans_rxq_dma_data *data)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
		return -EINVAL;

	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
	data->fr_bd_wid = 0;

	return 0;
}
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
	struct iwl_txq *txq;
	unsigned long now = jiffies;
	bool overflow_tx;
	u8 wr_ptr;

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!test_bit(txq_idx, trans->txqs.queue_used))
		return -EINVAL;

	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
	txq = trans->txqs.txq[txq_idx];

	spin_lock_bh(&txq->lock);
	overflow_tx = txq->overflow_tx ||
		      !skb_queue_empty(&txq->overflow_q);
	spin_unlock_bh(&txq->lock);

	wr_ptr = READ_ONCE(txq->write_ptr);

	while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
		overflow_tx) &&
	       !time_after(jiffies,
			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
		u8 write_ptr = READ_ONCE(txq->write_ptr);

		/*
		 * If the write pointer moved during the wait, warn only
		 * if the TX came from op mode. In case TX came from
		 * trans layer (overflow TX) don't warn.
		 */
		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
			      "WR pointer moved while flushing %d -> %d\n",
			      wr_ptr, write_ptr))
			return -ETIMEDOUT;
		wr_ptr = write_ptr;

		usleep_range(1000, 2000);

		spin_lock_bh(&txq->lock);
		overflow_tx = txq->overflow_tx ||
			      !skb_queue_empty(&txq->overflow_q);
		spin_unlock_bh(&txq->lock);
	}

	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"fail to flush all tx fifo queues Q %d\n", txq_idx);
		iwl_txq_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);

	return 0;
}
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
	int cnt;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0;
	     cnt < trans->trans_cfg->base_params->num_of_queues;
	     cnt++) {

		if (cnt == trans->txqs.cmd.q_id)
			continue;
		if (!test_bit(cnt, trans->txqs.queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
		if (ret)
			break;
	}

	return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
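/*
 * Editor's note (illustrative): conceptually this is a locked
 * read-modify-write in which only the bits selected by @mask are
 * replaced by @value, i.e.
 *
 *	v = iwl_read32(trans, reg);
 *	v = (v & ~mask) | value;
 *	iwl_write32(trans, reg, v);
 *
 * with reg_lock serializing it against other CSR accessors.
 */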
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	debugfs_create_file(#name, mode, parent, trans,			\
			    &iwl_dbgfs_##name##_ops);			\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
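/*
 * Editor's note (illustrative): DEBUGFS_READ_WRITE_FILE_OPS(rfkill),
 * for example, expands to a file_operations named iwl_dbgfs_rfkill_ops
 * wired to iwl_dbgfs_rfkill_read()/iwl_dbgfs_rfkill_write(); that is
 * the symbol DEBUGFS_ADD_FILE(rfkill, dir, 0600) later hands to
 * debugfs_create_file("rfkill", ...).
 */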
struct iwl_dbgfs_tx_queue_priv {
	struct iwl_trans *trans;
};

struct iwl_dbgfs_tx_queue_state {
	loff_t pos;
};

static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->pos = *pos;
	return state;
}

static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
					 void *v, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;

	*pos = ++state->pos;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	return state;
}

static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;
	struct iwl_trans *trans = priv->trans;
	struct iwl_txq *txq = trans->txqs.txq[state->pos];

	seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
		   (unsigned int)state->pos,
		   !!test_bit(state->pos, trans->txqs.queue_used),
		   !!test_bit(state->pos, trans->txqs.queue_stopped));
	if (txq)
		seq_printf(seq,
			   "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
			   txq->read_ptr, txq->write_ptr,
			   txq->need_update, txq->frozen,
			   txq->n_window, txq->ampdu);
	else
		seq_puts(seq, "(unallocated)");

	if (state->pos == trans->txqs.cmd.q_id)
		seq_puts(seq, " (HCMD)");
	seq_puts(seq, "\n");

	return 0;
}

static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
	.start = iwl_dbgfs_tx_queue_seq_start,
	.next = iwl_dbgfs_tx_queue_seq_next,
	.stop = iwl_dbgfs_tx_queue_seq_stop,
	.show = iwl_dbgfs_tx_queue_seq_show,
};

static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
{
	struct iwl_dbgfs_tx_queue_priv *priv;

	priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
				  sizeof(*priv));

	if (!priv)
		return -ENOMEM;

	priv->trans = inode->i_private;
	return 0;
}
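/*
 * Editor's note (illustrative): reading the resulting "tx_queue" file
 * yields one line per HW queue, roughly of the form
 *
 *	hwq 000: used=1 stopped=0 read=12 write=12 need_update=0 ... (HCMD)
 *	hwq 001: used=0 stopped=0 (unallocated)
 *
 * (the field values above are invented for the example).
 */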
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz;

	bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
				 i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			u32 r =	__le16_to_cpu(iwl_get_closed_rb_stts(trans,
								     rxq));
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n",
					 r & 0x0FFF);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 reset_flag;
	int ret;

	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
	if (ret)
		return ret;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	iwl_pcie_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char buf[100];
	int pos;

	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
			trans_pcie->debug_rfkill,
			!(iwl_read32(trans, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool new_value;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &new_value);
	if (ret)
		return ret;
	if (new_value == trans_pcie->debug_rfkill)
		return count;
	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
		 trans_pcie->debug_rfkill, new_value);
	trans_pcie->debug_rfkill = new_value;
	iwl_pcie_handle_rfkill_irq(trans);

	return count;
}
static int iwl_dbgfs_monitor_data_open(struct inode *inode,
				       struct file *file)
{
	struct iwl_trans *trans = inode->i_private;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans->dbg.dest_tlv ||
	    trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
		IWL_ERR(trans, "Debug destination is not set to DRAM\n");
		return -ENOENT;
	}

	if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
		return -EBUSY;

	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
	return simple_open(inode, file);
}
static int iwl_dbgfs_monitor_data_release(struct inode *inode,
					  struct file *file)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);

	if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
		trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	return 0;
}
static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
				  void *buf, ssize_t *size,
				  ssize_t *bytes_copied)
{
	int buf_size_left = count - *bytes_copied;

	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
	if (*size > buf_size_left)
		*size = buf_size_left;

	*size -= copy_to_user(user_buf, buf, *size);
	*bytes_copied += *size;

	if (buf_size_left == *size)
		return true;
	return false;
}
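/*
 * Editor's note (illustrative): the helper rounds the remaining user
 * buffer down to a whole number of dwords before copying. E.g. with
 * count = 4096 and *bytes_copied = 10, the remaining 4086 bytes become
 * 4084 (4086 - 4086 % 4), so partial dwords are never handed out; a
 * true return tells the caller the user buffer is now full.
 */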
static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
	struct cont_rec *data = &trans_pcie->fw_mon_data;
	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
	ssize_t size, bytes_copied = 0;
	bool b_full;

	if (trans->dbg.dest_tlv) {
		write_ptr_addr =
			le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
	} else {
		write_ptr_addr = MON_BUFF_WRPTR;
		wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
	}

	if (unlikely(!trans->dbg.rec_on))
		return 0;

	mutex_lock(&data->mutex);
	if (data->state ==
	    IWL_FW_MON_DBGFS_STATE_DISABLED) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	/* write_ptr position in bytes rather than DW */
	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);

	if (data->prev_wrap_cnt == wrap_cnt) {
		size = write_ptr - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		   write_ptr < data->prev_wr_ptr) {
		size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

		if (!b_full) {
			size = write_ptr;
			b_full = iwl_write_to_user_buf(user_buf, count,
						       cpu_addr, &size,
						       &bytes_copied);
			data->prev_wr_ptr = size;
			data->prev_wrap_cnt++;
		}
	} else {
		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		    write_ptr > data->prev_wr_ptr)
			IWL_WARN(trans,
				 "write pointer passed previous write pointer, start copying from the beginning\n");
		else if (!unlikely(data->prev_wrap_cnt == 0 &&
				   data->prev_wr_ptr == 0))
			IWL_WARN(trans,
				 "monitor data is out of sync, start copying from the beginning\n");

		size = write_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       cpu_addr, &size,
					       &bytes_copied);
		data->prev_wr_ptr = size;
		data->prev_wrap_cnt = wrap_cnt;
	}

	mutex_unlock(&data->mutex);

	return bytes_copied;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
static const struct file_operations iwl_dbgfs_tx_queue_ops = {
	.owner = THIS_MODULE,
	.open = iwl_dbgfs_tx_queue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations iwl_dbgfs_monitor_data_ops = {
	.read = iwl_dbgfs_monitor_data_read,
	.open = iwl_dbgfs_monitor_data_open,
	.release = iwl_dbgfs_monitor_data_release,
};
/* Create the debugfs files and directories */
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(interrupt, dir, 0600);
	DEBUGFS_ADD_FILE(csr, dir, 0200);
	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
}
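/*
 * Editor's note (illustrative; the exact parent directory is whatever
 * trans->dbgfs_dir was set to by the core): with the usual iwlwifi
 * layout these files typically appear under
 * /sys/kernel/debug/iwlwifi/<device>/trans/, e.g. reading "rx_queue"
 * for RX ring state or writing 0 to "interrupt" to clear the ISR
 * statistics.
 */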
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct cont_rec *data = &trans_pcie->fw_mon_data;

	mutex_lock(&data->mutex);
	data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
	mutex_unlock(&data->mutex);
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
		cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);

	return cmdlen;
}
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = trans_pcie->rx_buf_bytes;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);
		/* remap the page for the free benefit */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page,
					     rxb->offset, max_len,
					     DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}
#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	if (!trans->trans_cfg->gen2)
		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
	else
		for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
		     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}
static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = iwl_read_umac_prph_no_grab(trans,
						       MON_DMARB_RD_DATA_ADDR);
	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans, &flags);

	return monitor_len;
}
static void
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
			     struct iwl_fw_error_dump_fw_mon *fw_mon_data)
{
	u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
		base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
		write_ptr = DBGC_CUR_DBGBUF_STATUS;
		wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
	} else if (trans->dbg.dest_tlv) {
		write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
		base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
	} else {
		base = MON_BUFF_BASE_ADDR;
		write_ptr = MON_BUFF_WRPTR;
		wrap_cnt = MON_BUFF_CYCLE_CNT;
	}

	write_ptr_val = iwl_read_prph(trans, write_ptr);
	fw_mon_data->fw_mon_cycle_cnt =
		cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
	fw_mon_data->fw_mon_base_ptr =
		cpu_to_le32(iwl_read_prph(trans, base));
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		fw_mon_data->fw_mon_base_high_ptr =
			cpu_to_le32(iwl_read_prph(trans, base_high));
		write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
		/* convert wrtPtr to DWs, to align with all HWs */
		write_ptr_val >>= 2;
	}
	fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
}
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	u32 len = 0;

	if (trans->dbg.dest_tlv ||
	    (fw_mon->size &&
	     (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
	      trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;

		iwl_trans_pcie_dump_pointers(trans, fw_mon_data);

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (fw_mon->size) {
			memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
			monitor_len = fw_mon->size;
		} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
			u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			if (trans->dbg.dest_tlv->version) {
				base = (iwl_read_prph(trans, base) &
					IWL_LDBG_M2S_BUF_BA_MSK) <<
				       trans->dbg.dest_tlv->base_shift;
				base *= IWL_M2S_UNIT_SIZE;
				base += trans->cfg->smem_offset;
			} else {
				base = iwl_read_prph(trans, base) <<
				       trans->dbg.dest_tlv->base_shift;
			}

			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
	if (trans->dbg.fw_mon.size) {
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans->dbg.fw_mon.size;
		return trans->dbg.fw_mon.size;
	} else if (trans->dbg.dest_tlv) {
		u32 base, end, cfg_reg, monitor_len;

		if (trans->dbg.dest_tlv->version == 1) {
			cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			cfg_reg = iwl_read_prph(trans, cfg_reg);
			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
				trans->dbg.dest_tlv->base_shift;
			base *= IWL_M2S_UNIT_SIZE;
			base += trans->cfg->smem_offset;

			monitor_len =
				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
				trans->dbg.dest_tlv->end_shift;
			monitor_len *= IWL_M2S_UNIT_SIZE;
		} else {
			base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);

			base = iwl_read_prph(trans, base) <<
			       trans->dbg.dest_tlv->base_shift;
			end = iwl_read_prph(trans, end) <<
			      trans->dbg.dest_tlv->end_shift;

			/* Make "end" point to the actual end */
			if (trans->trans_cfg->device_family >=
			    IWL_DEVICE_FAMILY_8000 ||
			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
				end += (1 << trans->dbg.dest_tlv->end_shift);
			monitor_len = end - base;
		}
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			monitor_len;
		return monitor_len;
	}
	return 0;
}
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  u32 dump_mask)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs = 0, monitor_len = 0;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->trans_cfg->mq_rx_supported &&
			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);

	if (!dump_mask)
		return NULL;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
		len += sizeof(*data) +
			cmdq->n_window * (sizeof(*txcmd) +
					  TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);

	/* CSR registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
		if (trans->trans_cfg->gen2)
			len += sizeof(*data) +
			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
		else
			len += sizeof(*data) +
			       (FH_MEM_UPPER_BOUND -
				FH_MEM_LOWER_BOUND);
	}

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
		/* RBs */
		num_rbs =
			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
			& 0x0FFF;
		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	/* Paged memory for gen2 HW */
	if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
		for (i = 0; i < trans->init_dram.paging_cnt; i++)
			len += sizeof(*data) +
			       sizeof(struct iwl_fw_error_dump_paging) +
			       trans->init_dram.paging[i].size;

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
		u16 tfd_size = trans->txqs.tfd.size;

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
		txcmd = (void *)data->data;
		spin_lock_bh(&cmdq->lock);
		ptr = cmdq->write_ptr;
		for (i = 0; i < cmdq->n_window; i++) {
			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
			u8 tfdidx;
			u32 caplen, cmdlen;

			if (trans->trans_cfg->use_tfh)
				tfdidx = idx;
			else
				tfdidx = ptr;

			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
							   (u8 *)cmdq->tfds +
							   tfd_size * tfdidx);
			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

			if (cmdlen) {
				len += sizeof(*txcmd) + caplen;
				txcmd->cmdlen = cpu_to_le32(cmdlen);
				txcmd->caplen = cpu_to_le32(caplen);
				memcpy(txcmd->data, cmdq->entries[idx].cmd,
				       caplen);
				txcmd = (void *)((u8 *)txcmd->data + caplen);
			}

			ptr = iwl_txq_dec_wrap(trans, ptr);
		}
		spin_unlock_bh(&cmdq->lock);

		data->len = cpu_to_le32(len);
		len += sizeof(*data);
		data = iwl_fw_error_next_data(data);
	}

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += iwl_trans_pcie_dump_csr(trans, &data);
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	/* Paged memory for gen2 HW */
	if (trans->trans_cfg->gen2 &&
	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
			struct iwl_fw_error_dump_paging *paging;
			u32 page_len = trans->init_dram.paging[i].size;

			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			data->len = cpu_to_le32(sizeof(*paging) + page_len);
			paging = (void *)data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data,
			       trans->init_dram.paging[i].block, page_len);
			data = iwl_fw_error_next_data(data);

			len += sizeof(*data) + sizeof(*paging) + page_len;
		}
	}
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	len += sizeof(*dump_data);
	dump_data->len = len;

	return dump_data;
}
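/*
 * Editor's sketch (illustrative, not from the original file): a consumer
 * walks the returned blob chunk by chunk using the same
 * iwl_fw_error_next_data() helper, switching on each chunk's type:
 *
 *	struct iwl_fw_error_dump_data *d = (void *)dump_data->data;
 *	u32 left = dump_data->len - sizeof(*dump_data);
 *
 *	while (left > sizeof(*d)) {
 *		handle_chunk(le32_to_cpu(d->type), d->data,
 *			     le32_to_cpu(d->len));
 *		left -= sizeof(*d) + le32_to_cpu(d->len);
 *		d = iwl_fw_error_next_data(d);
 *	}
 *
 * (handle_chunk() is a hypothetical consumer callback.)
 */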
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static void iwl_trans_pcie_resume(struct iwl_trans *trans)
{
}
#endif /* CONFIG_PM_SLEEP */
#define IWL_TRANS_COMMON_OPS						\
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
	.write8 = iwl_trans_pcie_write8,				\
	.write32 = iwl_trans_pcie_write32,				\
	.read32 = iwl_trans_pcie_read32,				\
	.read_prph = iwl_trans_pcie_read_prph,				\
	.write_prph = iwl_trans_pcie_write_prph,			\
	.read_mem = iwl_trans_pcie_read_mem,				\
	.write_mem = iwl_trans_pcie_write_mem,				\
	.read_config32 = iwl_trans_pcie_read_config32,			\
	.configure = iwl_trans_pcie_configure,				\
	.set_pmi = iwl_trans_pcie_set_pmi,				\
	.sw_reset = iwl_trans_pcie_sw_reset,				\
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
	.dump_data = iwl_trans_pcie_dump_data,				\
	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
	.d3_resume = iwl_trans_pcie_d3_resume,				\
	.sync_nmi = iwl_trans_pcie_sync_nmi

#ifdef CONFIG_PM_SLEEP
#define IWL_TRANS_PM_OPS						\
	.suspend = iwl_trans_pcie_suspend,				\
	.resume = iwl_trans_pcie_resume,
#else
#define IWL_TRANS_PM_OPS
#endif /* CONFIG_PM_SLEEP */
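/*
 * Editor's note (illustrative): with CONFIG_PM_SLEEP set, the two ops
 * structures below therefore gain .suspend/.resume members via
 * IWL_TRANS_PM_OPS; without it the macro expands to nothing and the
 * designated-initializer lists are unchanged.
 */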
static const struct iwl_trans_ops trans_ops_pcie = {
	IWL_TRANS_COMMON_OPS,
	IWL_TRANS_PM_OPS
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_txq_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,

	.freeze_txq_timer = iwl_trans_txq_freeze_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};
static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
	IWL_TRANS_COMMON_OPS,
	IWL_TRANS_PM_OPS
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
	.start_fw = iwl_trans_pcie_gen2_start_fw,
	.stop_device = iwl_trans_pcie_gen2_stop_device,

	.send_cmd = iwl_trans_pcie_gen2_send_hcmd,

	.tx = iwl_txq_gen2_tx,
	.reclaim = iwl_txq_reclaim,

	.set_q_ptrs = iwl_txq_set_q_ptrs,

	.txq_alloc = iwl_txq_dyn_alloc,
	.txq_free = iwl_txq_dyn_free,
	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
	.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret, addr_size;
	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;

	if (!cfg_trans->gen2)
		ops = &trans_ops_pcie;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
				cfg_trans);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	trans_pcie->opmode_down = true;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->alloc_page_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!trans_pcie->rba.alloc_wq) {
		ret = -ENOMEM;
		goto out_free_trans;
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	trans_pcie->debug_rfkill = -1;

	if (!cfg_trans->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	trans_pcie->def_rx_queue = 0;

	pci_set_master(pdev);

	addr_size = trans->txqs.tfd.addr_size;
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev,
						  DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
		goto out_no_pci;
	}

	trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENODEV;
		goto out_no_pci;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	if (trans->hw_rev == 0xffffffff) {
		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
		ret = -EIO;
		goto out_no_pci;
	}

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);

	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	init_waitqueue_head(&trans_pcie->sx_waitq);

	if (trans_pcie->msix_enabled) {
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif

	iwl_dbg_tlv_init(trans);

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_trans:
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}
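/*
 * Editor's sketch of the probe-side usage (illustrative; the real call
 * site lives elsewhere in the driver): the PCI probe routine turns an
 * ERR_PTR return from this function into its own error code.
 *
 *	trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 */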
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
	bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
	u32 inta_addr, sw_err_bit;

	if (trans_pcie->msix_enabled) {
		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
		sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
	} else {
		inta_addr = CSR_INT;
		sw_err_bit = CSR_INT_BIT_SW_ERR;
	}

	/* if the interrupts were already disabled, there is no point in
	 * calling iwl_disable_interrupts
	 */
	if (interrupts_enabled)
		iwl_disable_interrupts(trans);

	iwl_force_nmi(trans);
	while (time_after(timeout, jiffies)) {
		u32 inta_hw = iwl_read32(trans, inta_addr);

		/* Error detected by uCode */
		if (inta_hw & sw_err_bit) {
			/* Clear causes register */
			iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
			break;
		}

		mdelay(1);
	}

	/* re-enable interrupts only if they were enabled before this
	 * function, to avoid a case where the driver enables interrupts
	 * before proper configurations were made
	 */
	if (interrupts_enabled)
		iwl_enable_interrupts(trans);

	iwl_trans_fw_error(trans);
}