/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys = 0;
	u32 size = 0;
	u8 power;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	for (power = 26; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (!page)
		return;

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
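
/*
 * Illustrative note: both shared-memory helpers above use the same control
 * word layout - the target register address goes in bits 0..15 of
 * HEEP_CTRL_WRD_PCIEX_CTRL_REG and the opcode in bits 28..31 (2 for a
 * read, 3 for a write); the data itself travels through
 * HEEP_CTRL_WRD_PCIEX_DATA_REG.
 */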
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}
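
/*
 * Note: trans->pm_support above is derived purely from what the platform
 * advertised in PCI_EXP_LNKCTL - power management is reported as supported
 * only when L0S was not enabled by the BIOS/OS, matching the L0S/L1
 * exclusivity enforced by the workaround in this function.
 */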
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this work-around is needed for
		 * 7260 / 3160 only, so we key it off
		 * host_interrupt_operation_mode even though it is not
		 * directly related to that flag.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		t = 0;
	}

	IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);

	return ret;
}
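
/*
 * Timing sketch for the retry loop above (assuming the reconstructed
 * t += 200 accounting): the inner do/while polls for up to 150000 us
 * (150 ms) per PREPARE request, and the outer loop issues up to 10 such
 * requests, so an unresponsive card is given on the order of 1.5 seconds
 * before the last error code is returned.
 */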
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
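
/*
 * Summary of the service-channel (FH_SRVC_CHNL) programming above: pause
 * the DMA channel, write the SRAM destination address, the low and high
 * parts of the DRAM source address plus the byte count, mark the single
 * TFD buffer valid, then re-enable the channel and sleep until
 * ucode_write_complete is set (presumably from the FH TX interrupt path)
 * or the 5 second timeout expires.
 */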
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = section->len;
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size;

		copy_size = min_t(u32, chunk_sz, section->len - offset);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans,
						   section->offset + offset,
						   p_addr, copy_size);
		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
					      const struct fw_img *image,
					      int cpu,
					      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		if (i == (*first_ucode_section) + 1)
			/* set CPU to started */
			iwl_set_bits_prph(trans,
					  CSR_UCODE_LOAD_STATUS_ADDR,
					  LMPM_CPU_HDRS_LOADING_COMPLETED
					  << shift_param);

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* image loading complete */
	iwl_set_bits_prph(trans,
			  CSR_UCODE_LOAD_STATUS_ADDR,
			  LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans,
		     "working with %s image\n",
		     image->is_secure ? "Secured" : "Non Secured");
	IWL_DEBUG_FW(trans,
		     "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	if (image->is_secure) {
		/* set secure boot inspector addresses */
		iwl_write_prph(trans,
			       LMPM_SECURE_INSPECTOR_CODE_ADDR,
			       LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);

		iwl_write_prph(trans,
			       LMPM_SECURE_INSPECTOR_DATA_ADDR,
			       LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);

		/* set CPU1 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
			       LMPM_SECURE_CPU1_HDR_MEM_SPACE);

		/* load to FW the binary Secured sections of CPU1 */
		ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
							 &first_ucode_section);
		if (ret)
			return ret;
	} else {
		/* load to FW the binary Non secured sections of CPU1 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 1,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		if (image->is_secure)
			ret = iwl_pcie_load_cpu_secured_sections(
							trans, image, 2,
							&first_ucode_section);
		else
			ret = iwl_pcie_load_cpu_sections(trans, image, 2,
							 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	}

	/* release CPU reset */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
	else
		iwl_write32(trans, CSR_RESET, 0);

	if (image->is_secure) {
		/* wait for image verification to complete */
		ret = iwl_poll_prph_bit(trans,
					LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
					LMPM_SECURE_BOOT_STATUS_SUCCESS,
					LMPM_SECURE_BOOT_STATUS_SUCCESS,
					LMPM_SECURE_TIME_OUT);
		if (ret < 0) {
			IWL_ERR(trans, "Time out on secure boot process\n");
			return ret;
		}
	}

	return 0;
}
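
/*
 * Note on the monitor programming above: MON_BUFF_BASE_ADDR and
 * MON_BUFF_END_ADDR take the DMA address shifted right by 4, i.e. both
 * fw_mon_phys and fw_mon_phys + fw_mon_size are divided by 16, apparently
 * because the hardware expresses the buffer bounds in 16-byte units.
 */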
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear that interrupt again here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		iwl_trans_pcie_stop_device(trans);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * reset TX queues -- some of their registers reset during S3
	 * so if we don't reset everything here the D3 image would try
	 * to execute some invalid memory upon resume
	 */
	iwl_trans_pcie_tx_reset(trans);

	iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	iwl_pcie_set_pwr(trans, false);

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
		*status = IWL_D3_STATUS_RESET;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this flow is over.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_trans_pcie_tx_reset(trans);

	ret = iwl_pcie_rx_init(trans);
	if (ret) {
		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
		return ret;
	}

	*status = IWL_D3_STATUS_ALIVE;
	return 0;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	bool hw_rfkill;
	int err;

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
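
/*
 * Illustrative note: the two PRPH helpers above perform an indirect access
 * through HBUS_TARG_PRPH_*ADDR - the periphery register address occupies
 * the low 20 bits and 3 << 24 fills the byte-count field (here a full
 * 4-byte transfer); the value then moves through the matching *DAT
 * register.
 */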
static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
				     &trans_pcie->napi_dev,
				     iwl_pcie_dummy_napi_poll, 64);
	}
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_in_flight)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking that we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_in_flight)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
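
/*
 * read_mem()/write_mem() above double as a usage example for the
 * grab/release_nic_access pair: take reg_lock and wake the NIC, perform
 * the burst of HBUS_TARG_MEM_* accesses, then release so the device may
 * sleep again; -EBUSY is returned when the NIC could not be woken.
 */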
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
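
/*
 * Example expansion: DEBUGFS_READ_WRITE_FILE_OPS(interrupt) further below
 * produces a "struct file_operations iwl_dbgfs_interrupt_ops" wired to
 * iwl_dbgfs_interrupt_read()/iwl_dbgfs_interrupt_write(), and
 * DEBUGFS_ADD_FILE(interrupt, dir, ...) then creates the "interrupt"
 * debugfs file bound to those ops.
 */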
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
			 rxq->write_actual);
	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
			 rxq->need_update);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	int ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code:  0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < IWL_NUM_OF_TBS; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);

	return cmdlen;
}
static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
				    void *buf, u32 buflen)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	u32 len;
	int i, ptr;

	len = sizeof(*data) +
	      cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	if (trans_pcie->fw_mon_page)
		len += sizeof(*data) + sizeof(struct iwl_fw_error_fw_mon) +
		       trans_pcie->fw_mon_size;

	if (!buf)
		return len;

	len = 0;
	data = buf;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);

	if (trans_pcie->fw_mon_page) {
		struct iwl_fw_error_fw_mon *fw_mon_data;

		data = iwl_fw_error_next_data(data);
		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		data->len = cpu_to_le32(trans_pcie->fw_mon_size +
					sizeof(*fw_mon_data));
		fw_mon_data = (void *)data->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, MON_BUFF_WRPTR));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, MON_BUFF_CYCLE_CNT));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, MON_BUFF_BASE_ADDR));

		/*
		 * The firmware is now asserted, it won't write anything to
		 * the buffer. CPU can take ownership to fetch the data.
		 * The buffer will be handed back to the device before the
		 * firmware will be restarted.
		 */
		dma_sync_single_for_cpu(trans->dev, trans_pcie->fw_mon_phys,
					trans_pcie->fw_mon_size,
					DMA_FROM_DEVICE);
		memcpy(fw_mon_data->data,
		       page_address(trans_pcie->fw_mon_page),
		       trans_pcie->fw_mon_size);

		len += sizeof(*data) + sizeof(*fw_mon_data) +
		       trans_pcie->fw_mon_size;
	}

	return len;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

#ifdef CONFIG_IWLWIFI_DEBUGFS
	.dump_data = iwl_trans_pcie_dump_data,
#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_lockdep_init(trans);
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	err = pci_enable_device(pdev);
	if (err)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!trans->dev_cmd_pool) {
		err = -ENOMEM;
		goto out_pci_disable_msi;
	}

	if (iwl_pcie_alloc_ict(trans))
		goto out_free_cmd_pool;

	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return ERR_PTR(err);
}