/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-shared.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
#include "iwl-core.h"

static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/*Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

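/*
 * Note on the allocations above: rxq->bd is the circular buffer of
 * RX_QUEUE_SIZE receive buffer descriptors (one __le32 per RBD), and
 * rxq->rb_stts is the small status area the device updates with the index
 * of the RBDs it has filled.  iwl_trans_rx_hw_init() below hands both DMA
 * addresses to the device (bd_dma >> 8 and rb_stts_dma >> 4).
 */
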
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

a0f6b0a2 | 230 | { |
5a878bf6 EG |
231 | struct iwl_trans_pcie *trans_pcie = |
232 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
233 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | |
234 | ||
a0f6b0a2 EG |
235 | unsigned long flags; |
236 | ||
237 | /*if rxq->bd is NULL, it means that nothing has been allocated, | |
238 | * exit now */ | |
239 | if (!rxq->bd) { | |
5a878bf6 | 240 | IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); |
a0f6b0a2 EG |
241 | return; |
242 | } | |
243 | ||
244 | spin_lock_irqsave(&rxq->lock, flags); | |
5a878bf6 | 245 | iwl_trans_rxq_free_rx_bufs(trans); |
a0f6b0a2 EG |
246 | spin_unlock_irqrestore(&rxq->lock, flags); |
247 | ||
1042db2a | 248 | dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, |
a0f6b0a2 EG |
249 | rxq->bd, rxq->bd_dma); |
250 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | |
251 | rxq->bd = NULL; | |
252 | ||
253 | if (rxq->rb_stts) | |
1042db2a | 254 | dma_free_coherent(trans->dev, |
a0f6b0a2 EG |
255 | sizeof(struct iwl_rb_status), |
256 | rxq->rb_stts, rxq->rb_stts_dma); | |
257 | else | |
5a878bf6 | 258 | IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); |
a0f6b0a2 EG |
259 | memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); |
260 | rxq->rb_stts = NULL; | |
261 | } | |
262 | ||
6d8f6eeb | 263 | static int iwl_trans_rx_stop(struct iwl_trans *trans) |
c2c52e8b EG |
264 | { |
265 | ||
266 | /* stop Rx DMA */ | |
1042db2a EG |
267 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); |
268 | return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, | |
c2c52e8b EG |
269 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); |
270 | } | |
271 | ||
static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

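/*
 * Minimal usage sketch for the two helpers above (this mirrors how the
 * keep-warm buffer is handled by iwl_trans_tx_alloc() and
 * iwl_trans_pcie_tx_free() later in this file):
 *
 *	struct iwl_dma_ptr kw;
 *
 *	if (iwlagn_alloc_dma_ptr(trans, &kw, IWL_KW_SIZE))
 *		goto error;
 *	...
 *	iwlagn_free_dma_ptr(trans, &kw);
 */
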
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
				struct iwl_tx_queue *txq, int slots_num,
				u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
	txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
					      GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans->shrd->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;

}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

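/*
 * The BUILD_BUG_ON() above matters because queue indexes are wrapped by
 * masking rather than by a modulo; for example iwl_trans_pcie_reclaim()
 * below computes "ssn & (txq->q.n_bd - 1)", which is only correct when
 * the queue size is a power of two.
 */
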
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;
	unsigned long flags;
	spinlock_t *lock;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans->shrd->cmd_queue) {
		dma_dir = DMA_BIDIRECTIONAL;
		lock = &trans->hcmd_lock;
	} else {
		dma_dir = DMA_TO_DEVICE;
		lock = &trans->shrd->sta_lock;
	}

	spin_lock_irqsave(lock, flags);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_irqrestore(lock, flags);
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;
	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(trans).max_txq_num; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param trans
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
			sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	bus_apm_config(bus(trans));

	/* Configure analog phase-lock-loop before activating to D0A */
	if (cfg(trans)->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    cfg(trans)->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);

out:
	return ret;
}

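/*
 * A note on the sequence in iwl_apm_init(): setting the "init done" bit
 * requests the D0U* -> D0A* transition, and the iwl_poll_bit() on
 * MAC_CLOCK_READY (timeout argument 25000) is what gates all later
 * prph/SRAM accesses in this file -- nothing device-internal is touched
 * until that poll succeeds.
 */
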
static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_apm_stop(struct iwl_trans *trans)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_nic_init(struct iwl_trans *trans)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING,
		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_nic_config(priv(trans));

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (hw_params(trans).shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	set_bit(STATUS_INIT, &trans->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}

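/*
 * The two helpers above form the "ready/prepare" handshake used before
 * taking over the NIC (iwl_trans_pcie_start_device() below only insists
 * on it when the SKU has AMT enabled): set NIC_READY and poll for it; if
 * that fails, assert PREPARE, poll for NIC_PREPARE_DONE, then retry
 * NIC_READY once more.
 */
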
#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};

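/*
 * Reading the tables above: for the BSS context the four mac80211 ACs
 * (VO/VI/BE/BK) map to Tx queues 0-3 and the matching FIFOs, while the
 * PAN context uses queues 7-4 and the *_IPAN FIFOs.  The entries past the
 * ACs cover the command FIFO and, for PAN, what appear to be the
 * multicast and aux queues; iwl_tx_start() below walks these tables when
 * it activates the queues.
 */
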
static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(trans, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	if (iwl_is_rfkill(trans->shrd)) {
		iwl_set_hw_rfkill_state(priv(trans), true);
		iwl_enable_interrupts(trans);
		return -ERFKILL;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 * must be called under priv->shrd->lock and mac access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	iwl_write_prph(trans, SCD_TXFACT, mask);
}

static void iwl_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   ((SCD_WIN_SIZE <<
				   SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				   SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				   ((SCD_FRAME_LIMIT <<
				   SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				   SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&trans_pcie->queue_stopped[0], 0,
	       sizeof(trans_pcie->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&trans_pcie->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	trans_pcie->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
		     IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
		     IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(trans_pcie, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, 0);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans->shrd->lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				1000))
			IWL_ERR(trans, "Failing on timeout while stopping"
				" DMA channel %d [0x%08x]", ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(trans->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
}

static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
		u8 sta_id, u8 tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 txq_id;
	bool is_agg = false;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = trans_pcie->mcast_queue[ctx];

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id =
			trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];

	/* aggregation is on for this <sta,tid> */
	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		WARN_ON(tid >= IWL_MAX_TID_COUNT);
		txq_id = trans_pcie->agg_txq[sta_id][tid];
		is_agg = true;
	}

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
#ifdef CONFIG_IWLWIFI_DEBUG
	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);
#endif

	/* Set up driver data for this TFD */
	txq->skbs[q->write_ptr] = skb;
	txq->cmd[q->write_ptr] = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv(trans),
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq, "Queue is full");
		}
	}
	return 0;
}

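/*
 * To summarize the Tx path above: each frame is built as (at most) two TBs
 * in one TFD -- the first covers the Tx command plus MAC header (mapped
 * DMA_BIDIRECTIONAL because the scratch pointers are patched after
 * mapping), the second points at the skb payload (DMA_TO_DEVICE).  Once
 * the byte-count table entry is set, only the queue write pointer needs
 * to be pushed to the device.
 */
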
static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans->irq, iwl_isr_ict, IRQF_SHARED,
				  DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d", err);
		goto error;
	}

	iwl_apm_init(trans);

	return err;

error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}

cc56feb2 EG |
1404 | static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans) |
1405 | { | |
1406 | iwl_apm_stop(trans); | |
1407 | ||
1408 | /* Even if we stop the HW, we still want the RF kill interrupt */ | |
1409 | IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); | |
1410 | iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); | |
1411 | } | |
1412 | ||
76bc10fc | 1413 | static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, |
464021ff EG |
1414 | int txq_id, int ssn, u32 status, |
1415 | struct sk_buff_head *skbs) | |
1416 | { | |
8ad71bef EG |
1417 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1418 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | |
a0eaad71 EG |
1419 | /* n_bd is usually 256 => n_bd - 1 = 0xff */ |
1420 | int tfd_num = ssn & (txq->q.n_bd - 1); | |
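/*
 * Editor's note: worked example of the masking above, assuming the usual
 * n_bd of 256: ssn = 0x12A & 0xff gives tfd_num = 0x2A, i.e. the sequence
 * number is folded into the 256-entry ring.
 */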
464021ff | 1421 | int freed = 0; |
a0eaad71 | 1422 | |
8ad71bef EG |
1423 | txq->time_stamp = jiffies; |
1424 | ||
76bc10fc EG |
1425 | if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && |
1426 | txq_id != trans_pcie->agg_txq[sta_id][tid])) { | |
1427 | /* | |
1428 | * FIXME: this is a uCode bug which needs to be addressed, |
1429 | * log the information and return for now. |
1430 | * Since it can possibly happen very often and in order |
1431 | * not to fill the syslog, don't use IWL_ERR or IWL_WARN |
1432 | */ | |
1433 | IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, " | |
1434 | "agg_txq[sta_id[tid] %d", txq_id, | |
1435 | trans_pcie->agg_txq[sta_id][tid]); | |
1436 | return 1; | |
a0eaad71 EG |
1437 | } |
1438 | ||
1439 | if (txq->q.read_ptr != tfd_num) { | |
1daf04b8 EG |
1440 | IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n", |
1441 | txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr, | |
1442 | tfd_num, ssn); | |
464021ff | 1443 | freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); |
1ba42da4 EG |
1444 | if (iwl_queue_space(&txq->q) > txq->q.low_mark && |
1445 | (!txq->sched_retry || | |
1446 | status != TX_STATUS_FAIL_PASSIVE_NO_RX)) | |
81a3de1c | 1447 | iwl_wake_queue(trans, txq, "Packets reclaimed"); |
a0eaad71 | 1448 | } |
76bc10fc | 1449 | return 0; |
a0eaad71 EG |
1450 | } |
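/*
 * Editor's note: together with the tx path above this gives the usual
 * flow-control hysteresis: iwl_trans_pcie_tx() stops a queue once free
 * space drops below q->high_mark, and reclaim only wakes it again once
 * space has climbed back above q->low_mark.
 */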
1451 | ||
03905495 EG |
1452 | static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) |
1453 | { | |
1454 | iowrite8(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | |
1455 | } | |
1456 | ||
1457 | static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) | |
1458 | { | |
1459 | iowrite32(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | |
1460 | } | |
1461 | ||
1462 | static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) | |
1463 | { | |
1464 | u32 val = ioread32(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | |
1465 | return val; | |
1466 | } | |
1467 | ||
6d8f6eeb | 1468 | static void iwl_trans_pcie_free(struct iwl_trans *trans) |
34c1b7ba | 1469 | { |
a42a1844 EG |
1470 | struct iwl_trans_pcie *trans_pcie = |
1471 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1472 | ||
45c30dba | 1473 | iwl_calib_free_results(trans); |
ae2c30bf | 1474 | iwl_trans_pcie_tx_free(trans); |
a5916977 | 1475 | #ifndef CONFIG_IWLWIFI_IDI |
ae2c30bf | 1476 | iwl_trans_pcie_rx_free(trans); |
a5916977 | 1477 | #endif |
57a1dc89 EG |
1478 | if (trans_pcie->irq_requested == true) { |
1479 | free_irq(trans->irq, trans); | |
1480 | iwl_free_isr_ict(trans); | |
1481 | } | |
a42a1844 EG |
1482 | |
1483 | pci_disable_msi(trans_pcie->pci_dev); | |
1484 | pci_iounmap(trans_pcie->pci_dev, trans_pcie->hw_base); | |
1485 | pci_release_regions(trans_pcie->pci_dev); | |
1486 | pci_disable_device(trans_pcie->pci_dev); | |
1487 | ||
6d8f6eeb EG |
1488 | trans->shrd->trans = NULL; |
1489 | kfree(trans); | |
34c1b7ba EG |
1490 | } |
1491 | ||
c01a4047 | 1492 | #ifdef CONFIG_PM_SLEEP |
57210f7c EG |
1493 | static int iwl_trans_pcie_suspend(struct iwl_trans *trans) |
1494 | { | |
1495 | /* | |
1496 | * This function is called when the system goes into suspend state. |
ade4c649 WYG |
1497 | * mac80211 will call iwlagn_mac_stop() from the mac80211 suspend |
1498 | * function first, but since iwlagn_mac_stop() has no knowledge of |
1499 | * who the caller is, |
57210f7c EG |
1500 | * it will not call apm_ops.stop() to stop the DMA operation. |
1501 | * Call apm_ops.stop() here to make sure we stop the DMA. |
1502 | * | |
1503 | * But of course ... if we have configured WoWLAN then we did other | |
1504 | * things already :-) | |
1505 | */ | |
d36120c6 | 1506 | if (!trans->shrd->wowlan) { |
cc56feb2 | 1507 | iwl_apm_stop(trans); |
d36120c6 JB |
1508 | } else { |
1509 | iwl_disable_interrupts(trans); | |
1042db2a | 1510 | iwl_clear_bit(trans, CSR_GP_CNTRL, |
d36120c6 JB |
1511 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
1512 | } | |
57210f7c EG |
1513 | |
1514 | return 0; | |
1515 | } | |
1516 | ||
1517 | static int iwl_trans_pcie_resume(struct iwl_trans *trans) | |
1518 | { | |
1519 | bool hw_rfkill = false; | |
1520 | ||
0c325769 | 1521 | iwl_enable_interrupts(trans); |
57210f7c | 1522 | |
1042db2a | 1523 | if (!(iwl_read32(trans, CSR_GP_CNTRL) & |
57210f7c EG |
1524 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) |
1525 | hw_rfkill = true; | |
1526 | ||
1527 | if (hw_rfkill) | |
1528 | set_bit(STATUS_RF_KILL_HW, &trans->shrd->status); | |
1529 | else | |
1530 | clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status); | |
1531 | ||
3e10caeb | 1532 | iwl_set_hw_rfkill_state(priv(trans), hw_rfkill); |
57210f7c EG |
1533 | |
1534 | return 0; | |
1535 | } | |
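/*
 * Editor's note: CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW is treated as
 * active-low here: hw_rfkill is reported when the bit reads back as 0,
 * i.e. the hardware kill switch is engaged.
 */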
c01a4047 | 1536 | #endif /* CONFIG_PM_SLEEP */ |
57210f7c | 1537 | |
e13c0c59 | 1538 | static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans, |
81a3de1c EG |
1539 | enum iwl_rxon_context_id ctx, |
1540 | const char *msg) | |
e13c0c59 EG |
1541 | { |
1542 | u8 ac, txq_id; | |
1543 | struct iwl_trans_pcie *trans_pcie = | |
1544 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1545 | ||
1546 | for (ac = 0; ac < AC_NUM; ac++) { | |
1547 | txq_id = trans_pcie->ac_to_queue[ctx][ac]; | |
81a3de1c | 1548 | IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n", |
e13c0c59 | 1549 | ac, |
8ad71bef | 1550 | (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0) |
e13c0c59 | 1551 | ? "stopped" : "awake"); |
81a3de1c | 1552 | iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg); |
e13c0c59 EG |
1553 | } |
1554 | } | |
1555 | ||
81a3de1c EG |
1556 | static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id, |
1557 | const char *msg) | |
e20d4341 | 1558 | { |
8ad71bef EG |
1559 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1560 | ||
81a3de1c | 1561 | iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg); |
e20d4341 EG |
1562 | } |
1563 | ||
5f178cd2 EG |
1564 | #define IWL_FLUSH_WAIT_MS 2000 |
1565 | ||
1566 | static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) | |
1567 | { | |
8ad71bef | 1568 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
5f178cd2 EG |
1569 | struct iwl_tx_queue *txq; |
1570 | struct iwl_queue *q; | |
1571 | int cnt; | |
1572 | unsigned long now = jiffies; | |
1573 | int ret = 0; | |
1574 | ||
1575 | /* waiting for all the tx frames to complete might take a while */ |
1576 | for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { | |
1577 | if (cnt == trans->shrd->cmd_queue) | |
1578 | continue; | |
8ad71bef | 1579 | txq = &trans_pcie->txq[cnt]; |
5f178cd2 EG |
1580 | q = &txq->q; |
1581 | while (q->read_ptr != q->write_ptr && !time_after(jiffies, | |
1582 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) | |
1583 | msleep(1); | |
1584 | ||
1585 | if (q->read_ptr != q->write_ptr) { | |
1586 | IWL_ERR(trans, "fail to flush all tx fifo queues\n"); | |
1587 | ret = -ETIMEDOUT; | |
1588 | break; | |
1589 | } | |
1590 | } | |
1591 | return ret; | |
1592 | } | |
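/*
 * Editor's note: the deadline (now + IWL_FLUSH_WAIT_MS) is computed from
 * the jiffies value sampled at entry, so all queues together share the
 * same ~2 second budget; the first queue still holding frames past that
 * point makes the flush return -ETIMEDOUT.
 */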
1593 | ||
f22be624 EG |
1594 | /* |
1595 | * On every watchdog tick we check the (latest) time stamp. If it does not |
1596 | * change during the timeout period and the queue is not empty, we reset the firmware. |
1597 | */ | |
1598 | static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt) | |
1599 | { | |
8ad71bef EG |
1600 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1601 | struct iwl_tx_queue *txq = &trans_pcie->txq[cnt]; | |
f22be624 EG |
1602 | struct iwl_queue *q = &txq->q; |
1603 | unsigned long timeout; | |
1604 | ||
1605 | if (q->read_ptr == q->write_ptr) { | |
1606 | txq->time_stamp = jiffies; | |
1607 | return 0; | |
1608 | } | |
1609 | ||
1610 | timeout = txq->time_stamp + | |
1611 | msecs_to_jiffies(hw_params(trans).wd_timeout); | |
1612 | ||
1613 | if (time_after(jiffies, timeout)) { | |
1614 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id, | |
1615 | hw_params(trans).wd_timeout); | |
08d1700d | 1616 | IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", |
05f8a09f | 1617 | q->read_ptr, q->write_ptr); |
08d1700d | 1618 | IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", |
1042db2a | 1619 | iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) |
08d1700d | 1620 | & (TFD_QUEUE_SIZE_MAX - 1), |
1042db2a | 1621 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); |
f22be624 EG |
1622 | return 1; |
1623 | } | |
1624 | ||
1625 | return 0; | |
1626 | } | |
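/*
 * Editor's note: for example, with a wd_timeout of 2000 ms a queue whose
 * time_stamp has not been refreshed for more than 2 seconds while
 * read_ptr != write_ptr is reported as stuck (return value 1), which the
 * caller uses to trigger a firmware reset per the comment above.
 */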
1627 | ||
ff620849 EG |
1628 | static const char *get_fh_string(int cmd) |
1629 | { | |
1630 | switch (cmd) { | |
1631 | IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); | |
1632 | IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); | |
1633 | IWL_CMD(FH_RSCSR_CHNL0_WPTR); | |
1634 | IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); | |
1635 | IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); | |
1636 | IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); | |
1637 | IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); | |
1638 | IWL_CMD(FH_TSSR_TX_STATUS_REG); | |
1639 | IWL_CMD(FH_TSSR_TX_ERROR_REG); | |
1640 | default: | |
1641 | return "UNKNOWN"; | |
1642 | } | |
1643 | } | |
1644 | ||
1645 | int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) | |
1646 | { | |
1647 | int i; | |
1648 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1649 | int pos = 0; | |
1650 | size_t bufsz = 0; | |
1651 | #endif | |
1652 | static const u32 fh_tbl[] = { | |
1653 | FH_RSCSR_CHNL0_STTS_WPTR_REG, | |
1654 | FH_RSCSR_CHNL0_RBDCB_BASE_REG, | |
1655 | FH_RSCSR_CHNL0_WPTR, | |
1656 | FH_MEM_RCSR_CHNL0_CONFIG_REG, | |
1657 | FH_MEM_RSSR_SHARED_CTRL_REG, | |
1658 | FH_MEM_RSSR_RX_STATUS_REG, | |
1659 | FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, | |
1660 | FH_TSSR_TX_STATUS_REG, | |
1661 | FH_TSSR_TX_ERROR_REG | |
1662 | }; | |
1663 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1664 | if (display) { | |
1665 | bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; | |
1666 | *buf = kmalloc(bufsz, GFP_KERNEL); | |
1667 | if (!*buf) | |
1668 | return -ENOMEM; | |
1669 | pos += scnprintf(*buf + pos, bufsz - pos, | |
1670 | "FH register values:\n"); | |
1671 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | |
1672 | pos += scnprintf(*buf + pos, bufsz - pos, | |
1673 | " %34s: 0X%08x\n", | |
1674 | get_fh_string(fh_tbl[i]), | |
1042db2a | 1675 | iwl_read_direct32(trans, fh_tbl[i])); |
ff620849 EG |
1676 | } |
1677 | return pos; | |
1678 | } | |
1679 | #endif | |
1680 | IWL_ERR(trans, "FH register values:\n"); | |
1681 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | |
1682 | IWL_ERR(trans, " %34s: 0X%08x\n", | |
1683 | get_fh_string(fh_tbl[i]), | |
1042db2a | 1684 | iwl_read_direct32(trans, fh_tbl[i])); |
ff620849 EG |
1685 | } |
1686 | return 0; | |
1687 | } | |
1688 | ||
1689 | static const char *get_csr_string(int cmd) | |
1690 | { | |
1691 | switch (cmd) { | |
1692 | IWL_CMD(CSR_HW_IF_CONFIG_REG); | |
1693 | IWL_CMD(CSR_INT_COALESCING); | |
1694 | IWL_CMD(CSR_INT); | |
1695 | IWL_CMD(CSR_INT_MASK); | |
1696 | IWL_CMD(CSR_FH_INT_STATUS); | |
1697 | IWL_CMD(CSR_GPIO_IN); | |
1698 | IWL_CMD(CSR_RESET); | |
1699 | IWL_CMD(CSR_GP_CNTRL); | |
1700 | IWL_CMD(CSR_HW_REV); | |
1701 | IWL_CMD(CSR_EEPROM_REG); | |
1702 | IWL_CMD(CSR_EEPROM_GP); | |
1703 | IWL_CMD(CSR_OTP_GP_REG); | |
1704 | IWL_CMD(CSR_GIO_REG); | |
1705 | IWL_CMD(CSR_GP_UCODE_REG); | |
1706 | IWL_CMD(CSR_GP_DRIVER_REG); | |
1707 | IWL_CMD(CSR_UCODE_DRV_GP1); | |
1708 | IWL_CMD(CSR_UCODE_DRV_GP2); | |
1709 | IWL_CMD(CSR_LED_REG); | |
1710 | IWL_CMD(CSR_DRAM_INT_TBL_REG); | |
1711 | IWL_CMD(CSR_GIO_CHICKEN_BITS); | |
1712 | IWL_CMD(CSR_ANA_PLL_CFG); | |
1713 | IWL_CMD(CSR_HW_REV_WA_REG); | |
1714 | IWL_CMD(CSR_DBG_HPET_MEM_REG); | |
1715 | default: | |
1716 | return "UNKNOWN"; | |
1717 | } | |
1718 | } | |
1719 | ||
1720 | void iwl_dump_csr(struct iwl_trans *trans) | |
1721 | { | |
1722 | int i; | |
1723 | static const u32 csr_tbl[] = { | |
1724 | CSR_HW_IF_CONFIG_REG, | |
1725 | CSR_INT_COALESCING, | |
1726 | CSR_INT, | |
1727 | CSR_INT_MASK, | |
1728 | CSR_FH_INT_STATUS, | |
1729 | CSR_GPIO_IN, | |
1730 | CSR_RESET, | |
1731 | CSR_GP_CNTRL, | |
1732 | CSR_HW_REV, | |
1733 | CSR_EEPROM_REG, | |
1734 | CSR_EEPROM_GP, | |
1735 | CSR_OTP_GP_REG, | |
1736 | CSR_GIO_REG, | |
1737 | CSR_GP_UCODE_REG, | |
1738 | CSR_GP_DRIVER_REG, | |
1739 | CSR_UCODE_DRV_GP1, | |
1740 | CSR_UCODE_DRV_GP2, | |
1741 | CSR_LED_REG, | |
1742 | CSR_DRAM_INT_TBL_REG, | |
1743 | CSR_GIO_CHICKEN_BITS, | |
1744 | CSR_ANA_PLL_CFG, | |
1745 | CSR_HW_REV_WA_REG, | |
1746 | CSR_DBG_HPET_MEM_REG | |
1747 | }; | |
1748 | IWL_ERR(trans, "CSR values:\n"); | |
1749 | IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " | |
1750 | "CSR_INT_PERIODIC_REG)\n"); | |
1751 | for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { | |
1752 | IWL_ERR(trans, " %25s: 0X%08x\n", | |
1753 | get_csr_string(csr_tbl[i]), | |
1042db2a | 1754 | iwl_read32(trans, csr_tbl[i])); |
ff620849 EG |
1755 | } |
1756 | } | |
1757 | ||
87e5666c EG |
1758 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
1759 | /* creation and removal of debugfs files */ |
1760 | #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ | |
5a878bf6 | 1761 | if (!debugfs_create_file(#name, mode, parent, trans, \ |
87e5666c EG |
1762 | &iwl_dbgfs_##name##_ops)) \ |
1763 | return -ENOMEM; \ | |
1764 | } while (0) | |
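/*
 * Editor's note: for instance, DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR)
 * creates a debugfs file named "tx_queue" under 'dir', with the trans
 * pointer as private data and iwl_dbgfs_tx_queue_ops as its file
 * operations (see the dbgfs_register function below).
 */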
1765 | ||
1766 | /* file operations */ |
1767 | #define DEBUGFS_READ_FUNC(name) \ | |
1768 | static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ | |
1769 | char __user *user_buf, \ | |
1770 | size_t count, loff_t *ppos); | |
1771 | ||
1772 | #define DEBUGFS_WRITE_FUNC(name) \ | |
1773 | static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ | |
1774 | const char __user *user_buf, \ | |
1775 | size_t count, loff_t *ppos); | |
1776 | ||
1777 | ||
1778 | static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file) | |
1779 | { | |
1780 | file->private_data = inode->i_private; | |
1781 | return 0; | |
1782 | } | |
1783 | ||
1784 | #define DEBUGFS_READ_FILE_OPS(name) \ | |
1785 | DEBUGFS_READ_FUNC(name); \ | |
1786 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1787 | .read = iwl_dbgfs_##name##_read, \ | |
1788 | .open = iwl_dbgfs_open_file_generic, \ | |
1789 | .llseek = generic_file_llseek, \ | |
1790 | }; | |
1791 | ||
16db88ba EG |
1792 | #define DEBUGFS_WRITE_FILE_OPS(name) \ |
1793 | DEBUGFS_WRITE_FUNC(name); \ | |
1794 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1795 | .write = iwl_dbgfs_##name##_write, \ | |
1796 | .open = iwl_dbgfs_open_file_generic, \ | |
1797 | .llseek = generic_file_llseek, \ | |
1798 | }; | |
1799 | ||
87e5666c EG |
1800 | #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ |
1801 | DEBUGFS_READ_FUNC(name); \ | |
1802 | DEBUGFS_WRITE_FUNC(name); \ | |
1803 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1804 | .write = iwl_dbgfs_##name##_write, \ | |
1805 | .read = iwl_dbgfs_##name##_read, \ | |
1806 | .open = iwl_dbgfs_open_file_generic, \ | |
1807 | .llseek = generic_file_llseek, \ | |
1808 | }; | |
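/*
 * Editor's note: e.g. DEBUGFS_READ_WRITE_FILE_OPS(interrupt) forward-
 * declares iwl_dbgfs_interrupt_read()/iwl_dbgfs_interrupt_write() and
 * defines iwl_dbgfs_interrupt_ops wiring them up, matching the
 * DEBUGFS_ADD_FILE(interrupt, ...) call in dbgfs_register below.
 */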
1809 | ||
87e5666c EG |
1810 | static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, |
1811 | char __user *user_buf, | |
8ad71bef EG |
1812 | size_t count, loff_t *ppos) |
1813 | { | |
5a878bf6 | 1814 | struct iwl_trans *trans = file->private_data; |
8ad71bef | 1815 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
87e5666c EG |
1816 | struct iwl_tx_queue *txq; |
1817 | struct iwl_queue *q; | |
1818 | char *buf; | |
1819 | int pos = 0; | |
1820 | int cnt; | |
1821 | int ret; | |
fd656935 | 1822 | const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num; |
87e5666c | 1823 | |
8ad71bef | 1824 | if (!trans_pcie->txq) { |
3e10caeb | 1825 | IWL_ERR(trans, "txq not ready\n"); |
87e5666c EG |
1826 | return -EAGAIN; |
1827 | } | |
1828 | buf = kzalloc(bufsz, GFP_KERNEL); | |
1829 | if (!buf) | |
1830 | return -ENOMEM; | |
1831 | ||
5a878bf6 | 1832 | for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { |
8ad71bef | 1833 | txq = &trans_pcie->txq[cnt]; |
87e5666c EG |
1834 | q = &txq->q; |
1835 | pos += scnprintf(buf + pos, bufsz - pos, | |
1836 | "hwq %.2d: read=%u write=%u stop=%d" | |
1837 | " swq_id=%#.2x (ac %d/hwq %d)\n", | |
1838 | cnt, q->read_ptr, q->write_ptr, | |
8ad71bef | 1839 | !!test_bit(cnt, trans_pcie->queue_stopped), |
87e5666c EG |
1840 | txq->swq_id, txq->swq_id & 3, |
1841 | (txq->swq_id >> 2) & 0x1f); | |
1842 | if (cnt >= 4) | |
1843 | continue; | |
1844 | /* for the ACs, display the stop count too */ | |
1845 | pos += scnprintf(buf + pos, bufsz - pos, | |
8ad71bef EG |
1846 | " stop-count: %d\n", |
1847 | atomic_read(&trans_pcie->queue_stop_count[cnt])); | |
87e5666c EG |
1848 | } |
1849 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1850 | kfree(buf); | |
1851 | return ret; | |
1852 | } | |
1853 | ||
1854 | static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, | |
1855 | char __user *user_buf, | |
1856 | size_t count, loff_t *ppos) { | |
5a878bf6 EG |
1857 | struct iwl_trans *trans = file->private_data; |
1858 | struct iwl_trans_pcie *trans_pcie = | |
1859 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1860 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | |
87e5666c EG |
1861 | char buf[256]; |
1862 | int pos = 0; | |
1863 | const size_t bufsz = sizeof(buf); | |
1864 | ||
1865 | pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", | |
1866 | rxq->read); | |
1867 | pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", | |
1868 | rxq->write); | |
1869 | pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", | |
1870 | rxq->free_count); | |
1871 | if (rxq->rb_stts) { | |
1872 | pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", | |
1873 | le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); | |
1874 | } else { | |
1875 | pos += scnprintf(buf + pos, bufsz - pos, | |
1876 | "closed_rb_num: Not Allocated\n"); | |
1877 | } | |
1878 | return simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1879 | } | |
1880 | ||
7ff94706 EG |
1881 | static ssize_t iwl_dbgfs_log_event_read(struct file *file, |
1882 | char __user *user_buf, | |
1883 | size_t count, loff_t *ppos) | |
1884 | { | |
1885 | struct iwl_trans *trans = file->private_data; | |
1886 | char *buf; | |
1887 | int pos = 0; | |
1888 | ssize_t ret = -ENOMEM; | |
1889 | ||
6bb78847 | 1890 | ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true); |
7ff94706 EG |
1891 | if (buf) { |
1892 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1893 | kfree(buf); | |
1894 | } | |
1895 | return ret; | |
1896 | } | |
1897 | ||
1898 | static ssize_t iwl_dbgfs_log_event_write(struct file *file, | |
1899 | const char __user *user_buf, | |
1900 | size_t count, loff_t *ppos) | |
1901 | { | |
1902 | struct iwl_trans *trans = file->private_data; | |
1903 | u32 event_log_flag; | |
1904 | char buf[8]; | |
1905 | int buf_size; | |
1906 | ||
1907 | memset(buf, 0, sizeof(buf)); | |
1908 | buf_size = min(count, sizeof(buf) - 1); | |
1909 | if (copy_from_user(buf, user_buf, buf_size)) | |
1910 | return -EFAULT; | |
1911 | if (sscanf(buf, "%d", &event_log_flag) != 1) | |
1912 | return -EFAULT; | |
1913 | if (event_log_flag == 1) | |
6bb78847 | 1914 | iwl_dump_nic_event_log(trans, true, NULL, false); |
7ff94706 EG |
1915 | |
1916 | return count; | |
1917 | } | |
1918 | ||
1f7b6172 EG |
1919 | static ssize_t iwl_dbgfs_interrupt_read(struct file *file, |
1920 | char __user *user_buf, | |
1921 | size_t count, loff_t *ppos) { | |
1922 | ||
1923 | struct iwl_trans *trans = file->private_data; | |
1924 | struct iwl_trans_pcie *trans_pcie = | |
1925 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1926 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
1927 | ||
1928 | int pos = 0; | |
1929 | char *buf; | |
1930 | int bufsz = 24 * 64; /* 24 items * 64 char per item */ | |
1931 | ssize_t ret; | |
1932 | ||
1933 | buf = kzalloc(bufsz, GFP_KERNEL); | |
1934 | if (!buf) { | |
1935 | IWL_ERR(trans, "Can not allocate Buffer\n"); | |
1936 | return -ENOMEM; | |
1937 | } | |
1938 | ||
1939 | pos += scnprintf(buf + pos, bufsz - pos, | |
1940 | "Interrupt Statistics Report:\n"); | |
1941 | ||
1942 | pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", | |
1943 | isr_stats->hw); | |
1944 | pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", | |
1945 | isr_stats->sw); | |
1946 | if (isr_stats->sw || isr_stats->hw) { | |
1947 | pos += scnprintf(buf + pos, bufsz - pos, | |
1948 | "\tLast Restarting Code: 0x%X\n", | |
1949 | isr_stats->err_code); | |
1950 | } | |
1951 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1952 | pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", | |
1953 | isr_stats->sch); | |
1954 | pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", | |
1955 | isr_stats->alive); | |
1956 | #endif | |
1957 | pos += scnprintf(buf + pos, bufsz - pos, | |
1958 | "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); | |
1959 | ||
1960 | pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", | |
1961 | isr_stats->ctkill); | |
1962 | ||
1963 | pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", | |
1964 | isr_stats->wakeup); | |
1965 | ||
1966 | pos += scnprintf(buf + pos, bufsz - pos, | |
1967 | "Rx command responses:\t\t %u\n", isr_stats->rx); | |
1968 | ||
1969 | pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", | |
1970 | isr_stats->tx); | |
1971 | ||
1972 | pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", | |
1973 | isr_stats->unhandled); | |
1974 | ||
1975 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1976 | kfree(buf); | |
1977 | return ret; | |
1978 | } | |
1979 | ||
1980 | static ssize_t iwl_dbgfs_interrupt_write(struct file *file, | |
1981 | const char __user *user_buf, | |
1982 | size_t count, loff_t *ppos) | |
1983 | { | |
1984 | struct iwl_trans *trans = file->private_data; | |
1985 | struct iwl_trans_pcie *trans_pcie = | |
1986 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1987 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
1988 | ||
1989 | char buf[8]; | |
1990 | int buf_size; | |
1991 | u32 reset_flag; | |
1992 | ||
1993 | memset(buf, 0, sizeof(buf)); | |
1994 | buf_size = min(count, sizeof(buf) - 1); | |
1995 | if (copy_from_user(buf, user_buf, buf_size)) | |
1996 | return -EFAULT; | |
1997 | if (sscanf(buf, "%x", &reset_flag) != 1) | |
1998 | return -EFAULT; | |
1999 | if (reset_flag == 0) | |
2000 | memset(isr_stats, 0, sizeof(*isr_stats)); | |
2001 | ||
2002 | return count; | |
2003 | } | |
2004 | ||
16db88ba EG |
2005 | static ssize_t iwl_dbgfs_csr_write(struct file *file, |
2006 | const char __user *user_buf, | |
2007 | size_t count, loff_t *ppos) | |
2008 | { | |
2009 | struct iwl_trans *trans = file->private_data; | |
2010 | char buf[8]; | |
2011 | int buf_size; | |
2012 | int csr; | |
2013 | ||
2014 | memset(buf, 0, sizeof(buf)); | |
2015 | buf_size = min(count, sizeof(buf) - 1); | |
2016 | if (copy_from_user(buf, user_buf, buf_size)) | |
2017 | return -EFAULT; | |
2018 | if (sscanf(buf, "%d", &csr) != 1) | |
2019 | return -EFAULT; | |
2020 | ||
2021 | iwl_dump_csr(trans); | |
2022 | ||
2023 | return count; | |
2024 | } | |
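/*
 * Editor's note: the integer parsed into 'csr' is not used further;
 * writing any number to this debugfs file simply triggers a full CSR
 * dump via iwl_dump_csr().
 */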
2025 | ||
16db88ba EG |
2026 | static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, |
2027 | char __user *user_buf, | |
2028 | size_t count, loff_t *ppos) | |
2029 | { | |
2030 | struct iwl_trans *trans = file->private_data; | |
2031 | char *buf; | |
2032 | int pos = 0; | |
2033 | ssize_t ret = -EFAULT; | |
2034 | ||
2035 | ret = pos = iwl_dump_fh(trans, &buf, true); | |
2036 | if (buf) { | |
2037 | ret = simple_read_from_buffer(user_buf, | |
2038 | count, ppos, buf, pos); | |
2039 | kfree(buf); | |
2040 | } | |
2041 | ||
2042 | return ret; | |
2043 | } | |
2044 | ||
7ff94706 | 2045 | DEBUGFS_READ_WRITE_FILE_OPS(log_event); |
1f7b6172 | 2046 | DEBUGFS_READ_WRITE_FILE_OPS(interrupt); |
16db88ba | 2047 | DEBUGFS_READ_FILE_OPS(fh_reg); |
87e5666c EG |
2048 | DEBUGFS_READ_FILE_OPS(rx_queue); |
2049 | DEBUGFS_READ_FILE_OPS(tx_queue); | |
16db88ba | 2050 | DEBUGFS_WRITE_FILE_OPS(csr); |
87e5666c EG |
2051 | |
2052 | /* | |
2053 | * Create the debugfs files and directories | |
2054 | * | |
2055 | */ | |
2056 | static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, | |
2057 | struct dentry *dir) | |
2058 | { | |
87e5666c EG |
2059 | DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); |
2060 | DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); | |
7ff94706 | 2061 | DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR); |
1f7b6172 | 2062 | DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); |
16db88ba EG |
2063 | DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); |
2064 | DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); | |
87e5666c EG |
2065 | return 0; |
2066 | } | |
2067 | #else | |
2068 | static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, | |
2069 | struct dentry *dir) | |
2070 | { return 0; } | |
2071 | ||
2072 | #endif /*CONFIG_IWLWIFI_DEBUGFS */ | |
2073 | ||
e6bb4c9c | 2074 | const struct iwl_trans_ops trans_ops_pcie = { |
57a1dc89 | 2075 | .start_hw = iwl_trans_pcie_start_hw, |
cc56feb2 | 2076 | .stop_hw = iwl_trans_pcie_stop_hw, |
ed6a3803 | 2077 | .fw_alive = iwl_trans_pcie_fw_alive, |
e6bb4c9c | 2078 | .start_device = iwl_trans_pcie_start_device, |
e6bb4c9c | 2079 | .stop_device = iwl_trans_pcie_stop_device, |
48d42c42 | 2080 | |
e13c0c59 | 2081 | .wake_any_queue = iwl_trans_pcie_wake_any_queue, |
48d42c42 | 2082 | |
e6bb4c9c | 2083 | .send_cmd = iwl_trans_pcie_send_cmd, |
c85eb619 | 2084 | |
e6bb4c9c | 2085 | .tx = iwl_trans_pcie_tx, |
a0eaad71 | 2086 | .reclaim = iwl_trans_pcie_reclaim, |
34c1b7ba | 2087 | |
7f01d567 | 2088 | .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, |
288712a6 | 2089 | .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc, |
c91bd124 | 2090 | .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, |
34c1b7ba | 2091 | |
e6bb4c9c | 2092 | .kick_nic = iwl_trans_pcie_kick_nic, |
1e89cbac | 2093 | |
e6bb4c9c | 2094 | .free = iwl_trans_pcie_free, |
e20d4341 | 2095 | .stop_queue = iwl_trans_pcie_stop_queue, |
87e5666c EG |
2096 | |
2097 | .dbgfs_register = iwl_trans_pcie_dbgfs_register, | |
5f178cd2 EG |
2098 | |
2099 | .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, | |
f22be624 | 2100 | .check_stuck_queue = iwl_trans_pcie_check_stuck_queue, |
5f178cd2 | 2101 | |
c01a4047 | 2102 | #ifdef CONFIG_PM_SLEEP |
57210f7c EG |
2103 | .suspend = iwl_trans_pcie_suspend, |
2104 | .resume = iwl_trans_pcie_resume, | |
c01a4047 | 2105 | #endif |
03905495 EG |
2106 | .write8 = iwl_trans_pcie_write8, |
2107 | .write32 = iwl_trans_pcie_write32, | |
2108 | .read32 = iwl_trans_pcie_read32, | |
e6bb4c9c | 2109 | }; |
a42a1844 | 2110 | |
a42a1844 EG |
2111 | /* PCI registers */ |
2112 | #define PCI_CFG_RETRY_TIMEOUT 0x041 | |
2113 | ||
2114 | struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, | |
2115 | struct pci_dev *pdev, | |
2116 | const struct pci_device_id *ent) | |
2117 | { | |
a42a1844 EG |
2118 | struct iwl_trans_pcie *trans_pcie; |
2119 | struct iwl_trans *trans; | |
2120 | u16 pci_cmd; | |
2121 | int err; | |
2122 | ||
2123 | trans = kzalloc(sizeof(struct iwl_trans) + | |
2124 | sizeof(struct iwl_trans_pcie), GFP_KERNEL); | |
2125 | ||
2126 | if (WARN_ON(!trans)) | |
2127 | return NULL; | |
2128 | ||
2129 | trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
2130 | ||
2131 | trans->ops = &trans_ops_pcie; | |
2132 | trans->shrd = shrd; | |
2133 | trans_pcie->trans = trans; | |
2134 | spin_lock_init(&trans->hcmd_lock); | |
2135 | ||
2136 | /* W/A - seems to solve weird behavior. We need to remove this if we | |
2137 | * don't want to stay in L1 all the time. This wastes a lot of power */ | |
2138 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | |
2139 | PCIE_LINK_STATE_CLKPM); | |
2140 | ||
2141 | if (pci_enable_device(pdev)) { | |
2142 | err = -ENODEV; | |
2143 | goto out_no_pci; | |
2144 | } | |
2145 | ||
2146 | pci_set_master(pdev); | |
2147 | ||
2148 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | |
2149 | if (!err) | |
2150 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); | |
2151 | if (err) { | |
2152 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
2153 | if (!err) | |
2154 | err = pci_set_consistent_dma_mask(pdev, | |
2155 | DMA_BIT_MASK(32)); | |
2156 | /* both attempts failed: */ | |
2157 | if (err) { | |
2158 | dev_printk(KERN_ERR, &pdev->dev, | |
2159 | "No suitable DMA available.\n"); | |
2160 | goto out_pci_disable_device; | |
2161 | } | |
2162 | } | |
2163 | ||
2164 | err = pci_request_regions(pdev, DRV_NAME); | |
2165 | if (err) { | |
2166 | dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed"); | |
2167 | goto out_pci_disable_device; | |
2168 | } | |
2169 | ||
2170 | trans_pcie->hw_base = pci_iomap(pdev, 0, 0); | |
2171 | if (!trans_pcie->hw_base) { | |
2172 | dev_printk(KERN_ERR, &pdev->dev, "pci_iomap failed"); | |
2173 | err = -ENODEV; | |
2174 | goto out_pci_release_regions; | |
2175 | } | |
2176 | ||
a42a1844 EG |
2177 | dev_printk(KERN_INFO, &pdev->dev, |
2178 | "pci_resource_len = 0x%08llx\n", | |
2179 | (unsigned long long) pci_resource_len(pdev, 0)); | |
2180 | dev_printk(KERN_INFO, &pdev->dev, | |
2181 | "pci_resource_base = %p\n", trans_pcie->hw_base); | |
2182 | ||
2183 | dev_printk(KERN_INFO, &pdev->dev, | |
2184 | "HW Revision ID = 0x%X\n", pdev->revision); | |
2185 | ||
2186 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | |
2187 | * PCI Tx retries from interfering with C3 CPU state */ | |
2188 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | |
2189 | ||
2190 | err = pci_enable_msi(pdev); | |
2191 | if (err) | |
2192 | dev_printk(KERN_ERR, &pdev->dev, | |
2193 | "pci_enable_msi failed(0X%x)", err); | |
2194 | ||
2195 | trans->dev = &pdev->dev; | |
2196 | trans->irq = pdev->irq; | |
2197 | trans_pcie->pci_dev = pdev; | |
2198 | ||
2199 | /* TODO: Move this away, not needed if not MSI */ | |
2200 | /* enable rfkill interrupt: hw bug w/a */ | |
2201 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); | |
2202 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { | |
2203 | pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; | |
2204 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); | |
2205 | } | |
2206 | ||
2207 | return trans; | |
2208 | ||
2209 | out_pci_release_regions: | |
2210 | pci_release_regions(pdev); | |
2211 | out_pci_disable_device: | |
2212 | pci_disable_device(pdev); | |
2213 | out_no_pci: | |
2214 | kfree(trans); | |
2215 | return NULL; | |
2216 | } | |
2217 |