/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes once the transport layer tx_free is here */
#include "iwl-agn.h"
#include "iwl-shared.h"

static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = bus(trans)->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(bus(trans)->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
					      GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans->shrd->cmd_queue) {
		txq->skbs = kzalloc(sizeof(txq->skbs[0]) *
				    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kzalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non-allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = bus(trans)->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(trans).max_txq_num; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param trans
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			hw_params(trans).max_txq_num, GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}

static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(bus(trans), SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

static int iwl_nic_init(struct iwl_trans *trans)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_apm_init(priv(trans));

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING,
		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_set_pwr_vmain(trans);

	priv(trans)->cfg->lib->nic_config(priv(trans));

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (hw_params(trans).shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	set_bit(STATUS_INIT, &trans->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};
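
/*
 * Note: the bare "2" in the second IWL_TX_FIFO_BE_IPAN entry above is
 * IEEE80211_AC_BE; mac80211 numbers the access categories
 * VO = 0, VI = 1, BE = 2, BK = 3.
 */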
742 | ||
743 | static const u8 iwlagn_bss_ac_to_fifo[] = { | |
744 | IWL_TX_FIFO_VO, | |
745 | IWL_TX_FIFO_VI, | |
746 | IWL_TX_FIFO_BE, | |
747 | IWL_TX_FIFO_BK, | |
748 | }; | |
749 | static const u8 iwlagn_bss_ac_to_queue[] = { | |
750 | 0, 1, 2, 3, | |
751 | }; | |
752 | static const u8 iwlagn_pan_ac_to_fifo[] = { | |
753 | IWL_TX_FIFO_VO_IPAN, | |
754 | IWL_TX_FIFO_VI_IPAN, | |
755 | IWL_TX_FIFO_BE_IPAN, | |
756 | IWL_TX_FIFO_BK_IPAN, | |
757 | }; | |
758 | static const u8 iwlagn_pan_ac_to_queue[] = { | |
759 | 7, 6, 5, 4, | |
760 | }; | |
761 | ||
static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	if (iwl_is_rfkill(trans->shrd)) {
		iwl_set_hw_rfkill_state(priv(trans), true);
		iwl_enable_interrupts(trans);
		return -ERFKILL;
	}

	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask.
 * Must be called under priv->shrd->lock and with mac access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	iwl_write_prph(bus(trans), SCD_TXFACT, mask);
}

static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	struct iwl_priv *priv = priv(trans);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);

	iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans));
	iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
		iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
		       IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&trans_pcie->queue_stopped[0], 0,
	       sizeof(trans_pcie->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&trans_pcie->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	trans_pcie->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(trans_pcie, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, 0);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans->shrd->lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(bus(trans),
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(trans, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(bus(trans),
					      FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush any pending tasklet */
	synchronize_irq(bus(trans)->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	/* stop and reset the on-board processor */
	iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	iwl_trans_pcie_disable_sync_irq(trans);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
		iwl_trans_tx_stop(trans);
		iwl_trans_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv(trans));
}

static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		struct iwl_device_cmd *dev_cmd, u8 ctx, u8 sta_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_cmd *tx_cmd = &dev_cmd->cmd.tx;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	u8 wait_write_ptr = 0;
	u8 txq_id;
	u8 tid = 0;
	bool is_agg = false;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = trans_pcie->mcast_queue[ctx];

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id =
		    trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		tid_data = &trans->shrd->tid_data[sta_id][tid];

		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			return -1;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state == IWL_AGG_ON) {
			txq_id = tid_data->agg.txq_id;
			is_agg = true;
		}
	}

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	/* Set up driver data for this TFD */
	txq->skbs[q->write_ptr] = skb;
	txq->cmd[q->write_ptr] = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(bus(trans)->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
			dma_unmap_single(bus(trans)->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (is_agg)
		iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
						  le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv(trans),
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	if (ieee80211_is_data_qos(fc)) {
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			trans->shrd->tid_data[sta_id][tid].seq_number =
				seq_number;
	}

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	return 0;
}

static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(bus(trans), CSR_RESET, 0);
}

static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)trans);

	iwl_alloc_isr_ict(trans);

	err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
			  DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
		iwl_free_isr_ict(trans);
		return err;
	}

	INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
	return 0;
}

static int iwlagn_txq_check_empty(struct iwl_trans *trans,
				  int sta_id, u8 tid, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
	struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];

	lockdep_assert_held(&trans->shrd->sta_lock);

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			IWL_DEBUG_HT(trans,
				"HW queue empty: continue DELBA flow\n");
			iwl_trans_pcie_txq_agg_disable(trans, txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			iwl_stop_tx_ba_trans_ready(priv(trans),
						   NUM_IWL_RXON_CTX,
						   sta_id, tid);
			iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(trans,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			iwl_start_tx_ba_trans_ready(priv(trans),
						    NUM_IWL_RXON_CTX,
						    sta_id, tid);
		}
		break;
	}

	return 0;
}

static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
				   int sta_id, int tid, int freed)
{
	lockdep_assert_held(&trans->shrd->sta_lock);

	if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
			trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
			freed);
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
	}
}

static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
				   int txq_id, int ssn, u32 status,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;
	u8 agg_state;
	bool cond;

	txq->time_stamp = jiffies;

	if (txq->sched_retry) {
		agg_state =
			trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
		cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
	} else {
		cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
	}

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
				"scd_ssn=%d idx=%d txq=%d swq=%d\n",
				ssn, tfd_num, txq_id, txq->swq_id);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
			iwl_wake_queue(trans, txq);
	}

	iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
	iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
}

static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	iwl_trans_pcie_tx_free(trans);
	iwl_trans_pcie_rx_free(trans);
	free_irq(bus(trans)->irq, trans);
	iwl_free_isr_ict(trans);
	trans->shrd->trans = NULL;
	kfree(trans);
}

#ifdef CONFIG_PM

static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend
	 * function first, but since iwl_mac_stop() has no knowledge of
	 * who the caller is, it will not call apm_ops.stop() to stop the
	 * DMA operation.  Call apm_ops.stop here to make sure we stop
	 * the DMA.
	 *
	 * But of course ... if we have configured WoWLAN then we did other
	 * things already :-)
	 */
	if (!trans->shrd->wowlan)
		iwl_apm_stop(priv(trans));

	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill = false;

	iwl_enable_interrupts(trans);

	if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);

	return 0;
}
#else /* CONFIG_PM */
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{ return 0; }

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{ return 0; }

#endif /* CONFIG_PM */

e13c0c59 EG |
1410 | static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans, |
1411 | u8 ctx) | |
1412 | { | |
1413 | u8 ac, txq_id; | |
1414 | struct iwl_trans_pcie *trans_pcie = | |
1415 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1416 | ||
1417 | for (ac = 0; ac < AC_NUM; ac++) { | |
1418 | txq_id = trans_pcie->ac_to_queue[ctx][ac]; | |
1419 | IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n", | |
1420 | ac, | |
8ad71bef | 1421 | (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0) |
e13c0c59 | 1422 | ? "stopped" : "awake"); |
8ad71bef | 1423 | iwl_wake_queue(trans, &trans_pcie->txq[txq_id]); |
e13c0c59 EG |
1424 | } |
1425 | } | |
1426 | ||
e6bb4c9c | 1427 | const struct iwl_trans_ops trans_ops_pcie; |
e419d62d | 1428 | |
e6bb4c9c EG |
1429 | static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd) |
1430 | { | |
1431 | struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) + | |
1432 | sizeof(struct iwl_trans_pcie), | |
1433 | GFP_KERNEL); | |
1434 | if (iwl_trans) { | |
5a878bf6 EG |
1435 | struct iwl_trans_pcie *trans_pcie = |
1436 | IWL_TRANS_GET_PCIE_TRANS(iwl_trans); | |
e6bb4c9c EG |
1437 | iwl_trans->ops = &trans_ops_pcie; |
1438 | iwl_trans->shrd = shrd; | |
5a878bf6 | 1439 | trans_pcie->trans = iwl_trans; |
72012474 | 1440 | spin_lock_init(&iwl_trans->hcmd_lock); |
e6bb4c9c | 1441 | } |
ab6cf8e8 | 1442 | |
e6bb4c9c EG |
1443 | return iwl_trans; |
1444 | } | |
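
/*
 * iwl_trans_pcie_alloc() sizes one kzalloc() for both the generic
 * transport struct and the PCIe-private one, which is why
 * IWL_TRANS_GET_PCIE_TRANS() can be simple pointer arithmetic into the
 * same block. A hedged userspace sketch of that layout trick; all names
 * below are invented for illustration.
 */
#include <stdlib.h>

struct generic_trans {
	const void *ops;
	char priv[];		/* bus-private part lives right behind us */
};

struct pcie_trans {
	int irq;
};

/* One allocation, two structs: mirrors the sizeof(a) + sizeof(b)
 * kzalloc above, so freeing the outer struct frees both parts. */
static struct generic_trans *trans_alloc(void)
{
	return calloc(1, sizeof(struct generic_trans) +
			 sizeof(struct pcie_trans));
}

static struct pcie_trans *get_pcie(struct generic_trans *t)
{
	return (struct pcie_trans *)t->priv;
}
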
47c1b496 | 1445 | |
e20d4341 EG |
1446 | static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id) |
1447 | { | |
8ad71bef EG |
1448 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1449 | ||
1450 | iwl_stop_queue(trans, &trans_pcie->txq[txq_id]); | |
e20d4341 EG |
1451 | } |
1452 | ||
5f178cd2 EG |
1453 | #define IWL_FLUSH_WAIT_MS 2000 |
1454 | ||
1455 | static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) | |
1456 | { | |
8ad71bef | 1457 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
5f178cd2 EG |
1458 | struct iwl_tx_queue *txq; |
1459 | struct iwl_queue *q; | |
1460 | int cnt; | |
1461 | unsigned long now = jiffies; | |
1462 | int ret = 0; | |
1463 | ||
1464 | /* waiting for all the tx frames to complete might take a while */ | 
1465 | for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { | |
1466 | if (cnt == trans->shrd->cmd_queue) | |
1467 | continue; | |
8ad71bef | 1468 | txq = &trans_pcie->txq[cnt]; |
5f178cd2 EG |
1469 | q = &txq->q; |
1470 | while (q->read_ptr != q->write_ptr && !time_after(jiffies, | |
1471 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) | |
1472 | msleep(1); | |
1473 | ||
1474 | if (q->read_ptr != q->write_ptr) { | |
1475 | IWL_ERR(trans, "failed to flush all tx fifo queues\n"); | 
1476 | ret = -ETIMEDOUT; | |
1477 | break; | |
1478 | } | |
1479 | } | |
1480 | return ret; | |
1481 | } | |
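
/*
 * The wait loop above is a bounded poll: sleep 1 ms at a time, give up
 * once the IWL_FLUSH_WAIT_MS budget is spent, and re-check the condition
 * one last time before declaring a timeout. The same shape in plain C;
 * nothing is taken from the driver beyond the 2000 ms budget, and
 * nanosleep() stands in for the kernel's msleep().
 */
#include <stdbool.h>
#include <time.h>

#define FLUSH_WAIT_MS 2000

/* Poll an emptiness predicate in 1 ms steps until it holds or the
 * time budget runs out; the final check decides success vs timeout. */
static bool wait_until_empty(bool (*empty)(void *ctx), void *ctx)
{
	const struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000000 };
	int waited_ms;

	for (waited_ms = 0; waited_ms < FLUSH_WAIT_MS; waited_ms++) {
		if (empty(ctx))
			return true;
		nanosleep(&one_ms, NULL);
	}
	return empty(ctx);
}
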
1482 | ||
f22be624 EG |
1483 | /* |
1484 | * On every watchdog tick we check the (latest) time stamp. If it has not
1485 | * changed during the timeout period and the queue is not empty, we reset the firmware.
1486 | */ | |
1487 | static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt) | |
1488 | { | |
8ad71bef EG |
1489 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1490 | struct iwl_tx_queue *txq = &trans_pcie->txq[cnt]; | |
f22be624 EG |
1491 | struct iwl_queue *q = &txq->q; |
1492 | unsigned long timeout; | |
1493 | ||
1494 | if (q->read_ptr == q->write_ptr) { | |
1495 | txq->time_stamp = jiffies; | |
1496 | return 0; | |
1497 | } | |
1498 | ||
1499 | timeout = txq->time_stamp + | |
1500 | msecs_to_jiffies(hw_params(trans).wd_timeout); | |
1501 | ||
1502 | if (time_after(jiffies, timeout)) { | |
1503 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id, | |
1504 | hw_params(trans).wd_timeout); | |
1505 | return 1; | |
1506 | } | |
1507 | ||
1508 | return 0; | |
1509 | } | |
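
/*
 * The time_after(jiffies, timeout) test above stays correct even when the
 * jiffies counter wraps, because the comparison is done on the signed
 * difference rather than on raw magnitudes. A standalone demonstration;
 * the macro body matches the well-known <linux/jiffies.h> definition,
 * and the sample values are arbitrary.
 */
#include <assert.h>

typedef unsigned long jtime_t;

/* "a is after b": true iff the signed distance from a to b is negative. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	jtime_t now = 5;			/* counter just wrapped      */
	jtime_t deadline = (jtime_t)-10;	/* deadline set before wrap  */

	assert(time_after(now, deadline));	/* wrap-safe: deadline passed */
	assert(!(now > deadline));		/* naive compare gets it wrong */
	return 0;
}
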
1510 | ||
87e5666c EG |
1511 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
1512 | /* creation and removal of debugfs files */ | 
1513 | #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ | |
5a878bf6 | 1514 | if (!debugfs_create_file(#name, mode, parent, trans, \ |
87e5666c EG |
1515 | &iwl_dbgfs_##name##_ops)) \ |
1516 | return -ENOMEM; \ | |
1517 | } while (0) | |
1518 | ||
1519 | /* file operations */ | 
1520 | #define DEBUGFS_READ_FUNC(name) \ | |
1521 | static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ | |
1522 | char __user *user_buf, \ | |
1523 | size_t count, loff_t *ppos); | |
1524 | ||
1525 | #define DEBUGFS_WRITE_FUNC(name) \ | |
1526 | static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ | |
1527 | const char __user *user_buf, \ | |
1528 | size_t count, loff_t *ppos); | |
1529 | ||
1530 | ||
1531 | static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file) | |
1532 | { | |
1533 | file->private_data = inode->i_private; | |
1534 | return 0; | |
1535 | } | |
1536 | ||
1537 | #define DEBUGFS_READ_FILE_OPS(name) \ | |
1538 | DEBUGFS_READ_FUNC(name); \ | |
1539 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1540 | .read = iwl_dbgfs_##name##_read, \ | |
1541 | .open = iwl_dbgfs_open_file_generic, \ | |
1542 | .llseek = generic_file_llseek, \ | |
1543 | }; | |
1544 | ||
16db88ba EG |
1545 | #define DEBUGFS_WRITE_FILE_OPS(name) \ |
1546 | DEBUGFS_WRITE_FUNC(name); \ | |
1547 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1548 | .write = iwl_dbgfs_##name##_write, \ | |
1549 | .open = iwl_dbgfs_open_file_generic, \ | |
1550 | .llseek = generic_file_llseek, \ | |
1551 | }; | |
1552 | ||
87e5666c EG |
1553 | #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ |
1554 | DEBUGFS_READ_FUNC(name); \ | |
1555 | DEBUGFS_WRITE_FUNC(name); \ | |
1556 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | |
1557 | .write = iwl_dbgfs_##name##_write, \ | |
1558 | .read = iwl_dbgfs_##name##_read, \ | |
1559 | .open = iwl_dbgfs_open_file_generic, \ | |
1560 | .llseek = generic_file_llseek, \ | |
1561 | }; | |
1562 | ||
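
/*
 * To make the boilerplate above concrete: DEBUGFS_READ_WRITE_FILE_OPS(interrupt),
 * used near the end of this #ifdef block, expands by hand to the following.
 * This is just the expansion of the macros above, not additional code in
 * the file.
 */
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos);
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos);
static const struct file_operations iwl_dbgfs_interrupt_ops = {
	.write = iwl_dbgfs_interrupt_write,
	.read = iwl_dbgfs_interrupt_read,
	.open = iwl_dbgfs_open_file_generic,
	.llseek = generic_file_llseek,
};
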
1563 | static ssize_t iwl_dbgfs_traffic_log_read(struct file *file, | |
1564 | char __user *user_buf, | |
1565 | size_t count, loff_t *ppos) | |
1566 | { | |
5a878bf6 EG |
1567 | struct iwl_trans *trans = file->private_data; |
1568 | struct iwl_priv *priv = priv(trans); | |
87e5666c EG |
1569 | int pos = 0, ofs = 0; |
1570 | int cnt = 0, entry; | |
5a878bf6 EG |
1571 | struct iwl_trans_pcie *trans_pcie = |
1572 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
87e5666c EG |
1573 | struct iwl_tx_queue *txq; |
1574 | struct iwl_queue *q; | |
5a878bf6 | 1575 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; |
87e5666c EG |
1576 | char *buf; |
1577 | int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + | |
fd656935 | 1578 | (hw_params(trans).max_txq_num * 32 * 8) + 400; |
87e5666c EG |
1579 | const u8 *ptr; |
1580 | ssize_t ret; | |
1581 | ||
8ad71bef | 1582 | if (!trans_pcie->txq) { |
5a878bf6 | 1583 | IWL_ERR(trans, "txq not ready\n"); |
87e5666c EG |
1584 | return -EAGAIN; |
1585 | } | |
1586 | buf = kzalloc(bufsz, GFP_KERNEL); | |
1587 | if (!buf) { | |
5a878bf6 | 1588 | IWL_ERR(trans, "Cannot allocate buffer\n"); | 
87e5666c EG |
1589 | return -ENOMEM; |
1590 | } | |
1591 | pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); | |
5a878bf6 | 1592 | for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { |
8ad71bef | 1593 | txq = &trans_pcie->txq[cnt]; |
87e5666c EG |
1594 | q = &txq->q; |
1595 | pos += scnprintf(buf + pos, bufsz - pos, | |
1596 | "q[%d]: read_ptr: %u, write_ptr: %u\n", | |
1597 | cnt, q->read_ptr, q->write_ptr); | |
1598 | } | |
1599 | if (priv->tx_traffic && | |
5a878bf6 | 1600 | (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) { |
87e5666c EG |
1601 | ptr = priv->tx_traffic; |
1602 | pos += scnprintf(buf + pos, bufsz - pos, | |
5a878bf6 | 1603 | "Tx Traffic idx: %u\n", priv->tx_traffic_idx); |
87e5666c EG |
1604 | for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { |
1605 | for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; | |
1606 | entry++, ofs += 16) { | |
1607 | pos += scnprintf(buf + pos, bufsz - pos, | |
1608 | "0x%.4x ", ofs); | |
1609 | hex_dump_to_buffer(ptr + ofs, 16, 16, 2, | |
1610 | buf + pos, bufsz - pos, 0); | |
1611 | pos += strlen(buf + pos); | |
1612 | if (bufsz - pos > 0) | |
1613 | buf[pos++] = '\n'; | |
1614 | } | |
1615 | } | |
1616 | } | |
1617 | ||
1618 | pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n"); | |
1619 | pos += scnprintf(buf + pos, bufsz - pos, | |
1620 | "read: %u, write: %u\n", | |
1621 | rxq->read, rxq->write); | |
1622 | ||
1623 | if (priv->rx_traffic && | |
5a878bf6 | 1624 | (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) { |
87e5666c EG |
1625 | ptr = priv->rx_traffic; |
1626 | pos += scnprintf(buf + pos, bufsz - pos, | |
5a878bf6 | 1627 | "Rx Traffic idx: %u\n", priv->rx_traffic_idx); |
87e5666c EG |
1628 | for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { |
1629 | for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; | |
1630 | entry++, ofs += 16) { | |
1631 | pos += scnprintf(buf + pos, bufsz - pos, | |
1632 | "0x%.4x ", ofs); | |
1633 | hex_dump_to_buffer(ptr + ofs, 16, 16, 2, | |
1634 | buf + pos, bufsz - pos, 0); | |
1635 | pos += strlen(buf + pos); | |
1636 | if (bufsz - pos > 0) | |
1637 | buf[pos++] = '\n'; | |
1638 | } | |
1639 | } | |
1640 | } | |
1641 | ||
1642 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1643 | kfree(buf); | |
1644 | return ret; | |
1645 | } | |
1646 | ||
1647 | static ssize_t iwl_dbgfs_traffic_log_write(struct file *file, | |
1648 | const char __user *user_buf, | |
1649 | size_t count, loff_t *ppos) | |
1650 | { | |
5a878bf6 | 1651 | struct iwl_trans *trans = file->private_data; |
87e5666c EG |
1652 | char buf[8]; |
1653 | int buf_size; | |
1654 | int traffic_log; | |
1655 | ||
1656 | memset(buf, 0, sizeof(buf)); | |
1657 | buf_size = min(count, sizeof(buf) - 1); | |
1658 | if (copy_from_user(buf, user_buf, buf_size)) | |
1659 | return -EFAULT; | |
1660 | if (sscanf(buf, "%d", &traffic_log) != 1) | |
1661 | return -EFAULT; | |
1662 | if (traffic_log == 0) | |
5a878bf6 | 1663 | iwl_reset_traffic_log(priv(trans)); |
87e5666c EG |
1664 | |
1665 | return count; | |
1666 | } | |
1667 | ||
1668 | static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, | |
1669 | char __user *user_buf, | |
8ad71bef EG |
1670 | size_t count, loff_t *ppos) |
1671 | { | |
5a878bf6 | 1672 | struct iwl_trans *trans = file->private_data; |
8ad71bef | 1673 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
87e5666c EG |
1674 | struct iwl_tx_queue *txq; |
1675 | struct iwl_queue *q; | |
1676 | char *buf; | |
1677 | int pos = 0; | |
1678 | int cnt; | |
1679 | int ret; | |
fd656935 | 1680 | const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num; |
87e5666c | 1681 | |
8ad71bef | 1682 | if (!trans_pcie->txq) { |
3e10caeb | 1683 | IWL_ERR(trans, "txq not ready\n"); |
87e5666c EG |
1684 | return -EAGAIN; |
1685 | } | |
1686 | buf = kzalloc(bufsz, GFP_KERNEL); | |
1687 | if (!buf) | |
1688 | return -ENOMEM; | |
1689 | ||
5a878bf6 | 1690 | for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { |
8ad71bef | 1691 | txq = &trans_pcie->txq[cnt]; |
87e5666c EG |
1692 | q = &txq->q; |
1693 | pos += scnprintf(buf + pos, bufsz - pos, | |
1694 | "hwq %.2d: read=%u write=%u stop=%d" | |
1695 | " swq_id=%#.2x (ac %d/hwq %d)\n", | |
1696 | cnt, q->read_ptr, q->write_ptr, | |
8ad71bef | 1697 | !!test_bit(cnt, trans_pcie->queue_stopped), |
87e5666c EG |
1698 | txq->swq_id, txq->swq_id & 3, |
1699 | (txq->swq_id >> 2) & 0x1f); | |
1700 | if (cnt >= 4) | |
1701 | continue; | |
1702 | /* for the ACs, display the stop count too */ | |
1703 | pos += scnprintf(buf + pos, bufsz - pos, | |
8ad71bef EG |
1704 | " stop-count: %d\n", |
1705 | atomic_read(&trans_pcie->queue_stop_count[cnt])); | |
87e5666c EG |
1706 | } |
1707 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1708 | kfree(buf); | |
1709 | return ret; | |
1710 | } | |
1711 | ||
1712 | static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, | |
1713 | char __user *user_buf, | |
1714 | size_t count, loff_t *ppos) { | |
5a878bf6 EG |
1715 | struct iwl_trans *trans = file->private_data; |
1716 | struct iwl_trans_pcie *trans_pcie = | |
1717 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1718 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | |
87e5666c EG |
1719 | char buf[256]; |
1720 | int pos = 0; | |
1721 | const size_t bufsz = sizeof(buf); | |
1722 | ||
1723 | pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", | |
1724 | rxq->read); | |
1725 | pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", | |
1726 | rxq->write); | |
1727 | pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", | |
1728 | rxq->free_count); | |
1729 | if (rxq->rb_stts) { | |
1730 | pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", | |
1731 | le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); | |
1732 | } else { | |
1733 | pos += scnprintf(buf + pos, bufsz - pos, | |
1734 | "closed_rb_num: Not Allocated\n"); | |
1735 | } | |
1736 | return simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1737 | } | |
1738 | ||
7ff94706 EG |
1739 | static ssize_t iwl_dbgfs_log_event_read(struct file *file, |
1740 | char __user *user_buf, | |
1741 | size_t count, loff_t *ppos) | |
1742 | { | |
1743 | struct iwl_trans *trans = file->private_data; | |
1744 | char *buf; | |
1745 | int pos = 0; | |
1746 | ssize_t ret = -ENOMEM; | |
1747 | ||
6bb78847 | 1748 | ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true); |
7ff94706 EG |
1749 | if (buf) { |
1750 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1751 | kfree(buf); | |
1752 | } | |
1753 | return ret; | |
1754 | } | |
1755 | ||
1756 | static ssize_t iwl_dbgfs_log_event_write(struct file *file, | |
1757 | const char __user *user_buf, | |
1758 | size_t count, loff_t *ppos) | |
1759 | { | |
1760 | struct iwl_trans *trans = file->private_data; | |
1761 | u32 event_log_flag; | |
1762 | char buf[8]; | |
1763 | int buf_size; | |
1764 | ||
1765 | memset(buf, 0, sizeof(buf)); | |
1766 | buf_size = min(count, sizeof(buf) - 1); | |
1767 | if (copy_from_user(buf, user_buf, buf_size)) | |
1768 | return -EFAULT; | |
1769 | if (sscanf(buf, "%u", &event_log_flag) != 1) | 
1770 | return -EFAULT; | |
1771 | if (event_log_flag == 1) | |
6bb78847 | 1772 | iwl_dump_nic_event_log(trans, true, NULL, false); |
7ff94706 EG |
1773 | |
1774 | return count; | |
1775 | } | |
1776 | ||
1f7b6172 EG |
1777 | static ssize_t iwl_dbgfs_interrupt_read(struct file *file, |
1778 | char __user *user_buf, | |
1779 | size_t count, loff_t *ppos) { | |
1780 | ||
1781 | struct iwl_trans *trans = file->private_data; | |
1782 | struct iwl_trans_pcie *trans_pcie = | |
1783 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1784 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
1785 | ||
1786 | int pos = 0; | |
1787 | char *buf; | |
1788 | int bufsz = 24 * 64; /* 24 items * 64 char per item */ | |
1789 | ssize_t ret; | |
1790 | ||
1791 | buf = kzalloc(bufsz, GFP_KERNEL); | |
1792 | if (!buf) { | |
1793 | IWL_ERR(trans, "Cannot allocate buffer\n"); | 
1794 | return -ENOMEM; | |
1795 | } | |
1796 | ||
1797 | pos += scnprintf(buf + pos, bufsz - pos, | |
1798 | "Interrupt Statistics Report:\n"); | |
1799 | ||
1800 | pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", | |
1801 | isr_stats->hw); | |
1802 | pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", | |
1803 | isr_stats->sw); | |
1804 | if (isr_stats->sw || isr_stats->hw) { | |
1805 | pos += scnprintf(buf + pos, bufsz - pos, | |
1806 | "\tLast Restarting Code: 0x%X\n", | |
1807 | isr_stats->err_code); | |
1808 | } | |
1809 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1810 | pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", | |
1811 | isr_stats->sch); | |
1812 | pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", | |
1813 | isr_stats->alive); | |
1814 | #endif | |
1815 | pos += scnprintf(buf + pos, bufsz - pos, | |
1816 | "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); | |
1817 | ||
1818 | pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", | |
1819 | isr_stats->ctkill); | |
1820 | ||
1821 | pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", | |
1822 | isr_stats->wakeup); | |
1823 | ||
1824 | pos += scnprintf(buf + pos, bufsz - pos, | |
1825 | "Rx command responses:\t\t %u\n", isr_stats->rx); | |
1826 | ||
1827 | pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", | |
1828 | isr_stats->tx); | |
1829 | ||
1830 | pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", | |
1831 | isr_stats->unhandled); | |
1832 | ||
1833 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); | |
1834 | kfree(buf); | |
1835 | return ret; | |
1836 | } | |
1837 | ||
1838 | static ssize_t iwl_dbgfs_interrupt_write(struct file *file, | |
1839 | const char __user *user_buf, | |
1840 | size_t count, loff_t *ppos) | |
1841 | { | |
1842 | struct iwl_trans *trans = file->private_data; | |
1843 | struct iwl_trans_pcie *trans_pcie = | |
1844 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
1845 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
1846 | ||
1847 | char buf[8]; | |
1848 | int buf_size; | |
1849 | u32 reset_flag; | |
1850 | ||
1851 | memset(buf, 0, sizeof(buf)); | |
1852 | buf_size = min(count, sizeof(buf) - 1); | |
1853 | if (copy_from_user(buf, user_buf, buf_size)) | |
1854 | return -EFAULT; | |
1855 | if (sscanf(buf, "%x", &reset_flag) != 1) | |
1856 | return -EFAULT; | |
1857 | if (reset_flag == 0) | |
1858 | memset(isr_stats, 0, sizeof(*isr_stats)); | |
1859 | ||
1860 | return count; | |
1861 | } | |
1862 | ||
16db88ba EG |
1863 | static const char *get_csr_string(int cmd) |
1864 | { | |
1865 | switch (cmd) { | |
1866 | IWL_CMD(CSR_HW_IF_CONFIG_REG); | |
1867 | IWL_CMD(CSR_INT_COALESCING); | |
1868 | IWL_CMD(CSR_INT); | |
1869 | IWL_CMD(CSR_INT_MASK); | |
1870 | IWL_CMD(CSR_FH_INT_STATUS); | |
1871 | IWL_CMD(CSR_GPIO_IN); | |
1872 | IWL_CMD(CSR_RESET); | |
1873 | IWL_CMD(CSR_GP_CNTRL); | |
1874 | IWL_CMD(CSR_HW_REV); | |
1875 | IWL_CMD(CSR_EEPROM_REG); | |
1876 | IWL_CMD(CSR_EEPROM_GP); | |
1877 | IWL_CMD(CSR_OTP_GP_REG); | |
1878 | IWL_CMD(CSR_GIO_REG); | |
1879 | IWL_CMD(CSR_GP_UCODE_REG); | |
1880 | IWL_CMD(CSR_GP_DRIVER_REG); | |
1881 | IWL_CMD(CSR_UCODE_DRV_GP1); | |
1882 | IWL_CMD(CSR_UCODE_DRV_GP2); | |
1883 | IWL_CMD(CSR_LED_REG); | |
1884 | IWL_CMD(CSR_DRAM_INT_TBL_REG); | |
1885 | IWL_CMD(CSR_GIO_CHICKEN_BITS); | |
1886 | IWL_CMD(CSR_ANA_PLL_CFG); | |
1887 | IWL_CMD(CSR_HW_REV_WA_REG); | |
1888 | IWL_CMD(CSR_DBG_HPET_MEM_REG); | |
1889 | default: | |
1890 | return "UNKNOWN"; | |
1891 | } | |
1892 | } | |
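
/*
 * IWL_CMD() is not defined in this file; given how it is used in these
 * switch statements, it is presumably the usual stringification helper,
 * along the lines of the following (an assumption, stated here only for
 * readability).
 */
/* Assumed definition: emits "case CSR_FOO: return "CSR_FOO";". */
#define IWL_CMD(x) case x: return #x
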
1893 | ||
1894 | void iwl_dump_csr(struct iwl_trans *trans) | |
1895 | { | |
1896 | int i; | |
1897 | static const u32 csr_tbl[] = { | |
1898 | CSR_HW_IF_CONFIG_REG, | |
1899 | CSR_INT_COALESCING, | |
1900 | CSR_INT, | |
1901 | CSR_INT_MASK, | |
1902 | CSR_FH_INT_STATUS, | |
1903 | CSR_GPIO_IN, | |
1904 | CSR_RESET, | |
1905 | CSR_GP_CNTRL, | |
1906 | CSR_HW_REV, | |
1907 | CSR_EEPROM_REG, | |
1908 | CSR_EEPROM_GP, | |
1909 | CSR_OTP_GP_REG, | |
1910 | CSR_GIO_REG, | |
1911 | CSR_GP_UCODE_REG, | |
1912 | CSR_GP_DRIVER_REG, | |
1913 | CSR_UCODE_DRV_GP1, | |
1914 | CSR_UCODE_DRV_GP2, | |
1915 | CSR_LED_REG, | |
1916 | CSR_DRAM_INT_TBL_REG, | |
1917 | CSR_GIO_CHICKEN_BITS, | |
1918 | CSR_ANA_PLL_CFG, | |
1919 | CSR_HW_REV_WA_REG, | |
1920 | CSR_DBG_HPET_MEM_REG | |
1921 | }; | |
1922 | IWL_ERR(trans, "CSR values:\n"); | |
1923 | IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " | |
1924 | "CSR_INT_PERIODIC_REG)\n"); | |
1925 | for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { | |
1926 | IWL_ERR(trans, " %25s: 0X%08x\n", | |
1927 | get_csr_string(csr_tbl[i]), | |
83ed9015 | 1928 | iwl_read32(bus(trans), csr_tbl[i])); |
16db88ba EG |
1929 | } |
1930 | } | |
1931 | ||
1932 | static ssize_t iwl_dbgfs_csr_write(struct file *file, | |
1933 | const char __user *user_buf, | |
1934 | size_t count, loff_t *ppos) | |
1935 | { | |
1936 | struct iwl_trans *trans = file->private_data; | |
1937 | char buf[8]; | |
1938 | int buf_size; | |
1939 | int csr; | |
1940 | ||
1941 | memset(buf, 0, sizeof(buf)); | |
1942 | buf_size = min(count, sizeof(buf) - 1); | |
1943 | if (copy_from_user(buf, user_buf, buf_size)) | |
1944 | return -EFAULT; | |
1945 | if (sscanf(buf, "%d", &csr) != 1) | |
1946 | return -EFAULT; | |
1947 | ||
1948 | iwl_dump_csr(trans); | |
1949 | ||
1950 | return count; | |
1951 | } | |
1952 | ||
1953 | static const char *get_fh_string(int cmd) | |
1954 | { | |
1955 | switch (cmd) { | |
1956 | IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); | |
1957 | IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); | |
1958 | IWL_CMD(FH_RSCSR_CHNL0_WPTR); | |
1959 | IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); | |
1960 | IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); | |
1961 | IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); | |
1962 | IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); | |
1963 | IWL_CMD(FH_TSSR_TX_STATUS_REG); | |
1964 | IWL_CMD(FH_TSSR_TX_ERROR_REG); | |
1965 | default: | |
1966 | return "UNKNOWN"; | |
1967 | } | |
1968 | } | |
1969 | ||
1970 | int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) | |
1971 | { | |
1972 | int i; | |
1973 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1974 | int pos = 0; | |
1975 | size_t bufsz = 0; | |
1976 | #endif | |
1977 | static const u32 fh_tbl[] = { | |
1978 | FH_RSCSR_CHNL0_STTS_WPTR_REG, | |
1979 | FH_RSCSR_CHNL0_RBDCB_BASE_REG, | |
1980 | FH_RSCSR_CHNL0_WPTR, | |
1981 | FH_MEM_RCSR_CHNL0_CONFIG_REG, | |
1982 | FH_MEM_RSSR_SHARED_CTRL_REG, | |
1983 | FH_MEM_RSSR_RX_STATUS_REG, | |
1984 | FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, | |
1985 | FH_TSSR_TX_STATUS_REG, | |
1986 | FH_TSSR_TX_ERROR_REG | |
1987 | }; | |
1988 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1989 | if (display) { | |
1990 | bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; | |
1991 | *buf = kmalloc(bufsz, GFP_KERNEL); | |
1992 | if (!*buf) | |
1993 | return -ENOMEM; | |
1994 | pos += scnprintf(*buf + pos, bufsz - pos, | |
1995 | "FH register values:\n"); | |
1996 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | |
1997 | pos += scnprintf(*buf + pos, bufsz - pos, | |
1998 | " %34s: 0X%08x\n", | |
1999 | get_fh_string(fh_tbl[i]), | |
83ed9015 | 2000 | iwl_read_direct32(bus(trans), fh_tbl[i])); |
16db88ba EG |
2001 | } |
2002 | return pos; | |
2003 | } | |
2004 | #endif | |
2005 | IWL_ERR(trans, "FH register values:\n"); | |
2006 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | |
2007 | IWL_ERR(trans, " %34s: 0X%08x\n", | |
2008 | get_fh_string(fh_tbl[i]), | |
83ed9015 | 2009 | iwl_read_direct32(bus(trans), fh_tbl[i])); |
16db88ba EG |
2010 | } |
2011 | return 0; | |
2012 | } | |
2013 | ||
2014 | static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, | |
2015 | char __user *user_buf, | |
2016 | size_t count, loff_t *ppos) | |
2017 | { | |
2018 | struct iwl_trans *trans = file->private_data; | |
2019 | char *buf; | |
2020 | int pos = 0; | |
2021 | ssize_t ret = -EFAULT; | |
2022 | ||
2023 | ret = pos = iwl_dump_fh(trans, &buf, true); | |
2024 | if (buf) { | |
2025 | ret = simple_read_from_buffer(user_buf, | |
2026 | count, ppos, buf, pos); | |
2027 | kfree(buf); | |
2028 | } | |
2029 | ||
2030 | return ret; | |
2031 | } | |
2032 | ||
87e5666c | 2033 | DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); |
7ff94706 | 2034 | DEBUGFS_READ_WRITE_FILE_OPS(log_event); |
1f7b6172 | 2035 | DEBUGFS_READ_WRITE_FILE_OPS(interrupt); |
16db88ba | 2036 | DEBUGFS_READ_FILE_OPS(fh_reg); |
87e5666c EG |
2037 | DEBUGFS_READ_FILE_OPS(rx_queue); |
2038 | DEBUGFS_READ_FILE_OPS(tx_queue); | |
16db88ba | 2039 | DEBUGFS_WRITE_FILE_OPS(csr); |
87e5666c EG |
2040 | |
2041 | /* | |
2042 | * Create the debugfs files and directories | |
2043 | * | |
2044 | */ | |
2045 | static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, | |
2046 | struct dentry *dir) | |
2047 | { | |
87e5666c EG |
2048 | DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR); |
2049 | DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); | |
2050 | DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); | |
7ff94706 | 2051 | DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR); |
1f7b6172 | 2052 | DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); |
16db88ba EG |
2053 | DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); |
2054 | DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); | |
87e5666c EG |
2055 | return 0; |
2056 | } | |
2057 | #else | |
2058 | static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, | |
2059 | struct dentry *dir) | |
2060 | { return 0; } | |
2061 | ||
2062 | #endif /* CONFIG_IWLWIFI_DEBUGFS */ | 
2063 | ||
e6bb4c9c EG |
2064 | const struct iwl_trans_ops trans_ops_pcie = { |
2065 | .alloc = iwl_trans_pcie_alloc, | |
2066 | .request_irq = iwl_trans_pcie_request_irq, | |
2067 | .start_device = iwl_trans_pcie_start_device, | |
2068 | .prepare_card_hw = iwl_trans_pcie_prepare_card_hw, | |
2069 | .stop_device = iwl_trans_pcie_stop_device, | |
48d42c42 | 2070 | |
e6bb4c9c | 2071 | .tx_start = iwl_trans_pcie_tx_start, |
e13c0c59 | 2072 | .wake_any_queue = iwl_trans_pcie_wake_any_queue, |
48d42c42 | 2073 | |
e6bb4c9c EG |
2074 | .send_cmd = iwl_trans_pcie_send_cmd, |
2075 | .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu, | |
c85eb619 | 2076 | |
e6bb4c9c | 2077 | .tx = iwl_trans_pcie_tx, |
a0eaad71 | 2078 | .reclaim = iwl_trans_pcie_reclaim, |
34c1b7ba | 2079 | |
7f01d567 | 2080 | .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, |
288712a6 | 2081 | .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc, |
c91bd124 | 2082 | .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, |
34c1b7ba | 2083 | |
e6bb4c9c | 2084 | .kick_nic = iwl_trans_pcie_kick_nic, |
1e89cbac | 2085 | |
e6bb4c9c | 2086 | .free = iwl_trans_pcie_free, |
e20d4341 | 2087 | .stop_queue = iwl_trans_pcie_stop_queue, |
87e5666c EG |
2088 | |
2089 | .dbgfs_register = iwl_trans_pcie_dbgfs_register, | |
5f178cd2 EG |
2090 | |
2091 | .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, | |
f22be624 | 2092 | .check_stuck_queue = iwl_trans_pcie_check_stuck_queue, |
5f178cd2 | 2093 | |
57210f7c EG |
2094 | .suspend = iwl_trans_pcie_suspend, |
2095 | .resume = iwl_trans_pcie_resume, | |
e6bb4c9c | 2096 | }; |
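
/*
 * Nothing in this file calls the handlers above by name; the core driver
 * reaches them through the trans->ops pointer wired up in
 * iwl_trans_pcie_alloc(), which is what keeps the transport swappable.
 * A hedged sketch of the core-side dispatch: the wrapper below is
 * illustrative, only the .reclaim member comes from this table.
 */
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
				     int tid, int txq_id, int ssn,
				     u32 status, struct sk_buff_head *skbs)
{
	trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
}
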
ab697a9f | 2097 |