/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>

#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes when the transport layer tx_free moves here */
#include "iwl-agn.h"
#include "iwl-shared.h"

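/**
 * iwl_trans_rx_alloc - allocate the Rx queue's DMA-coherent memory
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and the
 * receive buffer status area, both as coherent DMA memory shared with the
 * device. Returns 0 on success, -EINVAL if already allocated, -ENOMEM on
 * allocation failure.
 */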
static int iwl_trans_rx_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = priv->bus->dev;

	memset(&priv->rxq, 0, sizeof(priv->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

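/**
 * iwl_trans_rxq_free_rx_bufs - unmap and free all Rx buffer pages
 *
 * Walks the whole Rx buffer pool, unmaps and frees any page still attached
 * to an RBD, and moves every pool entry back onto the rx_used list. Called
 * with rxq->lock held by the callers below.
 */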
static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(priv).rx_page_order,
				DMA_FROM_DEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

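/**
 * iwl_trans_rx_hw_init - point the device at the Rx queue and start Rx DMA
 *
 * Programs the FH registers with the DRAM addresses of the RBD circular
 * buffer and the Rx status write-back area, then enables Rx DMA with the
 * configured buffer size (4K or 8K), RB timeout and 256 RBDs.
 */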
static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

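/**
 * iwl_rx_init - (re)initialize the Rx queue
 *
 * Allocates the queue on first use, recycles all buffers onto the rx_used
 * list, resets the read/write indexes, replenishes the queue with fresh
 * buffers and hands the queue addresses to the hardware.
 */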
static int iwl_rx_init(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(priv);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(priv);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us up so that we have processed and used all buffers, but
	 * have not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(priv);

	iwl_trans_rx_hw_init(priv, rxq);

	spin_lock_irqsave(&priv->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	return 0;
}

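/**
 * iwl_trans_pcie_rx_free - free all Rx resources
 *
 * Counterpart of iwl_trans_rx_alloc(): unmaps and frees the Rx buffer
 * pages, then releases the RBD circular buffer and the Rx status area
 * back to the DMA allocator.
 */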
static void iwl_trans_pcie_rx_free(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(priv);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(priv->bus->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

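/*
 * Small helpers around dma_alloc_coherent()/dma_free_coherent(): an
 * iwl_dma_ptr bundles the CPU address, the DMA address and the size of
 * one allocation. Used below for the keep-warm buffer and the scheduler
 * byte-count tables.
 */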
static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

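/**
 * iwl_trans_txq_alloc - allocate one Tx queue
 *
 * Allocates the per-slot command buffers and meta data, the per-TFD driver
 * data (Tx queues only, not the command queue) and the TFD circular buffer
 * that is shared with the device. On any failure everything allocated so
 * far is freed and -ENOMEM is returned.
 */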
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			       int slots_num, u32 txq_id)
{
	size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
				      GFP_KERNEL);
		if (!txq->cmd[i])
			goto error;
	}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != priv->shrd->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kzalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->txb);
	txq->txb = NULL;
	/* since txq->cmd has been zeroed,
	 * all non-allocated cmd[i] will be NULL */
	if (txq->cmd)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

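/**
 * iwl_trans_txq_init - initialize an allocated Tx queue
 *
 * Resets the queue state, initializes the head/tail indexes and the
 * high/low-water marks, and tells the device where the queue's TFD
 * circular buffer lives in DRAM.
 */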
static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = priv->bus->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < txq->q.n_window; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, hw_params(priv).tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(priv).max_txq_num; txq_id++)
			iwl_tx_queue_free(priv, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * @priv: the driver's private data
 *
 * Allocate all Tx DMA structures and initialize them.
 * Return: 0 on success, an error code otherwise.
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				   hw_params(priv).scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			priv->cfg->base_params->num_of_queues, GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
					  txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_tx_free(trans(priv));

	return ret;
}

static int iwl_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
					 txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_tx_free(trans(priv));
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

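/**
 * iwl_nic_init - bring up APM, power, Rx and Tx queues
 *
 * Runs the basic NIC bring-up sequence: APM init, interrupt coalescing
 * calibration timer, VMAIN power source, device-specific configuration,
 * then Rx and Tx/command queue init, and finally shadow registers when
 * the device supports them.
 */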
static int iwl_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&priv->shrd->lock, flags);
	iwl_apm_init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	iwl_set_pwr_vmain(priv);

	priv->cfg->lib->nic_config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(priv);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(priv))
		return -ENOMEM;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
	int ret;

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
{
	int ret;

	IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;
	return ret;
}

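/**
 * iwl_trans_pcie_start_device - prepare the HW and start it
 *
 * Takes uCode ownership for the driver, makes sure the card is ready on
 * AMT-capable SKUs, honours the HW RF-kill switch, initializes the NIC
 * and clears the rfkill handshake bits before enabling host interrupts.
 */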
static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
{
	int ret;

	priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_pcie_prepare_card_hw(priv)) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);

	if (iwl_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(priv);
		return -ERFKILL;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask.
 * Must be called under priv->shrd->lock and with MAC access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->shrd->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(priv).max_txq_num; i++) {
		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, hw_params(priv).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->shrd->lock, flags);

	iwl_trans_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	if (!priv->txq) {
		IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(priv, txq_id);

	return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
{
	unsigned long flags;

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->shrd->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);
	iwl_trans_sync_irq(trans(priv));

	/* device going down, Stop using ICT table */
	iwl_disable_ict(priv);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
		iwl_trans_tx_stop(priv);
		iwl_trans_rx_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv);
}

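/**
 * iwl_trans_pcie_get_tx_cmd - grab the next free Tx command slot
 *
 * Returns NULL when the queue has fewer free slots than its high-water
 * mark; otherwise zeroes the device command at the write pointer, stamps
 * the queue and TFD index into its sequence field and returns a pointer
 * to the embedded Tx command.
 */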
static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
						    int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		return NULL;

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	dev_cmd = txq->cmd[q->write_ptr];
	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
	return &dev_cmd->cmd.tx;
}

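/*
 * iwl_trans_pcie_tx() builds one TFD with up to two entries: the first
 * maps the Tx command plus the (padded) MAC header bidirectionally, the
 * second maps the remaining skb payload to-device only. The command
 * buffer is synced back to the device after the scratch pointers and the
 * byte-count table entry have been updated.
 */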
static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
		struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
		struct iwl_rxon_context *ctx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
	struct iwl_cmd_meta *out_meta;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(priv->bus->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
			dma_unmap_single(priv->bus->dev,
					dma_unmap_addr(out_meta, mapping),
					dma_unmap_len(out_meta, len),
					DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (ampdu)
		iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
					       le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}
	return 0;
}

static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}

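/**
 * iwl_trans_pcie_request_irq - set up the ISR path
 *
 * Initializes the interrupt tasklet and the ICT table, requests the
 * (shared) bus IRQ with iwl_isr_ict as the handler and prepares the Rx
 * replenish work.
 */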
static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
	struct iwl_priv *priv = priv(trans);
	int err;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)priv);

	iwl_alloc_isr_ict(priv);

	err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
		DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
		iwl_free_isr_ict(priv);
		return err;
	}

	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
	return 0;
}

static void iwl_trans_pcie_sync_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(priv->bus->irq);
	tasklet_kill(&priv->irq_tasklet);
}

static void iwl_trans_pcie_free(struct iwl_priv *priv)
{
	free_irq(priv->bus->irq, priv);
	iwl_free_isr_ict(priv);
	kfree(trans(priv));
	trans(priv) = NULL;
}

const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
	struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
					      sizeof(struct iwl_trans_pcie),
					      GFP_KERNEL);
	if (iwl_trans) {
		iwl_trans->ops = &trans_ops_pcie;
		iwl_trans->shrd = shrd;
	}

	return iwl_trans;
}

const struct iwl_trans_ops trans_ops_pcie = {
	.alloc = iwl_trans_pcie_alloc,
	.request_irq = iwl_trans_pcie_request_irq,
	.start_device = iwl_trans_pcie_start_device,
	.prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
	.stop_device = iwl_trans_pcie_stop_device,

	.tx_start = iwl_trans_pcie_tx_start,

	.rx_free = iwl_trans_pcie_rx_free,
	.tx_free = iwl_trans_pcie_tx_free,

	.send_cmd = iwl_trans_pcie_send_cmd,
	.send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

	.get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
	.tx = iwl_trans_pcie_tx,

	.txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
	.txq_agg_setup = iwl_trans_pcie_txq_agg_setup,

	.kick_nic = iwl_trans_pcie_kick_nic,

	.sync_irq = iwl_trans_pcie_sync_irq,
	.free = iwl_trans_pcie_free,
};