/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
/* TODO: remove unneeded includes once the transport layer tx_free lives here */
#include "iwl-agn.h"

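/**
 * iwl_trans_rx_alloc - allocate the Rx queue DMA memory
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and the
 * receive buffer status area in coherent DMA memory. The Rx buffers
 * themselves are not allocated here.
 */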
static int iwl_trans_rx_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = priv->bus.dev;

	memset(&priv->rxq, 0, sizeof(priv->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

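/**
 * iwl_trans_rxq_free_rx_bufs - return all Rx buffers to the rx_used list
 *
 * Unmaps and frees any page still attached to a pool entry, then adds every
 * pool entry to the rx_used list. Callers hold rxq->lock.
 */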
static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				DMA_FROM_DEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

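/**
 * iwl_trans_rx_init - initialize the Rx queue for use
 *
 * Allocates the queue DMA memory on first use, frees any leftover Rx buffers
 * and resets the read/write indexes so the queue can be restocked from scratch.
 */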
static int iwl_trans_rx_init(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(priv);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(priv);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	return 0;
}

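/**
 * iwl_trans_rx_free - free the Rx queue DMA memory
 *
 * Frees the Rx buffers, the RBD circular buffer and the receive buffer status
 * area, and clears the corresponding DMA addresses in the queue structure.
 */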
static void iwl_trans_rx_free(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(priv);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(priv->bus.dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(priv->bus.dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

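/**
 * iwl_trans_rx_stop - stop the Rx DMA channel and wait for it to go idle
 */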
static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

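/*
 * Allocate/free a block of coherent DMA memory tracked by an iwl_dma_ptr
 * (CPU address, DMA address and size). Used below for the scheduler BC
 * tables and the keep-warm buffer.
 */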
static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(priv->bus.dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus.dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

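/**
 * iwl_trans_txq_alloc - allocate a single Tx queue
 *
 * Allocates the per-slot meta/command arrays, the per-TFD driver data
 * (for data queues only) and the TFD circular buffer shared with the device.
 */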
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			       int slots_num, u32 txq_id)
{
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
				      GFP_KERNEL);
		if (!txq->cmd[i])
			goto error;
	}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(priv->bus.dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->txb);
	txq->txb = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

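/**
 * iwl_trans_txq_init - initialize an already allocated Tx queue
 *
 * Resets the queue state, sets up the head/tail indexes and water marks,
 * and tells the device where the queue's TFD circular buffer lives.
 */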
static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = priv->bus.dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < txq->q.n_window; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_tx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
			iwl_tx_queue_free(priv, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				   priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			priv->cfg->base_params->num_of_queues, GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
					  txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	priv->trans.ops->tx_free(priv);

	return ret;
}
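
/**
 * iwl_trans_tx_init - initialize the Tx queues
 *
 * Allocates the Tx context on first use, turns off all Tx DMA FIFOs, points
 * the device at the keep-warm buffer and then initializes every Tx queue,
 * including the command queue.
 */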
static int iwl_trans_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
					 txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		priv->trans.ops->tx_free(priv);
	return ret;
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	iwlagn_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!priv->txq) {
		IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl_tx_queue_unmap(priv, txq_id);

	return 0;
}

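/*
 * Transport layer entry points exposed to the rest of the driver.
 * iwl_trans_register() hooks these ops into the given iwl_trans, after which
 * callers reach them through trans->ops (e.g. priv->trans.ops->tx_free()).
 */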
static const struct iwl_trans_ops trans_ops = {
	.rx_init = iwl_trans_rx_init,
	.rx_stop = iwl_trans_rx_stop,
	.rx_free = iwl_trans_rx_free,

	.tx_init = iwl_trans_tx_init,
	.tx_stop = iwl_trans_tx_stop,
	.tx_free = iwl_trans_tx_free,
};

void iwl_trans_register(struct iwl_trans *trans)
{
	trans->ops = &trans_ops;
}