/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes when the transport layer tx_free will be here */
#include "iwl-agn.h"
#include "iwl-shared.h"

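/*
 * iwl_trans_rx_alloc - allocate the RX queue DMA resources
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and the
 * receive-buffer status area that the device writes to, both as coherent
 * DMA memory. iwl_rx_init() reuses the allocation across resets.
 */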
static int iwl_trans_rx_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = priv->bus->dev;

	memset(&priv->rxq, 0, sizeof(priv->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

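/*
 * iwl_trans_rxq_free_rx_bufs - return all RX buffers to the rx_used list
 *
 * Unmaps and frees any pages still attached to the RX buffer pool and
 * queues every pool entry on rxq->rx_used so it can be replenished again.
 */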
static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(priv).rx_page_order,
				DMA_FROM_DEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

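/*
 * iwl_trans_rx_hw_init - program the RX DMA channel
 *
 * Stops RX DMA, points the device at the RBD circular buffer and the
 * receive-buffer status area, then re-enables the channel with the
 * configured RB size, RB timeout and interrupt coalescing.
 */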
static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

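/*
 * iwl_rx_init - (re)initialize the RX queue
 *
 * Allocates the queue on first use, recycles all RX buffers, resets the
 * read/write pointers and replenishes the queue before handing it to the
 * hardware via iwl_trans_rx_hw_init().
 */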
static int iwl_rx_init(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(priv);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(priv);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(priv);

	iwl_trans_rx_hw_init(priv, rxq);

	spin_lock_irqsave(&priv->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	return 0;
}

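/*
 * iwl_trans_rx_free - release all RX queue resources
 *
 * Frees the RX buffer pool pages, the RBD circular buffer and the
 * receive-buffer status area allocated by iwl_trans_rx_alloc().
 */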
static void iwl_trans_rx_free(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(priv);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(priv->bus->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

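/*
 * iwl_trans_txq_alloc - allocate one TX queue
 *
 * Allocates the per-slot command buffers and meta data, the per-TFD driver
 * data (for data queues only, not the command queue) and the TFD circular
 * buffer that is shared with the device.
 */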
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			       int slots_num, u32 txq_id)
{
	size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
				      GFP_KERNEL);
		if (!txq->cmd[i])
			goto error;
	}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != priv->shrd->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kzalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->txb);
	txq->txb = NULL;
	/* since txq->cmd has been zeroed,
	 * all non-allocated cmd[i] will be NULL */
	if (txq->cmd)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

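/*
 * iwl_trans_txq_init - initialize an allocated TX queue
 *
 * Resets the queue state, sets up the software queue id for the default
 * queues, initializes the read/write pointers and tells the device where
 * the TFD circular buffer of this queue lives.
 */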
static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = priv->bus->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < txq->q.n_window; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, hw_params(priv).tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_tx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(priv).max_txq_num; txq_id++)
			iwl_tx_queue_free(priv, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				   hw_params(priv).scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			    priv->cfg->base_params->num_of_queues, GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
					  txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	trans_tx_free(&priv->trans);

	return ret;
}
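
/*
 * iwl_tx_init - (re)initialize the TX path
 *
 * Allocates all TX queues on first use, turns off the TX DMA FIFOs,
 * programs the keep-warm buffer address and (re)initializes every queue,
 * including the command queue.
 */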
static int iwl_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
					 txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		trans_tx_free(&priv->trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

	if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

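/*
 * iwl_nic_init - bring the NIC to an initialized state
 *
 * Performs APM init, selects the power source, applies the per-device NIC
 * configuration and sets up both the RX queue and all TX/command queues.
 */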
static int iwl_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&priv->shrd->lock, flags);
	iwl_apm_init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	iwl_set_pwr_vmain(priv);

	priv->cfg->lib->nic_config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(priv);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(priv))
		return -ENOMEM;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
	int ret;

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
{
	int ret;

	IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;
	return ret;
}

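/*
 * iwl_trans_start_device - prepare the hardware for firmware load
 *
 * Takes driver ownership of the uCode, checks the HW and RF-kill state,
 * runs iwl_nic_init() and clears/enables the host interrupts so that the
 * firmware can subsequently be loaded.
 */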
static int iwl_trans_start_device(struct iwl_priv *priv)
{
	int ret;

	priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_prepare_card_hw(priv)) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);

	if (iwl_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(priv);
		return -ERFKILL;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 * must be called under priv->shrd->lock and mac access
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};
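
/*
 * iwl_trans_tx_start - start the TX scheduler
 *
 * Clears the scheduler context in SRAM, enables the TX DMA channels,
 * activates the default queues and maps each of them to its FIFO and
 * access category.
 */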
static void iwl_trans_tx_start(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->shrd->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(priv).max_txq_num; i++) {
		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, hw_params(priv).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->shrd->lock, flags);

	iwl_trans_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	if (!priv->txq) {
		IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(priv, txq_id);

	return 0;
}

static void iwl_trans_stop_device(struct iwl_priv *priv)
{
	unsigned long flags;

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->shrd->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);
	trans_sync_irq(&priv->trans);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(priv);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
		iwl_trans_tx_stop(priv);
		iwl_trans_rx_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv);
}

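/*
 * iwl_trans_get_tx_cmd - reserve the next TX command slot of a queue
 *
 * Returns NULL when the queue is (almost) full; otherwise zeroes the
 * device command at the write pointer, fills in the REPLY_TX header and
 * sequence field and returns a pointer to its embedded TX command.
 */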
static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
					       int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		return NULL;

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	dev_cmd = txq->cmd[q->write_ptr];
	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
	return &dev_cmd->cmd.tx;
}

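/*
 * iwl_trans_tx - map a frame and attach it to a TX queue
 *
 * Maps the TX command + MAC header and the frame payload for DMA, attaches
 * both to the next TFD, updates the byte-count table for aggregation
 * queues and finally bumps the queue write pointer so the device can
 * fetch the frame.
 */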
static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
		struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
		struct iwl_rxon_context *ctx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
	struct iwl_cmd_meta *out_meta;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(priv->bus->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
			dma_unmap_single(priv->bus->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (ampdu)
		iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
						  le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}
	return 0;
}

static void iwl_trans_kick_nic(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}

static void iwl_trans_sync_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(priv->bus->irq);
	tasklet_kill(&priv->irq_tasklet);
}

static void iwl_trans_free(struct iwl_priv *priv)
{
	free_irq(priv->bus->irq, priv);
	iwl_free_isr_ict(priv);
}

static const struct iwl_trans_ops trans_ops = {
	.start_device = iwl_trans_start_device,
	.prepare_card_hw = iwl_trans_prepare_card_hw,
	.stop_device = iwl_trans_stop_device,

	.tx_start = iwl_trans_tx_start,

	.rx_free = iwl_trans_rx_free,
	.tx_free = iwl_trans_tx_free,

	.send_cmd = iwl_send_cmd,
	.send_cmd_pdu = iwl_send_cmd_pdu,

	.get_tx_cmd = iwl_trans_get_tx_cmd,
	.tx = iwl_trans_tx,

	.txq_agg_disable = iwl_trans_txq_agg_disable,
	.txq_agg_setup = iwl_trans_txq_agg_setup,

	.kick_nic = iwl_trans_kick_nic,

	.sync_irq = iwl_trans_sync_irq,
	.free = iwl_trans_free,
};

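/*
 * iwl_trans_register - hook the PCIe transport into the driver
 *
 * Installs the transport ops, sets up the interrupt tasklet and ICT table,
 * requests the (shared) interrupt line and initializes the RX replenish
 * work.
 */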
int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv)
{
	int err;

	priv->trans.ops = &trans_ops;
	priv->trans.priv = priv;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)priv);

	iwl_alloc_isr_ict(priv);

	err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED,
			  DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
		iwl_free_isr_ict(priv);
		return err;
	}

	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);

	return 0;
}