c85eb619
EG
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
e6bb4c9c 63#include <linux/interrupt.h>
87e5666c 64#include <linux/debugfs.h>
e6bb4c9c 65
a0f6b0a2 66#include "iwl-dev.h"
c85eb619 67#include "iwl-trans.h"
02aca585
EG
68#include "iwl-core.h"
69#include "iwl-helpers.h"
ab697a9f 70#include "iwl-trans-int-pcie.h"
02aca585
EG
71/*TODO remove unneeded includes once the transport layer tx_free is here */
72#include "iwl-agn.h"
e419d62d 73#include "iwl-core.h"
48f20d35 74#include "iwl-shared.h"
c85eb619 75
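/**
 * iwl_trans_rx_alloc - Allocate RX queue DMA resources
 *
 * Allocate the circular buffer of Read Buffer Descriptors (RBDs) and the
 * rb_stts receive-buffer-status area in coherent DMA memory. Called from
 * iwl_rx_init() only when rxq->bd has not been allocated yet.
 */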
5a878bf6 76static int iwl_trans_rx_alloc(struct iwl_trans *trans)
c85eb619 77{
5a878bf6
EG
78 struct iwl_trans_pcie *trans_pcie =
79 IWL_TRANS_GET_PCIE_TRANS(trans);
80 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
81 struct device *dev = bus(trans)->dev;
c85eb619 82
5a878bf6 83 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
c85eb619
EG
84
85 spin_lock_init(&rxq->lock);
86 INIT_LIST_HEAD(&rxq->rx_free);
87 INIT_LIST_HEAD(&rxq->rx_used);
88
89 if (WARN_ON(rxq->bd || rxq->rb_stts))
90 return -EINVAL;
91
92 /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
a0f6b0a2
EG
93 rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
94 &rxq->bd_dma, GFP_KERNEL);
c85eb619
EG
95 if (!rxq->bd)
96 goto err_bd;
a0f6b0a2 97 memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
c85eb619
EG
98
99 /*Allocate the driver's pointer to receive buffer status */
100 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
101 &rxq->rb_stts_dma, GFP_KERNEL);
102 if (!rxq->rb_stts)
103 goto err_rb_stts;
104 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
105
106 return 0;
107
108err_rb_stts:
a0f6b0a2
EG
109 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
110 rxq->bd, rxq->bd_dma);
c85eb619
EG
111 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
112 rxq->bd = NULL;
113err_bd:
114 return -ENOMEM;
115}
116
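/**
 * iwl_trans_rxq_free_rx_bufs - Unmap and free all RX buffer pages
 *
 * Walk the whole pool of RX buffers, unmap and release any page still
 * attached, and move every entry back onto the rx_used list.
 */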
5a878bf6 117static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
c85eb619 118{
5a878bf6
EG
119 struct iwl_trans_pcie *trans_pcie =
120 IWL_TRANS_GET_PCIE_TRANS(trans);
121 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
a0f6b0a2 122 int i;
c85eb619
EG
123
124 /* Fill the rx_used queue with _all_ of the Rx buffers */
125 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
126 /* In the reset function, these buffers may have been allocated
127 * to an SKB, so we need to unmap and free potential storage */
128 if (rxq->pool[i].page != NULL) {
5a878bf6
EG
129 dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
130 PAGE_SIZE << hw_params(trans).rx_page_order,
c85eb619 131 DMA_FROM_DEVICE);
790428b6
EG
132 __free_pages(rxq->pool[i].page,
133 hw_params(trans).rx_page_order);
c85eb619
EG
134 rxq->pool[i].page = NULL;
135 }
136 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
137 }
a0f6b0a2
EG
138}
139
ab697a9f
EG
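/**
 * iwl_trans_rx_hw_init - Point the device at the RX queue and enable Rx DMA
 *
 * Program the RBD circular buffer and status area addresses into the FH
 * registers, configure RB size, timeout and count, and set the interrupt
 * coalescing timer to its default value.
 */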
140static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
141 struct iwl_rx_queue *rxq)
142{
143 u32 rb_size;
144 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
145 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
146
147 rb_timeout = RX_RB_TIMEOUT;
148
149 if (iwlagn_mod_params.amsdu_size_8K)
150 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
151 else
152 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
153
154 /* Stop Rx DMA */
155 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
156
157 /* Reset driver's Rx queue write index */
158 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
159
160 /* Tell device where to find RBD circular buffer in DRAM */
161 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
162 (u32)(rxq->bd_dma >> 8));
163
164 /* Tell device where in DRAM to update its Rx status */
165 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
166 rxq->rb_stts_dma >> 4);
167
168 /* Enable Rx DMA
169 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
170 * the credit mechanism in 5000 HW RX FIFO
171 * Direct rx interrupts to hosts
172 * Rx buffer size 4 or 8k
173 * RB timeout 0x10
174 * 256 RBDs
175 */
176 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
177 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
178 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
179 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
180 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
181 rb_size|
182 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
183 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
184
185 /* Set interrupt coalescing timer to default (2048 usecs) */
186 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
187}
188
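/**
 * iwl_rx_init - Allocate (if needed) and reset the RX queue, then start it
 *
 * Allocates the queue on first use, returns all buffers to the rx_used
 * list, resets the read/write pointers, replenishes the queue with fresh
 * buffers and finally hands the queue to the hardware.
 */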
5a878bf6 189static int iwl_rx_init(struct iwl_trans *trans)
a0f6b0a2 190{
5a878bf6
EG
191 struct iwl_trans_pcie *trans_pcie =
192 IWL_TRANS_GET_PCIE_TRANS(trans);
193 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
194
a0f6b0a2
EG
195 int i, err;
196 unsigned long flags;
197
198 if (!rxq->bd) {
5a878bf6 199 err = iwl_trans_rx_alloc(trans);
a0f6b0a2
EG
200 if (err)
201 return err;
202 }
203
204 spin_lock_irqsave(&rxq->lock, flags);
205 INIT_LIST_HEAD(&rxq->rx_free);
206 INIT_LIST_HEAD(&rxq->rx_used);
207
5a878bf6 208 iwl_trans_rxq_free_rx_bufs(trans);
c85eb619
EG
209
210 for (i = 0; i < RX_QUEUE_SIZE; i++)
211 rxq->queue[i] = NULL;
212
213 /* Set us so that we have processed and used all buffers, but have
214 * not restocked the Rx queue with fresh buffers */
215 rxq->read = rxq->write = 0;
216 rxq->write_actual = 0;
217 rxq->free_count = 0;
218 spin_unlock_irqrestore(&rxq->lock, flags);
219
5a878bf6 220 iwlagn_rx_replenish(trans);
ab697a9f 221
5a878bf6 222 iwl_trans_rx_hw_init(priv(trans), rxq);
ab697a9f 223
5a878bf6 224 spin_lock_irqsave(&trans->shrd->lock, flags);
ab697a9f 225 rxq->need_update = 1;
5a878bf6
EG
226 iwl_rx_queue_update_write_ptr(trans, rxq);
227 spin_unlock_irqrestore(&trans->shrd->lock, flags);
ab697a9f 228
c85eb619
EG
229 return 0;
230}
231
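/**
 * iwl_trans_pcie_rx_free - Free all RX queue resources
 *
 * Release the RX buffer pages and the coherent DMA memory backing the RBD
 * circular buffer and the rb_stts status area. Safe to call when nothing
 * has been allocated yet.
 */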
5a878bf6 232static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
a0f6b0a2 233{
5a878bf6
EG
234 struct iwl_trans_pcie *trans_pcie =
235 IWL_TRANS_GET_PCIE_TRANS(trans);
236 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
237
a0f6b0a2
EG
238 unsigned long flags;
239
240 /* If rxq->bd is NULL, it means that nothing has been allocated,
241 * exit now */
242 if (!rxq->bd) {
5a878bf6 243 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
a0f6b0a2
EG
244 return;
245 }
246
247 spin_lock_irqsave(&rxq->lock, flags);
5a878bf6 248 iwl_trans_rxq_free_rx_bufs(trans);
a0f6b0a2
EG
249 spin_unlock_irqrestore(&rxq->lock, flags);
250
5a878bf6 251 dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
a0f6b0a2
EG
252 rxq->bd, rxq->bd_dma);
253 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
254 rxq->bd = NULL;
255
256 if (rxq->rb_stts)
5a878bf6 257 dma_free_coherent(bus(trans)->dev,
a0f6b0a2
EG
258 sizeof(struct iwl_rb_status),
259 rxq->rb_stts, rxq->rb_stts_dma);
260 else
5a878bf6 261 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
a0f6b0a2
EG
262 memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
263 rxq->rb_stts = NULL;
264}
265
c2c52e8b
EG
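/**
 * iwl_trans_rx_stop - Stop Rx DMA and wait for the channel to go idle
 */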
266static int iwl_trans_rx_stop(struct iwl_priv *priv)
267{
268
269 /* stop Rx DMA */
270 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
271 return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
272 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
273}
274
02aca585
EG
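/* Helpers for coherent DMA allocations tracked by struct iwl_dma_ptr
 * (used for the keep-warm buffer and the scheduler byte-count tables). */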
275static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
276 struct iwl_dma_ptr *ptr, size_t size)
277{
278 if (WARN_ON(ptr->addr))
279 return -EINVAL;
280
d5934110 281 ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
02aca585
EG
282 &ptr->dma, GFP_KERNEL);
283 if (!ptr->addr)
284 return -ENOMEM;
285 ptr->size = size;
286 return 0;
287}
288
1359ca4f
EG
289static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
290 struct iwl_dma_ptr *ptr)
291{
292 if (unlikely(!ptr->addr))
293 return;
294
d5934110 295 dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
1359ca4f
EG
296 memset(ptr, 0, sizeof(*ptr));
297}
298
02aca585
EG
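/**
 * iwl_trans_txq_alloc - Allocate a single TX queue
 *
 * Allocate the per-entry meta/cmd arrays, the driver-private txb array
 * (data queues only, not the command queue) and the TFD circular buffer
 * shared with the device. On failure everything allocated so far is freed.
 */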
299static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
300 int slots_num, u32 txq_id)
301{
d6189124 302 size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
02aca585
EG
303 int i;
304
305 if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
306 return -EINVAL;
307
1359ca4f
EG
308 txq->q.n_window = slots_num;
309
02aca585
EG
310 txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
311 GFP_KERNEL);
312 txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
313 GFP_KERNEL);
314
315 if (!txq->meta || !txq->cmd)
316 goto error;
317
318 for (i = 0; i < slots_num; i++) {
319 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
320 GFP_KERNEL);
321 if (!txq->cmd[i])
322 goto error;
323 }
324
325 /* Alloc driver data array and TFD circular buffer */
326 /* Driver private data, only for Tx (not command) queues,
327 * not shared with device. */
cefeaa5f 328 if (txq_id != priv->shrd->cmd_queue) {
02aca585
EG
329 txq->txb = kzalloc(sizeof(txq->txb[0]) *
330 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
331 if (!txq->txb) {
332 IWL_ERR(priv, "kmalloc for auxiliary BD "
333 "structures failed\n");
334 goto error;
335 }
336 } else {
337 txq->txb = NULL;
338 }
339
340 /* Circular buffer of transmit frame descriptors (TFDs),
341 * shared with device */
d5934110 342 txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
02aca585
EG
343 GFP_KERNEL);
344 if (!txq->tfds) {
345 IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
346 goto error;
347 }
348 txq->q.id = txq_id;
349
350 return 0;
351error:
352 kfree(txq->txb);
353 txq->txb = NULL;
354 /* since txq->cmd has been zeroed,
355 * all non allocated cmd[i] will be NULL */
356 if (txq->cmd)
357 for (i = 0; i < slots_num; i++)
358 kfree(txq->cmd[i]);
359 kfree(txq->meta);
360 kfree(txq->cmd);
361 txq->meta = NULL;
362 txq->cmd = NULL;
363
364 return -ENOMEM;
365
366}
367
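/**
 * iwl_trans_txq_init - (Re)initialize an already allocated TX queue
 *
 * Reset the meta array and the queue indexes, then tell the device where
 * the queue's TFD circular buffer lives in DRAM.
 */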
368static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
369 int slots_num, u32 txq_id)
370{
371 int ret;
372
373 txq->need_update = 0;
374 memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
375
376 /*
377 * For the default queues 0-3, set up the swq_id
378 * already -- all others need to get one later
379 * (if they need one at all).
380 */
381 if (txq_id < 4)
382 iwl_set_swq_id(txq, txq_id, txq_id);
383
384 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
385 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
386 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
387
388 /* Initialize queue's high/low-water marks, and head/tail indexes */
389 ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
390 txq_id);
391 if (ret)
392 return ret;
393
394 /*
395 * Tell nic where to find circular buffer of Tx Frame Descriptors for
396 * given Tx queue, and enable the DMA channel used for that queue.
397 * Circular buffer (TFD queue in DRAM) physical base address */
398 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
399 txq->q.dma_addr >> 8);
400
401 return 0;
402}
403
c170b867
EG
404/**
405 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
406 */
407static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
408{
409 struct iwl_tx_queue *txq = &priv->txq[txq_id];
410 struct iwl_queue *q = &txq->q;
411
412 if (!q->n_bd)
413 return;
414
415 while (q->write_ptr != q->read_ptr) {
416 /* The read_ptr needs to be bounded by q->n_window */
417 iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
418 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
419 }
420}
421
1359ca4f
EG
422/**
423 * iwl_tx_queue_free - Deallocate DMA queue.
424 * @txq: Transmit queue to deallocate.
425 *
426 * Empty queue by removing and destroying all BD's.
427 * Free all buffers.
428 * 0-fill, but do not free "txq" descriptor structure.
429 */
430static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
431{
432 struct iwl_tx_queue *txq = &priv->txq[txq_id];
d5934110 433 struct device *dev = priv->bus->dev;
1359ca4f
EG
434 int i;
435 if (WARN_ON(!txq))
436 return;
437
438 iwl_tx_queue_unmap(priv, txq_id);
439
440 /* De-alloc array of command/tx buffers */
441 for (i = 0; i < txq->q.n_window; i++)
442 kfree(txq->cmd[i]);
443
444 /* De-alloc circular buffer of TFDs */
445 if (txq->q.n_bd) {
d6189124 446 dma_free_coherent(dev, hw_params(priv).tfd_size *
1359ca4f
EG
447 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
448 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
449 }
450
451 /* De-alloc array of per-TFD driver data */
452 kfree(txq->txb);
453 txq->txb = NULL;
454
455 /* deallocate arrays */
456 kfree(txq->cmd);
457 kfree(txq->meta);
458 txq->cmd = NULL;
459 txq->meta = NULL;
460
461 /* 0-fill queue descriptor structure */
462 memset(txq, 0, sizeof(*txq));
463}
464
465/**
466 * iwl_trans_tx_free - Free TXQ Context
467 *
468 * Destroy all TX DMA queues and structures
469 */
e6bb4c9c 470static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
1359ca4f
EG
471{
472 int txq_id;
105183b1
EG
473 struct iwl_trans *trans = trans(priv);
474 struct iwl_trans_pcie *trans_pcie =
475 IWL_TRANS_GET_PCIE_TRANS(trans);
1359ca4f
EG
476
477 /* Tx queues */
478 if (priv->txq) {
d6189124
EG
479 for (txq_id = 0;
480 txq_id < hw_params(priv).max_txq_num; txq_id++)
1359ca4f
EG
481 iwl_tx_queue_free(priv, txq_id);
482 }
483
484 kfree(priv->txq);
485 priv->txq = NULL;
486
487 iwlagn_free_dma_ptr(priv, &priv->kw);
488
105183b1 489 iwlagn_free_dma_ptr(priv, &trans_pcie->scd_bc_tbls);
1359ca4f
EG
490}
491
02aca585
EG
492/**
493 * iwl_trans_tx_alloc - allocate TX context
494 * Allocate all Tx DMA structures and initialize them
495 *
496 * @param priv
497 * @return error code
498 */
499static int iwl_trans_tx_alloc(struct iwl_priv *priv)
500{
501 int ret;
502 int txq_id, slots_num;
105183b1
EG
503 struct iwl_trans *trans = trans(priv);
504 struct iwl_trans_pcie *trans_pcie =
505 IWL_TRANS_GET_PCIE_TRANS(trans);
02aca585
EG
506
507 /*It is not allowed to alloc twice, so warn when this happens.
508 * We cannot rely on the previous allocation, so free and fail */
509 if (WARN_ON(priv->txq)) {
510 ret = -EINVAL;
511 goto error;
512 }
513
105183b1 514 ret = iwlagn_alloc_dma_ptr(priv, &trans_pcie->scd_bc_tbls,
d6189124 515 hw_params(priv).scd_bc_tbls_size);
02aca585
EG
516 if (ret) {
517 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
518 goto error;
519 }
520
521 /* Alloc keep-warm buffer */
522 ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
523 if (ret) {
524 IWL_ERR(priv, "Keep Warm allocation failed\n");
525 goto error;
526 }
527
528 priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
529 priv->cfg->base_params->num_of_queues, GFP_KERNEL);
530 if (!priv->txq) {
531 IWL_ERR(priv, "Not enough memory for txq\n");
532 ret = -ENOMEM;
533 goto error;
534 }
535
536 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
d6189124 537 for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
cefeaa5f 538 slots_num = (txq_id == priv->shrd->cmd_queue) ?
02aca585
EG
539 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
540 ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
541 txq_id);
542 if (ret) {
543 IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
544 goto error;
545 }
546 }
547
548 return 0;
549
550error:
e6bb4c9c 551 iwl_trans_tx_free(trans(priv));
02aca585
EG
552
553 return ret;
554}
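/**
 * iwl_tx_init - Allocate (if needed) and initialize all TX queues
 *
 * Allocates the TX context on first use, turns off all Tx DMA FIFOs,
 * points the device at the keep-warm buffer and then initializes every
 * queue, including the command queue.
 */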
392f8b78 555static int iwl_tx_init(struct iwl_priv *priv)
02aca585
EG
556{
557 int ret;
558 int txq_id, slots_num;
559 unsigned long flags;
560 bool alloc = false;
561
562 if (!priv->txq) {
563 ret = iwl_trans_tx_alloc(priv);
564 if (ret)
565 goto error;
566 alloc = true;
567 }
568
10b15e6f 569 spin_lock_irqsave(&priv->shrd->lock, flags);
02aca585
EG
570
571 /* Turn off all Tx DMA fifos */
b3c2ce13 572 iwl_write_prph(priv, SCD_TXFACT, 0);
02aca585
EG
573
574 /* Tell NIC where to find the "keep warm" buffer */
575 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
576
10b15e6f 577 spin_unlock_irqrestore(&priv->shrd->lock, flags);
02aca585
EG
578
579 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
d6189124 580 for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
cefeaa5f 581 slots_num = (txq_id == priv->shrd->cmd_queue) ?
02aca585
EG
582 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
583 ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
584 txq_id);
585 if (ret) {
586 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
587 goto error;
588 }
589 }
590
591 return 0;
592error:
593 /*Upon error, free only if we allocated something */
594 if (alloc)
e6bb4c9c 595 iwl_trans_tx_free(trans(priv));
02aca585
EG
596 return ret;
597}
598
392f8b78
EG
599static void iwl_set_pwr_vmain(struct iwl_priv *priv)
600{
601/*
602 * (for documentation purposes)
603 * to set power to V_AUX, do:
604
605 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
606 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
607 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
608 ~APMG_PS_CTRL_MSK_PWR_SRC);
609 */
610
611 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
612 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
613 ~APMG_PS_CTRL_MSK_PWR_SRC);
614}
615
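/**
 * iwl_nic_init - Bring the NIC to an initialized state
 *
 * Run the APM init sequence, select the VMAIN power source, apply the
 * device-specific NIC configuration and set up the RX and TX queues.
 */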
616static int iwl_nic_init(struct iwl_priv *priv)
617{
618 unsigned long flags;
619
620 /* nic_init */
10b15e6f 621 spin_lock_irqsave(&priv->shrd->lock, flags);
392f8b78
EG
622 iwl_apm_init(priv);
623
624 /* Set interrupt coalescing calibration timer to default (512 usecs) */
625 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
626
10b15e6f 627 spin_unlock_irqrestore(&priv->shrd->lock, flags);
392f8b78
EG
628
629 iwl_set_pwr_vmain(priv);
630
631 priv->cfg->lib->nic_config(priv);
632
633 /* Allocate the RX queue, or reset if it is already allocated */
5a878bf6 634 iwl_rx_init(trans(priv));
392f8b78
EG
635
636 /* Allocate or reset and init all Tx and Command queues */
637 if (iwl_tx_init(priv))
638 return -ENOMEM;
639
640 if (priv->cfg->base_params->shadow_reg_enable) {
641 /* enable shadow regs in HW */
642 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
643 0x800FFFFF);
644 }
645
63013ae3 646 set_bit(STATUS_INIT, &priv->shrd->status);
392f8b78
EG
647
648 return 0;
649}
650
651#define HW_READY_TIMEOUT (50)
652
653/* Note: returns poll_bit return value, which is >= 0 if success */
654static int iwl_set_hw_ready(struct iwl_priv *priv)
655{
656 int ret;
657
658 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
659 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
660
661 /* See if we got it */
662 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
663 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
664 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
665 HW_READY_TIMEOUT);
666
667 IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
668 return ret;
669}
670
671/* Note: returns standard 0/-ERROR code */
e6bb4c9c 672static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
392f8b78
EG
673{
674 int ret;
675
0286cee0 676 IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
392f8b78
EG
677
678 ret = iwl_set_hw_ready(priv);
679 if (ret >= 0)
680 return 0;
681
682 /* If HW is not ready, prepare the conditions to check again */
683 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
684 CSR_HW_IF_CONFIG_REG_PREPARE);
685
686 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
687 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
688 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
689
690 if (ret < 0)
691 return ret;
692
693 /* HW should be ready by now, check again. */
694 ret = iwl_set_hw_ready(priv);
695 if (ret >= 0)
696 return 0;
697 return ret;
698}
699
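/**
 * iwl_trans_pcie_start_device - Prepare the hardware for firmware load
 *
 * Check HW readiness and the RF-kill switch, initialize the NIC, clear the
 * RF-kill handshake bits and enable host interrupts. Returns -ERFKILL when
 * the platform switch is set to KILL.
 */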
e6bb4c9c 700static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
392f8b78
EG
701{
702 int ret;
703
704 priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
705
706 if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
e6bb4c9c 707 iwl_trans_pcie_prepare_card_hw(priv)) {
392f8b78
EG
708 IWL_WARN(priv, "Exit HW not ready\n");
709 return -EIO;
710 }
711
712 /* If platform's RF_KILL switch is NOT set to KILL */
713 if (iwl_read32(priv, CSR_GP_CNTRL) &
714 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
63013ae3 715 clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
392f8b78 716 else
63013ae3 717 set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
392f8b78 718
845a9c0d 719 if (iwl_is_rfkill(priv->shrd)) {
392f8b78 720 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
0c325769 721 iwl_enable_interrupts(trans(priv));
392f8b78
EG
722 return -ERFKILL;
723 }
724
725 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
726
727 ret = iwl_nic_init(priv);
728 if (ret) {
729 IWL_ERR(priv, "Unable to init nic\n");
730 return ret;
731 }
732
733 /* make sure rfkill handshake bits are cleared */
734 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
735 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
736 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
737
738 /* clear (again), then enable host interrupts */
739 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
0c325769 740 iwl_enable_interrupts(trans(priv));
392f8b78
EG
741
742 /* really make sure rfkill handshake bits are cleared */
743 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
744 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
745
746 return 0;
747}
748
b3c2ce13
EG
749/*
750 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
10b15e6f 751 * must be called under priv->shrd->lock and mac access
b3c2ce13
EG
752 */
753static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
754{
755 iwl_write_prph(priv, SCD_TXFACT, mask);
756}
757
758#define IWL_AC_UNSET -1
759
760struct queue_to_fifo_ac {
761 s8 fifo, ac;
762};
763
764static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
765 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
766 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
767 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
768 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
769 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
770 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
771 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
772 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
773 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
774 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
72c04ce0 775 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
b3c2ce13
EG
776};
777
778static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
779 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
780 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
781 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
782 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
783 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
784 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
785 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
786 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
787 { IWL_TX_FIFO_BE_IPAN, 2, },
788 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
72c04ce0 789 { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
b3c2ce13 790};
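/**
 * iwl_trans_pcie_tx_start - Bring up the TX scheduler and DMA channels
 *
 * Clear the scheduler context and TX status areas in SRAM, enable the FH
 * DMA channels, initialize every TX queue's scheduler context and map the
 * default queues to their FIFOs.
 */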
e6bb4c9c 791static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
b3c2ce13
EG
792{
793 const struct queue_to_fifo_ac *queue_to_fifo;
794 struct iwl_rxon_context *ctx;
105183b1
EG
795 struct iwl_trans *trans = trans(priv);
796 struct iwl_trans_pcie *trans_pcie =
797 IWL_TRANS_GET_PCIE_TRANS(trans);
b3c2ce13
EG
798 u32 a;
799 unsigned long flags;
800 int i, chan;
801 u32 reg_val;
802
105183b1 803 spin_lock_irqsave(&trans->shrd->lock, flags);
b3c2ce13 804
105183b1
EG
805 trans_pcie->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
806 a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
b3c2ce13 807 /* reset context data memory */
105183b1 808 for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
b3c2ce13
EG
809 a += 4)
810 iwl_write_targ_mem(priv, a, 0);
811 /* reset tx status memory */
105183b1 812 for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
b3c2ce13
EG
813 a += 4)
814 iwl_write_targ_mem(priv, a, 0);
105183b1 815 for (; a < trans_pcie->scd_base_addr +
d6189124
EG
816 SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
817 a += 4)
b3c2ce13
EG
818 iwl_write_targ_mem(priv, a, 0);
819
820 iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
105183b1 821 trans_pcie->scd_bc_tbls.dma >> 10);
b3c2ce13
EG
822
823 /* Enable DMA channel */
824 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
825 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
826 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
827 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
828
829 /* Update FH chicken bits */
830 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
831 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
832 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
833
834 iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
835 SCD_QUEUECHAIN_SEL_ALL(priv));
836 iwl_write_prph(priv, SCD_AGGR_SEL, 0);
837
838 /* initiate the queues */
d6189124 839 for (i = 0; i < hw_params(priv).max_txq_num; i++) {
b3c2ce13
EG
840 iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
841 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
105183b1 842 iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
b3c2ce13 843 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
105183b1 844 iwl_write_targ_mem(priv, trans_pcie->scd_base_addr +
b3c2ce13
EG
845 SCD_CONTEXT_QUEUE_OFFSET(i) +
846 sizeof(u32),
847 ((SCD_WIN_SIZE <<
848 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
849 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
850 ((SCD_FRAME_LIMIT <<
851 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
852 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
853 }
854
855 iwl_write_prph(priv, SCD_INTERRUPT_MASK,
105183b1 856 IWL_MASK(0, hw_params(trans).max_txq_num));
b3c2ce13
EG
857
858 /* Activate all Tx DMA/FIFO channels */
859 iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
860
861 /* map queues to FIFOs */
862 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
863 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
864 else
865 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
866
cefeaa5f 867 iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);
b3c2ce13
EG
868
869 /* make sure all queues are not stopped */
870 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
871 for (i = 0; i < 4; i++)
872 atomic_set(&priv->queue_stop_count[i], 0);
873 for_each_context(priv, ctx)
874 ctx->last_tx_rejected = false;
875
876 /* reset to 0 to enable all the queues first */
877 priv->txq_ctx_active_msk = 0;
878
effcea16 879 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
72c04ce0 880 IWLAGN_FIRST_AMPDU_QUEUE);
effcea16 881 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
72c04ce0 882 IWLAGN_FIRST_AMPDU_QUEUE);
b3c2ce13 883
72c04ce0 884 for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
b3c2ce13
EG
885 int fifo = queue_to_fifo[i].fifo;
886 int ac = queue_to_fifo[i].ac;
887
888 iwl_txq_ctx_activate(priv, i);
889
890 if (fifo == IWL_TX_FIFO_UNUSED)
891 continue;
892
893 if (ac != IWL_AC_UNSET)
894 iwl_set_swq_id(&priv->txq[i], ac, i);
48d42c42 895 iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
b3c2ce13
EG
896 }
897
10b15e6f 898 spin_unlock_irqrestore(&priv->shrd->lock, flags);
b3c2ce13
EG
899
900 /* Enable L1-Active */
901 iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
902 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
903}
904
c170b867
EG
905/**
906 * iwl_trans_tx_stop - Stop all Tx DMA channels
907 */
908static int iwl_trans_tx_stop(struct iwl_priv *priv)
909{
910 int ch, txq_id;
911 unsigned long flags;
912
913 /* Turn off all Tx DMA fifos */
10b15e6f 914 spin_lock_irqsave(&priv->shrd->lock, flags);
c170b867 915
b3c2ce13 916 iwl_trans_txq_set_sched(priv, 0);
c170b867
EG
917
918 /* Stop each Tx DMA channel, and wait for it to be idle */
02f6f659 919 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
c170b867
EG
920 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
921 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
922 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
923 1000))
924 IWL_ERR(priv, "Failing on timeout while stopping"
925 " DMA channel %d [0x%08x]", ch,
926 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
927 }
10b15e6f 928 spin_unlock_irqrestore(&priv->shrd->lock, flags);
c170b867
EG
929
930 if (!priv->txq) {
931 IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
932 return 0;
933 }
934
935 /* Unmap DMA from host system and free skb's */
d6189124 936 for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
c170b867
EG
937 iwl_tx_queue_unmap(priv, txq_id);
938
939 return 0;
940}
941
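/**
 * iwl_trans_pcie_stop_device - Stop the device and put it in low power state
 *
 * Reset the on-board processor, disable interrupts and ICT, stop the TX and
 * RX DMA engines (if the device was enabled) and finally run the APM stop
 * sequence.
 */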
e6bb4c9c 942static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
ab6cf8e8 943{
ab6cf8e8
EG
944 /* stop and reset the on-board processor */
945 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
946
947 /* tell the device to stop sending interrupts */
0c325769 948 iwl_trans_disable_sync_irq(trans(priv));
ab6cf8e8
EG
949
950 /* device going down, Stop using ICT table */
0c325769 951 iwl_disable_ict(trans(priv));
ab6cf8e8
EG
952
953 /*
954 * If a HW restart happens during firmware loading,
955 * then the firmware loading might call this function
956 * and later it might be called again due to the
957 * restart. So don't process again if the device is
958 * already dead.
959 */
63013ae3 960 if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
ab6cf8e8
EG
961 iwl_trans_tx_stop(priv);
962 iwl_trans_rx_stop(priv);
963
964 /* Power-down device's busmaster DMA clocks */
965 iwl_write_prph(priv, APMG_CLK_DIS_REG,
966 APMG_CLK_VAL_DMA_CLK_RQT);
967 udelay(5);
968 }
969
970 /* Make sure (redundant) we've released our request to stay awake */
971 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
972
973 /* Stop the device, and put it in low power state */
974 iwl_apm_stop(priv);
975}
976
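/**
 * iwl_trans_pcie_get_tx_cmd - Get the next free TX command slot of a queue
 *
 * Returns NULL when the queue has dropped below its high-water mark;
 * otherwise resets the device command at the write pointer and fills in the
 * sequence field so the TX response can later be matched back to this TFD.
 */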
e6bb4c9c 977static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
47c1b496
EG
978 int txq_id)
979{
980 struct iwl_tx_queue *txq = &priv->txq[txq_id];
981 struct iwl_queue *q = &txq->q;
982 struct iwl_device_cmd *dev_cmd;
983
984 if (unlikely(iwl_queue_space(q) < q->high_mark))
985 return NULL;
986
987 /*
988 * Set up the Tx-command (not MAC!) header.
989 * Store the chosen Tx queue and TFD index within the sequence field;
990 * after Tx, uCode's Tx response will return this value so driver can
991 * locate the frame within the tx queue and do post-tx processing.
992 */
993 dev_cmd = txq->cmd[q->write_ptr];
994 memset(dev_cmd, 0, sizeof(*dev_cmd));
995 dev_cmd->hdr.cmd = REPLY_TX;
996 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
997 INDEX_TO_SEQ(q->write_ptr)));
998 return &dev_cmd->cmd.tx;
999}
1000
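/**
 * iwl_trans_pcie_tx - Map a frame and attach it to the TX queue
 *
 * Map the TX command plus MAC header and the frame payload for DMA, attach
 * both to the next TFD, update the byte-count table for aggregated frames
 * and bump the queue's write pointer. Stops the queue when it fills up.
 */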
e6bb4c9c 1001static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
47c1b496
EG
1002 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
1003 struct iwl_rxon_context *ctx)
1004{
1005 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1006 struct iwl_queue *q = &txq->q;
1007 struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
1008 struct iwl_cmd_meta *out_meta;
1009
1010 dma_addr_t phys_addr = 0;
1011 dma_addr_t txcmd_phys;
1012 dma_addr_t scratch_phys;
1013 u16 len, firstlen, secondlen;
1014 u8 wait_write_ptr = 0;
1015 u8 hdr_len = ieee80211_hdrlen(fc);
1016
1017 /* Set up driver data for this TFD */
1018 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
1019 txq->txb[q->write_ptr].skb = skb;
1020 txq->txb[q->write_ptr].ctx = ctx;
1021
1022 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1023 out_meta = &txq->meta[q->write_ptr];
1024
1025 /*
1026 * Use the first empty entry in this queue's command buffer array
1027 * to contain the Tx command and MAC header concatenated together
1028 * (payload data will be in another buffer).
1029 * Size of this varies, due to varying MAC header length.
1030 * If end is not dword aligned, we'll have 2 extra bytes at the end
1031 * of the MAC header (device reads on dword boundaries).
1032 * We'll tell device about this padding later.
1033 */
1034 len = sizeof(struct iwl_tx_cmd) +
1035 sizeof(struct iwl_cmd_header) + hdr_len;
1036 firstlen = (len + 3) & ~3;
1037
1038 /* Tell NIC about any 2-byte padding after MAC header */
1039 if (firstlen != len)
1040 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1041
1042 /* Physical address of this Tx command's header (not MAC header!),
1043 * within command buffer array. */
d5934110 1044 txcmd_phys = dma_map_single(priv->bus->dev,
47c1b496
EG
1045 &dev_cmd->hdr, firstlen,
1046 DMA_BIDIRECTIONAL);
d5934110 1047 if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
47c1b496
EG
1048 return -1;
1049 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1050 dma_unmap_len_set(out_meta, len, firstlen);
1051
1052 if (!ieee80211_has_morefrags(fc)) {
1053 txq->need_update = 1;
1054 } else {
1055 wait_write_ptr = 1;
1056 txq->need_update = 0;
1057 }
1058
1059 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1060 * if any (802.11 null frames have no payload). */
1061 secondlen = skb->len - hdr_len;
1062 if (secondlen > 0) {
d5934110 1063 phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
47c1b496 1064 secondlen, DMA_TO_DEVICE);
d5934110
EG
1065 if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
1066 dma_unmap_single(priv->bus->dev,
47c1b496
EG
1067 dma_unmap_addr(out_meta, mapping),
1068 dma_unmap_len(out_meta, len),
1069 DMA_BIDIRECTIONAL);
1070 return -1;
1071 }
1072 }
1073
1074 /* Attach buffers to TFD */
1075 iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
1076 if (secondlen > 0)
1077 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
1078 secondlen, 0);
1079
1080 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1081 offsetof(struct iwl_tx_cmd, scratch);
1082
1083 /* take back ownership of DMA buffer to enable update */
d5934110 1084 dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
47c1b496
EG
1085 DMA_BIDIRECTIONAL);
1086 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1087 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1088
1089 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
1090 le16_to_cpu(dev_cmd->hdr.sequence));
1091 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1092 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1093 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1094
1095 /* Set up entry for this TFD in Tx byte-count array */
1096 if (ampdu)
48d42c42 1097 iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
47c1b496
EG
1098 le16_to_cpu(tx_cmd->len));
1099
d5934110 1100 dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
47c1b496
EG
1101 DMA_BIDIRECTIONAL);
1102
1103 trace_iwlwifi_dev_tx(priv,
1104 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1105 sizeof(struct iwl_tfd),
1106 &dev_cmd->hdr, firstlen,
1107 skb->data + hdr_len, secondlen);
1108
1109 /* Tell device the write index *just past* this latest filled TFD */
1110 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1111 iwl_txq_update_write_ptr(priv, txq);
1112
1113 /*
1114 * At this point the frame is "transmitted" successfully
1115 * and we will get a TX status notification eventually,
1116 * regardless of the value of ret. "ret" only indicates
1117 * whether or not we should update the write pointer.
1118 */
a0eaad71 1119 if (iwl_queue_space(q) < q->high_mark) {
47c1b496
EG
1120 if (wait_write_ptr) {
1121 txq->need_update = 1;
1122 iwl_txq_update_write_ptr(priv, txq);
1123 } else {
1124 iwl_stop_queue(priv, txq);
1125 }
1126 }
1127 return 0;
1128}
1129
e6bb4c9c 1130static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
56d90f4c
EG
1131{
1132 /* Remove all resets to allow NIC to operate */
1133 iwl_write32(priv, CSR_RESET, 0);
1134}
1135
e6bb4c9c
EG
1136static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
1137{
5a878bf6
EG
1138 struct iwl_trans_pcie *trans_pcie =
1139 IWL_TRANS_GET_PCIE_TRANS(trans);
e6bb4c9c
EG
1140 int err;
1141
0c325769
EG
1142 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1143
1144 tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
1145 iwl_irq_tasklet, (unsigned long)trans);
e6bb4c9c 1146
0c325769 1147 iwl_alloc_isr_ict(trans);
e6bb4c9c
EG
1148
1149 err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
0c325769 1150 DRV_NAME, trans);
e6bb4c9c 1151 if (err) {
0c325769
EG
1152 IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
1153 iwl_free_isr_ict(trans);
e6bb4c9c
EG
1154 return err;
1155 }
1156
5a878bf6 1157 INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
e6bb4c9c
EG
1158 return 0;
1159}
1160
a0eaad71
EG
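/**
 * iwl_trans_pcie_reclaim - Reclaim TFDs up to the given SSN after a TX status
 *
 * Free the TFDs/skbs that the firmware has reported as handled and, when
 * there is room again (and the queue is not being drained for DELBA or a
 * passive-channel failure), wake the corresponding mac80211 queue.
 */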
1161static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id,
1162 int ssn, u32 status, struct sk_buff_head *skbs)
1163{
1164 struct iwl_priv *priv = priv(trans);
1165 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1166 /* n_bd is usually 256 => n_bd - 1 = 0xff */
1167 int tfd_num = ssn & (txq->q.n_bd - 1);
1168 u8 agg_state;
1169 bool cond;
1170
1171 if (txq->sched_retry) {
1172 agg_state =
1173 priv->stations[txq->sta_id].tid[txq->tid].agg.state;
1174 cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
1175 } else {
1176 cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
1177 }
1178
1179 if (txq->q.read_ptr != tfd_num) {
1180 IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
1181 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
1182 ssn, tfd_num, txq_id, txq->swq_id);
1183 iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1184 if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
1185 iwl_wake_queue(priv, txq);
1186 }
1187}
1188
0c325769 1189static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
a27367d2 1190{
0c325769
EG
1191 unsigned long flags;
1192 struct iwl_trans_pcie *trans_pcie =
1193 IWL_TRANS_GET_PCIE_TRANS(trans);
1194
1195 spin_lock_irqsave(&trans->shrd->lock, flags);
1196 iwl_disable_interrupts(trans);
1197 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1198
a27367d2 1199 /* wait to make sure we flush pending tasklet*/
0c325769
EG
1200 synchronize_irq(bus(trans)->irq);
1201 tasklet_kill(&trans_pcie->irq_tasklet);
a27367d2
EG
1202}
1203
e6bb4c9c 1204static void iwl_trans_pcie_free(struct iwl_priv *priv)
34c1b7ba 1205{
0c325769
EG
1206 free_irq(priv->bus->irq, trans(priv));
1207 iwl_free_isr_ict(trans(priv));
e6bb4c9c
EG
1208 kfree(trans(priv));
1209 trans(priv) = NULL;
34c1b7ba
EG
1210}
1211
57210f7c
EG
1212#ifdef CONFIG_PM
1213
1214static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1215{
1216 /*
1217 * This function is called when system goes into suspend state
1218 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
1219 * first but since iwl_mac_stop() has no knowledge of who the caller is,
1220 * it will not call apm_ops.stop() to stop the DMA operation.
1221 * Calling apm_ops.stop here to make sure we stop the DMA.
1222 *
1223 * But of course ... if we have configured WoWLAN then we did other
1224 * things already :-)
1225 */
1226 if (!trans->shrd->wowlan)
1227 iwl_apm_stop(priv(trans));
1228
1229 return 0;
1230}
1231
1232static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1233{
1234 bool hw_rfkill = false;
1235
0c325769 1236 iwl_enable_interrupts(trans);
57210f7c
EG
1237
1238 if (!(iwl_read32(priv(trans), CSR_GP_CNTRL) &
1239 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
1240 hw_rfkill = true;
1241
1242 if (hw_rfkill)
1243 set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
1244 else
1245 clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
1246
1247 wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy, hw_rfkill);
1248
1249 return 0;
1250}
1251#else /* CONFIG_PM */
1252static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1253{ return 0; }
1254
1255static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1256{ return 0; }
1257
1258#endif /* CONFIG_PM */
1259
e6bb4c9c 1260const struct iwl_trans_ops trans_ops_pcie;
e419d62d 1261
e6bb4c9c
EG
1262static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
1263{
1264 struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
1265 sizeof(struct iwl_trans_pcie),
1266 GFP_KERNEL);
1267 if (iwl_trans) {
5a878bf6
EG
1268 struct iwl_trans_pcie *trans_pcie =
1269 IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
e6bb4c9c
EG
1270 iwl_trans->ops = &trans_ops_pcie;
1271 iwl_trans->shrd = shrd;
5a878bf6 1272 trans_pcie->trans = iwl_trans;
e6bb4c9c 1273 }
ab6cf8e8 1274
e6bb4c9c
EG
1275 return iwl_trans;
1276}
47c1b496 1277
87e5666c
EG
1278#ifdef CONFIG_IWLWIFI_DEBUGFS
1279/* create and remove of files */
1280#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
5a878bf6 1281 if (!debugfs_create_file(#name, mode, parent, trans, \
87e5666c
EG
1282 &iwl_dbgfs_##name##_ops)) \
1283 return -ENOMEM; \
1284} while (0)
1285
1286/* file operation */
1287#define DEBUGFS_READ_FUNC(name) \
1288static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
1289 char __user *user_buf, \
1290 size_t count, loff_t *ppos);
1291
1292#define DEBUGFS_WRITE_FUNC(name) \
1293static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
1294 const char __user *user_buf, \
1295 size_t count, loff_t *ppos);
1296
1297
1298static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
1299{
1300 file->private_data = inode->i_private;
1301 return 0;
1302}
1303
1304#define DEBUGFS_READ_FILE_OPS(name) \
1305 DEBUGFS_READ_FUNC(name); \
1306static const struct file_operations iwl_dbgfs_##name##_ops = { \
1307 .read = iwl_dbgfs_##name##_read, \
1308 .open = iwl_dbgfs_open_file_generic, \
1309 .llseek = generic_file_llseek, \
1310};
1311
1312#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
1313 DEBUGFS_READ_FUNC(name); \
1314 DEBUGFS_WRITE_FUNC(name); \
1315static const struct file_operations iwl_dbgfs_##name##_ops = { \
1316 .write = iwl_dbgfs_##name##_write, \
1317 .read = iwl_dbgfs_##name##_read, \
1318 .open = iwl_dbgfs_open_file_generic, \
1319 .llseek = generic_file_llseek, \
1320};
1321
1322static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
1323 char __user *user_buf,
1324 size_t count, loff_t *ppos)
1325{
5a878bf6
EG
1326 struct iwl_trans *trans = file->private_data;
1327 struct iwl_priv *priv = priv(trans);
87e5666c
EG
1328 int pos = 0, ofs = 0;
1329 int cnt = 0, entry;
5a878bf6
EG
1330 struct iwl_trans_pcie *trans_pcie =
1331 IWL_TRANS_GET_PCIE_TRANS(trans);
87e5666c
EG
1332 struct iwl_tx_queue *txq;
1333 struct iwl_queue *q;
5a878bf6 1334 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
87e5666c
EG
1335 char *buf;
1336 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
1337 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
1338 const u8 *ptr;
1339 ssize_t ret;
1340
1341 if (!priv->txq) {
5a878bf6 1342 IWL_ERR(trans, "txq not ready\n");
87e5666c
EG
1343 return -EAGAIN;
1344 }
1345 buf = kzalloc(bufsz, GFP_KERNEL);
1346 if (!buf) {
5a878bf6 1347 IWL_ERR(trans, "Can not allocate buffer\n");
87e5666c
EG
1348 return -ENOMEM;
1349 }
1350 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
5a878bf6 1351 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
87e5666c
EG
1352 txq = &priv->txq[cnt];
1353 q = &txq->q;
1354 pos += scnprintf(buf + pos, bufsz - pos,
1355 "q[%d]: read_ptr: %u, write_ptr: %u\n",
1356 cnt, q->read_ptr, q->write_ptr);
1357 }
1358 if (priv->tx_traffic &&
5a878bf6 1359 (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
87e5666c
EG
1360 ptr = priv->tx_traffic;
1361 pos += scnprintf(buf + pos, bufsz - pos,
5a878bf6 1362 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
87e5666c
EG
1363 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1364 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1365 entry++, ofs += 16) {
1366 pos += scnprintf(buf + pos, bufsz - pos,
1367 "0x%.4x ", ofs);
1368 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1369 buf + pos, bufsz - pos, 0);
1370 pos += strlen(buf + pos);
1371 if (bufsz - pos > 0)
1372 buf[pos++] = '\n';
1373 }
1374 }
1375 }
1376
1377 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
1378 pos += scnprintf(buf + pos, bufsz - pos,
1379 "read: %u, write: %u\n",
1380 rxq->read, rxq->write);
1381
1382 if (priv->rx_traffic &&
5a878bf6 1383 (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
87e5666c
EG
1384 ptr = priv->rx_traffic;
1385 pos += scnprintf(buf + pos, bufsz - pos,
5a878bf6 1386 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
87e5666c
EG
1387 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1388 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1389 entry++, ofs += 16) {
1390 pos += scnprintf(buf + pos, bufsz - pos,
1391 "0x%.4x ", ofs);
1392 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1393 buf + pos, bufsz - pos, 0);
1394 pos += strlen(buf + pos);
1395 if (bufsz - pos > 0)
1396 buf[pos++] = '\n';
1397 }
1398 }
1399 }
1400
1401 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1402 kfree(buf);
1403 return ret;
1404}
1405
1406static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
1407 const char __user *user_buf,
1408 size_t count, loff_t *ppos)
1409{
5a878bf6 1410 struct iwl_trans *trans = file->private_data;
87e5666c
EG
1411 char buf[8];
1412 int buf_size;
1413 int traffic_log;
1414
1415 memset(buf, 0, sizeof(buf));
1416 buf_size = min(count, sizeof(buf) - 1);
1417 if (copy_from_user(buf, user_buf, buf_size))
1418 return -EFAULT;
1419 if (sscanf(buf, "%d", &traffic_log) != 1)
1420 return -EFAULT;
1421 if (traffic_log == 0)
5a878bf6 1422 iwl_reset_traffic_log(priv(trans));
87e5666c
EG
1423
1424 return count;
1425}
1426
1427static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1428 char __user *user_buf,
1429 size_t count, loff_t *ppos) {
1430
5a878bf6
EG
1431 struct iwl_trans *trans = file->private_data;
1432 struct iwl_priv *priv = priv(trans);
87e5666c
EG
1433 struct iwl_tx_queue *txq;
1434 struct iwl_queue *q;
1435 char *buf;
1436 int pos = 0;
1437 int cnt;
1438 int ret;
1439 const size_t bufsz = sizeof(char) * 64 *
1440 priv->cfg->base_params->num_of_queues;
1441
1442 if (!priv->txq) {
1443 IWL_ERR(priv, "txq not ready\n");
1444 return -EAGAIN;
1445 }
1446 buf = kzalloc(bufsz, GFP_KERNEL);
1447 if (!buf)
1448 return -ENOMEM;
1449
5a878bf6 1450 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
87e5666c
EG
1451 txq = &priv->txq[cnt];
1452 q = &txq->q;
1453 pos += scnprintf(buf + pos, bufsz - pos,
1454 "hwq %.2d: read=%u write=%u stop=%d"
1455 " swq_id=%#.2x (ac %d/hwq %d)\n",
1456 cnt, q->read_ptr, q->write_ptr,
1457 !!test_bit(cnt, priv->queue_stopped),
1458 txq->swq_id, txq->swq_id & 3,
1459 (txq->swq_id >> 2) & 0x1f);
1460 if (cnt >= 4)
1461 continue;
1462 /* for the ACs, display the stop count too */
1463 pos += scnprintf(buf + pos, bufsz - pos,
1464 " stop-count: %d\n",
1465 atomic_read(&priv->queue_stop_count[cnt]));
1466 }
1467 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1468 kfree(buf);
1469 return ret;
1470}
1471
1472static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1473 char __user *user_buf,
1474 size_t count, loff_t *ppos) {
5a878bf6
EG
1475 struct iwl_trans *trans = file->private_data;
1476 struct iwl_trans_pcie *trans_pcie =
1477 IWL_TRANS_GET_PCIE_TRANS(trans);
1478 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
87e5666c
EG
1479 char buf[256];
1480 int pos = 0;
1481 const size_t bufsz = sizeof(buf);
1482
1483 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1484 rxq->read);
1485 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1486 rxq->write);
1487 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1488 rxq->free_count);
1489 if (rxq->rb_stts) {
1490 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1491 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1492 } else {
1493 pos += scnprintf(buf + pos, bufsz - pos,
1494 "closed_rb_num: Not Allocated\n");
1495 }
1496 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1497}
1498
7ff94706
EG
1499static ssize_t iwl_dbgfs_log_event_read(struct file *file,
1500 char __user *user_buf,
1501 size_t count, loff_t *ppos)
1502{
1503 struct iwl_trans *trans = file->private_data;
1504 char *buf;
1505 int pos = 0;
1506 ssize_t ret = -ENOMEM;
1507
1508 ret = pos = iwl_dump_nic_event_log(priv(trans), true, &buf, true);
1509 if (buf) {
1510 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1511 kfree(buf);
1512 }
1513 return ret;
1514}
1515
1516static ssize_t iwl_dbgfs_log_event_write(struct file *file,
1517 const char __user *user_buf,
1518 size_t count, loff_t *ppos)
1519{
1520 struct iwl_trans *trans = file->private_data;
1521 u32 event_log_flag;
1522 char buf[8];
1523 int buf_size;
1524
1525 memset(buf, 0, sizeof(buf));
1526 buf_size = min(count, sizeof(buf) - 1);
1527 if (copy_from_user(buf, user_buf, buf_size))
1528 return -EFAULT;
1529 if (sscanf(buf, "%d", &event_log_flag) != 1)
1530 return -EFAULT;
1531 if (event_log_flag == 1)
1532 iwl_dump_nic_event_log(priv(trans), true, NULL, false);
1533
1534 return count;
1535}
1536
1f7b6172
EG
1537static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1538 char __user *user_buf,
1539 size_t count, loff_t *ppos) {
1540
1541 struct iwl_trans *trans = file->private_data;
1542 struct iwl_trans_pcie *trans_pcie =
1543 IWL_TRANS_GET_PCIE_TRANS(trans);
1544 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1545
1546 int pos = 0;
1547 char *buf;
1548 int bufsz = 24 * 64; /* 24 items * 64 char per item */
1549 ssize_t ret;
1550
1551 buf = kzalloc(bufsz, GFP_KERNEL);
1552 if (!buf) {
1553 IWL_ERR(trans, "Can not allocate Buffer\n");
1554 return -ENOMEM;
1555 }
1556
1557 pos += scnprintf(buf + pos, bufsz - pos,
1558 "Interrupt Statistics Report:\n");
1559
1560 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
1561 isr_stats->hw);
1562 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
1563 isr_stats->sw);
1564 if (isr_stats->sw || isr_stats->hw) {
1565 pos += scnprintf(buf + pos, bufsz - pos,
1566 "\tLast Restarting Code: 0x%X\n",
1567 isr_stats->err_code);
1568 }
1569#ifdef CONFIG_IWLWIFI_DEBUG
1570 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
1571 isr_stats->sch);
1572 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
1573 isr_stats->alive);
1574#endif
1575 pos += scnprintf(buf + pos, bufsz - pos,
1576 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
1577
1578 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
1579 isr_stats->ctkill);
1580
1581 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
1582 isr_stats->wakeup);
1583
1584 pos += scnprintf(buf + pos, bufsz - pos,
1585 "Rx command responses:\t\t %u\n", isr_stats->rx);
1586
1587 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
1588 isr_stats->tx);
1589
1590 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
1591 isr_stats->unhandled);
1592
1593 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1594 kfree(buf);
1595 return ret;
1596}
1597
1598static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1599 const char __user *user_buf,
1600 size_t count, loff_t *ppos)
1601{
1602 struct iwl_trans *trans = file->private_data;
1603 struct iwl_trans_pcie *trans_pcie =
1604 IWL_TRANS_GET_PCIE_TRANS(trans);
1605 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1606
1607 char buf[8];
1608 int buf_size;
1609 u32 reset_flag;
1610
1611 memset(buf, 0, sizeof(buf));
1612 buf_size = min(count, sizeof(buf) - 1);
1613 if (copy_from_user(buf, user_buf, buf_size))
1614 return -EFAULT;
1615 if (sscanf(buf, "%x", &reset_flag) != 1)
1616 return -EFAULT;
1617 if (reset_flag == 0)
1618 memset(isr_stats, 0, sizeof(*isr_stats));
1619
1620 return count;
1621}
1622
87e5666c 1623DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
7ff94706 1624DEBUGFS_READ_WRITE_FILE_OPS(log_event);
1f7b6172 1625DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
87e5666c
EG
1626DEBUGFS_READ_FILE_OPS(rx_queue);
1627DEBUGFS_READ_FILE_OPS(tx_queue);
1628
1629/*
1630 * Create the debugfs files and directories
1631 *
1632 */
1633static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1634 struct dentry *dir)
1635{
87e5666c
EG
1636 DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
1637 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
1638 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
7ff94706 1639 DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
1f7b6172 1640 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
87e5666c
EG
1641 return 0;
1642}
1643#else
1644static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1645 struct dentry *dir)
1646{ return 0; }
1647
1648#endif /*CONFIG_IWLWIFI_DEBUGFS */
1649
e6bb4c9c
EG
1650const struct iwl_trans_ops trans_ops_pcie = {
1651 .alloc = iwl_trans_pcie_alloc,
1652 .request_irq = iwl_trans_pcie_request_irq,
1653 .start_device = iwl_trans_pcie_start_device,
1654 .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
1655 .stop_device = iwl_trans_pcie_stop_device,
48d42c42 1656
e6bb4c9c 1657 .tx_start = iwl_trans_pcie_tx_start,
48d42c42 1658
e6bb4c9c
EG
1659 .rx_free = iwl_trans_pcie_rx_free,
1660 .tx_free = iwl_trans_pcie_tx_free,
34c1b7ba 1661
e6bb4c9c
EG
1662 .send_cmd = iwl_trans_pcie_send_cmd,
1663 .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,
c85eb619 1664
e6bb4c9c
EG
1665 .get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
1666 .tx = iwl_trans_pcie_tx,
a0eaad71 1667 .reclaim = iwl_trans_pcie_reclaim,
34c1b7ba 1668
e6bb4c9c
EG
1669 .txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
1670 .txq_agg_setup = iwl_trans_pcie_txq_agg_setup,
34c1b7ba 1671
e6bb4c9c 1672 .kick_nic = iwl_trans_pcie_kick_nic,
1e89cbac 1673
0c325769 1674 .disable_sync_irq = iwl_trans_pcie_disable_sync_irq,
e6bb4c9c 1675 .free = iwl_trans_pcie_free,
87e5666c
EG
1676
1677 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
57210f7c
EG
1678 .suspend = iwl_trans_pcie_suspend,
1679 .resume = iwl_trans_pcie_resume,
e6bb4c9c 1680};
ab697a9f 1681