/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
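
/*
 * Worked example of the wrap arithmetic above (illustrative, assuming
 * TFD_QUEUE_SIZE_MAX == 256): with read_ptr = 250 and write_ptr = 3,
 * the unsigned subtraction gives used = (3 - 250) & 255 = 9, i.e. nine
 * entries in use, so the mask stays correct across the wrap-around
 * point of the ring.
 */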

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i))
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			le32_to_cpu(txq->scratchbufs[i].scratch));

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
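
/*
 * Note on the duplicate write above (layout sketch, not part of the
 * original comments): the byte-count table carries TFD_QUEUE_SIZE_BC_DUP
 * extra entries past the end of the ring that mirror the first entries,
 * so the scheduler can read a contiguous window of byte counts without
 * special-casing the wrap-around:
 *
 *   tfd_offset[0 .. TFD_QUEUE_SIZE_MAX - 1]          the real ring
 *   tfd_offset[TFD_QUEUE_SIZE_MAX .. + BC_DUP - 1]   copies of the start
 */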

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock_bh(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
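
/*
 * TB packing used by the two helpers above, for reference (illustrative
 * example, not from the original comments): tb->lo holds the low 32 bits
 * of the DMA address, and hi_n_len packs address bits 32-35 into its low
 * nibble with the 12-bit length in bits 4-15. E.g. addr = 0x1_2345_6780
 * and len = 0x40 give lo = 0x23456780 and
 * hi_n_len = (0x40 << 4) | 0x1 = 0x401 (before the le16 conversion).
 */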

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the scratchbuf data */

	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			      struct iwl_txq *txq, int slots_num,
			      u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t scratchbuf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
			sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch));

	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;

	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
					      &txq->scratchbufs_dma,
					      GFP_KERNEL);
	if (!txq->scratchbufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			     int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
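
/*
 * Note on the ">> 8" above (explanatory, not from the original comments):
 * the FH_MEM_CBBC_QUEUE register holds the TFD ring base address in units
 * of 256 bytes, so the ring must be 256-byte aligned. The page alignment
 * guaranteed by dma_alloc_coherent() for an allocation of this size
 * satisfies that requirement.
 */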

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
	}
	txq->active = false;
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->scratchbufs) * txq->q.n_window,
				  txq->scratchbufs, txq->scratchbufs_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
			    SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;

	/* Turn off all Tx DMA fifos */
	spin_lock(&trans_pcie->irq_lock);

	iwl_scd_deactivate_fifos(trans);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock(&trans_pcie->irq_lock);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
					 struct iwl_txq *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);
out:
	spin_unlock_bh(&txq->lock);
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_in_flight)
		return 0;

	trans_pcie->cmd_in_flight = true;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			trans_pcie->cmd_in_flight = false;
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
	}

	return 0;
}

static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (WARN_ON(!trans_pcie->cmd_in_flight))
		return 0;

	trans_pcie->cmd_in_flight = false;

	if (trans->cfg->base_params->apmg_wake_up_wa)
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	return 0;
}
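
/*
 * Design note (explanatory, not from the original comments): the
 * set/clear pair above effectively implements a wakelock around host
 * command processing. MAC_ACCESS_REQ is held from the moment a command
 * is put on the ring until the command queue drains (see
 * iwl_pcie_cmdq_reclaim() below), so on affected NICs the firmware
 * cannot go back to sleep while a command is in flight.
 */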

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_force_nmi(trans);
		}
	}

	if (q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
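/*
 * Example of the packing above: sta_id 2 and tid 5 map to
 * BUILD_RAxTID(2, 5) == (2 << 4) + 5 == 0x25, i.e. station index in the
 * high nibble and traffic ID in the low nibble.
 */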

void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int fifo = -1;

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans_pcie->cmd_queue)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			trans_pcie->txq[txq_id].ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = trans_pcie->txq[txq_id].q.read_ptr;
		}
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				   (ssn & 0xff) | (txq_id << 8));
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
					SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
					SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));
	}

	trans_pcie->txq[txq_id].active = true;
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
		 * in total (for the scratchbuf handling), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
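
/*
 * Resulting TFD layout for a host command, as built above (sketch, not
 * from the original comments; exact sizes depend on
 * IWL_HCMD_SCRATCHBUF_SIZE):
 *   TB0:  the scratch buffer - command header plus the start of the
 *         copied payload, taken from txq->scratchbufs
 *   TB1:  the remainder of the copied part of the command, if any
 *   TB2+: NOCOPY/DUP chunks, mapped directly from the caller's buffers
 */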

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}
1590
f02831be
EG
1591static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1592 struct iwl_host_cmd *cmd)
253a634c 1593{
8ad71bef 1594 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
253a634c
EG
1595 int cmd_idx;
1596 int ret;
1597
6d8f6eeb 1598 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
990aa6d7 1599 get_cmd_string(trans_pcie, cmd->id));
253a634c 1600
eb7ff77e
AN
1601 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1602 &trans->status),
bcbb8c9c
JB
1603 "Command %s: a command is already active!\n",
1604 get_cmd_string(trans_pcie, cmd->id)))
2cc39c94 1605 return -EIO;
2cc39c94 1606
6d8f6eeb 1607 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
990aa6d7 1608 get_cmd_string(trans_pcie, cmd->id));
253a634c 1609
f02831be 1610 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
253a634c
EG
1611 if (cmd_idx < 0) {
1612 ret = cmd_idx;
eb7ff77e 1613 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
721c32f7 1614 IWL_ERR(trans,
b36b110c 1615 "Error sending %s: enqueue_hcmd failed: %d\n",
990aa6d7 1616 get_cmd_string(trans_pcie, cmd->id), ret);
253a634c
EG
1617 return ret;
1618 }
1619
	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			get_cmd_string(trans_pcie, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->id));
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise, if the response arrives
		 * after we return, the completion path could write
		 * through a stale pointer (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We can still fail on RFKILL if it is asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}
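/*
 * Usage sketch (illustrative only, not driver code): an op-mode normally
 * reaches this entry point through iwl_trans_send_cmd().  REPLY_ECHO below
 * is just a placeholder command ID; with CMD_WANT_SKB the caller owns the
 * response page and must release it with iwl_free_resp().
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_ECHO,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &cmd);
 *
 *	if (!ret) {
 *		// ... inspect cmd.resp_pkt ...
 *		iwl_free_resp(&cmd);
 *	}
 */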
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	bool wait_write_ptr;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	spin_lock(&txq->lock);

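	/*
	 * txq->lock serialises this path against reclaim: it protects the
	 * read/write pointers and the entries[] array updated below.
	 */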
	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD
	 * parse the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != q->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

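	/*
	 * TFD layout built below, summarizing the three TBs:
	 *   TB0 - first IWL_HCMD_SCRATCHBUF_SIZE bytes of the TX command,
	 *         copied into the per-entry scratch buffer;
	 *   TB1 - remainder of the TX command plus the 802.11 header,
	 *         dword aligned;
	 *   TB2 - the frame payload (skb data past the header), if any.
	 */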
	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = ALIGN(len, 4);

	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
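	/*
	 * Unlike the tb2 error path below, a tb1 mapping failure above needs
	 * no iwl_pcie_tfd_unmap(): TB0 comes from the queue's coherent
	 * scratch area and nothing else has been DMA-mapped yet, while the
	 * abandoned TFD is simply overwritten since q->write_ptr never moved.
	 */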

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb, if any (802.11 null frames have no payload).
	 */
	tb2_len = skb->len - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}
a0eaad71 1807
f02831be
EG
1808 /* Set up entry for this TFD in Tx byte-count array */
1809 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
a0eaad71 1810
f02831be
EG
1811 trace_iwlwifi_dev_tx(trans->dev, skb,
1812 &txq->tfds[txq->q.write_ptr],
1813 sizeof(struct iwl_tfd),
38c0f334
JB
1814 &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
1815 skb->data + hdr_len, tb2_len);
f02831be 1816 trace_iwlwifi_dev_tx_data(trans->dev, skb,
38c0f334
JB
1817 skb->data + hdr_len, tb2_len);
1818
ea68f460 1819 wait_write_ptr = ieee80211_has_morefrags(fc);
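	/*
	 * For fragmented frames, defer telling the device about the new
	 * write pointer until the whole burst is queued; the low-space
	 * check below still forces the update before the queue can stop.
	 */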

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr)
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		else
			iwl_stop_queue(trans, txq);
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}