drivers/net/wireless/iwlwifi/pcie/tx.c
iwlwifi: pcie: prepare the enablement of 31 TFD queues
/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space become < low mark, Tx queue stopped. When
 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
 * Tx queue resumed.
 *
 ***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
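
/*
 * Editorial note (not part of the original file): a worked example of the
 * arithmetic above, assuming TFD_QUEUE_SIZE_MAX == 256 and n_window == 256,
 * so max == 255. If write_ptr == 10 has wrapped past read_ptr == 250, then
 * used = (10 - 250) & 255 = 16 and iwl_queue_space() returns 255 - 16 = 239.
 * The "& (TFD_QUEUE_SIZE_MAX - 1)" mask is what keeps the subtraction well
 * defined across the wrap point.
 */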

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
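
/*
 * Editorial note (not part of the original file): the scheduler reads the
 * byte-count table in a window that can wrap past the end of the array, so
 * the first TFD_QUEUE_SIZE_BC_DUP entries are mirrored after index
 * TFD_QUEUE_SIZE_MAX - 1; that is why the entry is written twice above when
 * write_ptr < TFD_QUEUE_SIZE_BC_DUP. For example, assuming
 * TFD_QUEUE_SIZE_MAX == 256, an update at write_ptr == 3 also lands at
 * tfd_offset[259].
 */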

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock_bh(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
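
/*
 * Editorial note (not part of the original file): each TB stores a 36-bit
 * DMA address split as a 32-bit 'lo' word plus the low nibble of 'hi_n_len';
 * the upper 12 bits of 'hi_n_len' carry the length. As a sketch, for
 * addr == 0x8_1234_5678 and len == 0x40, iwl_pcie_tfd_set_tb() writes
 * lo = 0x12345678 and hi_n_len = (0x40 << 4) | 0x8 = 0x408. The double
 * ">> 16 >> 16" (rather than ">> 32") avoids undefined behaviour when
 * dma_addr_t is only 32 bits wide.
 */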

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the scratchbuf data */

	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			      struct iwl_txq *txq, int slots_num,
			      u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t scratchbuf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
			sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch));

	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;

	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
					      &txq->scratchbufs_dma,
					      GFP_KERNEL);
	if (!txq->scratchbufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			     int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/*
 * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
	}
	txq->active = false;
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->scratchbufs) * txq->q.n_window,
				  txq->scratchbufs, txq->scratchbufs_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
			    SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;

	/* Turn off all Tx DMA fifos */
	spin_lock(&trans_pcie->irq_lock);

	iwl_scd_deactivate_fifos(trans);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock(&trans_pcie->irq_lock);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	if (trans->cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
					 struct iwl_txq *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);

	if (q->read_ptr == q->write_ptr) {
		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
		iwl_trans_pcie_unref(trans);
	}

out:
	spin_unlock_bh(&txq->lock);
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_pcie_ref(trans);
	}

	if (trans_pcie->cmd_in_flight)
		return 0;

	trans_pcie->cmd_in_flight = true;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			trans_pcie->cmd_in_flight = false;
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
	}

	return 0;
}

static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = false;
		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
		iwl_trans_pcie_unref(trans);
	}

	if (WARN_ON(!trans_pcie->cmd_in_flight))
		return 0;

	trans_pcie->cmd_in_flight = false;

	if (trans->cfg->base_params->apmg_wake_up_wa)
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_force_nmi(trans);
		}
	}

	if (q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int fifo = -1;

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans_pcie->cmd_queue)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			trans_pcie->txq[txq_id].ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = trans_pcie->txq[txq_id].q.read_ptr;
		}
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	trans_pcie->txq[txq_id].active = true;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
		 * in total (for the scratchbuf handling), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}
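
/*
 * Editorial note (not part of the original file): the sequence number echoed
 * back by the firmware packs the originating queue and the TFD index, which
 * is how the function above recovers them with SEQ_TO_QUEUE()/SEQ_TO_INDEX();
 * the same packing is produced on the send side in iwl_pcie_enqueue_hcmd()
 * via QUEUE_TO_SEQ() | INDEX_TO_SEQ(). A response whose queue bits do not
 * name the command queue therefore indicates a routing bug, hence the WARN.
 */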
253a634c 1592
9439eac7 1593#define HOST_COMPLETE_TIMEOUT (2 * HZ)
253a634c 1594
f02831be
EG
1595static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1596 struct iwl_host_cmd *cmd)
253a634c 1597{
d9fb6465 1598 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
253a634c
EG
1599 int ret;
1600
1601 /* An asynchronous command can not expect an SKB to be set. */
1602 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1603 return -EINVAL;
1604
f02831be 1605 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
253a634c 1606 if (ret < 0) {
721c32f7 1607 IWL_ERR(trans,
b36b110c 1608 "Error sending %s: enqueue_hcmd failed: %d\n",
990aa6d7 1609 get_cmd_string(trans_pcie, cmd->id), ret);
253a634c
EG
1610 return ret;
1611 }
1612 return 0;
1613}
1614
static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 get_cmd_string(trans_pcie, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

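	/*
	 * Sleep until the completion handler clears
	 * STATUS_SYNC_HCMD_ACTIVE, or give up after
	 * HOST_COMPLETE_TIMEOUT.
	 */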
	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			get_cmd_string(trans_pcie, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->id));
		ret = -ETIMEDOUT;

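		/*
		 * The firmware never answered: force an NMI in the
		 * device and let the op mode know about the firmware
		 * error so recovery can start.
		 */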
		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Clear the CMD_WANT_SKB flag for this command in the
		 * TX cmd queue. Otherwise, if the response arrives
		 * after we gave up, the completion handler would write
		 * through a stale pointer (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

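/*
 * Usage sketch (illustrative only, not code from this file): an op mode
 * typically builds an iwl_host_cmd and reaches this function through the
 * transport ops, roughly:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_ECHO,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_pcie_send_hcmd(trans, &cmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&cmd);
 *
 * REPLY_ECHO is just an example command id; callers normally go through
 * iwl_trans_send_cmd() rather than calling this directly.
 */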
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We can still fail with -ERFKILL if rfkill is asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

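/*
 * Build and ring a TFD for a data frame. Each TFD here uses up to
 * three TBs (buffer descriptors):
 *   TB0 - the first IWL_HCMD_SCRATCHBUF_SIZE bytes of the TX command,
 *         copied into the queue's per-entry scratch buffer,
 *   TB1 - the rest of the TX command plus the 802.11 header,
 *   TB2 - the frame payload, if any.
 */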
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	bool wait_write_ptr;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != q->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

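	/*
	 * TB0 is the queue's per-entry scratch buffer; the TX command's
	 * dram_lsb_ptr/dram_msb_ptr are pointed at the DMA address of
	 * the command's own scratch field within that buffer.
	 */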
	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = ALIGN(len, 4);

	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb, if any (802.11 null frames have no payload).
	 */
	tb2_len = skb->len - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, tb2_len);

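	/*
	 * For a fragmented frame, hold off on passing the new write
	 * pointer to the device until the last fragment is queued
	 * (see the wait_write_ptr handling below).
	 */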
	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr) {
		if (txq->need_update && trans_pcie->wd_timeout)
			mod_timer(&txq->stuck_timer,
				  jiffies + trans_pcie->wd_timeout);
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
		iwl_trans_pcie_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr)
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		else
			iwl_stop_queue(trans, txq);
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}