/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

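/*
 * Byte-count packing, worked example (illustrative numbers): a total
 * len of 0x064 (100 bytes) for sta_id = 3 yields
 * bc_ent = 0x064 | (3 << 12) = 0x3064 -- the low 12 bits carry the
 * length, the top 4 bits the station id.  Entries below
 * TFD_QUEUE_SIZE_BC_DUP are mirrored at TFD_QUEUE_SIZE_MAX + write_ptr,
 * which appears intended to let the scheduler read contiguously across
 * the circular-buffer wrap point.
 */
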
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

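/*
 * The value written to HBUS_TARG_WRPTR packs the queue id into bits
 * 8..15 and the write pointer into bits 0..7.  For example
 * (illustrative numbers), queue 4 with write_ptr 0x2a is written as
 * 0x042a.
 */
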
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

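/*
 * A TFD buffer descriptor holds a 36-bit DMA address split across two
 * fields: the low 32 bits in 'lo' and the top 4 bits in the low nibble
 * of 'hi_n_len', whose remaining 12 bits hold the length.  Worked
 * example (illustrative numbers): addr = 0x9abcd1234, len = 500 gives
 * lo = 0xabcd1234 and hi_n_len = (500 << 4) | 0x9 = 0x1f49.  The double
 * ">> 16" shifts avoid an undefined 32-bit shift when dma_addr_t is
 * only 32 bits wide.
 */
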
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

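/*
 * Typical usage (sketch, mirroring iwl_enqueue_hcmd below): the first
 * chunk of a frame is attached with reset = 1 to start from a clean
 * TFD, and any further chunks with reset = 0 so they are appended:
 *
 *	iwlagn_txq_attach_buf_to_tfd(trans, txq, hdr_phys, hdr_len, 1);
 *	iwlagn_txq_attach_buf_to_tfd(trans, txq, frag_phys, frag_len, 0);
 */
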
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

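/*
 * Worked example (illustrative numbers): with n_bd = 256, n_window = 64,
 * read_ptr = 10 and write_ptr = 20, s starts at -10, is raised by
 * n_window to 54, and the 2-entry reserve leaves 52 free slots.  If the
 * in-flight count ever exceeds the window (e.g. read_ptr = 20,
 * write_ptr = 10 after a wrap), the result clamps to 0.
 */
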
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

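/*
 * For example (illustrative numbers), iwl_queue_init(q, 256, 64, id)
 * yields low_mark = 16 and high_mark = 8; a 32-entry window gets
 * low_mark = 8, high_mark = 4.  Both marks are floored (at 4 and 2
 * respectively) for very small windows.
 */
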
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}

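/*
 * The scheduler's translation table packs two queues per 32-bit word
 * (pairing implied by SCD_TRANS_TBL_OFFSET_QUEUE): an odd-numbered
 * queue occupies the high 16 bits, an even-numbered queue the low 16
 * bits, so the read-modify-write above preserves the neighboring
 * queue's mapping.
 */
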
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int txq_id;
	struct iwl_priv *priv = priv(trans);

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(trans, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
	} else {
		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
			     " queue\n", tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	return 0;
}

void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* assumes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int read_ptr, write_ptr;
	struct iwl_tid_data *tid_data;
	int txq_id;

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return -EINVAL;
	}

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(trans, "Stopping AGG while state not ON"
			 " or starting\n");
	}

	write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
	read_ptr = trans_pcie->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
		trans->shrd->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(trans, "HW queue is empty\n");
turn_off:
	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&trans->shrd->sta_lock);
	spin_lock(&trans->shrd->lock);

	iwl_trans_pcie_txq_agg_disable(trans, txq_id);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);

	return 0;
}

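/*
 * Aggregation teardown state transitions handled above (sketch):
 *
 *   IWL_AGG_ON ------------- HW queue empty ------------> IWL_AGG_OFF
 *        |                                                     ^
 *        +-- HW queue not empty --> IWL_EMPTYING_HW_QUEUE_DELBA
 *                                   (the reclaim path finishes the stop)
 *
 *   IWL_EMPTYING_HW_QUEUE_ADDBA -- stop before setup done --> IWL_AGG_OFF
 */
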
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}

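/*
 * The 16-bit sequence field ties a firmware response back to its queue
 * slot.  Assuming the usual iwlagn layout (queue id in bits 8..12,
 * descriptor index in bits 0..7), a command placed in cmd_queue 4 at
 * write_ptr 5 carries sequence 0x0405, and iwl_tx_cmd_complete below
 * recovers both halves via SEQ_TO_QUEUE()/SEQ_TO_INDEX().
 */
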
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans(priv));
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			"index %d is out of range [0-%d] %d %d.\n", __func__,
			txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans *trans = trans(priv);
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}

const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TXFIFO_FLUSH);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
		IWL_CMD(COEX_EVENT_CMD);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
		IWL_CMD(CALIBRATION_CFG_CMD);
		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
		IWL_CMD(TEMPERATURE_NOTIFICATION);
		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
		IWL_CMD(REPLY_WIPAN_PARAMS);
		IWL_CMD(REPLY_WIPAN_RXON);
		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
		IWL_CMD(REPLY_WIPAN_WEPKEY);
		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
		IWL_CMD(REPLY_WOWLAN_PATTERNS);
		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
	default:
		return "UNKNOWN";
	}
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_generic_cmd_callback(struct iwl_priv *priv,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
				  get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
			     get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	/* A synchronous command can not have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(priv(trans)->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				       " %s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
				u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_trans_pcie_send_cmd(trans, &cmd);
}

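/*
 * Typical caller usage (sketch; the command struct is shown for
 * illustration only):
 *
 *	struct iwl_bt_cmd bt_cmd = { ... };
 *
 *	ret = iwl_trans_pcie_send_cmd_pdu(trans, REPLY_BT_CONFIG,
 *					  CMD_SYNC, sizeof(bt_cmd), &bt_cmd);
 *
 * The wrapper builds a one-fragment iwl_host_cmd; CMD_ASYNC in 'flags'
 * routes it through iwl_send_cmd_async instead of the blocking path.
 */
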
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used. */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			"last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
		freed++;
	}
	return freed;
}