/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-op-mode.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
 *
 *	VO	0
 *	VI	1
 *	BE	2
 *	BK	3
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination), such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific code), the AC->hw
 * queue mapping is the identity mapping.
 */

static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};
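
/*
 * Illustration only (not part of the driver logic): because the AC->hw queue
 * mapping is the identity, a voice frame with TID 6 is handled as
 *
 *	ac       = tid_to_ac[6];			-> IEEE80211_AC_VO
 *	hw_queue = ac;					identity mapping
 *	fifo     = trans_pcie->ac_to_fifo[ctx][ac];	per-context AC->FIFO table
 *
 * where ctx stands for whichever RXON context the frame belongs to.
 */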

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
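
/*
 * Worked example (illustration only, values assumed): a 64-byte frame for
 * station 2 with no security overhead gives
 *
 *	len    = 64 + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE = 72
 *	bc_ent = cpu_to_le16((72 & 0xFFF) | (2 << 12)) = cpu_to_le16(0x2048)
 *
 * i.e. the low 12 bits carry the byte count and the top 4 bits the station
 * id; the entry is mirrored for the first TFD_QUEUE_SIZE_BC_DUP slots.
 */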

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (cfg(trans)->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}

	txq->need_update = 0;
}
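
/*
 * Illustration only: the value written to HBUS_TARG_WRPTR places the hw
 * queue number in bits 8 and above, over the 8-bit write pointer. For hw
 * queue 4 with write_ptr == 12 the register value would be
 *
 *	12 | (4 << 8) = 0x040C
 */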

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
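
/*
 * Worked example (illustration only, assuming a 36-bit dma_addr_t): for
 * addr = 0x8_1234_5678 and len = 48, iwl_tfd_set_tb() stores
 *
 *	tb->lo       = 0x12345678 (little endian)	low 32 address bits
 *	tb->hi_n_len = (48 << 4) | 0x8 = 0x0308		len above addr bits 35:32
 *
 * and iwl_tfd_tb_get_addr()/iwl_tfd_tb_get_len() reverse exactly this
 * packing: 4 high address bits in the low nibble, 12-bit length above it.
 */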

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	lockdep_assert_held(&txq->lock);

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb != NULL) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			   (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;

	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
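
/*
 * Worked example (illustration only, assuming a data queue where n_bd and
 * n_window are both 256): with read_ptr = 10 and write_ptr = 200,
 *
 *	s = 10 - 200 = -190	read_ptr < write_ptr, so no n_bd correction
 *	s += 256		-> 66
 *	s -= 2			-> 64 free slots reported
 *
 * so the 2-entry reserve keeps a completely full ring from looking empty.
 */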

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
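
/*
 * Illustration only (slots_num value assumed): for a command queue
 * initialized with slots_num = 32 this yields low_mark = 32 / 4 = 8 and
 * high_mark = 32 / 8 = 4, both already above their clamping floors.
 */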

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}
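
/*
 * Illustration only: each 32-bit word of the scheduler translation table
 * holds the RA/TID mapping for two queues, so the read-modify-write above
 * preserves the other queue's halfword. For example, for txq_id = 11 (odd)
 * only bits 31:16 of tbl_dw are replaced, while bits 15:0 keep the mapping
 * of queue 10.
 */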

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
			   int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id);
}

static inline int get_ac_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return tid_to_ac[tid];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
{
	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
		return false;
	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues);
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit, u16 ssn)
{
	int tx_fifo, txq_id;
	u16 ra_tid;
	unsigned long flags;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	txq_id = trans_pcie->agg_txq[sta_id][tid];
	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
	     txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				int sta_id, int tid)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	trans_pcie->agg_txq[sta_id][tid] = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	return 0;
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);

	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}
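
/*
 * Worked example (illustration only, command queue number assumed): with
 * trans_pcie->cmd_queue == 4 and q->write_ptr == 7, the header sequence
 * built above is
 *
 *	QUEUE_TO_SEQ(4) | INDEX_TO_SEQ(7) = (4 << 8) | 7 = 0x0407
 *
 * which iwl_tx_cmd_complete() later splits back apart with
 * SEQ_TO_QUEUE()/SEQ_TO_INDEX() to find the originating queue entry.
 */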

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans_pcie->cmd_queue, sequence,
		  trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = hw_params(trans).rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->shrd->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		return -EIO;
	}

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans->shrd->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			get_cmd_string(cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				       "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		/* release the response page before dropping the pointer */
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}
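
/*
 * Usage sketch (hypothetical caller, not part of this file): a typical
 * synchronous host command with one copied chunk could be built as
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_TX_POWER_DBM_CMD,		any valid command id
 *		.flags = CMD_SYNC,
 *		.data = { &tx_power_cmd, },
 *		.len = { sizeof(tx_power_cmd), },
 *	};
 *
 *	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 *
 * Setting CMD_ASYNC in .flags routes it through iwl_send_cmd_async()
 * instead, in which case CMD_WANT_SKB must not be set.
 */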

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);