/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1, IWL_TX_FIFO_AC0, IWL_TX_FIFO_AC0, IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2, IWL_TX_FIFO_AC2, IWL_TX_FIFO_AC3, IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE, IWL_TX_FIFO_NONE, IWL_TX_FIFO_NONE, IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE, IWL_TX_FIFO_NONE, IWL_TX_FIFO_NONE, IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
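
/*
 * Worked example of the TB packing above (illustrative numbers, not from
 * the original source): for a 36-bit DMA address 0x987654320 and len 0x38,
 * iwl_tfd_set_tb() stores tb->lo = 0x87654320 and
 * hi_n_len = (0x38 << 4) | 0x9 = 0x389; the decoders then recover
 * len = 0x389 >> 4 = 0x38 and splice address bits 35:32 (0x9) back in.
 */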
/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
			pci_unmap_addr(&txq->cmd[index]->meta, mapping),
			pci_unmap_len(&txq->cmd[index]->meta, len),
			PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++) {
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

		dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
		txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
	}
}
static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
					struct iwl_tfd *tfd,
					dma_addr_t addr, u16 len)
{
	u32 num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERROR("Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERROR("Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		/* restore this queue's parameters in nic hardware. */
		ret = iwl_grab_nic_access(priv);
		if (ret)
			return ret;
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
		iwl_release_nic_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
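
/*
 * Note on the HBUS_TARG_WRPTR encoding above (illustrative example, not
 * from the original source): the queue id is carried in bits 8 and up and
 * the write index in the low byte, so txq_id 4 with write_ptr 0x2a is
 * written to the register as (0x2a | (4 << 8)) == 0x42a.
 */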
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int len;

	if (q->n_bd == 0)
		return;

	len = sizeof(struct iwl_cmd) * q->n_window;
	len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done' IRQ), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
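
/*
 * Worked example for iwl_queue_space() (illustrative numbers, not from the
 * original source): with n_bd == n_window == 256, read_ptr == 10 and
 * write_ptr == 250, s starts at 10 - 250 = -240, gains n_window to reach
 * 16, and loses the 2-entry reserve, leaving 14 free slots.
 */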
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
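
/*
 * Example of the water marks set above (illustrative, not from the
 * original source): a data queue with slots_num == 256 gets
 * low_mark == 64 and high_mark == 32; the 4/2 clamps only matter for
 * small windows such as a 32-slot command queue (low_mark == 8,
 * high_mark == 4).
 */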
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev,
			sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);

	if (!txq->tfds) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}
/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
				struct iwl_tx_queue *txq)
{
	int ret;
	unsigned long flags;
	int txq_id = txq->q.id;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			     int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd);
	for (i = 0; i <= slots_num; i++) {
		if (i == slots_num) {
			if (txq_id == IWL_CMD_QUEUE_NUM)
				len += IWL_MAX_SCAN_SIZE;
			else
				continue;
		}

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl_hw_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++) {
		kfree(txq->cmd[i]);
		txq->cmd[i] = NULL;
	}

	if (txq_id == IWL_CMD_QUEUE_NUM) {
		kfree(txq->cmd[slots_num]);
		txq->cmd[slots_num] = NULL;
	}
	return -ENOMEM;
}
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == IWL_CMD_QUEUE_NUM)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERROR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERROR("Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
 error_reset:
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}
/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);
/*
 * handle building the REPLY_TX command notification.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int sta_id,
				  int is_hcca)
{
	u32 rate_flags = 0;
	int rate_idx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
		rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}
static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
	/* 0 - mgmt, 1 - cnt, 2 - data */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;

	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
}
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tfd *tfd;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_cmd *out_cmd;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERROR("ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif

	/* drop all data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
	    !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop;
	}

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	swq_id = skb_get_queue_mapping(skb);
	txq_id = swq_id;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;
	txq->swq_id = swq_id;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->tfds[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    out_cmd, sizeof(struct iwl_cmd),
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	txcmd_phys += offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			ieee80211_stop_queue(priv->hw, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
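
/*
 * Note on the dword-padding logic in iwl_tx_skb() (illustrative numbers,
 * not from the original source): for a QoS data header of 26 bytes the
 * command+header length lands 2 bytes short of a dword boundary, so it is
 * rounded up, len_org becomes 1, and TX_CMD_FLG_MH_PAD_MSK tells the
 * device to skip the two pad bytes it would otherwise read as payload.
 */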
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to device private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd *tfd;
	struct iwl_cmd *out_cmd;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->tfds[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = (idx == TFD_CMD_SLOTS) ?
			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);

	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
				   len, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
	pci_unmap_len_set(&out_cmd->meta, len, len);
	phys_addr += offsetof(struct iwl_cmd, hdr);

	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
				  "%d bytes at %d[%d]:%d\n",
				  get_cmd_string(out_cmd->hdr.cmd),
				  out_cmd->hdr.cmd,
				  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
			     "%d bytes at %d[%d]:%d\n",
			     get_cmd_string(out_cmd->hdr.cmd),
			     out_cmd->hdr.cmd,
			     le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			     q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
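
/*
 * Note on the 'huge' command slot used above (illustrative, not from the
 * original source): get_cmd_index() maps a CMD_SIZE_HUGE command to the
 * extra slot at idx == TFD_CMD_SLOTS, the only one allocated with
 * IWL_MAX_SCAN_SIZE bytes, which is why len is chosen by that comparison.
 */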
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		iwl_hw_txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	pci_unmap_single(priv->pci_dev,
		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
		PCI_DMA_BIDIRECTIONAL);

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx,
				  q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_cmd *cmd;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		 txq_id, sequence,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_dump(priv, IWL_DL_INFO, rxb, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (cmd->meta.flags & CMD_WANT_SKB) {
		cmd->meta.source->u.skb = rxb->skb;
		rxb->skb = NULL;
	} else if (cmd->meta.u.callback &&
		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(cmd->meta.flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARNING("%s on ra = %pM tid = %d\n",
			__func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		printk(KERN_ERR "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);
int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERROR("ra = NULL\n");
		return -EINVAL;
	}

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT("HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERROR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}
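
/*
 * Worked example of the bitmap alignment above (illustrative numbers, not
 * from the original source): if agg->start_idx is 20 and the BA's starting
 * sequence maps to index 16, sh == 4, so the 64-bit BA bitmap is shifted
 * right by 4 bits before being ANDed with the driver's own Tx bitmap;
 * bit i of the result then reports frame (start_idx + i).
 */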
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			ieee80211_wake_queue(priv->hw, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */