/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/tx.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>
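
/*
 * Note: this file implements the driver-side TX queue handling shared
 * between iwlwifi transports, primarily for gen2 (TFH) devices:
 * TFD/TB construction, scheduler byte-count table updates, A-MSDU (TSO)
 * building, and TX queue allocation/teardown. The iwl_txq_gen1_*
 * helpers at the end of the file serve the legacy TFD format.
 */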
/*
 * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
{
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
		if (!trans->txqs.txq[txq_id])
			continue;
		iwl_txq_gen2_unmap(trans, txq_id);
	}
}
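
/*
 * The scheduler byte-count table tells the HW, per TFD index, the frame
 * length and how many 64-byte chunks of the TFD to fetch from SRAM. As
 * a worked example of the encoding below: a TFD holding only 3 TBs uses
 * roughly 32 bytes, so DIV_ROUND_UP(32, 64) - 1 = 0, i.e. a single
 * chunk; the chunk count is packed into the top bits of the 16-bit
 * entry (shifted by 14 on AX210+, by 12 on earlier gen2 devices).
 */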
/*
 * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

		/* Starting from AX210, the HW expects bytes */
		WARN_ON(trans->txqs.bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
	} else {
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

		/* Before AX210, the HW expects DW */
		WARN_ON(!trans->txqs.bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}
/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}
static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
				   struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	/* free SKB */
	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}
int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	/*
	 * Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}
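
/*
 * Workaround pages allocated below are chained per-skb: the last
 * sizeof(void *) bytes of each page hold a pointer to the previous
 * page, and the head of the chain lives in skb->cb at
 * trans->txqs.page_offs. The chain is torn down in
 * iwl_txq_free_tso_page() when the frame is reclaimed.
 */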
static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *ret;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	/* set the chaining pointer to the previous page if there */
	*(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
	*page_ptr = ret;

	return ret;
}
/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
				       struct sk_buff *skb,
				       struct iwl_tfh_tfd *tfd,
				       dma_addr_t phys, void *virt,
				       u16 len, struct iwl_cmd_meta *meta)
{
	dma_addr_t oldphys = phys;
	struct page *page;
	int ret;

	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;

	if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
		ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);

		if (ret < 0)
			goto unmap;

		if (meta)
			meta->tbs |= BIT(ret);

		ret = 0;
		goto trace;
	}

	/*
	 * Work around a hardware bug. If (as expressed in the
	 * condition above) the TB ends on a 32-bit boundary,
	 * then the next TB may be accessed with the wrong
	 * address.
	 * To work around it, copy the data elsewhere and make
	 * a new mapping for it so the device will not fail.
	 */

	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
		ret = -ENOBUFS;
		goto unmap;
	}

	page = get_workaround_page(trans, skb);
	if (!page) {
		ret = -ENOMEM;
		goto unmap;
	}

	memcpy(page_address(page), virt, len);

	phys = dma_map_single(trans->dev, page_address(page), len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;
	ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
	if (ret < 0) {
		/* unmap the new allocation as single */
		oldphys = phys;
		meta = NULL;
		goto unmap;
	}
	IWL_WARN(trans,
		 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
		 len, (unsigned long long)oldphys, (unsigned long long)phys);

	ret = 0;
unmap:
	if (meta)
		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

	return ret;
}
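
/*
 * The TSO header page below is a per-CPU scratch page for A-MSDU
 * subframe headers. Space is deliberately left at the tail of the page
 * for the chaining pointer, which also keeps DMA away from the very end
 * of the page and thus from the 32-bit boundary hardware bug described
 * above.
 */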
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb)
{
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	if (WARN_ON(*page_ptr))
		return NULL;

	if (!p->page)
		goto alloc;

	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() in tx-gen2.c)
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	*page_ptr = p->page;
	get_page(p->page);

	return p;
}
#endif
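
/*
 * A-MSDU build outline: each pass of the outer loop below emits one
 * subframe - padding to a 4-byte boundary, a DA/SA/length subframe
 * header, the SNAP/IP/TCP headers generated by the TSO core, and then
 * the payload, which the inner loop maps in pieces of at most tso.size
 * bytes each.
 */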
static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
				    struct sk_buff *skb,
				    struct iwl_tfh_tfd *tfd, int start_len,
				    u8 hdr_len,
				    struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		/*
		 * No need for _with_wa, this is from the TSO page and
		 * we leave some space at the end of it so can't hit
		 * the buggy scenario.
		 */
		iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					tb_phys, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			int ret;

			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
							  tb_phys, tso.data,
							  tb_len, NULL);
			if (ret) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}
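
/*
 * TFD layout used by both build paths below: TB0 points at the small
 * bi-directional scratch buffer (first_tb_bufs, the first
 * IWL_FIRST_TB_SIZE bytes of the command), TB1 covers the rest of the
 * TX command plus the 802.11 header, and the remaining TBs carry the
 * frame payload.
 */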
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
					 struct iwl_txq *txq,
					 struct iwl_device_tx_cmd *dev_cmd,
					 struct sk_buff *skb,
					 struct iwl_cmd_meta *out_meta,
					 int hdr_len,
					 int tx_cmd_len)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
				     hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd,
				     struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		unsigned int fragsz = skb_frag_size(frag);
		int ret;

		if (!fragsz)
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   fragsz, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb_frag_address(frag),
						  fragsz, out_meta);
		if (ret)
			return ret;
	}

	return 0;
}
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
				   struct iwl_txq *txq,
				   struct iwl_device_tx_cmd *dev_cmd,
				   struct sk_buff *skb,
				   struct iwl_cmd_meta *out_meta,
				   int hdr_len,
				   int tx_cmd_len,
				   bool pad)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL);
		if (ret)
			goto out_err;
	}

	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
					   struct iwl_txq *txq,
					   struct iwl_device_tx_cmd *dev_cmd,
					   struct sk_buff *skb,
					   struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						   out_meta, hdr_len, len);
	return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				     hdr_len, len, !amsdu);
}
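
/*
 * Note on the ring arithmetic below: max_tfd_queue_size is a power of
 * two, so the in-use count can be computed with a mask even across the
 * wrap point. For example, with a queue size of 256, write_ptr = 2 and
 * read_ptr = 250 give (2 - 250) & 255 = 8 entries in use.
 */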
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
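
/*
 * iwl_txq_gen2_tx() is the gen2 transmit entry point: validate the
 * queue, build the TFD, update the byte-count table and bump the write
 * pointer. If the queue is nearly full, the frame is parked on
 * overflow_q instead of being placed on the ring.
 */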
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	if (txq->bc_tbl.addr)
		dma_pool_free(trans->txqs.bc_pool,
			      txq->bc_tbl.addr, txq->bc_tbl.dma);
	kfree(txq);
}
/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq;
	int i;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ON(!txq))
		return;

	iwl_txq_gen2_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_txq_gen2_free_memory(trans, txq);

	trans->txqs.txq[txq_id] = NULL;

	clear_bit(txq_id, trans->txqs.queue_used);
}
/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
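
/*
 * Note: the command queue gets a lockdep class of its own below,
 * presumably because its lock can be taken together with a data
 * queue's lock, which would otherwise look like recursive locking
 * to lockdep.
 */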
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_txq_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}
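
/*
 * Walk and free the page chain attached to this skb (TSO header pages
 * and any 4G-boundary workaround pages); see get_page_hdr() and
 * get_workaround_page() for where the chain is built.
 */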
void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		next = *(void **)(page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->trans_cfg->use_tfh) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
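
/*
 * Stuck-queue watchdog: the timer is armed in iwl_txq_gen2_tx() when a
 * frame is placed on an empty queue. If it fires while frames are
 * still pending, the scheduler state is logged and an NMI is forced so
 * the firmware produces an error report.
 */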
static void iwl_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_txq_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue)
{
	size_t tfd_sz = trans->txqs.tfd.size *
		trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	if (trans->trans_cfg->use_tfh)
		tfd_sz = trans->txqs.tfd.size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}
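
/*
 * Dynamic (TVQM) queue allocation is a two-step handshake: allocate
 * the DMA resources (iwl_txq_dyn_alloc_dma), send a queue-config
 * command carrying the ring and byte-count table addresses, then adopt
 * the queue ID that the firmware returns (iwl_txq_alloc_response).
 */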
static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
				 struct iwl_txq **intxq, int size,
				 unsigned int timeout)
{
	size_t bc_tbl_size, bc_tbl_entries;
	struct iwl_txq *txq;
	int ret;

	WARN_ON(!trans->txqs.bc_tbl_size);

	bc_tbl_size = trans->txqs.bc_tbl_size;
	bc_tbl_entries = bc_tbl_size / sizeof(u16);

	if (WARN_ON(size > bc_tbl_entries))
		return -EINVAL;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;

	txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
					  &txq->bc_tbl.dma);
	if (!txq->bc_tbl.addr) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}

	ret = iwl_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	*intxq = txq;
	return 0;

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}
static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
				  struct iwl_host_cmd *hcmd)
{
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans->txqs.queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans->txqs.txq[qid] = txq;
	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}
int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size, unsigned int timeout)
{
	struct iwl_txq *txq = NULL;
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = flags,
		.sta_id = sta_id,
		.tid = tid,
	};
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(cmd) },
		.data = { &cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
	if (ret)
		return ret;

	cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	return iwl_txq_alloc_response(trans, txq, &hcmd);

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
{
	if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
		 "queue %d out of range", queue))
		return;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_txq_gen2_unmap(trans, queue);

	iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]);

	trans->txqs.txq[queue] = NULL;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}
void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
{
	int i;

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
		if (!trans->txqs.txq[i])
			continue;

		iwl_txq_gen2_free(trans, i);
	}
}
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
	struct iwl_txq *queue;
	int ret;

	/* alloc and init the tx queue */
	if (!trans->txqs.txq[txq_id]) {
		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
		if (!queue) {
			IWL_ERR(trans, "Not enough memory for tx queue\n");
			return -ENOMEM;
		}
		trans->txqs.txq[txq_id] = queue;
		ret = iwl_txq_alloc(trans, queue, queue_size, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	} else {
		queue = trans->txqs.txq[txq_id];
	}

	ret = iwl_txq_init(trans, queue, queue_size,
			   (txq_id == trans->txqs.cmd.q_id));
	if (ret) {
		IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
		goto error;
	}
	trans->txqs.txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans->txqs.queue_used);

	return 0;

error:
	iwl_txq_gen2_tx_free(trans);
	return ret;
}
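
/*
 * Legacy (gen1) TFDs store a TB's DMA address as 32 low bits plus 4
 * high bits packed into hi_n_len, i.e. 36-bit addressing. The helper
 * below reassembles it: e.g. lo = 0x12345678 with high nibble 0x3
 * yields 0x312345678.
 */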
static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
						      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;
	dma_addr_t addr;
	dma_addr_t hi_len;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	}

	tfd = _tfd;
	tb = &tfd->tbs[idx];
	addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) <= sizeof(u32))
		return addr;

	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

	/*
	 * shift by 16 twice to avoid warnings on 32-bit
	 * (where this code never runs anyway due to the
	 * if statement above)
	 */
	return addr | ((hi_len << 16) << 16);
}
void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index)
{
	int i, num_tbs;
	void *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(trans,
								    tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(trans,
								      tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
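
/*
 * The gen1 byte-count table duplicates its first TFD_QUEUE_SIZE_BC_DUP
 * entries after the TFD_QUEUE_SIZE_MAX real ones, apparently so the
 * scheduler can read ahead past the wrap point; both helpers below
 * mirror their writes into that duplicate region.
 */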
/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans->txqs.bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
			bc_ent;
}
*trans
,
1507 struct iwl_txq
*txq
)
1509 struct iwlagn_scd_bc_tbl
*scd_bc_tbl
= trans
->txqs
.scd_bc_tbls
.addr
;
1510 int txq_id
= txq
->id
;
1511 int read_ptr
= txq
->read_ptr
;
1514 struct iwl_device_tx_cmd
*dev_cmd
= txq
->entries
[read_ptr
].cmd
;
1515 struct iwl_tx_cmd
*tx_cmd
= (void *)dev_cmd
->payload
;
1517 WARN_ON(read_ptr
>= TFD_QUEUE_SIZE_MAX
);
1519 if (txq_id
!= trans
->txqs
.cmd
.q_id
)
1520 sta_id
= tx_cmd
->sta_id
;
1522 bc_ent
= cpu_to_le16(1 | (sta_id
<< 12));
1524 scd_bc_tbl
[txq_id
].tfd_offset
[read_ptr
] = bc_ent
;
1526 if (read_ptr
< TFD_QUEUE_SIZE_BC_DUP
)
1527 scd_bc_tbl
[txq_id
].tfd_offset
[TFD_QUEUE_SIZE_MAX
+ read_ptr
] =