/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>

#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
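
/* Tx queue depth is reported to firmware as an 8-bit value made of a 6-bit
 * factor and a 2-bit exponent, i.e. roughly bytes ~= factor * 128 * 8^exp,
 * saturating at 0xff. For example, a 16000 byte backlog gives factor 125,
 * which exceeds the 6-bit maximum, so it is rescaled to factor 15 with
 * exp 1: ~15 * 128 * 8 = 15360 bytes.
 */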
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (factor >= 64)
		factor = 63;

	factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}
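
/* Recompute a single txq's entry in the queue state that is shared with
 * firmware in push-pull mode: store the encoded depth for the (peer, tid)
 * pair and set or clear the peer's bit in the per-tid map. Callers must
 * hold htt.tx_lock.
 */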
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}
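
/* num_pending_tx counts in-flight HTT tx descriptors. The mac tx queues
 * are stopped when the pool is exhausted and restarted as soon as a slot
 * frees up again.
 */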
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}
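
/* msdu ids come from an IDR that maps id -> skb so completions can look the
 * frame up again. GFP_ATOMIC is used because the allocation happens under
 * the tx_lock spinlock.
 */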
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr,
			  htt->frag_desc.paddr);
}

static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_KERNEL | __GFP_ZERO);
	if (!htt->frag_desc.vaddr) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}
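
/* Allocate all HTT tx state: the msdu id IDR, the per-msdu tx buffers, the
 * optional continuous fragment descriptors, the tx queue state and the
 * txdone fifo. The error path unwinds in reverse order.
 */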
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_KERNEL | __GFP_ZERO);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	tasklet_kill(&htt->txrx_compl_task);

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_tx_free_cont_frag_desc(htt);
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
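
/* The H2T command helpers below share a common shape: compute the command
 * length from struct htt_cmd members, allocate an HTC skb, fill in the
 * header and payload, send it and free the skb manually on failure.
 */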
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
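
/* Tell firmware where the continuous fragment descriptor bank lives and,
 * when peer flow control is in use, where the tx queue state is mapped.
 */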
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
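
/* Describe the rx ring to firmware, including where each section of the rx
 * descriptor starts, expressed as an offset in 4-byte words (see the
 * desc_offset() helper below).
 */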
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}
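
/* Pick the vdev for a frame: off-channel frames use the scan vdev, frames
 * with a vif use that vif's vdev, and monitor mode is the last resort
 * before falling back to vdev 0.
 */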
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
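
/* Management tx path: the frame is DMA mapped and referenced from a
 * separate HTT_H2T_MSG_TYPE_MGMT_TX command that is sent through HTC.
 */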
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}
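
/* Data tx path: the prebuilt per-msdu tx buffer (HTC header plus HTT
 * command) and a prefix of the msdu itself are handed to the HIF layer as
 * a two element scatter-gather list, bypassing the HTC tx path entirely.
 */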
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}