// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */
16 #include <linux/log2.h>
17 #include <linux/bitfield.h>
19 /* when under memory pressure rx ring refill may fail and needs a retry */
20 #define HTT_RX_RING_REFILL_RETRY_MS 50
22 #define HTT_RX_RING_REFILL_RESCHED_MS 5
24 static int ath10k_htt_rx_get_csum_state(struct sk_buff
*skb
);
26 static struct sk_buff
*
27 ath10k_htt_rx_find_skb_paddr(struct ath10k
*ar
, u64 paddr
)
29 struct ath10k_skb_rxcb
*rxcb
;
31 hash_for_each_possible(ar
->htt
.rx_ring
.skb_table
, rxcb
, hlist
, paddr
)
32 if (rxcb
->paddr
== paddr
)
33 return ATH10K_RXCB_SKB(rxcb
);
39 static void ath10k_htt_rx_ring_free(struct ath10k_htt
*htt
)
42 struct ath10k_skb_rxcb
*rxcb
;
46 if (htt
->rx_ring
.in_ord_rx
) {
47 hash_for_each_safe(htt
->rx_ring
.skb_table
, i
, n
, rxcb
, hlist
) {
48 skb
= ATH10K_RXCB_SKB(rxcb
);
49 dma_unmap_single(htt
->ar
->dev
, rxcb
->paddr
,
50 skb
->len
+ skb_tailroom(skb
),
52 hash_del(&rxcb
->hlist
);
53 dev_kfree_skb_any(skb
);
56 for (i
= 0; i
< htt
->rx_ring
.size
; i
++) {
57 skb
= htt
->rx_ring
.netbufs_ring
[i
];
61 rxcb
= ATH10K_SKB_RXCB(skb
);
62 dma_unmap_single(htt
->ar
->dev
, rxcb
->paddr
,
63 skb
->len
+ skb_tailroom(skb
),
65 dev_kfree_skb_any(skb
);
69 htt
->rx_ring
.fill_cnt
= 0;
70 hash_init(htt
->rx_ring
.skb_table
);
71 memset(htt
->rx_ring
.netbufs_ring
, 0,
72 htt
->rx_ring
.size
* sizeof(htt
->rx_ring
.netbufs_ring
[0]));
75 static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt
*htt
)
77 return htt
->rx_ring
.size
* sizeof(htt
->rx_ring
.paddrs_ring_32
);
80 static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt
*htt
)
82 return htt
->rx_ring
.size
* sizeof(htt
->rx_ring
.paddrs_ring_64
);
85 static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt
*htt
,
88 htt
->rx_ring
.paddrs_ring_32
= vaddr
;
91 static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt
*htt
,
94 htt
->rx_ring
.paddrs_ring_64
= vaddr
;
97 static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt
*htt
,
98 dma_addr_t paddr
, int idx
)
100 htt
->rx_ring
.paddrs_ring_32
[idx
] = __cpu_to_le32(paddr
);
103 static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt
*htt
,
104 dma_addr_t paddr
, int idx
)
106 htt
->rx_ring
.paddrs_ring_64
[idx
] = __cpu_to_le64(paddr
);
109 static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt
*htt
, int idx
)
111 htt
->rx_ring
.paddrs_ring_32
[idx
] = 0;
114 static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt
*htt
, int idx
)
116 htt
->rx_ring
.paddrs_ring_64
[idx
] = 0;
119 static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt
*htt
)
121 return (void *)htt
->rx_ring
.paddrs_ring_32
;
124 static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt
*htt
)
126 return (void *)htt
->rx_ring
.paddrs_ring_64
;
129 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt
*htt
, int num
)
131 struct htt_rx_desc
*rx_desc
;
132 struct ath10k_skb_rxcb
*rxcb
;
137 /* The Full Rx Reorder firmware has no way of telling the host
138 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
139 * To keep things simple make sure ring is always half empty. This
140 * guarantees there'll be no replenishment overruns possible.
142 BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL
>= HTT_RX_RING_SIZE
/ 2);
144 idx
= __le32_to_cpu(*htt
->rx_ring
.alloc_idx
.vaddr
);
146 if (idx
< 0 || idx
>= htt
->rx_ring
.size
) {
147 ath10k_err(htt
->ar
, "rx ring index is not valid, firmware malfunctioning?\n");
148 idx
&= htt
->rx_ring
.size_mask
;
154 skb
= dev_alloc_skb(HTT_RX_BUF_SIZE
+ HTT_RX_DESC_ALIGN
);
160 if (!IS_ALIGNED((unsigned long)skb
->data
, HTT_RX_DESC_ALIGN
))
162 PTR_ALIGN(skb
->data
, HTT_RX_DESC_ALIGN
) -
165 /* Clear rx_desc attention word before posting to Rx ring */
166 rx_desc
= (struct htt_rx_desc
*)skb
->data
;
167 rx_desc
->attention
.flags
= __cpu_to_le32(0);
169 paddr
= dma_map_single(htt
->ar
->dev
, skb
->data
,
170 skb
->len
+ skb_tailroom(skb
),
173 if (unlikely(dma_mapping_error(htt
->ar
->dev
, paddr
))) {
174 dev_kfree_skb_any(skb
);
179 rxcb
= ATH10K_SKB_RXCB(skb
);
181 htt
->rx_ring
.netbufs_ring
[idx
] = skb
;
182 ath10k_htt_set_paddrs_ring(htt
, paddr
, idx
);
183 htt
->rx_ring
.fill_cnt
++;
185 if (htt
->rx_ring
.in_ord_rx
) {
186 hash_add(htt
->rx_ring
.skb_table
,
187 &ATH10K_SKB_RXCB(skb
)->hlist
,
193 idx
&= htt
->rx_ring
.size_mask
;
198 * Make sure the rx buffer is updated before available buffer
199 * index to avoid any potential rx ring corruption.
202 *htt
->rx_ring
.alloc_idx
.vaddr
= __cpu_to_le32(idx
);
206 static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt
*htt
, int num
)
208 lockdep_assert_held(&htt
->rx_ring
.lock
);
209 return __ath10k_htt_rx_ring_fill_n(htt
, num
);
212 static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt
*htt
)
214 int ret
, num_deficit
, num_to_fill
;
216 /* Refilling the whole RX ring buffer proves to be a bad idea. The
217 * reason is RX may take up significant amount of CPU cycles and starve
218 * other tasks, e.g. TX on an ethernet device while acting as a bridge
219 * with ath10k wlan interface. This ended up with very poor performance
220 * once CPU the host system was overwhelmed with RX on ath10k.
222 * By limiting the number of refills the replenishing occurs
223 * progressively. This in turns makes use of the fact tasklets are
224 * processed in FIFO order. This means actual RX processing can starve
225 * out refilling. If there's not enough buffers on RX ring FW will not
226 * report RX until it is refilled with enough buffers. This
227 * automatically balances load wrt to CPU power.
229 * This probably comes at a cost of lower maximum throughput but
230 * improves the average and stability.
232 spin_lock_bh(&htt
->rx_ring
.lock
);
233 num_deficit
= htt
->rx_ring
.fill_level
- htt
->rx_ring
.fill_cnt
;
234 num_to_fill
= min(ATH10K_HTT_MAX_NUM_REFILL
, num_deficit
);
235 num_deficit
-= num_to_fill
;
236 ret
= ath10k_htt_rx_ring_fill_n(htt
, num_to_fill
);
237 if (ret
== -ENOMEM
) {
239 * Failed to fill it to the desired level -
240 * we'll start a timer and try again next time.
241 * As long as enough buffers are left in the ring for
242 * another A-MPDU rx, no special recovery is needed.
244 mod_timer(&htt
->rx_ring
.refill_retry_timer
, jiffies
+
245 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS
));
246 } else if (num_deficit
> 0) {
247 mod_timer(&htt
->rx_ring
.refill_retry_timer
, jiffies
+
248 msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS
));
250 spin_unlock_bh(&htt
->rx_ring
.lock
);
253 static void ath10k_htt_rx_ring_refill_retry(struct timer_list
*t
)
255 struct ath10k_htt
*htt
= from_timer(htt
, t
, rx_ring
.refill_retry_timer
);
257 ath10k_htt_rx_msdu_buff_replenish(htt
);
260 int ath10k_htt_rx_ring_refill(struct ath10k
*ar
)
262 struct ath10k_htt
*htt
= &ar
->htt
;
265 if (ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
)
268 spin_lock_bh(&htt
->rx_ring
.lock
);
269 ret
= ath10k_htt_rx_ring_fill_n(htt
, (htt
->rx_ring
.fill_level
-
270 htt
->rx_ring
.fill_cnt
));
273 ath10k_htt_rx_ring_free(htt
);
275 spin_unlock_bh(&htt
->rx_ring
.lock
);
280 void ath10k_htt_rx_free(struct ath10k_htt
*htt
)
282 if (htt
->ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
)
285 del_timer_sync(&htt
->rx_ring
.refill_retry_timer
);
287 skb_queue_purge(&htt
->rx_msdus_q
);
288 skb_queue_purge(&htt
->rx_in_ord_compl_q
);
289 skb_queue_purge(&htt
->tx_fetch_ind_q
);
291 spin_lock_bh(&htt
->rx_ring
.lock
);
292 ath10k_htt_rx_ring_free(htt
);
293 spin_unlock_bh(&htt
->rx_ring
.lock
);
295 dma_free_coherent(htt
->ar
->dev
,
296 ath10k_htt_get_rx_ring_size(htt
),
297 ath10k_htt_get_vaddr_ring(htt
),
298 htt
->rx_ring
.base_paddr
);
300 dma_free_coherent(htt
->ar
->dev
,
301 sizeof(*htt
->rx_ring
.alloc_idx
.vaddr
),
302 htt
->rx_ring
.alloc_idx
.vaddr
,
303 htt
->rx_ring
.alloc_idx
.paddr
);
305 kfree(htt
->rx_ring
.netbufs_ring
);
308 static inline struct sk_buff
*ath10k_htt_rx_netbuf_pop(struct ath10k_htt
*htt
)
310 struct ath10k
*ar
= htt
->ar
;
312 struct sk_buff
*msdu
;
314 lockdep_assert_held(&htt
->rx_ring
.lock
);
316 if (htt
->rx_ring
.fill_cnt
== 0) {
317 ath10k_warn(ar
, "tried to pop sk_buff from an empty rx ring\n");
321 idx
= htt
->rx_ring
.sw_rd_idx
.msdu_payld
;
322 msdu
= htt
->rx_ring
.netbufs_ring
[idx
];
323 htt
->rx_ring
.netbufs_ring
[idx
] = NULL
;
324 ath10k_htt_reset_paddrs_ring(htt
, idx
);
327 idx
&= htt
->rx_ring
.size_mask
;
328 htt
->rx_ring
.sw_rd_idx
.msdu_payld
= idx
;
329 htt
->rx_ring
.fill_cnt
--;
331 dma_unmap_single(htt
->ar
->dev
,
332 ATH10K_SKB_RXCB(msdu
)->paddr
,
333 msdu
->len
+ skb_tailroom(msdu
),
335 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt rx netbuf pop: ",
336 msdu
->data
, msdu
->len
+ skb_tailroom(msdu
));
341 /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
342 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt
*htt
,
343 struct sk_buff_head
*amsdu
)
345 struct ath10k
*ar
= htt
->ar
;
346 int msdu_len
, msdu_chaining
= 0;
347 struct sk_buff
*msdu
;
348 struct htt_rx_desc
*rx_desc
;
350 lockdep_assert_held(&htt
->rx_ring
.lock
);
353 int last_msdu
, msdu_len_invalid
, msdu_chained
;
355 msdu
= ath10k_htt_rx_netbuf_pop(htt
);
357 __skb_queue_purge(amsdu
);
361 __skb_queue_tail(amsdu
, msdu
);
363 rx_desc
= (struct htt_rx_desc
*)msdu
->data
;
365 /* FIXME: we must report msdu payload since this is what caller
368 skb_put(msdu
, offsetof(struct htt_rx_desc
, msdu_payload
));
369 skb_pull(msdu
, offsetof(struct htt_rx_desc
, msdu_payload
));
372 * Sanity check - confirm the HW is finished filling in the
374 * If the HW and SW are working correctly, then it's guaranteed
375 * that the HW's MAC DMA is done before this point in the SW.
376 * To prevent the case that we handle a stale Rx descriptor,
377 * just assert for now until we have a way to recover.
379 if (!(__le32_to_cpu(rx_desc
->attention
.flags
)
380 & RX_ATTENTION_FLAGS_MSDU_DONE
)) {
381 __skb_queue_purge(amsdu
);
385 msdu_len_invalid
= !!(__le32_to_cpu(rx_desc
->attention
.flags
)
386 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR
|
387 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR
));
388 msdu_len
= MS(__le32_to_cpu(rx_desc
->msdu_start
.common
.info0
),
389 RX_MSDU_START_INFO0_MSDU_LENGTH
);
390 msdu_chained
= rx_desc
->frag_info
.ring2_more_count
;
392 if (msdu_len_invalid
)
396 skb_put(msdu
, min(msdu_len
, HTT_RX_MSDU_SIZE
));
397 msdu_len
-= msdu
->len
;
399 /* Note: Chained buffers do not contain rx descriptor */
400 while (msdu_chained
--) {
401 msdu
= ath10k_htt_rx_netbuf_pop(htt
);
403 __skb_queue_purge(amsdu
);
407 __skb_queue_tail(amsdu
, msdu
);
409 skb_put(msdu
, min(msdu_len
, HTT_RX_BUF_SIZE
));
410 msdu_len
-= msdu
->len
;
414 last_msdu
= __le32_to_cpu(rx_desc
->msdu_end
.common
.info0
) &
415 RX_MSDU_END_INFO0_LAST_MSDU
;
417 trace_ath10k_htt_rx_desc(ar
, &rx_desc
->attention
,
418 sizeof(*rx_desc
) - sizeof(u32
));
424 if (skb_queue_empty(amsdu
))
428 * Don't refill the ring yet.
430 * First, the elements popped here are still in use - it is not
431 * safe to overwrite them until the matching call to
432 * mpdu_desc_list_next. Second, for efficiency it is preferable to
433 * refill the rx ring with 1 PPDU's worth of rx buffers (something
434 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
435 * (something like 3 buffers). Consequently, we'll rely on the txrx
436 * SW to tell us when it is done pulling all the PPDU's rx buffers
437 * out of the rx ring, and then refill it just once.
440 return msdu_chaining
;
443 static struct sk_buff
*ath10k_htt_rx_pop_paddr(struct ath10k_htt
*htt
,
446 struct ath10k
*ar
= htt
->ar
;
447 struct ath10k_skb_rxcb
*rxcb
;
448 struct sk_buff
*msdu
;
450 lockdep_assert_held(&htt
->rx_ring
.lock
);
452 msdu
= ath10k_htt_rx_find_skb_paddr(ar
, paddr
);
456 rxcb
= ATH10K_SKB_RXCB(msdu
);
457 hash_del(&rxcb
->hlist
);
458 htt
->rx_ring
.fill_cnt
--;
460 dma_unmap_single(htt
->ar
->dev
, rxcb
->paddr
,
461 msdu
->len
+ skb_tailroom(msdu
),
463 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt rx netbuf pop: ",
464 msdu
->data
, msdu
->len
+ skb_tailroom(msdu
));
469 static inline void ath10k_htt_append_frag_list(struct sk_buff
*skb_head
,
470 struct sk_buff
*frag_list
,
471 unsigned int frag_len
)
473 skb_shinfo(skb_head
)->frag_list
= frag_list
;
474 skb_head
->data_len
= frag_len
;
475 skb_head
->len
+= skb_head
->data_len
;
478 static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt
*htt
,
479 struct sk_buff
*msdu
,
480 struct htt_rx_in_ord_msdu_desc
**msdu_desc
)
482 struct ath10k
*ar
= htt
->ar
;
484 struct sk_buff
*frag_buf
;
485 struct sk_buff
*prev_frag_buf
;
487 struct htt_rx_in_ord_msdu_desc
*ind_desc
= *msdu_desc
;
488 struct htt_rx_desc
*rxd
;
489 int amsdu_len
= __le16_to_cpu(ind_desc
->msdu_len
);
491 rxd
= (void *)msdu
->data
;
492 trace_ath10k_htt_rx_desc(ar
, rxd
, sizeof(*rxd
));
494 skb_put(msdu
, sizeof(struct htt_rx_desc
));
495 skb_pull(msdu
, sizeof(struct htt_rx_desc
));
496 skb_put(msdu
, min(amsdu_len
, HTT_RX_MSDU_SIZE
));
497 amsdu_len
-= msdu
->len
;
499 last_frag
= ind_desc
->reserved
;
502 ath10k_warn(ar
, "invalid amsdu len %u, left %d",
503 __le16_to_cpu(ind_desc
->msdu_len
),
510 paddr
= __le32_to_cpu(ind_desc
->msdu_paddr
);
511 frag_buf
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
513 ath10k_warn(ar
, "failed to pop frag-1 paddr: 0x%x", paddr
);
517 skb_put(frag_buf
, min(amsdu_len
, HTT_RX_BUF_SIZE
));
518 ath10k_htt_append_frag_list(msdu
, frag_buf
, amsdu_len
);
520 amsdu_len
-= frag_buf
->len
;
521 prev_frag_buf
= frag_buf
;
522 last_frag
= ind_desc
->reserved
;
525 paddr
= __le32_to_cpu(ind_desc
->msdu_paddr
);
526 frag_buf
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
528 ath10k_warn(ar
, "failed to pop frag-n paddr: 0x%x",
530 prev_frag_buf
->next
= NULL
;
534 skb_put(frag_buf
, min(amsdu_len
, HTT_RX_BUF_SIZE
));
535 last_frag
= ind_desc
->reserved
;
536 amsdu_len
-= frag_buf
->len
;
538 prev_frag_buf
->next
= frag_buf
;
539 prev_frag_buf
= frag_buf
;
543 ath10k_warn(ar
, "invalid amsdu len %u, left %d",
544 __le16_to_cpu(ind_desc
->msdu_len
), amsdu_len
);
547 *msdu_desc
= ind_desc
;
549 prev_frag_buf
->next
= NULL
;
554 ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt
*htt
,
555 struct sk_buff
*msdu
,
556 struct htt_rx_in_ord_msdu_desc_ext
**msdu_desc
)
558 struct ath10k
*ar
= htt
->ar
;
560 struct sk_buff
*frag_buf
;
561 struct sk_buff
*prev_frag_buf
;
563 struct htt_rx_in_ord_msdu_desc_ext
*ind_desc
= *msdu_desc
;
564 struct htt_rx_desc
*rxd
;
565 int amsdu_len
= __le16_to_cpu(ind_desc
->msdu_len
);
567 rxd
= (void *)msdu
->data
;
568 trace_ath10k_htt_rx_desc(ar
, rxd
, sizeof(*rxd
));
570 skb_put(msdu
, sizeof(struct htt_rx_desc
));
571 skb_pull(msdu
, sizeof(struct htt_rx_desc
));
572 skb_put(msdu
, min(amsdu_len
, HTT_RX_MSDU_SIZE
));
573 amsdu_len
-= msdu
->len
;
575 last_frag
= ind_desc
->reserved
;
578 ath10k_warn(ar
, "invalid amsdu len %u, left %d",
579 __le16_to_cpu(ind_desc
->msdu_len
),
586 paddr
= __le64_to_cpu(ind_desc
->msdu_paddr
);
587 frag_buf
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
589 ath10k_warn(ar
, "failed to pop frag-1 paddr: 0x%llx", paddr
);
593 skb_put(frag_buf
, min(amsdu_len
, HTT_RX_BUF_SIZE
));
594 ath10k_htt_append_frag_list(msdu
, frag_buf
, amsdu_len
);
596 amsdu_len
-= frag_buf
->len
;
597 prev_frag_buf
= frag_buf
;
598 last_frag
= ind_desc
->reserved
;
601 paddr
= __le64_to_cpu(ind_desc
->msdu_paddr
);
602 frag_buf
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
604 ath10k_warn(ar
, "failed to pop frag-n paddr: 0x%llx",
606 prev_frag_buf
->next
= NULL
;
610 skb_put(frag_buf
, min(amsdu_len
, HTT_RX_BUF_SIZE
));
611 last_frag
= ind_desc
->reserved
;
612 amsdu_len
-= frag_buf
->len
;
614 prev_frag_buf
->next
= frag_buf
;
615 prev_frag_buf
= frag_buf
;
619 ath10k_warn(ar
, "invalid amsdu len %u, left %d",
620 __le16_to_cpu(ind_desc
->msdu_len
), amsdu_len
);
623 *msdu_desc
= ind_desc
;
625 prev_frag_buf
->next
= NULL
;
629 static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt
*htt
,
630 struct htt_rx_in_ord_ind
*ev
,
631 struct sk_buff_head
*list
)
633 struct ath10k
*ar
= htt
->ar
;
634 struct htt_rx_in_ord_msdu_desc
*msdu_desc
= ev
->msdu_descs32
;
635 struct htt_rx_desc
*rxd
;
636 struct sk_buff
*msdu
;
641 lockdep_assert_held(&htt
->rx_ring
.lock
);
643 msdu_count
= __le16_to_cpu(ev
->msdu_count
);
644 is_offload
= !!(ev
->info
& HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK
);
646 while (msdu_count
--) {
647 paddr
= __le32_to_cpu(msdu_desc
->msdu_paddr
);
649 msdu
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
651 __skb_queue_purge(list
);
655 if (!is_offload
&& ar
->monitor_arvif
) {
656 ret
= ath10k_htt_rx_handle_amsdu_mon_32(htt
, msdu
,
659 __skb_queue_purge(list
);
662 __skb_queue_tail(list
, msdu
);
667 __skb_queue_tail(list
, msdu
);
670 rxd
= (void *)msdu
->data
;
672 trace_ath10k_htt_rx_desc(ar
, rxd
, sizeof(*rxd
));
674 skb_put(msdu
, sizeof(*rxd
));
675 skb_pull(msdu
, sizeof(*rxd
));
676 skb_put(msdu
, __le16_to_cpu(msdu_desc
->msdu_len
));
678 if (!(__le32_to_cpu(rxd
->attention
.flags
) &
679 RX_ATTENTION_FLAGS_MSDU_DONE
)) {
680 ath10k_warn(htt
->ar
, "tried to pop an incomplete frame, oops!\n");
691 static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt
*htt
,
692 struct htt_rx_in_ord_ind
*ev
,
693 struct sk_buff_head
*list
)
695 struct ath10k
*ar
= htt
->ar
;
696 struct htt_rx_in_ord_msdu_desc_ext
*msdu_desc
= ev
->msdu_descs64
;
697 struct htt_rx_desc
*rxd
;
698 struct sk_buff
*msdu
;
703 lockdep_assert_held(&htt
->rx_ring
.lock
);
705 msdu_count
= __le16_to_cpu(ev
->msdu_count
);
706 is_offload
= !!(ev
->info
& HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK
);
708 while (msdu_count
--) {
709 paddr
= __le64_to_cpu(msdu_desc
->msdu_paddr
);
710 msdu
= ath10k_htt_rx_pop_paddr(htt
, paddr
);
712 __skb_queue_purge(list
);
716 if (!is_offload
&& ar
->monitor_arvif
) {
717 ret
= ath10k_htt_rx_handle_amsdu_mon_64(htt
, msdu
,
720 __skb_queue_purge(list
);
723 __skb_queue_tail(list
, msdu
);
728 __skb_queue_tail(list
, msdu
);
731 rxd
= (void *)msdu
->data
;
733 trace_ath10k_htt_rx_desc(ar
, rxd
, sizeof(*rxd
));
735 skb_put(msdu
, sizeof(*rxd
));
736 skb_pull(msdu
, sizeof(*rxd
));
737 skb_put(msdu
, __le16_to_cpu(msdu_desc
->msdu_len
));
739 if (!(__le32_to_cpu(rxd
->attention
.flags
) &
740 RX_ATTENTION_FLAGS_MSDU_DONE
)) {
741 ath10k_warn(htt
->ar
, "tried to pop an incomplete frame, oops!\n");
752 int ath10k_htt_rx_alloc(struct ath10k_htt
*htt
)
754 struct ath10k
*ar
= htt
->ar
;
756 void *vaddr
, *vaddr_ring
;
758 struct timer_list
*timer
= &htt
->rx_ring
.refill_retry_timer
;
760 if (ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
)
763 htt
->rx_confused
= false;
765 /* XXX: The fill level could be changed during runtime in response to
766 * the host processing latency. Is this really worth it?
768 htt
->rx_ring
.size
= HTT_RX_RING_SIZE
;
769 htt
->rx_ring
.size_mask
= htt
->rx_ring
.size
- 1;
770 htt
->rx_ring
.fill_level
= ar
->hw_params
.rx_ring_fill_level
;
772 if (!is_power_of_2(htt
->rx_ring
.size
)) {
773 ath10k_warn(ar
, "htt rx ring size is not power of 2\n");
777 htt
->rx_ring
.netbufs_ring
=
778 kcalloc(htt
->rx_ring
.size
, sizeof(struct sk_buff
*),
780 if (!htt
->rx_ring
.netbufs_ring
)
783 size
= ath10k_htt_get_rx_ring_size(htt
);
785 vaddr_ring
= dma_alloc_coherent(htt
->ar
->dev
, size
, &paddr
, GFP_KERNEL
);
789 ath10k_htt_config_paddrs_ring(htt
, vaddr_ring
);
790 htt
->rx_ring
.base_paddr
= paddr
;
792 vaddr
= dma_alloc_coherent(htt
->ar
->dev
,
793 sizeof(*htt
->rx_ring
.alloc_idx
.vaddr
),
798 htt
->rx_ring
.alloc_idx
.vaddr
= vaddr
;
799 htt
->rx_ring
.alloc_idx
.paddr
= paddr
;
800 htt
->rx_ring
.sw_rd_idx
.msdu_payld
= htt
->rx_ring
.size_mask
;
801 *htt
->rx_ring
.alloc_idx
.vaddr
= 0;
803 /* Initialize the Rx refill retry timer */
804 timer_setup(timer
, ath10k_htt_rx_ring_refill_retry
, 0);
806 spin_lock_init(&htt
->rx_ring
.lock
);
808 htt
->rx_ring
.fill_cnt
= 0;
809 htt
->rx_ring
.sw_rd_idx
.msdu_payld
= 0;
810 hash_init(htt
->rx_ring
.skb_table
);
812 skb_queue_head_init(&htt
->rx_msdus_q
);
813 skb_queue_head_init(&htt
->rx_in_ord_compl_q
);
814 skb_queue_head_init(&htt
->tx_fetch_ind_q
);
815 atomic_set(&htt
->num_mpdus_ready
, 0);
817 ath10k_dbg(ar
, ATH10K_DBG_BOOT
, "htt rx ring size %d fill_level %d\n",
818 htt
->rx_ring
.size
, htt
->rx_ring
.fill_level
);
822 dma_free_coherent(htt
->ar
->dev
,
823 ath10k_htt_get_rx_ring_size(htt
),
825 htt
->rx_ring
.base_paddr
);
827 kfree(htt
->rx_ring
.netbufs_ring
);
832 static int ath10k_htt_rx_crypto_param_len(struct ath10k
*ar
,
833 enum htt_rx_mpdu_encrypt_type type
)
836 case HTT_RX_MPDU_ENCRYPT_NONE
:
838 case HTT_RX_MPDU_ENCRYPT_WEP40
:
839 case HTT_RX_MPDU_ENCRYPT_WEP104
:
840 return IEEE80211_WEP_IV_LEN
;
841 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC
:
842 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA
:
843 return IEEE80211_TKIP_IV_LEN
;
844 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2
:
845 return IEEE80211_CCMP_HDR_LEN
;
846 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2
:
847 return IEEE80211_CCMP_256_HDR_LEN
;
848 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2
:
849 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2
:
850 return IEEE80211_GCMP_HDR_LEN
;
851 case HTT_RX_MPDU_ENCRYPT_WEP128
:
852 case HTT_RX_MPDU_ENCRYPT_WAPI
:
856 ath10k_warn(ar
, "unsupported encryption type %d\n", type
);
860 #define MICHAEL_MIC_LEN 8
862 static int ath10k_htt_rx_crypto_mic_len(struct ath10k
*ar
,
863 enum htt_rx_mpdu_encrypt_type type
)
866 case HTT_RX_MPDU_ENCRYPT_NONE
:
867 case HTT_RX_MPDU_ENCRYPT_WEP40
:
868 case HTT_RX_MPDU_ENCRYPT_WEP104
:
869 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC
:
870 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA
:
872 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2
:
873 return IEEE80211_CCMP_MIC_LEN
;
874 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2
:
875 return IEEE80211_CCMP_256_MIC_LEN
;
876 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2
:
877 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2
:
878 return IEEE80211_GCMP_MIC_LEN
;
879 case HTT_RX_MPDU_ENCRYPT_WEP128
:
880 case HTT_RX_MPDU_ENCRYPT_WAPI
:
884 ath10k_warn(ar
, "unsupported encryption type %d\n", type
);
888 static int ath10k_htt_rx_crypto_icv_len(struct ath10k
*ar
,
889 enum htt_rx_mpdu_encrypt_type type
)
892 case HTT_RX_MPDU_ENCRYPT_NONE
:
893 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2
:
894 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2
:
895 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2
:
896 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2
:
898 case HTT_RX_MPDU_ENCRYPT_WEP40
:
899 case HTT_RX_MPDU_ENCRYPT_WEP104
:
900 return IEEE80211_WEP_ICV_LEN
;
901 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC
:
902 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA
:
903 return IEEE80211_TKIP_ICV_LEN
;
904 case HTT_RX_MPDU_ENCRYPT_WEP128
:
905 case HTT_RX_MPDU_ENCRYPT_WAPI
:
909 ath10k_warn(ar
, "unsupported encryption type %d\n", type
);
913 struct amsdu_subframe_hdr
{
919 #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
921 static inline u8
ath10k_bw_to_mac80211_bw(u8 bw
)
927 ret
= RATE_INFO_BW_20
;
930 ret
= RATE_INFO_BW_40
;
933 ret
= RATE_INFO_BW_80
;
936 ret
= RATE_INFO_BW_160
;
943 static void ath10k_htt_rx_h_rates(struct ath10k
*ar
,
944 struct ieee80211_rx_status
*status
,
945 struct htt_rx_desc
*rxd
)
947 struct ieee80211_supported_band
*sband
;
948 u8 cck
, rate
, bw
, sgi
, mcs
, nss
;
951 u32 info1
, info2
, info3
;
954 info1
= __le32_to_cpu(rxd
->ppdu_start
.info1
);
955 info2
= __le32_to_cpu(rxd
->ppdu_start
.info2
);
956 info3
= __le32_to_cpu(rxd
->ppdu_start
.info3
);
958 preamble
= MS(info1
, RX_PPDU_START_INFO1_PREAMBLE_TYPE
);
962 /* To get legacy rate index band is required. Since band can't
963 * be undefined check if freq is non-zero.
968 cck
= info1
& RX_PPDU_START_INFO1_L_SIG_RATE_SELECT
;
969 rate
= MS(info1
, RX_PPDU_START_INFO1_L_SIG_RATE
);
970 rate
&= ~RX_PPDU_START_RATE_FLAG
;
972 sband
= &ar
->mac
.sbands
[status
->band
];
973 status
->rate_idx
= ath10k_mac_hw_rate_to_idx(sband
, rate
, cck
);
976 case HTT_RX_HT_WITH_TXBF
:
977 /* HT-SIG - Table 20-11 in info2 and info3 */
980 bw
= (info2
>> 7) & 1;
981 sgi
= (info3
>> 7) & 1;
983 status
->rate_idx
= mcs
;
984 status
->encoding
= RX_ENC_HT
;
986 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
988 status
->bw
= RATE_INFO_BW_40
;
991 case HTT_RX_VHT_WITH_TXBF
:
992 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
997 stbc
= (info2
>> 3) & 1;
998 group_id
= (info2
>> 4) & 0x3F;
1000 if (GROUP_ID_IS_SU_MIMO(group_id
)) {
1001 mcs
= (info3
>> 4) & 0x0F;
1002 nsts_su
= ((info2
>> 10) & 0x07);
1004 nss
= (nsts_su
>> 2) + 1;
1006 nss
= (nsts_su
+ 1);
1008 /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
1009 * so it's impossible to decode MCS. Also since
1010 * firmware consumes Group Id Management frames host
1011 * has no knowledge regarding group/user position
1012 * mapping so it's impossible to pick the correct Nsts
1015 * Bandwidth and SGI are valid so report the rateinfo
1016 * on best-effort basis.
1023 ath10k_warn(ar
, "invalid MCS received %u\n", mcs
);
1024 ath10k_warn(ar
, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
1025 __le32_to_cpu(rxd
->attention
.flags
),
1026 __le32_to_cpu(rxd
->mpdu_start
.info0
),
1027 __le32_to_cpu(rxd
->mpdu_start
.info1
),
1028 __le32_to_cpu(rxd
->msdu_start
.common
.info0
),
1029 __le32_to_cpu(rxd
->msdu_start
.common
.info1
),
1030 rxd
->ppdu_start
.info0
,
1031 __le32_to_cpu(rxd
->ppdu_start
.info1
),
1032 __le32_to_cpu(rxd
->ppdu_start
.info2
),
1033 __le32_to_cpu(rxd
->ppdu_start
.info3
),
1034 __le32_to_cpu(rxd
->ppdu_start
.info4
));
1036 ath10k_warn(ar
, "msdu end %08x mpdu end %08x\n",
1037 __le32_to_cpu(rxd
->msdu_end
.common
.info0
),
1038 __le32_to_cpu(rxd
->mpdu_end
.info0
));
1040 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
,
1041 "rx desc msdu payload: ",
1042 rxd
->msdu_payload
, 50);
1045 status
->rate_idx
= mcs
;
1049 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
1051 status
->bw
= ath10k_bw_to_mac80211_bw(bw
);
1052 status
->encoding
= RX_ENC_VHT
;
1059 static struct ieee80211_channel
*
1060 ath10k_htt_rx_h_peer_channel(struct ath10k
*ar
, struct htt_rx_desc
*rxd
)
1062 struct ath10k_peer
*peer
;
1063 struct ath10k_vif
*arvif
;
1064 struct cfg80211_chan_def def
;
1067 lockdep_assert_held(&ar
->data_lock
);
1072 if (rxd
->attention
.flags
&
1073 __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID
))
1076 if (!(rxd
->msdu_end
.common
.info0
&
1077 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU
)))
1080 peer_id
= MS(__le32_to_cpu(rxd
->mpdu_start
.info0
),
1081 RX_MPDU_START_INFO0_PEER_IDX
);
1083 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
1087 arvif
= ath10k_get_arvif(ar
, peer
->vdev_id
);
1088 if (WARN_ON_ONCE(!arvif
))
1091 if (ath10k_mac_vif_chan(arvif
->vif
, &def
))
1097 static struct ieee80211_channel
*
1098 ath10k_htt_rx_h_vdev_channel(struct ath10k
*ar
, u32 vdev_id
)
1100 struct ath10k_vif
*arvif
;
1101 struct cfg80211_chan_def def
;
1103 lockdep_assert_held(&ar
->data_lock
);
1105 list_for_each_entry(arvif
, &ar
->arvifs
, list
) {
1106 if (arvif
->vdev_id
== vdev_id
&&
1107 ath10k_mac_vif_chan(arvif
->vif
, &def
) == 0)
1115 ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw
*hw
,
1116 struct ieee80211_chanctx_conf
*conf
,
1119 struct cfg80211_chan_def
*def
= data
;
1124 static struct ieee80211_channel
*
1125 ath10k_htt_rx_h_any_channel(struct ath10k
*ar
)
1127 struct cfg80211_chan_def def
= {};
1129 ieee80211_iter_chan_contexts_atomic(ar
->hw
,
1130 ath10k_htt_rx_h_any_chan_iter
,
1136 static bool ath10k_htt_rx_h_channel(struct ath10k
*ar
,
1137 struct ieee80211_rx_status
*status
,
1138 struct htt_rx_desc
*rxd
,
1141 struct ieee80211_channel
*ch
;
1143 spin_lock_bh(&ar
->data_lock
);
1144 ch
= ar
->scan_channel
;
1146 ch
= ar
->rx_channel
;
1148 ch
= ath10k_htt_rx_h_peer_channel(ar
, rxd
);
1150 ch
= ath10k_htt_rx_h_vdev_channel(ar
, vdev_id
);
1152 ch
= ath10k_htt_rx_h_any_channel(ar
);
1154 ch
= ar
->tgt_oper_chan
;
1155 spin_unlock_bh(&ar
->data_lock
);
1160 status
->band
= ch
->band
;
1161 status
->freq
= ch
->center_freq
;
1166 static void ath10k_htt_rx_h_signal(struct ath10k
*ar
,
1167 struct ieee80211_rx_status
*status
,
1168 struct htt_rx_desc
*rxd
)
1172 for (i
= 0; i
< IEEE80211_MAX_CHAINS
; i
++) {
1173 status
->chains
&= ~BIT(i
);
1175 if (rxd
->ppdu_start
.rssi_chains
[i
].pri20_mhz
!= 0x80) {
1176 status
->chain_signal
[i
] = ATH10K_DEFAULT_NOISE_FLOOR
+
1177 rxd
->ppdu_start
.rssi_chains
[i
].pri20_mhz
;
1179 status
->chains
|= BIT(i
);
1183 /* FIXME: Get real NF */
1184 status
->signal
= ATH10K_DEFAULT_NOISE_FLOOR
+
1185 rxd
->ppdu_start
.rssi_comb
;
1186 status
->flag
&= ~RX_FLAG_NO_SIGNAL_VAL
;
1189 static void ath10k_htt_rx_h_mactime(struct ath10k
*ar
,
1190 struct ieee80211_rx_status
*status
,
1191 struct htt_rx_desc
*rxd
)
1193 /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
1194 * means all prior MSDUs in a PPDU are reported to mac80211 without the
1195 * TSF. Is it worth holding frames until end of PPDU is known?
1197 * FIXME: Can we get/compute 64bit TSF?
1199 status
->mactime
= __le32_to_cpu(rxd
->ppdu_end
.common
.tsf_timestamp
);
1200 status
->flag
|= RX_FLAG_MACTIME_END
;
1203 static void ath10k_htt_rx_h_ppdu(struct ath10k
*ar
,
1204 struct sk_buff_head
*amsdu
,
1205 struct ieee80211_rx_status
*status
,
1208 struct sk_buff
*first
;
1209 struct htt_rx_desc
*rxd
;
1213 if (skb_queue_empty(amsdu
))
1216 first
= skb_peek(amsdu
);
1217 rxd
= (void *)first
->data
- sizeof(*rxd
);
1219 is_first_ppdu
= !!(rxd
->attention
.flags
&
1220 __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU
));
1221 is_last_ppdu
= !!(rxd
->attention
.flags
&
1222 __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU
));
1224 if (is_first_ppdu
) {
1225 /* New PPDU starts so clear out the old per-PPDU status. */
1227 status
->rate_idx
= 0;
1229 status
->encoding
= RX_ENC_LEGACY
;
1230 status
->bw
= RATE_INFO_BW_20
;
1232 status
->flag
&= ~RX_FLAG_MACTIME_END
;
1233 status
->flag
|= RX_FLAG_NO_SIGNAL_VAL
;
1235 status
->flag
&= ~(RX_FLAG_AMPDU_IS_LAST
);
1236 status
->flag
|= RX_FLAG_AMPDU_DETAILS
| RX_FLAG_AMPDU_LAST_KNOWN
;
1237 status
->ampdu_reference
= ar
->ampdu_reference
;
1239 ath10k_htt_rx_h_signal(ar
, status
, rxd
);
1240 ath10k_htt_rx_h_channel(ar
, status
, rxd
, vdev_id
);
1241 ath10k_htt_rx_h_rates(ar
, status
, rxd
);
1245 ath10k_htt_rx_h_mactime(ar
, status
, rxd
);
1247 /* set ampdu last segment flag */
1248 status
->flag
|= RX_FLAG_AMPDU_IS_LAST
;
1249 ar
->ampdu_reference
++;
1253 static const char * const tid_to_ac
[] = {
1264 static char *ath10k_get_tid(struct ieee80211_hdr
*hdr
, char *out
, size_t size
)
1269 if (!ieee80211_is_data_qos(hdr
->frame_control
))
1272 qc
= ieee80211_get_qos_ctl(hdr
);
1273 tid
= *qc
& IEEE80211_QOS_CTL_TID_MASK
;
1275 snprintf(out
, size
, "tid %d (%s)", tid
, tid_to_ac
[tid
]);
1277 snprintf(out
, size
, "tid %d", tid
);
1282 static void ath10k_htt_rx_h_queue_msdu(struct ath10k
*ar
,
1283 struct ieee80211_rx_status
*rx_status
,
1284 struct sk_buff
*skb
)
1286 struct ieee80211_rx_status
*status
;
1288 status
= IEEE80211_SKB_RXCB(skb
);
1289 *status
= *rx_status
;
1291 skb_queue_tail(&ar
->htt
.rx_msdus_q
, skb
);
1294 static void ath10k_process_rx(struct ath10k
*ar
, struct sk_buff
*skb
)
1296 struct ieee80211_rx_status
*status
;
1297 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
1300 status
= IEEE80211_SKB_RXCB(skb
);
1302 if (!(ar
->filter_flags
& FIF_FCSFAIL
) &&
1303 status
->flag
& RX_FLAG_FAILED_FCS_CRC
) {
1304 ar
->stats
.rx_crc_err_drop
++;
1305 dev_kfree_skb_any(skb
);
1309 ath10k_dbg(ar
, ATH10K_DBG_DATA
,
1310 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
1313 ieee80211_get_SA(hdr
),
1314 ath10k_get_tid(hdr
, tid
, sizeof(tid
)),
1315 is_multicast_ether_addr(ieee80211_get_DA(hdr
)) ?
1317 (__le16_to_cpu(hdr
->seq_ctrl
) & IEEE80211_SCTL_SEQ
) >> 4,
1318 (status
->encoding
== RX_ENC_LEGACY
) ? "legacy" : "",
1319 (status
->encoding
== RX_ENC_HT
) ? "ht" : "",
1320 (status
->encoding
== RX_ENC_VHT
) ? "vht" : "",
1321 (status
->bw
== RATE_INFO_BW_40
) ? "40" : "",
1322 (status
->bw
== RATE_INFO_BW_80
) ? "80" : "",
1323 (status
->bw
== RATE_INFO_BW_160
) ? "160" : "",
1324 status
->enc_flags
& RX_ENC_FLAG_SHORT_GI
? "sgi " : "",
1328 status
->band
, status
->flag
,
1329 !!(status
->flag
& RX_FLAG_FAILED_FCS_CRC
),
1330 !!(status
->flag
& RX_FLAG_MMIC_ERROR
),
1331 !!(status
->flag
& RX_FLAG_AMSDU_MORE
));
1332 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "rx skb: ",
1333 skb
->data
, skb
->len
);
1334 trace_ath10k_rx_hdr(ar
, skb
->data
, skb
->len
);
1335 trace_ath10k_rx_payload(ar
, skb
->data
, skb
->len
);
1337 ieee80211_rx_napi(ar
->hw
, NULL
, skb
, &ar
->napi
);
1340 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k
*ar
,
1341 struct ieee80211_hdr
*hdr
)
1343 int len
= ieee80211_hdrlen(hdr
->frame_control
);
1345 if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING
,
1346 ar
->running_fw
->fw_file
.fw_features
))
1347 len
= round_up(len
, 4);
1352 static void ath10k_htt_rx_h_undecap_raw(struct ath10k
*ar
,
1353 struct sk_buff
*msdu
,
1354 struct ieee80211_rx_status
*status
,
1355 enum htt_rx_mpdu_encrypt_type enctype
,
1357 const u8 first_hdr
[64])
1359 struct ieee80211_hdr
*hdr
;
1360 struct htt_rx_desc
*rxd
;
1365 bool msdu_limit_err
;
1366 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1369 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1370 is_first
= !!(rxd
->msdu_end
.common
.info0
&
1371 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU
));
1372 is_last
= !!(rxd
->msdu_end
.common
.info0
&
1373 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU
));
1375 /* Delivered decapped frame:
1377 * [crypto param] <-- can be trimmed if !fcs_err &&
1378 * !decrypt_err && !peer_idx_invalid
1379 * [amsdu header] <-- only if A-MSDU
1382 * [FCS] <-- at end, needs to be trimmed
1385 /* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when
1386 * deaggregate, so that unwanted MSDU-deaggregation is avoided for
1387 * error packets. If limit exceeds, hw sends all remaining MSDUs as
1388 * a single last MSDU with this msdu limit error set.
1390 msdu_limit_err
= ath10k_rx_desc_msdu_limit_error(&ar
->hw_params
, rxd
);
1392 /* If MSDU limit error happens, then don't warn on, the partial raw MSDU
1393 * without first MSDU is expected in that case, and handled later here.
1395 /* This probably shouldn't happen but warn just in case */
1396 if (WARN_ON_ONCE(!is_first
&& !msdu_limit_err
))
1399 /* This probably shouldn't happen but warn just in case */
1400 if (WARN_ON_ONCE(!(is_first
&& is_last
) && !msdu_limit_err
))
1403 skb_trim(msdu
, msdu
->len
- FCS_LEN
);
1405 /* Push original 80211 header */
1406 if (unlikely(msdu_limit_err
)) {
1407 hdr
= (struct ieee80211_hdr
*)first_hdr
;
1408 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1409 crypto_len
= ath10k_htt_rx_crypto_param_len(ar
, enctype
);
1411 if (ieee80211_is_data_qos(hdr
->frame_control
)) {
1412 qos
= ieee80211_get_qos_ctl(hdr
);
1413 qos
[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT
;
1417 memcpy(skb_push(msdu
, crypto_len
),
1418 (void *)hdr
+ round_up(hdr_len
, bytes_aligned
),
1421 memcpy(skb_push(msdu
, hdr_len
), hdr
, hdr_len
);
1424 /* In most cases this will be true for sniffed frames. It makes sense
1425 * to deliver them as-is without stripping the crypto param. This is
1426 * necessary for software based decryption.
1428 * If there's no error then the frame is decrypted. At least that is
1429 * the case for frames that come in via fragmented rx indication.
1434 /* The payload is decrypted so strip crypto params. Start from tail
1435 * since hdr is used to compute some stuff.
1438 hdr
= (void *)msdu
->data
;
1441 if (status
->flag
& RX_FLAG_IV_STRIPPED
) {
1442 skb_trim(msdu
, msdu
->len
-
1443 ath10k_htt_rx_crypto_mic_len(ar
, enctype
));
1445 skb_trim(msdu
, msdu
->len
-
1446 ath10k_htt_rx_crypto_icv_len(ar
, enctype
));
1449 if (status
->flag
& RX_FLAG_MIC_STRIPPED
)
1450 skb_trim(msdu
, msdu
->len
-
1451 ath10k_htt_rx_crypto_mic_len(ar
, enctype
));
1454 if (status
->flag
& RX_FLAG_ICV_STRIPPED
)
1455 skb_trim(msdu
, msdu
->len
-
1456 ath10k_htt_rx_crypto_icv_len(ar
, enctype
));
1460 if ((status
->flag
& RX_FLAG_MMIC_STRIPPED
) &&
1461 !ieee80211_has_morefrags(hdr
->frame_control
) &&
1462 enctype
== HTT_RX_MPDU_ENCRYPT_TKIP_WPA
)
1463 skb_trim(msdu
, msdu
->len
- MICHAEL_MIC_LEN
);
1466 if (status
->flag
& RX_FLAG_IV_STRIPPED
) {
1467 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1468 crypto_len
= ath10k_htt_rx_crypto_param_len(ar
, enctype
);
1470 memmove((void *)msdu
->data
+ crypto_len
,
1471 (void *)msdu
->data
, hdr_len
);
1472 skb_pull(msdu
, crypto_len
);
1476 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k
*ar
,
1477 struct sk_buff
*msdu
,
1478 struct ieee80211_rx_status
*status
,
1479 const u8 first_hdr
[64],
1480 enum htt_rx_mpdu_encrypt_type enctype
)
1482 struct ieee80211_hdr
*hdr
;
1483 struct htt_rx_desc
*rxd
;
1488 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1490 /* Delivered decapped frame:
1491 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
1494 * Note: The nwifi header doesn't have QoS Control and is
1495 * (always?) a 3addr frame.
1497 * Note2: There's no A-MSDU subframe header. Even if it's part
1501 /* pull decapped header and copy SA & DA */
1502 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1504 l3_pad_bytes
= ath10k_rx_desc_get_l3_pad_bytes(&ar
->hw_params
, rxd
);
1505 skb_put(msdu
, l3_pad_bytes
);
1507 hdr
= (struct ieee80211_hdr
*)(msdu
->data
+ l3_pad_bytes
);
1509 hdr_len
= ath10k_htt_rx_nwifi_hdrlen(ar
, hdr
);
1510 ether_addr_copy(da
, ieee80211_get_DA(hdr
));
1511 ether_addr_copy(sa
, ieee80211_get_SA(hdr
));
1512 skb_pull(msdu
, hdr_len
);
1514 /* push original 802.11 header */
1515 hdr
= (struct ieee80211_hdr
*)first_hdr
;
1516 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1518 if (!(status
->flag
& RX_FLAG_IV_STRIPPED
)) {
1519 memcpy(skb_push(msdu
,
1520 ath10k_htt_rx_crypto_param_len(ar
, enctype
)),
1521 (void *)hdr
+ round_up(hdr_len
, bytes_aligned
),
1522 ath10k_htt_rx_crypto_param_len(ar
, enctype
));
1525 memcpy(skb_push(msdu
, hdr_len
), hdr
, hdr_len
);
1527 /* original 802.11 header has a different DA and in
1528 * case of 4addr it may also have different SA
1530 hdr
= (struct ieee80211_hdr
*)msdu
->data
;
1531 ether_addr_copy(ieee80211_get_DA(hdr
), da
);
1532 ether_addr_copy(ieee80211_get_SA(hdr
), sa
);
1535 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k
*ar
,
1536 struct sk_buff
*msdu
,
1537 enum htt_rx_mpdu_encrypt_type enctype
)
1539 struct ieee80211_hdr
*hdr
;
1540 struct htt_rx_desc
*rxd
;
1541 size_t hdr_len
, crypto_len
;
1543 bool is_first
, is_last
, is_amsdu
;
1544 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1546 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1547 hdr
= (void *)rxd
->rx_hdr_status
;
1549 is_first
= !!(rxd
->msdu_end
.common
.info0
&
1550 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU
));
1551 is_last
= !!(rxd
->msdu_end
.common
.info0
&
1552 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU
));
1553 is_amsdu
= !(is_first
&& is_last
);
1558 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1559 crypto_len
= ath10k_htt_rx_crypto_param_len(ar
, enctype
);
1561 rfc1042
+= round_up(hdr_len
, bytes_aligned
) +
1562 round_up(crypto_len
, bytes_aligned
);
1566 rfc1042
+= sizeof(struct amsdu_subframe_hdr
);
1571 static void ath10k_htt_rx_h_undecap_eth(struct ath10k
*ar
,
1572 struct sk_buff
*msdu
,
1573 struct ieee80211_rx_status
*status
,
1574 const u8 first_hdr
[64],
1575 enum htt_rx_mpdu_encrypt_type enctype
)
1577 struct ieee80211_hdr
*hdr
;
1584 struct htt_rx_desc
*rxd
;
1585 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1587 /* Delivered decapped frame:
1588 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
1592 rfc1042
= ath10k_htt_rx_h_find_rfc1042(ar
, msdu
, enctype
);
1593 if (WARN_ON_ONCE(!rfc1042
))
1596 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1597 l3_pad_bytes
= ath10k_rx_desc_get_l3_pad_bytes(&ar
->hw_params
, rxd
);
1598 skb_put(msdu
, l3_pad_bytes
);
1599 skb_pull(msdu
, l3_pad_bytes
);
1601 /* pull decapped header and copy SA & DA */
1602 eth
= (struct ethhdr
*)msdu
->data
;
1603 ether_addr_copy(da
, eth
->h_dest
);
1604 ether_addr_copy(sa
, eth
->h_source
);
1605 skb_pull(msdu
, sizeof(struct ethhdr
));
1607 /* push rfc1042/llc/snap */
1608 memcpy(skb_push(msdu
, sizeof(struct rfc1042_hdr
)), rfc1042
,
1609 sizeof(struct rfc1042_hdr
));
1611 /* push original 802.11 header */
1612 hdr
= (struct ieee80211_hdr
*)first_hdr
;
1613 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1615 if (!(status
->flag
& RX_FLAG_IV_STRIPPED
)) {
1616 memcpy(skb_push(msdu
,
1617 ath10k_htt_rx_crypto_param_len(ar
, enctype
)),
1618 (void *)hdr
+ round_up(hdr_len
, bytes_aligned
),
1619 ath10k_htt_rx_crypto_param_len(ar
, enctype
));
1622 memcpy(skb_push(msdu
, hdr_len
), hdr
, hdr_len
);
1624 /* original 802.11 header has a different DA and in
1625 * case of 4addr it may also have different SA
1627 hdr
= (struct ieee80211_hdr
*)msdu
->data
;
1628 ether_addr_copy(ieee80211_get_DA(hdr
), da
);
1629 ether_addr_copy(ieee80211_get_SA(hdr
), sa
);
1632 static void ath10k_htt_rx_h_undecap_snap(struct ath10k
*ar
,
1633 struct sk_buff
*msdu
,
1634 struct ieee80211_rx_status
*status
,
1635 const u8 first_hdr
[64],
1636 enum htt_rx_mpdu_encrypt_type enctype
)
1638 struct ieee80211_hdr
*hdr
;
1641 struct htt_rx_desc
*rxd
;
1642 int bytes_aligned
= ar
->hw_params
.decap_align_bytes
;
1644 /* Delivered decapped frame:
1645 * [amsdu header] <-- replaced with 802.11 hdr
1650 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1651 l3_pad_bytes
= ath10k_rx_desc_get_l3_pad_bytes(&ar
->hw_params
, rxd
);
1653 skb_put(msdu
, l3_pad_bytes
);
1654 skb_pull(msdu
, sizeof(struct amsdu_subframe_hdr
) + l3_pad_bytes
);
1656 hdr
= (struct ieee80211_hdr
*)first_hdr
;
1657 hdr_len
= ieee80211_hdrlen(hdr
->frame_control
);
1659 if (!(status
->flag
& RX_FLAG_IV_STRIPPED
)) {
1660 memcpy(skb_push(msdu
,
1661 ath10k_htt_rx_crypto_param_len(ar
, enctype
)),
1662 (void *)hdr
+ round_up(hdr_len
, bytes_aligned
),
1663 ath10k_htt_rx_crypto_param_len(ar
, enctype
));
1666 memcpy(skb_push(msdu
, hdr_len
), hdr
, hdr_len
);
1669 static void ath10k_htt_rx_h_undecap(struct ath10k
*ar
,
1670 struct sk_buff
*msdu
,
1671 struct ieee80211_rx_status
*status
,
1673 enum htt_rx_mpdu_encrypt_type enctype
,
1676 struct htt_rx_desc
*rxd
;
1677 enum rx_msdu_decap_format decap
;
1679 /* First msdu's decapped header:
1680 * [802.11 header] <-- padded to 4 bytes long
1681 * [crypto param] <-- padded to 4 bytes long
1682 * [amsdu header] <-- only if A-MSDU
1685 * Other (2nd, 3rd, ..) msdu's decapped header:
1686 * [amsdu header] <-- only if A-MSDU
1690 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
1691 decap
= MS(__le32_to_cpu(rxd
->msdu_start
.common
.info1
),
1692 RX_MSDU_START_INFO1_DECAP_FORMAT
);
1695 case RX_MSDU_DECAP_RAW
:
1696 ath10k_htt_rx_h_undecap_raw(ar
, msdu
, status
, enctype
,
1697 is_decrypted
, first_hdr
);
1699 case RX_MSDU_DECAP_NATIVE_WIFI
:
1700 ath10k_htt_rx_h_undecap_nwifi(ar
, msdu
, status
, first_hdr
,
1703 case RX_MSDU_DECAP_ETHERNET2_DIX
:
1704 ath10k_htt_rx_h_undecap_eth(ar
, msdu
, status
, first_hdr
, enctype
);
1706 case RX_MSDU_DECAP_8023_SNAP_LLC
:
1707 ath10k_htt_rx_h_undecap_snap(ar
, msdu
, status
, first_hdr
,
1713 static int ath10k_htt_rx_get_csum_state(struct sk_buff
*skb
)
1715 struct htt_rx_desc
*rxd
;
1717 bool is_ip4
, is_ip6
;
1718 bool is_tcp
, is_udp
;
1719 bool ip_csum_ok
, tcpudp_csum_ok
;
1721 rxd
= (void *)skb
->data
- sizeof(*rxd
);
1722 flags
= __le32_to_cpu(rxd
->attention
.flags
);
1723 info
= __le32_to_cpu(rxd
->msdu_start
.common
.info1
);
1725 is_ip4
= !!(info
& RX_MSDU_START_INFO1_IPV4_PROTO
);
1726 is_ip6
= !!(info
& RX_MSDU_START_INFO1_IPV6_PROTO
);
1727 is_tcp
= !!(info
& RX_MSDU_START_INFO1_TCP_PROTO
);
1728 is_udp
= !!(info
& RX_MSDU_START_INFO1_UDP_PROTO
);
1729 ip_csum_ok
= !(flags
& RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL
);
1730 tcpudp_csum_ok
= !(flags
& RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL
);
1732 if (!is_ip4
&& !is_ip6
)
1733 return CHECKSUM_NONE
;
1734 if (!is_tcp
&& !is_udp
)
1735 return CHECKSUM_NONE
;
1737 return CHECKSUM_NONE
;
1738 if (!tcpudp_csum_ok
)
1739 return CHECKSUM_NONE
;
1741 return CHECKSUM_UNNECESSARY
;
1744 static void ath10k_htt_rx_h_csum_offload(struct sk_buff
*msdu
)
1746 msdu
->ip_summed
= ath10k_htt_rx_get_csum_state(msdu
);
1749 static u64
ath10k_htt_rx_h_get_pn(struct ath10k
*ar
, struct sk_buff
*skb
,
1751 enum htt_rx_mpdu_encrypt_type enctype
)
1753 struct ieee80211_hdr
*hdr
;
1757 hdr
= (struct ieee80211_hdr
*)(skb
->data
+ offset
);
1758 ehdr
= skb
->data
+ offset
+ ieee80211_hdrlen(hdr
->frame_control
);
1760 if (enctype
== HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2
) {
1762 pn
|= (u64
)ehdr
[1] << 8;
1763 pn
|= (u64
)ehdr
[4] << 16;
1764 pn
|= (u64
)ehdr
[5] << 24;
1765 pn
|= (u64
)ehdr
[6] << 32;
1766 pn
|= (u64
)ehdr
[7] << 40;
1771 static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k
*ar
,
1772 struct sk_buff
*skb
,
1775 enum htt_rx_mpdu_encrypt_type enctype
)
1777 struct ath10k_peer
*peer
;
1778 union htt_rx_pn_t
*last_pn
, new_pn
= {0};
1779 struct ieee80211_hdr
*hdr
;
1781 u8 tid
, frag_number
;
1784 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
1786 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "invalid peer for frag pn check\n");
1790 hdr
= (struct ieee80211_hdr
*)(skb
->data
+ offset
);
1791 if (ieee80211_is_data_qos(hdr
->frame_control
))
1792 tid
= ieee80211_get_tid(hdr
);
1794 tid
= ATH10K_TXRX_NON_QOS_TID
;
1796 last_pn
= &peer
->frag_tids_last_pn
[tid
];
1797 new_pn
.pn48
= ath10k_htt_rx_h_get_pn(ar
, skb
, offset
, enctype
);
1798 more_frags
= ieee80211_has_morefrags(hdr
->frame_control
);
1799 frag_number
= le16_to_cpu(hdr
->seq_ctrl
) & IEEE80211_SCTL_FRAG
;
1800 seq
= (__le16_to_cpu(hdr
->seq_ctrl
) & IEEE80211_SCTL_SEQ
) >> 4;
1802 if (frag_number
== 0) {
1803 last_pn
->pn48
= new_pn
.pn48
;
1804 peer
->frag_tids_seq
[tid
] = seq
;
1806 if (seq
!= peer
->frag_tids_seq
[tid
])
1809 if (new_pn
.pn48
!= last_pn
->pn48
+ 1)
1812 last_pn
->pn48
= new_pn
.pn48
;
1818 static void ath10k_htt_rx_h_mpdu(struct ath10k
*ar
,
1819 struct sk_buff_head
*amsdu
,
1820 struct ieee80211_rx_status
*status
,
1821 bool fill_crypt_header
,
1823 enum ath10k_pkt_rx_err
*err
,
1827 struct sk_buff
*first
;
1828 struct sk_buff
*last
;
1829 struct sk_buff
*msdu
, *temp
;
1830 struct htt_rx_desc
*rxd
;
1831 struct ieee80211_hdr
*hdr
;
1832 enum htt_rx_mpdu_encrypt_type enctype
;
1836 bool has_crypto_err
;
1838 bool has_peer_idx_invalid
;
1842 bool frag_pn_check
= true;
1844 if (skb_queue_empty(amsdu
))
1847 first
= skb_peek(amsdu
);
1848 rxd
= (void *)first
->data
- sizeof(*rxd
);
1850 is_mgmt
= !!(rxd
->attention
.flags
&
1851 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE
));
1853 enctype
= MS(__le32_to_cpu(rxd
->mpdu_start
.info0
),
1854 RX_MPDU_START_INFO0_ENCRYPT_TYPE
);
1856 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1857 * decapped header. It'll be used for undecapping of each MSDU.
1859 hdr
= (void *)rxd
->rx_hdr_status
;
1860 memcpy(first_hdr
, hdr
, RX_HTT_HDR_STATUS_LEN
);
1863 memcpy(rx_hdr
, hdr
, RX_HTT_HDR_STATUS_LEN
);
1865 /* Each A-MSDU subframe will use the original header as the base and be
1866 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1868 hdr
= (void *)first_hdr
;
1870 if (ieee80211_is_data_qos(hdr
->frame_control
)) {
1871 qos
= ieee80211_get_qos_ctl(hdr
);
1872 qos
[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT
;
1875 /* Some attention flags are valid only in the last MSDU. */
1876 last
= skb_peek_tail(amsdu
);
1877 rxd
= (void *)last
->data
- sizeof(*rxd
);
1878 attention
= __le32_to_cpu(rxd
->attention
.flags
);
1880 has_fcs_err
= !!(attention
& RX_ATTENTION_FLAGS_FCS_ERR
);
1881 has_crypto_err
= !!(attention
& RX_ATTENTION_FLAGS_DECRYPT_ERR
);
1882 has_tkip_err
= !!(attention
& RX_ATTENTION_FLAGS_TKIP_MIC_ERR
);
1883 has_peer_idx_invalid
= !!(attention
& RX_ATTENTION_FLAGS_PEER_IDX_INVALID
);
1885 /* Note: If hardware captures an encrypted frame that it can't decrypt,
1886 * e.g. due to fcs error, missing peer or invalid key data it will
1887 * report the frame as raw.
1889 is_decrypted
= (enctype
!= HTT_RX_MPDU_ENCRYPT_NONE
&&
1892 !has_peer_idx_invalid
);
1894 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1895 status
->flag
&= ~(RX_FLAG_FAILED_FCS_CRC
|
1896 RX_FLAG_MMIC_ERROR
|
1898 RX_FLAG_IV_STRIPPED
|
1899 RX_FLAG_ONLY_MONITOR
|
1900 RX_FLAG_MMIC_STRIPPED
);
1903 status
->flag
|= RX_FLAG_FAILED_FCS_CRC
;
1906 status
->flag
|= RX_FLAG_MMIC_ERROR
;
1910 *err
= ATH10K_PKT_RX_ERR_FCS
;
1911 else if (has_tkip_err
)
1912 *err
= ATH10K_PKT_RX_ERR_TKIP
;
1913 else if (has_crypto_err
)
1914 *err
= ATH10K_PKT_RX_ERR_CRYPT
;
1915 else if (has_peer_idx_invalid
)
1916 *err
= ATH10K_PKT_RX_ERR_PEER_IDX_INVAL
;
1919 /* Firmware reports all necessary management frames via WMI already.
1920 * They are not reported to monitor interfaces at all so pass the ones
1921 * coming via HTT to monitor interfaces instead. This simplifies
1925 status
->flag
|= RX_FLAG_ONLY_MONITOR
;
1928 status
->flag
|= RX_FLAG_DECRYPTED
;
1930 if (likely(!is_mgmt
))
1931 status
->flag
|= RX_FLAG_MMIC_STRIPPED
;
1933 if (fill_crypt_header
)
1934 status
->flag
|= RX_FLAG_MIC_STRIPPED
|
1935 RX_FLAG_ICV_STRIPPED
;
1937 status
->flag
|= RX_FLAG_IV_STRIPPED
;
1940 skb_queue_walk(amsdu
, msdu
) {
1941 if (frag
&& !fill_crypt_header
&& is_decrypted
&&
1942 enctype
== HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2
)
1943 frag_pn_check
= ath10k_htt_rx_h_frag_pn_check(ar
,
1949 if (!frag_pn_check
) {
1950 /* Discard the fragment with invalid PN */
1952 __skb_unlink(msdu
, amsdu
);
1953 dev_kfree_skb_any(msdu
);
1955 frag_pn_check
= true;
1959 ath10k_htt_rx_h_csum_offload(msdu
);
1961 if (frag
&& !fill_crypt_header
&&
1962 enctype
== HTT_RX_MPDU_ENCRYPT_TKIP_WPA
)
1963 status
->flag
&= ~RX_FLAG_MMIC_STRIPPED
;
1965 ath10k_htt_rx_h_undecap(ar
, msdu
, status
, first_hdr
, enctype
,
1968 /* Undecapping involves copying the original 802.11 header back
1969 * to sk_buff. If frame is protected and hardware has decrypted
1970 * it then remove the protected bit.
1977 if (fill_crypt_header
)
1980 hdr
= (void *)msdu
->data
;
1981 hdr
->frame_control
&= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED
);
1983 if (frag
&& !fill_crypt_header
&&
1984 enctype
== HTT_RX_MPDU_ENCRYPT_TKIP_WPA
)
1985 status
->flag
&= ~RX_FLAG_IV_STRIPPED
&
1986 ~RX_FLAG_MMIC_STRIPPED
;
1990 static void ath10k_htt_rx_h_enqueue(struct ath10k
*ar
,
1991 struct sk_buff_head
*amsdu
,
1992 struct ieee80211_rx_status
*status
)
1994 struct sk_buff
*msdu
;
1995 struct sk_buff
*first_subframe
;
1997 first_subframe
= skb_peek(amsdu
);
1999 while ((msdu
= __skb_dequeue(amsdu
))) {
2000 /* Setup per-MSDU flags */
2001 if (skb_queue_empty(amsdu
))
2002 status
->flag
&= ~RX_FLAG_AMSDU_MORE
;
2004 status
->flag
|= RX_FLAG_AMSDU_MORE
;
2006 if (msdu
== first_subframe
) {
2007 first_subframe
= NULL
;
2008 status
->flag
&= ~RX_FLAG_ALLOW_SAME_PN
;
2010 status
->flag
|= RX_FLAG_ALLOW_SAME_PN
;
2013 ath10k_htt_rx_h_queue_msdu(ar
, status
, msdu
);
2017 static int ath10k_unchain_msdu(struct sk_buff_head
*amsdu
,
2018 unsigned long *unchain_cnt
)
2020 struct sk_buff
*skb
, *first
;
2023 int amsdu_len
= skb_queue_len(amsdu
);
2025 /* TODO: Might could optimize this by using
2026 * skb_try_coalesce or similar method to
2027 * decrease copying, or maybe get mac80211 to
2028 * provide a way to just receive a list of
2032 first
= __skb_dequeue(amsdu
);
2034 /* Allocate total length all at once. */
2035 skb_queue_walk(amsdu
, skb
)
2036 total_len
+= skb
->len
;
2038 space
= total_len
- skb_tailroom(first
);
2040 (pskb_expand_head(first
, 0, space
, GFP_ATOMIC
) < 0)) {
2041 /* TODO: bump some rx-oom error stat */
2042 /* put it back together so we can free the
2043 * whole list at once.
2045 __skb_queue_head(amsdu
, first
);
2049 /* Walk list again, copying contents into
2052 while ((skb
= __skb_dequeue(amsdu
))) {
2053 skb_copy_from_linear_data(skb
, skb_put(first
, skb
->len
),
2055 dev_kfree_skb_any(skb
);
2058 __skb_queue_head(amsdu
, first
);
2060 *unchain_cnt
+= amsdu_len
- 1;
2065 static void ath10k_htt_rx_h_unchain(struct ath10k
*ar
,
2066 struct sk_buff_head
*amsdu
,
2067 unsigned long *drop_cnt
,
2068 unsigned long *unchain_cnt
)
2070 struct sk_buff
*first
;
2071 struct htt_rx_desc
*rxd
;
2072 enum rx_msdu_decap_format decap
;
2074 first
= skb_peek(amsdu
);
2075 rxd
= (void *)first
->data
- sizeof(*rxd
);
2076 decap
= MS(__le32_to_cpu(rxd
->msdu_start
.common
.info1
),
2077 RX_MSDU_START_INFO1_DECAP_FORMAT
);
2079 /* FIXME: Current unchaining logic can only handle simple case of raw
2080 * msdu chaining. If decapping is other than raw the chaining may be
2081 * more complex and this isn't handled by the current code. Don't even
2082 * try re-constructing such frames - it'll be pretty much garbage.
2084 if (decap
!= RX_MSDU_DECAP_RAW
||
2085 skb_queue_len(amsdu
) != 1 + rxd
->frag_info
.ring2_more_count
) {
2086 *drop_cnt
+= skb_queue_len(amsdu
);
2087 __skb_queue_purge(amsdu
);
2091 ath10k_unchain_msdu(amsdu
, unchain_cnt
);
2094 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k
*ar
,
2095 struct sk_buff_head
*amsdu
,
2096 struct ieee80211_rx_status
*rx_status
)
2098 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
2099 * invalid/dangerous frames.
2102 if (!rx_status
->freq
) {
2103 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "no channel configured; ignoring frame(s)!\n");
2107 if (test_bit(ATH10K_CAC_RUNNING
, &ar
->dev_flags
)) {
2108 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx cac running\n");
/* Purge @amsdu (counting drops in *drop_cnt if provided) when
 * ath10k_htt_rx_amsdu_allowed() rejects it; otherwise leave it untouched.
 */
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}
2132 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt
*htt
)
2134 struct ath10k
*ar
= htt
->ar
;
2135 struct ieee80211_rx_status
*rx_status
= &htt
->rx_status
;
2136 struct sk_buff_head amsdu
;
2138 unsigned long drop_cnt
= 0;
2139 unsigned long unchain_cnt
= 0;
2140 unsigned long drop_cnt_filter
= 0;
2141 unsigned long msdus_to_queue
, num_msdus
;
2142 enum ath10k_pkt_rx_err err
= ATH10K_PKT_RX_ERR_MAX
;
2143 u8 first_hdr
[RX_HTT_HDR_STATUS_LEN
];
2145 __skb_queue_head_init(&amsdu
);
2147 spin_lock_bh(&htt
->rx_ring
.lock
);
2148 if (htt
->rx_confused
) {
2149 spin_unlock_bh(&htt
->rx_ring
.lock
);
2152 ret
= ath10k_htt_rx_amsdu_pop(htt
, &amsdu
);
2153 spin_unlock_bh(&htt
->rx_ring
.lock
);
2156 ath10k_warn(ar
, "rx ring became corrupted: %d\n", ret
);
2157 __skb_queue_purge(&amsdu
);
2158 /* FIXME: It's probably a good idea to reboot the
2159 * device instead of leaving it inoperable.
2161 htt
->rx_confused
= true;
2165 num_msdus
= skb_queue_len(&amsdu
);
2167 ath10k_htt_rx_h_ppdu(ar
, &amsdu
, rx_status
, 0xffff);
2169 /* only for ret = 1 indicates chained msdus */
2171 ath10k_htt_rx_h_unchain(ar
, &amsdu
, &drop_cnt
, &unchain_cnt
);
2173 ath10k_htt_rx_h_filter(ar
, &amsdu
, rx_status
, &drop_cnt_filter
);
2174 ath10k_htt_rx_h_mpdu(ar
, &amsdu
, rx_status
, true, first_hdr
, &err
, 0,
2176 msdus_to_queue
= skb_queue_len(&amsdu
);
2177 ath10k_htt_rx_h_enqueue(ar
, &amsdu
, rx_status
);
2179 ath10k_sta_update_rx_tid_stats(ar
, first_hdr
, num_msdus
, err
,
2180 unchain_cnt
, drop_cnt
, drop_cnt_filter
,
2186 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc
*rx_desc
,
2187 union htt_rx_pn_t
*pn
,
2190 switch (pn_len_bits
) {
2192 pn
->pn48
= __le32_to_cpu(rx_desc
->pn_31_0
) +
2193 ((u64
)(__le32_to_cpu(rx_desc
->u0
.pn_63_32
) & 0xFFFF) << 32);
2196 pn
->pn24
= __le32_to_cpu(rx_desc
->pn_31_0
);
2201 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t
*new_pn
,
2202 union htt_rx_pn_t
*old_pn
)
2204 return ((new_pn
->pn48
& 0xffffffffffffULL
) <=
2205 (old_pn
->pn48
& 0xffffffffffffULL
));
2208 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k
*ar
,
2209 struct ath10k_peer
*peer
,
2210 struct htt_rx_indication_hl
*rx
)
2212 bool last_pn_valid
, pn_invalid
= false;
2213 enum htt_txrx_sec_cast_type sec_index
;
2214 enum htt_security_types sec_type
;
2215 union htt_rx_pn_t new_pn
= {0};
2216 struct htt_hl_rx_desc
*rx_desc
;
2217 union htt_rx_pn_t
*last_pn
;
2218 u32 rx_desc_info
, tid
;
2219 int num_mpdu_ranges
;
2221 lockdep_assert_held(&ar
->data_lock
);
2226 if (!(rx
->fw_desc
.flags
& FW_RX_DESC_FLAGS_FIRST_MSDU
))
2229 num_mpdu_ranges
= MS(__le32_to_cpu(rx
->hdr
.info1
),
2230 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES
);
2232 rx_desc
= (struct htt_hl_rx_desc
*)&rx
->mpdu_ranges
[num_mpdu_ranges
];
2233 rx_desc_info
= __le32_to_cpu(rx_desc
->info
);
2235 if (!MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_ENCRYPTED
))
2238 tid
= MS(rx
->hdr
.info0
, HTT_RX_INDICATION_INFO0_EXT_TID
);
2239 last_pn_valid
= peer
->tids_last_pn_valid
[tid
];
2240 last_pn
= &peer
->tids_last_pn
[tid
];
2242 if (MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_MCAST_BCAST
))
2243 sec_index
= HTT_TXRX_SEC_MCAST
;
2245 sec_index
= HTT_TXRX_SEC_UCAST
;
2247 sec_type
= peer
->rx_pn
[sec_index
].sec_type
;
2248 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc
, &new_pn
, peer
->rx_pn
[sec_index
].pn_len
);
2250 if (sec_type
!= HTT_SECURITY_AES_CCMP
&&
2251 sec_type
!= HTT_SECURITY_TKIP
&&
2252 sec_type
!= HTT_SECURITY_TKIP_NOMIC
)
2256 pn_invalid
= ath10k_htt_rx_pn_cmp48(&new_pn
, last_pn
);
2258 peer
->tids_last_pn_valid
[tid
] = true;
2261 last_pn
->pn48
= new_pn
.pn48
;
2266 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt
*htt
,
2267 struct htt_rx_indication_hl
*rx
,
2268 struct sk_buff
*skb
,
2269 enum htt_rx_pn_check_type check_pn_type
,
2270 enum htt_rx_tkip_demic_type tkip_mic_type
)
2272 struct ath10k
*ar
= htt
->ar
;
2273 struct ath10k_peer
*peer
;
2274 struct htt_rx_indication_mpdu_range
*mpdu_ranges
;
2275 struct fw_rx_desc_hl
*fw_desc
;
2276 enum htt_txrx_sec_cast_type sec_index
;
2277 enum htt_security_types sec_type
;
2278 union htt_rx_pn_t new_pn
= {0};
2279 struct htt_hl_rx_desc
*rx_desc
;
2280 struct ieee80211_hdr
*hdr
;
2281 struct ieee80211_rx_status
*rx_status
;
2284 int num_mpdu_ranges
;
2286 struct ieee80211_channel
*ch
;
2287 bool pn_invalid
, qos
, first_msdu
;
2288 u32 tid
, rx_desc_info
;
2290 peer_id
= __le16_to_cpu(rx
->hdr
.peer_id
);
2291 tid
= MS(rx
->hdr
.info0
, HTT_RX_INDICATION_INFO0_EXT_TID
);
2293 spin_lock_bh(&ar
->data_lock
);
2294 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2295 spin_unlock_bh(&ar
->data_lock
);
2296 if (!peer
&& peer_id
!= HTT_INVALID_PEERID
)
2297 ath10k_warn(ar
, "Got RX ind from invalid peer: %u\n", peer_id
);
2302 num_mpdu_ranges
= MS(__le32_to_cpu(rx
->hdr
.info1
),
2303 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES
);
2304 mpdu_ranges
= htt_rx_ind_get_mpdu_ranges_hl(rx
);
2305 fw_desc
= &rx
->fw_desc
;
2306 rx_desc_len
= fw_desc
->len
;
2308 if (fw_desc
->u
.bits
.discard
) {
2309 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt discard mpdu\n");
2313 /* I have not yet seen any case where num_mpdu_ranges > 1.
2314 * qcacld does not seem handle that case either, so we introduce the
2315 * same limitiation here as well.
2317 if (num_mpdu_ranges
> 1)
2319 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2322 if (mpdu_ranges
->mpdu_range_status
!=
2323 HTT_RX_IND_MPDU_STATUS_OK
&&
2324 mpdu_ranges
->mpdu_range_status
!=
2325 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR
) {
2326 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt mpdu_range_status %d\n",
2327 mpdu_ranges
->mpdu_range_status
);
2331 rx_desc
= (struct htt_hl_rx_desc
*)&rx
->mpdu_ranges
[num_mpdu_ranges
];
2332 rx_desc_info
= __le32_to_cpu(rx_desc
->info
);
2334 if (MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_MCAST_BCAST
))
2335 sec_index
= HTT_TXRX_SEC_MCAST
;
2337 sec_index
= HTT_TXRX_SEC_UCAST
;
2339 sec_type
= peer
->rx_pn
[sec_index
].sec_type
;
2340 first_msdu
= rx
->fw_desc
.flags
& FW_RX_DESC_FLAGS_FIRST_MSDU
;
2342 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc
, &new_pn
, peer
->rx_pn
[sec_index
].pn_len
);
2344 if (check_pn_type
== HTT_RX_PN_CHECK
&& tid
>= IEEE80211_NUM_TIDS
) {
2345 spin_lock_bh(&ar
->data_lock
);
2346 pn_invalid
= ath10k_htt_rx_pn_check_replay_hl(ar
, peer
, rx
);
2347 spin_unlock_bh(&ar
->data_lock
);
2353 /* Strip off all headers before the MAC header before delivery to
2356 tot_hdr_len
= sizeof(struct htt_resp_hdr
) + sizeof(rx
->hdr
) +
2357 sizeof(rx
->ppdu
) + sizeof(rx
->prefix
) +
2358 sizeof(rx
->fw_desc
) +
2359 sizeof(*mpdu_ranges
) * num_mpdu_ranges
+ rx_desc_len
;
2361 skb_pull(skb
, tot_hdr_len
);
2363 hdr
= (struct ieee80211_hdr
*)skb
->data
;
2364 qos
= ieee80211_is_data_qos(hdr
->frame_control
);
2366 rx_status
= IEEE80211_SKB_RXCB(skb
);
2367 memset(rx_status
, 0, sizeof(*rx_status
));
2369 if (rx
->ppdu
.combined_rssi
== 0) {
2370 /* SDIO firmware does not provide signal */
2371 rx_status
->signal
= 0;
2372 rx_status
->flag
|= RX_FLAG_NO_SIGNAL_VAL
;
2374 rx_status
->signal
= ATH10K_DEFAULT_NOISE_FLOOR
+
2375 rx
->ppdu
.combined_rssi
;
2376 rx_status
->flag
&= ~RX_FLAG_NO_SIGNAL_VAL
;
2379 spin_lock_bh(&ar
->data_lock
);
2380 ch
= ar
->scan_channel
;
2382 ch
= ar
->rx_channel
;
2384 ch
= ath10k_htt_rx_h_any_channel(ar
);
2386 ch
= ar
->tgt_oper_chan
;
2387 spin_unlock_bh(&ar
->data_lock
);
2390 rx_status
->band
= ch
->band
;
2391 rx_status
->freq
= ch
->center_freq
;
2393 if (rx
->fw_desc
.flags
& FW_RX_DESC_FLAGS_LAST_MSDU
)
2394 rx_status
->flag
&= ~RX_FLAG_AMSDU_MORE
;
2396 rx_status
->flag
|= RX_FLAG_AMSDU_MORE
;
2398 /* Not entirely sure about this, but all frames from the chipset has
2399 * the protected flag set even though they have already been decrypted.
2400 * Unmasking this flag is necessary in order for mac80211 not to drop
2402 * TODO: Verify this is always the case or find out a way to check
2403 * if there has been hw decryption.
2405 if (ieee80211_has_protected(hdr
->frame_control
)) {
2406 hdr
->frame_control
&= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED
);
2407 rx_status
->flag
|= RX_FLAG_DECRYPTED
|
2408 RX_FLAG_IV_STRIPPED
|
2409 RX_FLAG_MMIC_STRIPPED
;
2411 if (tid
< IEEE80211_NUM_TIDS
&&
2413 check_pn_type
== HTT_RX_PN_CHECK
&&
2414 (sec_type
== HTT_SECURITY_AES_CCMP
||
2415 sec_type
== HTT_SECURITY_TKIP
||
2416 sec_type
== HTT_SECURITY_TKIP_NOMIC
)) {
2419 __le64 pn48
= cpu_to_le64(new_pn
.pn48
);
2421 hdr
= (struct ieee80211_hdr
*)skb
->data
;
2422 offset
= ieee80211_hdrlen(hdr
->frame_control
);
2423 hdr
->frame_control
|= __cpu_to_le16(IEEE80211_FCTL_PROTECTED
);
2424 rx_status
->flag
&= ~RX_FLAG_IV_STRIPPED
;
2426 memmove(skb
->data
- IEEE80211_CCMP_HDR_LEN
,
2428 skb_push(skb
, IEEE80211_CCMP_HDR_LEN
);
2429 ivp
= skb
->data
+ offset
;
2430 memset(skb
->data
+ offset
, 0, IEEE80211_CCMP_HDR_LEN
);
2432 ivp
[IEEE80211_WEP_IV_LEN
- 1] |= ATH10K_IEEE80211_EXTIV
;
2434 for (i
= 0; i
< ARRAY_SIZE(peer
->keys
); i
++) {
2435 if (peer
->keys
[i
] &&
2436 peer
->keys
[i
]->flags
& IEEE80211_KEY_FLAG_PAIRWISE
)
2437 keyidx
= peer
->keys
[i
]->keyidx
;
2441 ivp
[IEEE80211_WEP_IV_LEN
- 1] |= keyidx
<< 6;
2443 if (sec_type
== HTT_SECURITY_AES_CCMP
) {
2444 rx_status
->flag
|= RX_FLAG_MIC_STRIPPED
;
2446 memcpy(skb
->data
+ offset
, &pn48
, 2);
2447 /* pn 1, pn 3 , pn 34 , pn 5 */
2448 memcpy(skb
->data
+ offset
+ 4, ((u8
*)&pn48
) + 2, 4);
2450 rx_status
->flag
|= RX_FLAG_ICV_STRIPPED
;
2452 memcpy(skb
->data
+ offset
+ 2, &pn48
, 1);
2454 memcpy(skb
->data
+ offset
, ((u8
*)&pn48
) + 1, 1);
2455 /* TSC 2 , TSC 3 , TSC 4 , TSC 5*/
2456 memcpy(skb
->data
+ offset
+ 4, ((u8
*)&pn48
) + 2, 4);
2461 if (tkip_mic_type
== HTT_RX_TKIP_MIC
)
2462 rx_status
->flag
&= ~RX_FLAG_IV_STRIPPED
&
2463 ~RX_FLAG_MMIC_STRIPPED
;
2465 if (mpdu_ranges
->mpdu_range_status
== HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR
)
2466 rx_status
->flag
|= RX_FLAG_MMIC_ERROR
;
2468 if (!qos
&& tid
< IEEE80211_NUM_TIDS
) {
2470 __le16 qos_ctrl
= 0;
2472 hdr
= (struct ieee80211_hdr
*)skb
->data
;
2473 offset
= ieee80211_hdrlen(hdr
->frame_control
);
2475 hdr
->frame_control
|= cpu_to_le16(IEEE80211_STYPE_QOS_DATA
);
2476 memmove(skb
->data
- IEEE80211_QOS_CTL_LEN
, skb
->data
, offset
);
2477 skb_push(skb
, IEEE80211_QOS_CTL_LEN
);
2478 qos_ctrl
= cpu_to_le16(tid
);
2479 memcpy(skb
->data
+ offset
, &qos_ctrl
, IEEE80211_QOS_CTL_LEN
);
2483 ieee80211_rx_napi(ar
->hw
, NULL
, skb
, &ar
->napi
);
2485 ieee80211_rx_ni(ar
->hw
, skb
);
2487 /* We have delivered the skb to the upper layers (mac80211) so we
2492 /* Tell the caller that it must free the skb since we have not
2498 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff
*skb
,
2504 orig_hdr
= skb
->data
;
2505 ivp
= orig_hdr
+ hdr_len
+ head_len
;
2507 /* the ExtIV bit is always set to 1 for TKIP */
2508 if (!(ivp
[IEEE80211_WEP_IV_LEN
- 1] & ATH10K_IEEE80211_EXTIV
))
2511 memmove(orig_hdr
+ IEEE80211_TKIP_IV_LEN
, orig_hdr
, head_len
+ hdr_len
);
2512 skb_pull(skb
, IEEE80211_TKIP_IV_LEN
);
2513 skb_trim(skb
, skb
->len
- ATH10K_IEEE80211_TKIP_MICLEN
);
2517 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff
*skb
,
2523 orig_hdr
= skb
->data
;
2524 ivp
= orig_hdr
+ hdr_len
+ head_len
;
2526 /* the ExtIV bit is always set to 1 for TKIP */
2527 if (!(ivp
[IEEE80211_WEP_IV_LEN
- 1] & ATH10K_IEEE80211_EXTIV
))
2530 memmove(orig_hdr
+ IEEE80211_TKIP_IV_LEN
, orig_hdr
, head_len
+ hdr_len
);
2531 skb_pull(skb
, IEEE80211_TKIP_IV_LEN
);
2532 skb_trim(skb
, skb
->len
- IEEE80211_TKIP_ICV_LEN
);
2536 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff
*skb
,
2542 orig_hdr
= skb
->data
;
2543 ivp
= orig_hdr
+ hdr_len
+ head_len
;
2545 /* the ExtIV bit is always set to 1 for CCMP */
2546 if (!(ivp
[IEEE80211_WEP_IV_LEN
- 1] & ATH10K_IEEE80211_EXTIV
))
2549 skb_trim(skb
, skb
->len
- IEEE80211_CCMP_MIC_LEN
);
2550 memmove(orig_hdr
+ IEEE80211_CCMP_HDR_LEN
, orig_hdr
, head_len
+ hdr_len
);
2551 skb_pull(skb
, IEEE80211_CCMP_HDR_LEN
);
2555 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff
*skb
,
2561 orig_hdr
= skb
->data
;
2563 memmove(orig_hdr
+ IEEE80211_WEP_IV_LEN
,
2564 orig_hdr
, head_len
+ hdr_len
);
2565 skb_pull(skb
, IEEE80211_WEP_IV_LEN
);
2566 skb_trim(skb
, skb
->len
- IEEE80211_WEP_ICV_LEN
);
2570 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt
*htt
,
2571 struct htt_rx_fragment_indication
*rx
,
2572 struct sk_buff
*skb
)
2574 struct ath10k
*ar
= htt
->ar
;
2575 enum htt_rx_tkip_demic_type tkip_mic
= HTT_RX_NON_TKIP_MIC
;
2576 enum htt_txrx_sec_cast_type sec_index
;
2577 struct htt_rx_indication_hl
*rx_hl
;
2578 enum htt_security_types sec_type
;
2579 u32 tid
, frag
, seq
, rx_desc_info
;
2580 union htt_rx_pn_t new_pn
= {0};
2581 struct htt_hl_rx_desc
*rx_desc
;
2582 u16 peer_id
, sc
, hdr_space
;
2583 union htt_rx_pn_t
*last_pn
;
2584 struct ieee80211_hdr
*hdr
;
2585 int ret
, num_mpdu_ranges
;
2586 struct ath10k_peer
*peer
;
2587 struct htt_resp
*resp
;
2590 resp
= (struct htt_resp
*)(skb
->data
+ HTT_RX_FRAG_IND_INFO0_HEADER_LEN
);
2591 skb_pull(skb
, HTT_RX_FRAG_IND_INFO0_HEADER_LEN
);
2592 skb_trim(skb
, skb
->len
- FCS_LEN
);
2594 peer_id
= __le16_to_cpu(rx
->peer_id
);
2595 rx_hl
= (struct htt_rx_indication_hl
*)(&resp
->rx_ind_hl
);
2597 spin_lock_bh(&ar
->data_lock
);
2598 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2600 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "invalid peer: %u\n", peer_id
);
2604 num_mpdu_ranges
= MS(__le32_to_cpu(rx_hl
->hdr
.info1
),
2605 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES
);
2607 tot_hdr_len
= sizeof(struct htt_resp_hdr
) +
2608 sizeof(rx_hl
->hdr
) +
2609 sizeof(rx_hl
->ppdu
) +
2610 sizeof(rx_hl
->prefix
) +
2611 sizeof(rx_hl
->fw_desc
) +
2612 sizeof(struct htt_rx_indication_mpdu_range
) * num_mpdu_ranges
;
2614 tid
= MS(rx_hl
->hdr
.info0
, HTT_RX_INDICATION_INFO0_EXT_TID
);
2615 rx_desc
= (struct htt_hl_rx_desc
*)(skb
->data
+ tot_hdr_len
);
2616 rx_desc_info
= __le32_to_cpu(rx_desc
->info
);
2618 hdr
= (struct ieee80211_hdr
*)((u8
*)rx_desc
+ rx_hl
->fw_desc
.len
);
2620 if (is_multicast_ether_addr(hdr
->addr1
)) {
2621 /* Discard the fragment with multicast DA */
2625 if (!MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_ENCRYPTED
)) {
2626 spin_unlock_bh(&ar
->data_lock
);
2627 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2628 HTT_RX_NON_PN_CHECK
,
2629 HTT_RX_NON_TKIP_MIC
);
2632 if (ieee80211_has_retry(hdr
->frame_control
))
2635 hdr_space
= ieee80211_hdrlen(hdr
->frame_control
);
2636 sc
= __le16_to_cpu(hdr
->seq_ctrl
);
2637 seq
= (sc
& IEEE80211_SCTL_SEQ
) >> 4;
2638 frag
= sc
& IEEE80211_SCTL_FRAG
;
2640 sec_index
= MS(rx_desc_info
, HTT_RX_DESC_HL_INFO_MCAST_BCAST
) ?
2641 HTT_TXRX_SEC_MCAST
: HTT_TXRX_SEC_UCAST
;
2642 sec_type
= peer
->rx_pn
[sec_index
].sec_type
;
2643 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc
, &new_pn
, peer
->rx_pn
[sec_index
].pn_len
);
2646 case HTT_SECURITY_TKIP
:
2647 tkip_mic
= HTT_RX_TKIP_MIC
;
2648 ret
= ath10k_htt_rx_frag_tkip_decap_withmic(skb
,
2655 case HTT_SECURITY_TKIP_NOMIC
:
2656 ret
= ath10k_htt_rx_frag_tkip_decap_nomic(skb
,
2663 case HTT_SECURITY_AES_CCMP
:
2664 ret
= ath10k_htt_rx_frag_ccmp_decap(skb
,
2665 tot_hdr_len
+ rx_hl
->fw_desc
.len
,
2670 case HTT_SECURITY_WEP128
:
2671 case HTT_SECURITY_WEP104
:
2672 case HTT_SECURITY_WEP40
:
2673 ret
= ath10k_htt_rx_frag_wep_decap(skb
,
2674 tot_hdr_len
+ rx_hl
->fw_desc
.len
,
2683 resp
= (struct htt_resp
*)(skb
->data
);
2685 if (sec_type
!= HTT_SECURITY_AES_CCMP
&&
2686 sec_type
!= HTT_SECURITY_TKIP
&&
2687 sec_type
!= HTT_SECURITY_TKIP_NOMIC
) {
2688 spin_unlock_bh(&ar
->data_lock
);
2689 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2690 HTT_RX_NON_PN_CHECK
,
2691 HTT_RX_NON_TKIP_MIC
);
2694 last_pn
= &peer
->frag_tids_last_pn
[tid
];
2697 if (ath10k_htt_rx_pn_check_replay_hl(ar
, peer
, &resp
->rx_ind_hl
))
2700 last_pn
->pn48
= new_pn
.pn48
;
2701 peer
->frag_tids_seq
[tid
] = seq
;
2702 } else if (sec_type
== HTT_SECURITY_AES_CCMP
) {
2703 if (seq
!= peer
->frag_tids_seq
[tid
])
2706 if (new_pn
.pn48
!= last_pn
->pn48
+ 1)
2709 last_pn
->pn48
= new_pn
.pn48
;
2710 last_pn
= &peer
->tids_last_pn
[tid
];
2711 last_pn
->pn48
= new_pn
.pn48
;
2714 spin_unlock_bh(&ar
->data_lock
);
2716 return ath10k_htt_rx_proc_rx_ind_hl(htt
, &resp
->rx_ind_hl
, skb
,
2717 HTT_RX_NON_PN_CHECK
, tkip_mic
);
2720 spin_unlock_bh(&ar
->data_lock
);
2722 /* Tell the caller that it must free the skb since we have not
2728 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt
*htt
,
2729 struct htt_rx_indication
*rx
)
2731 struct ath10k
*ar
= htt
->ar
;
2732 struct htt_rx_indication_mpdu_range
*mpdu_ranges
;
2733 int num_mpdu_ranges
;
2734 int i
, mpdu_count
= 0;
2738 num_mpdu_ranges
= MS(__le32_to_cpu(rx
->hdr
.info1
),
2739 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES
);
2740 peer_id
= __le16_to_cpu(rx
->hdr
.peer_id
);
2741 tid
= MS(rx
->hdr
.info0
, HTT_RX_INDICATION_INFO0_EXT_TID
);
2743 mpdu_ranges
= htt_rx_ind_get_mpdu_ranges(rx
);
2745 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt rx ind: ",
2746 rx
, struct_size(rx
, mpdu_ranges
, num_mpdu_ranges
));
2748 for (i
= 0; i
< num_mpdu_ranges
; i
++)
2749 mpdu_count
+= mpdu_ranges
[i
].mpdu_count
;
2751 atomic_add(mpdu_count
, &htt
->num_mpdus_ready
);
2753 ath10k_sta_update_rx_tid_stats_ampdu(ar
, peer_id
, tid
, mpdu_ranges
,
2757 static void ath10k_htt_rx_tx_compl_ind(struct ath10k
*ar
,
2758 struct sk_buff
*skb
)
2760 struct ath10k_htt
*htt
= &ar
->htt
;
2761 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
2762 struct htt_tx_done tx_done
= {};
2763 int status
= MS(resp
->data_tx_completion
.flags
, HTT_DATA_TX_STATUS
);
2764 __le16 msdu_id
, *msdus
;
2765 bool rssi_enabled
= false;
2766 u8 msdu_count
= 0, num_airtime_records
, tid
;
2768 struct htt_data_tx_compl_ppdu_dur
*ppdu_info
;
2769 struct ath10k_peer
*peer
;
2770 u16 ppdu_info_offset
= 0, peer_id
;
2774 case HTT_DATA_TX_STATUS_NO_ACK
:
2775 tx_done
.status
= HTT_TX_COMPL_STATE_NOACK
;
2777 case HTT_DATA_TX_STATUS_OK
:
2778 tx_done
.status
= HTT_TX_COMPL_STATE_ACK
;
2780 case HTT_DATA_TX_STATUS_DISCARD
:
2781 case HTT_DATA_TX_STATUS_POSTPONE
:
2782 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL
:
2783 tx_done
.status
= HTT_TX_COMPL_STATE_DISCARD
;
2786 ath10k_warn(ar
, "unhandled tx completion status %d\n", status
);
2787 tx_done
.status
= HTT_TX_COMPL_STATE_DISCARD
;
2791 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt tx completion num_msdus %d\n",
2792 resp
->data_tx_completion
.num_msdus
);
2794 msdu_count
= resp
->data_tx_completion
.num_msdus
;
2795 msdus
= resp
->data_tx_completion
.msdus
;
2796 rssi_enabled
= ath10k_is_rssi_enable(&ar
->hw_params
, resp
);
2799 htt_pad
= ath10k_tx_data_rssi_get_pad_bytes(&ar
->hw_params
,
2802 for (i
= 0; i
< msdu_count
; i
++) {
2804 tx_done
.msdu_id
= __le16_to_cpu(msdu_id
);
2807 /* Total no of MSDUs should be even,
2808 * if odd MSDUs are sent firmware fills
2809 * last msdu id with 0xffff
2811 if (msdu_count
& 0x01) {
2812 msdu_id
= msdus
[msdu_count
+ i
+ 1 + htt_pad
];
2813 tx_done
.ack_rssi
= __le16_to_cpu(msdu_id
);
2815 msdu_id
= msdus
[msdu_count
+ i
+ htt_pad
];
2816 tx_done
.ack_rssi
= __le16_to_cpu(msdu_id
);
2820 /* kfifo_put: In practice firmware shouldn't fire off per-CE
2821 * interrupt and main interrupt (MSI/-X range case) for the same
2822 * HTC service so it should be safe to use kfifo_put w/o lock.
2824 * From kfifo_put() documentation:
2825 * Note that with only one concurrent reader and one concurrent
2826 * writer, you don't need extra locking to use these macro.
2828 if (ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
) {
2829 ath10k_txrx_tx_unref(htt
, &tx_done
);
2830 } else if (!kfifo_put(&htt
->txdone_fifo
, tx_done
)) {
2831 ath10k_warn(ar
, "txdone fifo overrun, msdu_id %d status %d\n",
2832 tx_done
.msdu_id
, tx_done
.status
);
2833 ath10k_txrx_tx_unref(htt
, &tx_done
);
2837 if (!(resp
->data_tx_completion
.flags2
& HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT
))
2840 ppdu_info_offset
= (msdu_count
& 0x01) ? msdu_count
+ 1 : msdu_count
;
2843 ppdu_info_offset
+= ppdu_info_offset
;
2845 if (resp
->data_tx_completion
.flags2
&
2846 (HTT_TX_CMPL_FLAG_PPID_PRESENT
| HTT_TX_CMPL_FLAG_PA_PRESENT
))
2847 ppdu_info_offset
+= 2;
2849 ppdu_info
= (struct htt_data_tx_compl_ppdu_dur
*)&msdus
[ppdu_info_offset
];
2850 num_airtime_records
= FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK
,
2851 __le32_to_cpu(ppdu_info
->info0
));
2853 for (i
= 0; i
< num_airtime_records
; i
++) {
2854 struct htt_data_tx_ppdu_dur
*ppdu_dur
;
2857 ppdu_dur
= &ppdu_info
->ppdu_dur
[i
];
2858 info0
= __le32_to_cpu(ppdu_dur
->info0
);
2860 peer_id
= FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK
,
2863 spin_lock_bh(&ar
->data_lock
);
2865 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2866 if (!peer
|| !peer
->sta
) {
2867 spin_unlock_bh(&ar
->data_lock
);
2872 tid
= FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK
, info0
) &
2873 IEEE80211_QOS_CTL_TID_MASK
;
2874 tx_duration
= __le32_to_cpu(ppdu_dur
->tx_duration
);
2876 ieee80211_sta_register_airtime(peer
->sta
, tid
, tx_duration
, 0);
2878 spin_unlock_bh(&ar
->data_lock
);
2883 static void ath10k_htt_rx_addba(struct ath10k
*ar
, struct htt_resp
*resp
)
2885 struct htt_rx_addba
*ev
= &resp
->rx_addba
;
2886 struct ath10k_peer
*peer
;
2887 struct ath10k_vif
*arvif
;
2888 u16 info0
, tid
, peer_id
;
2890 info0
= __le16_to_cpu(ev
->info0
);
2891 tid
= MS(info0
, HTT_RX_BA_INFO0_TID
);
2892 peer_id
= MS(info0
, HTT_RX_BA_INFO0_PEER_ID
);
2894 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
2895 "htt rx addba tid %hu peer_id %hu size %hhu\n",
2896 tid
, peer_id
, ev
->window_size
);
2898 spin_lock_bh(&ar
->data_lock
);
2899 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2901 ath10k_warn(ar
, "received addba event for invalid peer_id: %hu\n",
2903 spin_unlock_bh(&ar
->data_lock
);
2907 arvif
= ath10k_get_arvif(ar
, peer
->vdev_id
);
2909 ath10k_warn(ar
, "received addba event for invalid vdev_id: %u\n",
2911 spin_unlock_bh(&ar
->data_lock
);
2915 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
2916 "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
2917 peer
->addr
, tid
, ev
->window_size
);
2919 ieee80211_start_rx_ba_session_offl(arvif
->vif
, peer
->addr
, tid
);
2920 spin_unlock_bh(&ar
->data_lock
);
2923 static void ath10k_htt_rx_delba(struct ath10k
*ar
, struct htt_resp
*resp
)
2925 struct htt_rx_delba
*ev
= &resp
->rx_delba
;
2926 struct ath10k_peer
*peer
;
2927 struct ath10k_vif
*arvif
;
2928 u16 info0
, tid
, peer_id
;
2930 info0
= __le16_to_cpu(ev
->info0
);
2931 tid
= MS(info0
, HTT_RX_BA_INFO0_TID
);
2932 peer_id
= MS(info0
, HTT_RX_BA_INFO0_PEER_ID
);
2934 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
2935 "htt rx delba tid %hu peer_id %hu\n",
2938 spin_lock_bh(&ar
->data_lock
);
2939 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
2941 ath10k_warn(ar
, "received addba event for invalid peer_id: %hu\n",
2943 spin_unlock_bh(&ar
->data_lock
);
2947 arvif
= ath10k_get_arvif(ar
, peer
->vdev_id
);
2949 ath10k_warn(ar
, "received addba event for invalid vdev_id: %u\n",
2951 spin_unlock_bh(&ar
->data_lock
);
2955 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
2956 "htt rx stop rx ba session sta %pM tid %hu\n",
2959 ieee80211_stop_rx_ba_session_offl(arvif
->vif
, peer
->addr
, tid
);
2960 spin_unlock_bh(&ar
->data_lock
);
2963 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head
*list
,
2964 struct sk_buff_head
*amsdu
)
2966 struct sk_buff
*msdu
;
2967 struct htt_rx_desc
*rxd
;
2969 if (skb_queue_empty(list
))
2972 if (WARN_ON(!skb_queue_empty(amsdu
)))
2975 while ((msdu
= __skb_dequeue(list
))) {
2976 __skb_queue_tail(amsdu
, msdu
);
2978 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
2979 if (rxd
->msdu_end
.common
.info0
&
2980 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU
))
2984 msdu
= skb_peek_tail(amsdu
);
2985 rxd
= (void *)msdu
->data
- sizeof(*rxd
);
2986 if (!(rxd
->msdu_end
.common
.info0
&
2987 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU
))) {
2988 skb_queue_splice_init(amsdu
, list
);
2995 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status
*status
,
2996 struct sk_buff
*skb
)
2998 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
3000 if (!ieee80211_has_protected(hdr
->frame_control
))
3003 /* Offloaded frames are already decrypted but firmware insists they are
3004 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
3005 * will drop the frame.
3008 hdr
->frame_control
&= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED
);
3009 status
->flag
|= RX_FLAG_DECRYPTED
|
3010 RX_FLAG_IV_STRIPPED
|
3011 RX_FLAG_MMIC_STRIPPED
;
3014 static void ath10k_htt_rx_h_rx_offload(struct ath10k
*ar
,
3015 struct sk_buff_head
*list
)
3017 struct ath10k_htt
*htt
= &ar
->htt
;
3018 struct ieee80211_rx_status
*status
= &htt
->rx_status
;
3019 struct htt_rx_offload_msdu
*rx
;
3020 struct sk_buff
*msdu
;
3023 while ((msdu
= __skb_dequeue(list
))) {
3024 /* Offloaded frames don't have Rx descriptor. Instead they have
3025 * a short meta information header.
3028 rx
= (void *)msdu
->data
;
3030 skb_put(msdu
, sizeof(*rx
));
3031 skb_pull(msdu
, sizeof(*rx
));
3033 if (skb_tailroom(msdu
) < __le16_to_cpu(rx
->msdu_len
)) {
3034 ath10k_warn(ar
, "dropping frame: offloaded rx msdu is too long!\n");
3035 dev_kfree_skb_any(msdu
);
3039 skb_put(msdu
, __le16_to_cpu(rx
->msdu_len
));
3041 /* Offloaded rx header length isn't multiple of 2 nor 4 so the
3042 * actual payload is unaligned. Align the frame. Otherwise
3043 * mac80211 complains. This shouldn't reduce performance much
3044 * because these offloaded frames are rare.
3046 offset
= 4 - ((unsigned long)msdu
->data
& 3);
3047 skb_put(msdu
, offset
);
3048 memmove(msdu
->data
+ offset
, msdu
->data
, msdu
->len
);
3049 skb_pull(msdu
, offset
);
3051 /* FIXME: The frame is NWifi. Re-construct QoS Control
3052 * if possible later.
3055 memset(status
, 0, sizeof(*status
));
3056 status
->flag
|= RX_FLAG_NO_SIGNAL_VAL
;
3058 ath10k_htt_rx_h_rx_offload_prot(status
, msdu
);
3059 ath10k_htt_rx_h_channel(ar
, status
, NULL
, rx
->vdev_id
);
3060 ath10k_htt_rx_h_queue_msdu(ar
, status
, msdu
);
3064 static int ath10k_htt_rx_in_ord_ind(struct ath10k
*ar
, struct sk_buff
*skb
)
3066 struct ath10k_htt
*htt
= &ar
->htt
;
3067 struct htt_resp
*resp
= (void *)skb
->data
;
3068 struct ieee80211_rx_status
*status
= &htt
->rx_status
;
3069 struct sk_buff_head list
;
3070 struct sk_buff_head amsdu
;
3079 lockdep_assert_held(&htt
->rx_ring
.lock
);
3081 if (htt
->rx_confused
)
3084 skb_pull(skb
, sizeof(resp
->hdr
));
3085 skb_pull(skb
, sizeof(resp
->rx_in_ord_ind
));
3087 peer_id
= __le16_to_cpu(resp
->rx_in_ord_ind
.peer_id
);
3088 msdu_count
= __le16_to_cpu(resp
->rx_in_ord_ind
.msdu_count
);
3089 vdev_id
= resp
->rx_in_ord_ind
.vdev_id
;
3090 tid
= SM(resp
->rx_in_ord_ind
.info
, HTT_RX_IN_ORD_IND_INFO_TID
);
3091 offload
= !!(resp
->rx_in_ord_ind
.info
&
3092 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK
);
3093 frag
= !!(resp
->rx_in_ord_ind
.info
& HTT_RX_IN_ORD_IND_INFO_FRAG_MASK
);
3095 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3096 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
3097 vdev_id
, peer_id
, tid
, offload
, frag
, msdu_count
);
3099 if (skb
->len
< msdu_count
* sizeof(*resp
->rx_in_ord_ind
.msdu_descs32
)) {
3100 ath10k_warn(ar
, "dropping invalid in order rx indication\n");
3104 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
3105 * extracted and processed.
3107 __skb_queue_head_init(&list
);
3108 if (ar
->hw_params
.target_64bit
)
3109 ret
= ath10k_htt_rx_pop_paddr64_list(htt
, &resp
->rx_in_ord_ind
,
3112 ret
= ath10k_htt_rx_pop_paddr32_list(htt
, &resp
->rx_in_ord_ind
,
3116 ath10k_warn(ar
, "failed to pop paddr list: %d\n", ret
);
3117 htt
->rx_confused
= true;
3121 /* Offloaded frames are very different and need to be handled
3125 ath10k_htt_rx_h_rx_offload(ar
, &list
);
3127 while (!skb_queue_empty(&list
)) {
3128 __skb_queue_head_init(&amsdu
);
3129 ret
= ath10k_htt_rx_extract_amsdu(&list
, &amsdu
);
3132 /* Note: The in-order indication may report interleaved
3133 * frames from different PPDUs meaning reported rx rate
3134 * to mac80211 isn't accurate/reliable. It's still
3135 * better to report something than nothing though. This
3136 * should still give an idea about rx rate to the user.
3138 ath10k_htt_rx_h_ppdu(ar
, &amsdu
, status
, vdev_id
);
3139 ath10k_htt_rx_h_filter(ar
, &amsdu
, status
, NULL
);
3140 ath10k_htt_rx_h_mpdu(ar
, &amsdu
, status
, false, NULL
,
3141 NULL
, peer_id
, frag
);
3142 ath10k_htt_rx_h_enqueue(ar
, &amsdu
, status
);
3147 /* Should not happen. */
3148 ath10k_warn(ar
, "failed to extract amsdu: %d\n", ret
);
3149 htt
->rx_confused
= true;
3150 __skb_queue_purge(&list
);
3157 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k
*ar
,
3158 const __le32
*resp_ids
,
3164 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch confirm num_resp_ids %d\n",
3167 for (i
= 0; i
< num_resp_ids
; i
++) {
3168 resp_id
= le32_to_cpu(resp_ids
[i
]);
3170 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch confirm resp_id %u\n",
3173 /* TODO: free resp_id */
3177 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k
*ar
, struct sk_buff
*skb
)
3179 struct ieee80211_hw
*hw
= ar
->hw
;
3180 struct ieee80211_txq
*txq
;
3181 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
3182 struct htt_tx_fetch_record
*record
;
3184 size_t max_num_bytes
;
3185 size_t max_num_msdus
;
3188 const __le32
*resp_ids
;
3197 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch ind\n");
3199 len
= sizeof(resp
->hdr
) + sizeof(resp
->tx_fetch_ind
);
3200 if (unlikely(skb
->len
< len
)) {
3201 ath10k_warn(ar
, "received corrupted tx_fetch_ind event: buffer too short\n");
3205 num_records
= le16_to_cpu(resp
->tx_fetch_ind
.num_records
);
3206 num_resp_ids
= le16_to_cpu(resp
->tx_fetch_ind
.num_resp_ids
);
3208 len
+= sizeof(resp
->tx_fetch_ind
.records
[0]) * num_records
;
3209 len
+= sizeof(resp
->tx_fetch_ind
.resp_ids
[0]) * num_resp_ids
;
3211 if (unlikely(skb
->len
< len
)) {
3212 ath10k_warn(ar
, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3216 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
3217 num_records
, num_resp_ids
,
3218 le16_to_cpu(resp
->tx_fetch_ind
.fetch_seq_num
));
3220 if (!ar
->htt
.tx_q_state
.enabled
) {
3221 ath10k_warn(ar
, "received unexpected tx_fetch_ind event: not enabled\n");
3225 if (ar
->htt
.tx_q_state
.mode
== HTT_TX_MODE_SWITCH_PUSH
) {
3226 ath10k_warn(ar
, "received unexpected tx_fetch_ind event: in push mode\n");
3232 for (i
= 0; i
< num_records
; i
++) {
3233 record
= &resp
->tx_fetch_ind
.records
[i
];
3234 peer_id
= MS(le16_to_cpu(record
->info
),
3235 HTT_TX_FETCH_RECORD_INFO_PEER_ID
);
3236 tid
= MS(le16_to_cpu(record
->info
),
3237 HTT_TX_FETCH_RECORD_INFO_TID
);
3238 max_num_msdus
= le16_to_cpu(record
->num_msdus
);
3239 max_num_bytes
= le32_to_cpu(record
->num_bytes
);
3241 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
3242 i
, peer_id
, tid
, max_num_msdus
, max_num_bytes
);
3244 if (unlikely(peer_id
>= ar
->htt
.tx_q_state
.num_peers
) ||
3245 unlikely(tid
>= ar
->htt
.tx_q_state
.num_tids
)) {
3246 ath10k_warn(ar
, "received out of range peer_id %hu tid %hhu\n",
3251 spin_lock_bh(&ar
->data_lock
);
3252 txq
= ath10k_mac_txq_lookup(ar
, peer_id
, tid
);
3253 spin_unlock_bh(&ar
->data_lock
);
3255 /* It is okay to release the lock and use txq because RCU read
3259 if (unlikely(!txq
)) {
3260 ath10k_warn(ar
, "failed to lookup txq for peer_id %hu tid %hhu\n",
3268 ieee80211_txq_schedule_start(hw
, txq
->ac
);
3269 may_tx
= ieee80211_txq_may_transmit(hw
, txq
);
3270 while (num_msdus
< max_num_msdus
&&
3271 num_bytes
< max_num_bytes
) {
3275 ret
= ath10k_mac_tx_push_txq(hw
, txq
);
3282 ieee80211_return_txq(hw
, txq
, false);
3283 ieee80211_txq_schedule_end(hw
, txq
->ac
);
3285 record
->num_msdus
= cpu_to_le16(num_msdus
);
3286 record
->num_bytes
= cpu_to_le32(num_bytes
);
3288 ath10k_htt_tx_txq_recalc(hw
, txq
);
3293 resp_ids
= ath10k_htt_get_tx_fetch_ind_resp_ids(&resp
->tx_fetch_ind
);
3294 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar
, resp_ids
, num_resp_ids
);
3296 ret
= ath10k_htt_tx_fetch_resp(ar
,
3297 resp
->tx_fetch_ind
.token
,
3298 resp
->tx_fetch_ind
.fetch_seq_num
,
3299 resp
->tx_fetch_ind
.records
,
3301 if (unlikely(ret
)) {
3302 ath10k_warn(ar
, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3303 le32_to_cpu(resp
->tx_fetch_ind
.token
), ret
);
3304 /* FIXME: request fw restart */
3307 ath10k_htt_tx_txq_sync(ar
);
3310 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k
*ar
,
3311 struct sk_buff
*skb
)
3313 const struct htt_resp
*resp
= (void *)skb
->data
;
3317 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx fetch confirm\n");
3319 len
= sizeof(resp
->hdr
) + sizeof(resp
->tx_fetch_confirm
);
3320 if (unlikely(skb
->len
< len
)) {
3321 ath10k_warn(ar
, "received corrupted tx_fetch_confirm event: buffer too short\n");
3325 num_resp_ids
= le16_to_cpu(resp
->tx_fetch_confirm
.num_resp_ids
);
3326 len
+= sizeof(resp
->tx_fetch_confirm
.resp_ids
[0]) * num_resp_ids
;
3328 if (unlikely(skb
->len
< len
)) {
3329 ath10k_warn(ar
, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3333 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar
,
3334 resp
->tx_fetch_confirm
.resp_ids
,
3338 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k
*ar
,
3339 struct sk_buff
*skb
)
3341 const struct htt_resp
*resp
= (void *)skb
->data
;
3342 const struct htt_tx_mode_switch_record
*record
;
3343 struct ieee80211_txq
*txq
;
3344 struct ath10k_txq
*artxq
;
3347 enum htt_tx_mode_switch_mode mode
;
3356 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx tx mode switch ind\n");
3358 len
= sizeof(resp
->hdr
) + sizeof(resp
->tx_mode_switch_ind
);
3359 if (unlikely(skb
->len
< len
)) {
3360 ath10k_warn(ar
, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3364 info0
= le16_to_cpu(resp
->tx_mode_switch_ind
.info0
);
3365 info1
= le16_to_cpu(resp
->tx_mode_switch_ind
.info1
);
3367 enable
= !!(info0
& HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE
);
3368 num_records
= MS(info0
, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD
);
3369 mode
= MS(info1
, HTT_TX_MODE_SWITCH_IND_INFO1_MODE
);
3370 threshold
= MS(info1
, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD
);
3372 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3373 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
3374 info0
, info1
, enable
, num_records
, mode
, threshold
);
3376 len
+= sizeof(resp
->tx_mode_switch_ind
.records
[0]) * num_records
;
3378 if (unlikely(skb
->len
< len
)) {
3379 ath10k_warn(ar
, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3384 case HTT_TX_MODE_SWITCH_PUSH
:
3385 case HTT_TX_MODE_SWITCH_PUSH_PULL
:
3388 ath10k_warn(ar
, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3396 ar
->htt
.tx_q_state
.enabled
= enable
;
3397 ar
->htt
.tx_q_state
.mode
= mode
;
3398 ar
->htt
.tx_q_state
.num_push_allowed
= threshold
;
3402 for (i
= 0; i
< num_records
; i
++) {
3403 record
= &resp
->tx_mode_switch_ind
.records
[i
];
3404 info0
= le16_to_cpu(record
->info0
);
3405 peer_id
= MS(info0
, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID
);
3406 tid
= MS(info0
, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID
);
3408 if (unlikely(peer_id
>= ar
->htt
.tx_q_state
.num_peers
) ||
3409 unlikely(tid
>= ar
->htt
.tx_q_state
.num_tids
)) {
3410 ath10k_warn(ar
, "received out of range peer_id %hu tid %hhu\n",
3415 spin_lock_bh(&ar
->data_lock
);
3416 txq
= ath10k_mac_txq_lookup(ar
, peer_id
, tid
);
3417 spin_unlock_bh(&ar
->data_lock
);
3419 /* It is okay to release the lock and use txq because RCU read
3423 if (unlikely(!txq
)) {
3424 ath10k_warn(ar
, "failed to lookup txq for peer_id %hu tid %hhu\n",
3429 spin_lock_bh(&ar
->htt
.tx_lock
);
3430 artxq
= (void *)txq
->drv_priv
;
3431 artxq
->num_push_allowed
= le16_to_cpu(record
->num_max_msdus
);
3432 spin_unlock_bh(&ar
->htt
.tx_lock
);
3437 ath10k_mac_tx_push_pending(ar
);
3440 void ath10k_htt_htc_t2h_msg_handler(struct ath10k
*ar
, struct sk_buff
*skb
)
3444 release
= ath10k_htt_t2h_msg_handler(ar
, skb
);
3446 /* Free the indication buffer */
3448 dev_kfree_skb_any(skb
);
3451 static inline s8
ath10k_get_legacy_rate_idx(struct ath10k
*ar
, u8 rate
)
3453 static const u8 legacy_rates
[] = {1, 2, 5, 11, 6, 9, 12,
3454 18, 24, 36, 48, 54};
3457 for (i
= 0; i
< ARRAY_SIZE(legacy_rates
); i
++) {
3458 if (rate
== legacy_rates
[i
])
3462 ath10k_warn(ar
, "Invalid legacy rate %hhd peer stats", rate
);
3467 ath10k_accumulate_per_peer_tx_stats(struct ath10k
*ar
,
3468 struct ath10k_sta
*arsta
,
3469 struct ath10k_per_peer_tx_stats
*pstats
,
3472 struct rate_info
*txrate
= &arsta
->txrate
;
3473 struct ath10k_htt_tx_stats
*tx_stats
;
3474 int idx
, ht_idx
, gi
, mcs
, bw
, nss
;
3475 unsigned long flags
;
3477 if (!arsta
->tx_stats
)
3480 tx_stats
= arsta
->tx_stats
;
3481 flags
= txrate
->flags
;
3482 gi
= test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT
, &flags
);
3483 mcs
= ATH10K_HW_MCS_RATE(pstats
->ratecode
);
3486 ht_idx
= mcs
+ (nss
- 1) * 8;
3487 idx
= mcs
* 8 + 8 * 10 * (nss
- 1);
3490 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
3492 if (txrate
->flags
& RATE_INFO_FLAGS_VHT_MCS
) {
3493 STATS_OP_FMT(SUCC
).vht
[0][mcs
] += pstats
->succ_bytes
;
3494 STATS_OP_FMT(SUCC
).vht
[1][mcs
] += pstats
->succ_pkts
;
3495 STATS_OP_FMT(FAIL
).vht
[0][mcs
] += pstats
->failed_bytes
;
3496 STATS_OP_FMT(FAIL
).vht
[1][mcs
] += pstats
->failed_pkts
;
3497 STATS_OP_FMT(RETRY
).vht
[0][mcs
] += pstats
->retry_bytes
;
3498 STATS_OP_FMT(RETRY
).vht
[1][mcs
] += pstats
->retry_pkts
;
3499 } else if (txrate
->flags
& RATE_INFO_FLAGS_MCS
) {
3500 STATS_OP_FMT(SUCC
).ht
[0][ht_idx
] += pstats
->succ_bytes
;
3501 STATS_OP_FMT(SUCC
).ht
[1][ht_idx
] += pstats
->succ_pkts
;
3502 STATS_OP_FMT(FAIL
).ht
[0][ht_idx
] += pstats
->failed_bytes
;
3503 STATS_OP_FMT(FAIL
).ht
[1][ht_idx
] += pstats
->failed_pkts
;
3504 STATS_OP_FMT(RETRY
).ht
[0][ht_idx
] += pstats
->retry_bytes
;
3505 STATS_OP_FMT(RETRY
).ht
[1][ht_idx
] += pstats
->retry_pkts
;
3507 mcs
= legacy_rate_idx
;
3509 STATS_OP_FMT(SUCC
).legacy
[0][mcs
] += pstats
->succ_bytes
;
3510 STATS_OP_FMT(SUCC
).legacy
[1][mcs
] += pstats
->succ_pkts
;
3511 STATS_OP_FMT(FAIL
).legacy
[0][mcs
] += pstats
->failed_bytes
;
3512 STATS_OP_FMT(FAIL
).legacy
[1][mcs
] += pstats
->failed_pkts
;
3513 STATS_OP_FMT(RETRY
).legacy
[0][mcs
] += pstats
->retry_bytes
;
3514 STATS_OP_FMT(RETRY
).legacy
[1][mcs
] += pstats
->retry_pkts
;
3517 if (ATH10K_HW_AMPDU(pstats
->flags
)) {
3518 tx_stats
->ba_fails
+= ATH10K_HW_BA_FAIL(pstats
->flags
);
3520 if (txrate
->flags
& RATE_INFO_FLAGS_MCS
) {
3521 STATS_OP_FMT(AMPDU
).ht
[0][ht_idx
] +=
3522 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3523 STATS_OP_FMT(AMPDU
).ht
[1][ht_idx
] +=
3524 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3526 STATS_OP_FMT(AMPDU
).vht
[0][mcs
] +=
3527 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3528 STATS_OP_FMT(AMPDU
).vht
[1][mcs
] +=
3529 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3531 STATS_OP_FMT(AMPDU
).bw
[0][bw
] +=
3532 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3533 STATS_OP_FMT(AMPDU
).nss
[0][nss
- 1] +=
3534 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3535 STATS_OP_FMT(AMPDU
).gi
[0][gi
] +=
3536 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3537 STATS_OP_FMT(AMPDU
).rate_table
[0][idx
] +=
3538 pstats
->succ_bytes
+ pstats
->retry_bytes
;
3539 STATS_OP_FMT(AMPDU
).bw
[1][bw
] +=
3540 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3541 STATS_OP_FMT(AMPDU
).nss
[1][nss
- 1] +=
3542 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3543 STATS_OP_FMT(AMPDU
).gi
[1][gi
] +=
3544 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3545 STATS_OP_FMT(AMPDU
).rate_table
[1][idx
] +=
3546 pstats
->succ_pkts
+ pstats
->retry_pkts
;
3548 tx_stats
->ack_fails
+=
3549 ATH10K_HW_BA_FAIL(pstats
->flags
);
3552 STATS_OP_FMT(SUCC
).bw
[0][bw
] += pstats
->succ_bytes
;
3553 STATS_OP_FMT(SUCC
).nss
[0][nss
- 1] += pstats
->succ_bytes
;
3554 STATS_OP_FMT(SUCC
).gi
[0][gi
] += pstats
->succ_bytes
;
3556 STATS_OP_FMT(SUCC
).bw
[1][bw
] += pstats
->succ_pkts
;
3557 STATS_OP_FMT(SUCC
).nss
[1][nss
- 1] += pstats
->succ_pkts
;
3558 STATS_OP_FMT(SUCC
).gi
[1][gi
] += pstats
->succ_pkts
;
3560 STATS_OP_FMT(FAIL
).bw
[0][bw
] += pstats
->failed_bytes
;
3561 STATS_OP_FMT(FAIL
).nss
[0][nss
- 1] += pstats
->failed_bytes
;
3562 STATS_OP_FMT(FAIL
).gi
[0][gi
] += pstats
->failed_bytes
;
3564 STATS_OP_FMT(FAIL
).bw
[1][bw
] += pstats
->failed_pkts
;
3565 STATS_OP_FMT(FAIL
).nss
[1][nss
- 1] += pstats
->failed_pkts
;
3566 STATS_OP_FMT(FAIL
).gi
[1][gi
] += pstats
->failed_pkts
;
3568 STATS_OP_FMT(RETRY
).bw
[0][bw
] += pstats
->retry_bytes
;
3569 STATS_OP_FMT(RETRY
).nss
[0][nss
- 1] += pstats
->retry_bytes
;
3570 STATS_OP_FMT(RETRY
).gi
[0][gi
] += pstats
->retry_bytes
;
3572 STATS_OP_FMT(RETRY
).bw
[1][bw
] += pstats
->retry_pkts
;
3573 STATS_OP_FMT(RETRY
).nss
[1][nss
- 1] += pstats
->retry_pkts
;
3574 STATS_OP_FMT(RETRY
).gi
[1][gi
] += pstats
->retry_pkts
;
3576 if (txrate
->flags
>= RATE_INFO_FLAGS_MCS
) {
3577 STATS_OP_FMT(SUCC
).rate_table
[0][idx
] += pstats
->succ_bytes
;
3578 STATS_OP_FMT(SUCC
).rate_table
[1][idx
] += pstats
->succ_pkts
;
3579 STATS_OP_FMT(FAIL
).rate_table
[0][idx
] += pstats
->failed_bytes
;
3580 STATS_OP_FMT(FAIL
).rate_table
[1][idx
] += pstats
->failed_pkts
;
3581 STATS_OP_FMT(RETRY
).rate_table
[0][idx
] += pstats
->retry_bytes
;
3582 STATS_OP_FMT(RETRY
).rate_table
[1][idx
] += pstats
->retry_pkts
;
3585 tx_stats
->tx_duration
+= pstats
->duration
;
3589 ath10k_update_per_peer_tx_stats(struct ath10k
*ar
,
3590 struct ieee80211_sta
*sta
,
3591 struct ath10k_per_peer_tx_stats
*peer_stats
)
3593 struct ath10k_sta
*arsta
= (struct ath10k_sta
*)sta
->drv_priv
;
3594 struct ieee80211_chanctx_conf
*conf
= NULL
;
3597 bool skip_auto_rate
;
3598 struct rate_info txrate
;
3600 lockdep_assert_held(&ar
->data_lock
);
3602 txrate
.flags
= ATH10K_HW_PREAMBLE(peer_stats
->ratecode
);
3603 txrate
.bw
= ATH10K_HW_BW(peer_stats
->flags
);
3604 txrate
.nss
= ATH10K_HW_NSS(peer_stats
->ratecode
);
3605 txrate
.mcs
= ATH10K_HW_MCS_RATE(peer_stats
->ratecode
);
3606 sgi
= ATH10K_HW_GI(peer_stats
->flags
);
3607 skip_auto_rate
= ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats
->flags
);
3609 /* Firmware's rate control skips broadcast/management frames,
3610 * if host has configure fixed rates and in some other special cases.
3615 if (txrate
.flags
== WMI_RATE_PREAMBLE_VHT
&& txrate
.mcs
> 9) {
3616 ath10k_warn(ar
, "Invalid VHT mcs %hhd peer stats", txrate
.mcs
);
3620 if (txrate
.flags
== WMI_RATE_PREAMBLE_HT
&&
3621 (txrate
.mcs
> 7 || txrate
.nss
< 1)) {
3622 ath10k_warn(ar
, "Invalid HT mcs %hhd nss %hhd peer stats",
3623 txrate
.mcs
, txrate
.nss
);
3627 memset(&arsta
->txrate
, 0, sizeof(arsta
->txrate
));
3628 memset(&arsta
->tx_info
.status
, 0, sizeof(arsta
->tx_info
.status
));
3629 if (txrate
.flags
== WMI_RATE_PREAMBLE_CCK
||
3630 txrate
.flags
== WMI_RATE_PREAMBLE_OFDM
) {
3631 rate
= ATH10K_HW_LEGACY_RATE(peer_stats
->ratecode
);
3632 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3633 if (rate
== 6 && txrate
.flags
== WMI_RATE_PREAMBLE_CCK
)
3635 rate_idx
= ath10k_get_legacy_rate_idx(ar
, rate
);
3638 arsta
->txrate
.legacy
= rate
;
3639 } else if (txrate
.flags
== WMI_RATE_PREAMBLE_HT
) {
3640 arsta
->txrate
.flags
= RATE_INFO_FLAGS_MCS
;
3641 arsta
->txrate
.mcs
= txrate
.mcs
+ 8 * (txrate
.nss
- 1);
3643 arsta
->txrate
.flags
= RATE_INFO_FLAGS_VHT_MCS
;
3644 arsta
->txrate
.mcs
= txrate
.mcs
;
3647 switch (txrate
.flags
) {
3648 case WMI_RATE_PREAMBLE_OFDM
:
3649 if (arsta
->arvif
&& arsta
->arvif
->vif
)
3650 conf
= rcu_dereference(arsta
->arvif
->vif
->chanctx_conf
);
3651 if (conf
&& conf
->def
.chan
->band
== NL80211_BAND_5GHZ
)
3652 arsta
->tx_info
.status
.rates
[0].idx
= rate_idx
- 4;
3654 case WMI_RATE_PREAMBLE_CCK
:
3655 arsta
->tx_info
.status
.rates
[0].idx
= rate_idx
;
3657 arsta
->tx_info
.status
.rates
[0].flags
|=
3658 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE
|
3659 IEEE80211_TX_RC_SHORT_GI
);
3661 case WMI_RATE_PREAMBLE_HT
:
3662 arsta
->tx_info
.status
.rates
[0].idx
=
3663 txrate
.mcs
+ ((txrate
.nss
- 1) * 8);
3665 arsta
->tx_info
.status
.rates
[0].flags
|=
3666 IEEE80211_TX_RC_SHORT_GI
;
3667 arsta
->tx_info
.status
.rates
[0].flags
|= IEEE80211_TX_RC_MCS
;
3669 case WMI_RATE_PREAMBLE_VHT
:
3670 ieee80211_rate_set_vht(&arsta
->tx_info
.status
.rates
[0],
3671 txrate
.mcs
, txrate
.nss
);
3673 arsta
->tx_info
.status
.rates
[0].flags
|=
3674 IEEE80211_TX_RC_SHORT_GI
;
3675 arsta
->tx_info
.status
.rates
[0].flags
|= IEEE80211_TX_RC_VHT_MCS
;
3679 arsta
->txrate
.nss
= txrate
.nss
;
3680 arsta
->txrate
.bw
= ath10k_bw_to_mac80211_bw(txrate
.bw
);
3681 arsta
->last_tx_bitrate
= cfg80211_calculate_bitrate(&arsta
->txrate
);
3683 arsta
->txrate
.flags
|= RATE_INFO_FLAGS_SHORT_GI
;
3685 switch (arsta
->txrate
.bw
) {
3686 case RATE_INFO_BW_40
:
3687 arsta
->tx_info
.status
.rates
[0].flags
|=
3688 IEEE80211_TX_RC_40_MHZ_WIDTH
;
3690 case RATE_INFO_BW_80
:
3691 arsta
->tx_info
.status
.rates
[0].flags
|=
3692 IEEE80211_TX_RC_80_MHZ_WIDTH
;
3696 if (peer_stats
->succ_pkts
) {
3697 arsta
->tx_info
.flags
= IEEE80211_TX_STAT_ACK
;
3698 arsta
->tx_info
.status
.rates
[0].count
= 1;
3699 ieee80211_tx_rate_update(ar
->hw
, sta
, &arsta
->tx_info
);
3702 if (ar
->htt
.disable_tx_comp
) {
3703 arsta
->tx_failed
+= peer_stats
->failed_pkts
;
3704 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "tx failed %d\n",
3708 arsta
->tx_retries
+= peer_stats
->retry_pkts
;
3709 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt tx retries %d", arsta
->tx_retries
);
3711 if (ath10k_debug_is_extd_tx_stats_enabled(ar
))
3712 ath10k_accumulate_per_peer_tx_stats(ar
, arsta
, peer_stats
,
3716 static void ath10k_htt_fetch_peer_stats(struct ath10k
*ar
,
3717 struct sk_buff
*skb
)
3719 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
3720 struct ath10k_per_peer_tx_stats
*p_tx_stats
= &ar
->peer_tx_stats
;
3721 struct htt_per_peer_tx_stats_ind
*tx_stats
;
3722 struct ieee80211_sta
*sta
;
3723 struct ath10k_peer
*peer
;
3725 u8 ppdu_len
, num_ppdu
;
3727 num_ppdu
= resp
->peer_tx_stats
.num_ppdu
;
3728 ppdu_len
= resp
->peer_tx_stats
.ppdu_len
* sizeof(__le32
);
3730 if (skb
->len
< sizeof(struct htt_resp_hdr
) + num_ppdu
* ppdu_len
) {
3731 ath10k_warn(ar
, "Invalid peer stats buf length %d\n", skb
->len
);
3735 tx_stats
= (struct htt_per_peer_tx_stats_ind
*)
3736 (resp
->peer_tx_stats
.payload
);
3737 peer_id
= __le16_to_cpu(tx_stats
->peer_id
);
3740 spin_lock_bh(&ar
->data_lock
);
3741 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
3742 if (!peer
|| !peer
->sta
) {
3743 ath10k_warn(ar
, "Invalid peer id %d peer stats buffer\n",
3749 for (i
= 0; i
< num_ppdu
; i
++) {
3750 tx_stats
= (struct htt_per_peer_tx_stats_ind
*)
3751 (resp
->peer_tx_stats
.payload
+ i
* ppdu_len
);
3753 p_tx_stats
->succ_bytes
= __le32_to_cpu(tx_stats
->succ_bytes
);
3754 p_tx_stats
->retry_bytes
= __le32_to_cpu(tx_stats
->retry_bytes
);
3755 p_tx_stats
->failed_bytes
=
3756 __le32_to_cpu(tx_stats
->failed_bytes
);
3757 p_tx_stats
->ratecode
= tx_stats
->ratecode
;
3758 p_tx_stats
->flags
= tx_stats
->flags
;
3759 p_tx_stats
->succ_pkts
= __le16_to_cpu(tx_stats
->succ_pkts
);
3760 p_tx_stats
->retry_pkts
= __le16_to_cpu(tx_stats
->retry_pkts
);
3761 p_tx_stats
->failed_pkts
= __le16_to_cpu(tx_stats
->failed_pkts
);
3762 p_tx_stats
->duration
= __le16_to_cpu(tx_stats
->tx_duration
);
3764 ath10k_update_per_peer_tx_stats(ar
, sta
, p_tx_stats
);
3768 spin_unlock_bh(&ar
->data_lock
);
3772 static void ath10k_fetch_10_2_tx_stats(struct ath10k
*ar
, u8
*data
)
3774 struct ath10k_pktlog_hdr
*hdr
= (struct ath10k_pktlog_hdr
*)data
;
3775 struct ath10k_per_peer_tx_stats
*p_tx_stats
= &ar
->peer_tx_stats
;
3776 struct ath10k_10_2_peer_tx_stats
*tx_stats
;
3777 struct ieee80211_sta
*sta
;
3778 struct ath10k_peer
*peer
;
3779 u16 log_type
= __le16_to_cpu(hdr
->log_type
);
3782 if (log_type
!= ATH_PKTLOG_TYPE_TX_STAT
)
3785 tx_stats
= (struct ath10k_10_2_peer_tx_stats
*)((hdr
->payload
) +
3786 ATH10K_10_2_TX_STATS_OFFSET
);
3788 if (!tx_stats
->tx_ppdu_cnt
)
3791 peer_id
= tx_stats
->peer_id
;
3794 spin_lock_bh(&ar
->data_lock
);
3795 peer
= ath10k_peer_find_by_id(ar
, peer_id
);
3796 if (!peer
|| !peer
->sta
) {
3797 ath10k_warn(ar
, "Invalid peer id %d in peer stats buffer\n",
3803 for (i
= 0; i
< tx_stats
->tx_ppdu_cnt
; i
++) {
3804 p_tx_stats
->succ_bytes
=
3805 __le16_to_cpu(tx_stats
->success_bytes
[i
]);
3806 p_tx_stats
->retry_bytes
=
3807 __le16_to_cpu(tx_stats
->retry_bytes
[i
]);
3808 p_tx_stats
->failed_bytes
=
3809 __le16_to_cpu(tx_stats
->failed_bytes
[i
]);
3810 p_tx_stats
->ratecode
= tx_stats
->ratecode
[i
];
3811 p_tx_stats
->flags
= tx_stats
->flags
[i
];
3812 p_tx_stats
->succ_pkts
= tx_stats
->success_pkts
[i
];
3813 p_tx_stats
->retry_pkts
= tx_stats
->retry_pkts
[i
];
3814 p_tx_stats
->failed_pkts
= tx_stats
->failed_pkts
[i
];
3816 ath10k_update_per_peer_tx_stats(ar
, sta
, p_tx_stats
);
3818 spin_unlock_bh(&ar
->data_lock
);
3824 spin_unlock_bh(&ar
->data_lock
);
3828 static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type
)
3831 case HTT_SECURITY_TKIP
:
3832 case HTT_SECURITY_TKIP_NOMIC
:
3833 case HTT_SECURITY_AES_CCMP
:
3840 static void ath10k_htt_rx_sec_ind_handler(struct ath10k
*ar
,
3841 struct htt_security_indication
*ev
)
3843 enum htt_txrx_sec_cast_type sec_index
;
3844 enum htt_security_types sec_type
;
3845 struct ath10k_peer
*peer
;
3847 spin_lock_bh(&ar
->data_lock
);
3849 peer
= ath10k_peer_find_by_id(ar
, __le16_to_cpu(ev
->peer_id
));
3851 ath10k_warn(ar
, "failed to find peer id %d for security indication",
3852 __le16_to_cpu(ev
->peer_id
));
3856 sec_type
= MS(ev
->flags
, HTT_SECURITY_TYPE
);
3858 if (ev
->flags
& HTT_SECURITY_IS_UNICAST
)
3859 sec_index
= HTT_TXRX_SEC_UCAST
;
3861 sec_index
= HTT_TXRX_SEC_MCAST
;
3863 peer
->rx_pn
[sec_index
].sec_type
= sec_type
;
3864 peer
->rx_pn
[sec_index
].pn_len
= ath10k_htt_rx_pn_len(sec_type
);
3866 memset(peer
->tids_last_pn_valid
, 0, sizeof(peer
->tids_last_pn_valid
));
3867 memset(peer
->tids_last_pn
, 0, sizeof(peer
->tids_last_pn
));
3870 spin_unlock_bh(&ar
->data_lock
);
3873 bool ath10k_htt_t2h_msg_handler(struct ath10k
*ar
, struct sk_buff
*skb
)
3875 struct ath10k_htt
*htt
= &ar
->htt
;
3876 struct htt_resp
*resp
= (struct htt_resp
*)skb
->data
;
3877 enum htt_t2h_msg_type type
;
3879 /* confirm alignment */
3880 if (!IS_ALIGNED((unsigned long)skb
->data
, 4))
3881 ath10k_warn(ar
, "unaligned htt message, expect trouble\n");
3883 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx, msg_type: 0x%0X\n",
3884 resp
->hdr
.msg_type
);
3886 if (resp
->hdr
.msg_type
>= ar
->htt
.t2h_msg_types_max
) {
3887 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
3888 resp
->hdr
.msg_type
, ar
->htt
.t2h_msg_types_max
);
3891 type
= ar
->htt
.t2h_msg_types
[resp
->hdr
.msg_type
];
3894 case HTT_T2H_MSG_TYPE_VERSION_CONF
: {
3895 htt
->target_version_major
= resp
->ver_resp
.major
;
3896 htt
->target_version_minor
= resp
->ver_resp
.minor
;
3897 complete(&htt
->target_version_received
);
3900 case HTT_T2H_MSG_TYPE_RX_IND
:
3901 if (ar
->bus_param
.dev_type
!= ATH10K_DEV_TYPE_HL
) {
3902 ath10k_htt_rx_proc_rx_ind_ll(htt
, &resp
->rx_ind
);
3904 skb_queue_tail(&htt
->rx_indication_head
, skb
);
3908 case HTT_T2H_MSG_TYPE_PEER_MAP
: {
3909 struct htt_peer_map_event ev
= {
3910 .vdev_id
= resp
->peer_map
.vdev_id
,
3911 .peer_id
= __le16_to_cpu(resp
->peer_map
.peer_id
),
3913 memcpy(ev
.addr
, resp
->peer_map
.addr
, sizeof(ev
.addr
));
3914 ath10k_peer_map_event(htt
, &ev
);
3917 case HTT_T2H_MSG_TYPE_PEER_UNMAP
: {
3918 struct htt_peer_unmap_event ev
= {
3919 .peer_id
= __le16_to_cpu(resp
->peer_unmap
.peer_id
),
3921 ath10k_peer_unmap_event(htt
, &ev
);
3924 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION
: {
3925 struct htt_tx_done tx_done
= {};
3926 struct ath10k_htt
*htt
= &ar
->htt
;
3927 struct ath10k_htc
*htc
= &ar
->htc
;
3928 struct ath10k_htc_ep
*ep
= &ar
->htc
.endpoint
[htt
->eid
];
3929 int status
= __le32_to_cpu(resp
->mgmt_tx_completion
.status
);
3930 int info
= __le32_to_cpu(resp
->mgmt_tx_completion
.info
);
3932 tx_done
.msdu_id
= __le32_to_cpu(resp
->mgmt_tx_completion
.desc_id
);
3935 case HTT_MGMT_TX_STATUS_OK
:
3936 tx_done
.status
= HTT_TX_COMPL_STATE_ACK
;
3937 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS
,
3939 (resp
->mgmt_tx_completion
.flags
&
3940 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI
)) {
3942 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK
,
3946 case HTT_MGMT_TX_STATUS_RETRY
:
3947 tx_done
.status
= HTT_TX_COMPL_STATE_NOACK
;
3949 case HTT_MGMT_TX_STATUS_DROP
:
3950 tx_done
.status
= HTT_TX_COMPL_STATE_DISCARD
;
3954 if (htt
->disable_tx_comp
) {
3955 spin_lock_bh(&htc
->tx_lock
);
3957 spin_unlock_bh(&htc
->tx_lock
);
3960 status
= ath10k_txrx_tx_unref(htt
, &tx_done
);
3962 spin_lock_bh(&htt
->tx_lock
);
3963 ath10k_htt_tx_mgmt_dec_pending(htt
);
3964 spin_unlock_bh(&htt
->tx_lock
);
3968 case HTT_T2H_MSG_TYPE_TX_COMPL_IND
:
3969 ath10k_htt_rx_tx_compl_ind(htt
->ar
, skb
);
3971 case HTT_T2H_MSG_TYPE_SEC_IND
: {
3972 struct ath10k
*ar
= htt
->ar
;
3973 struct htt_security_indication
*ev
= &resp
->security_indication
;
3975 ath10k_htt_rx_sec_ind_handler(ar
, ev
);
3976 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
3977 "sec ind peer_id %d unicast %d type %d\n",
3978 __le16_to_cpu(ev
->peer_id
),
3979 !!(ev
->flags
& HTT_SECURITY_IS_UNICAST
),
3980 MS(ev
->flags
, HTT_SECURITY_TYPE
));
3981 complete(&ar
->install_key_done
);
3984 case HTT_T2H_MSG_TYPE_RX_FRAG_IND
: {
3985 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt event: ",
3986 skb
->data
, skb
->len
);
3987 atomic_inc(&htt
->num_mpdus_ready
);
3989 return ath10k_htt_rx_proc_rx_frag_ind(htt
,
3993 case HTT_T2H_MSG_TYPE_TEST
:
3995 case HTT_T2H_MSG_TYPE_STATS_CONF
:
3996 trace_ath10k_htt_stats(ar
, skb
->data
, skb
->len
);
3998 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND
:
3999 /* Firmware can return tx frames if it's unable to fully
4000 * process them and suspects host may be able to fix it. ath10k
4001 * sends all tx frames as already inspected so this shouldn't
4002 * happen unless fw has a bug.
4004 ath10k_warn(ar
, "received an unexpected htt tx inspect event\n");
4006 case HTT_T2H_MSG_TYPE_RX_ADDBA
:
4007 ath10k_htt_rx_addba(ar
, resp
);
4009 case HTT_T2H_MSG_TYPE_RX_DELBA
:
4010 ath10k_htt_rx_delba(ar
, resp
);
4012 case HTT_T2H_MSG_TYPE_PKTLOG
: {
4013 trace_ath10k_htt_pktlog(ar
, resp
->pktlog_msg
.payload
,
4015 offsetof(struct htt_resp
,
4016 pktlog_msg
.payload
));
4018 if (ath10k_peer_stats_enabled(ar
))
4019 ath10k_fetch_10_2_tx_stats(ar
,
4020 resp
->pktlog_msg
.payload
);
4023 case HTT_T2H_MSG_TYPE_RX_FLUSH
: {
4024 /* Ignore this event because mac80211 takes care of Rx
4025 * aggregation reordering.
4029 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND
: {
4030 skb_queue_tail(&htt
->rx_in_ord_compl_q
, skb
);
4033 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
: {
4034 struct ath10k_htt
*htt
= &ar
->htt
;
4035 struct ath10k_htc
*htc
= &ar
->htc
;
4036 struct ath10k_htc_ep
*ep
= &ar
->htc
.endpoint
[htt
->eid
];
4037 u32 msg_word
= __le32_to_cpu(*(__le32
*)resp
);
4038 int htt_credit_delta
;
4040 htt_credit_delta
= HTT_TX_CREDIT_DELTA_ABS_GET(msg_word
);
4041 if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word
))
4042 htt_credit_delta
= -htt_credit_delta
;
4044 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
4045 "htt credit update delta %d\n",
4048 if (htt
->disable_tx_comp
) {
4049 spin_lock_bh(&htc
->tx_lock
);
4050 ep
->tx_credits
+= htt_credit_delta
;
4051 spin_unlock_bh(&htc
->tx_lock
);
4052 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
4053 "htt credit total %d\n",
4055 ep
->ep_ops
.ep_tx_credits(htc
->ar
);
4059 case HTT_T2H_MSG_TYPE_CHAN_CHANGE
: {
4060 u32 phymode
= __le32_to_cpu(resp
->chan_change
.phymode
);
4061 u32 freq
= __le32_to_cpu(resp
->chan_change
.freq
);
4063 ar
->tgt_oper_chan
= ieee80211_get_channel(ar
->hw
->wiphy
, freq
);
4064 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
4065 "htt chan change freq %u phymode %s\n",
4066 freq
, ath10k_wmi_phymode_str(phymode
));
4069 case HTT_T2H_MSG_TYPE_AGGR_CONF
:
4071 case HTT_T2H_MSG_TYPE_TX_FETCH_IND
: {
4072 struct sk_buff
*tx_fetch_ind
= skb_copy(skb
, GFP_ATOMIC
);
4074 if (!tx_fetch_ind
) {
4075 ath10k_warn(ar
, "failed to copy htt tx fetch ind\n");
4078 skb_queue_tail(&htt
->tx_fetch_ind_q
, tx_fetch_ind
);
4081 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM
:
4082 ath10k_htt_rx_tx_fetch_confirm(ar
, skb
);
4084 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND
:
4085 ath10k_htt_rx_tx_mode_switch_ind(ar
, skb
);
4087 case HTT_T2H_MSG_TYPE_PEER_STATS
:
4088 ath10k_htt_fetch_peer_stats(ar
, skb
);
4090 case HTT_T2H_MSG_TYPE_EN_STATS
:
4092 ath10k_warn(ar
, "htt event (%d) not handled\n",
4093 resp
->hdr
.msg_type
);
4094 ath10k_dbg_dump(ar
, ATH10K_DBG_HTT_DUMP
, NULL
, "htt event: ",
4095 skb
->data
, skb
->len
);
4100 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler
);
4102 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k
*ar
,
4103 struct sk_buff
*skb
)
4105 trace_ath10k_htt_pktlog(ar
, skb
->data
, skb
->len
);
4106 dev_kfree_skb_any(skb
);
4108 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler
);
4110 static int ath10k_htt_rx_deliver_msdu(struct ath10k
*ar
, int quota
, int budget
)
4112 struct sk_buff
*skb
;
4114 while (quota
< budget
) {
4115 if (skb_queue_empty(&ar
->htt
.rx_msdus_q
))
4118 skb
= skb_dequeue(&ar
->htt
.rx_msdus_q
);
4121 ath10k_process_rx(ar
, skb
);
4128 int ath10k_htt_rx_hl_indication(struct ath10k
*ar
, int budget
)
4130 struct htt_resp
*resp
;
4131 struct ath10k_htt
*htt
= &ar
->htt
;
4132 struct sk_buff
*skb
;
4136 for (quota
= 0; quota
< budget
; quota
++) {
4137 skb
= skb_dequeue(&htt
->rx_indication_head
);
4141 resp
= (struct htt_resp
*)skb
->data
;
4143 release
= ath10k_htt_rx_proc_rx_ind_hl(htt
,
4147 HTT_RX_NON_TKIP_MIC
);
4150 dev_kfree_skb_any(skb
);
4152 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "rx indication poll pending count:%d\n",
4153 skb_queue_len(&htt
->rx_indication_head
));
4157 EXPORT_SYMBOL(ath10k_htt_rx_hl_indication
);
4159 int ath10k_htt_txrx_compl_task(struct ath10k
*ar
, int budget
)
4161 struct ath10k_htt
*htt
= &ar
->htt
;
4162 struct htt_tx_done tx_done
= {};
4163 struct sk_buff_head tx_ind_q
;
4164 struct sk_buff
*skb
;
4165 unsigned long flags
;
4166 int quota
= 0, done
, ret
;
4167 bool resched_napi
= false;
4169 __skb_queue_head_init(&tx_ind_q
);
4171 /* Process pending frames before dequeuing more data
4174 quota
= ath10k_htt_rx_deliver_msdu(ar
, quota
, budget
);
4175 if (quota
== budget
) {
4176 resched_napi
= true;
4180 while ((skb
= skb_dequeue(&htt
->rx_in_ord_compl_q
))) {
4181 spin_lock_bh(&htt
->rx_ring
.lock
);
4182 ret
= ath10k_htt_rx_in_ord_ind(ar
, skb
);
4183 spin_unlock_bh(&htt
->rx_ring
.lock
);
4185 dev_kfree_skb_any(skb
);
4187 resched_napi
= true;
4192 while (atomic_read(&htt
->num_mpdus_ready
)) {
4193 ret
= ath10k_htt_rx_handle_amsdu(htt
);
4195 resched_napi
= true;
4198 atomic_dec(&htt
->num_mpdus_ready
);
4201 /* Deliver received data after processing data from hardware */
4202 quota
= ath10k_htt_rx_deliver_msdu(ar
, quota
, budget
);
4204 /* From NAPI documentation:
4205 * The napi poll() function may also process TX completions, in which
4206 * case if it processes the entire TX ring then it should count that
4207 * work as the rest of the budget.
4209 if ((quota
< budget
) && !kfifo_is_empty(&htt
->txdone_fifo
))
4212 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
4213 * From kfifo_get() documentation:
4214 * Note that with only one concurrent reader and one concurrent writer,
4215 * you don't need extra locking to use these macro.
4217 while (kfifo_get(&htt
->txdone_fifo
, &tx_done
))
4218 ath10k_txrx_tx_unref(htt
, &tx_done
);
4220 ath10k_mac_tx_push_pending(ar
);
4222 spin_lock_irqsave(&htt
->tx_fetch_ind_q
.lock
, flags
);
4223 skb_queue_splice_init(&htt
->tx_fetch_ind_q
, &tx_ind_q
);
4224 spin_unlock_irqrestore(&htt
->tx_fetch_ind_q
.lock
, flags
);
4226 while ((skb
= __skb_dequeue(&tx_ind_q
))) {
4227 ath10k_htt_rx_tx_fetch_ind(ar
, skb
);
4228 dev_kfree_skb_any(skb
);
4232 ath10k_htt_rx_msdu_buff_replenish(htt
);
4233 /* In case of rx failure or more data to read, report budget
4234 * to reschedule NAPI poll
4236 done
= resched_napi
? budget
: quota
;
4240 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task
);
4242 static const struct ath10k_htt_rx_ops htt_rx_ops_32
= {
4243 .htt_get_rx_ring_size
= ath10k_htt_get_rx_ring_size_32
,
4244 .htt_config_paddrs_ring
= ath10k_htt_config_paddrs_ring_32
,
4245 .htt_set_paddrs_ring
= ath10k_htt_set_paddrs_ring_32
,
4246 .htt_get_vaddr_ring
= ath10k_htt_get_vaddr_ring_32
,
4247 .htt_reset_paddrs_ring
= ath10k_htt_reset_paddrs_ring_32
,
4250 static const struct ath10k_htt_rx_ops htt_rx_ops_64
= {
4251 .htt_get_rx_ring_size
= ath10k_htt_get_rx_ring_size_64
,
4252 .htt_config_paddrs_ring
= ath10k_htt_config_paddrs_ring_64
,
4253 .htt_set_paddrs_ring
= ath10k_htt_set_paddrs_ring_64
,
4254 .htt_get_vaddr_ring
= ath10k_htt_get_vaddr_ring_64
,
4255 .htt_reset_paddrs_ring
= ath10k_htt_reset_paddrs_ring_64
,
4258 static const struct ath10k_htt_rx_ops htt_rx_ops_hl
= {
4259 .htt_rx_proc_rx_frag_ind
= ath10k_htt_rx_proc_rx_frag_ind_hl
,
4262 void ath10k_htt_set_rx_ops(struct ath10k_htt
*htt
)
4264 struct ath10k
*ar
= htt
->ar
;
4266 if (ar
->bus_param
.dev_type
== ATH10K_DEV_TYPE_HL
)
4267 htt
->rx_ops
= &htt_rx_ops_hl
;
4268 else if (ar
->hw_params
.target_64bit
)
4269 htt
->rx_ops
= &htt_rx_ops_64
;
4271 htt
->rx_ops
= &htt_rx_ops_32
;