drivers/net/wireless/ath/ath10k/htt_rx.c
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "core.h"
19#include "htc.h"
20#include "htt.h"
21#include "txrx.h"
22#include "debug.h"
23#include "trace.h"
24#include "mac.h"
25
26#include <linux/log2.h>
27
28#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
29#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
30
31/* when under memory pressure rx ring refill may fail and needs a retry */
32#define HTT_RX_RING_REFILL_RETRY_MS 50
33
34static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
35static void ath10k_htt_txrx_compl_task(unsigned long ptr);
36
37static struct sk_buff *
38ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
39{
40 struct ath10k_skb_rxcb *rxcb;
41
42 hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
43 if (rxcb->paddr == paddr)
44 return ATH10K_RXCB_SKB(rxcb);
45
46 WARN_ON_ONCE(1);
47 return NULL;
48}
49
50static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
51{
52 struct sk_buff *skb;
53 struct ath10k_skb_rxcb *rxcb;
54 struct hlist_node *n;
55 int i;
56
57 if (htt->rx_ring.in_ord_rx) {
58 hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
59 skb = ATH10K_RXCB_SKB(rxcb);
60 dma_unmap_single(htt->ar->dev, rxcb->paddr,
61 skb->len + skb_tailroom(skb),
62 DMA_FROM_DEVICE);
63 hash_del(&rxcb->hlist);
64 dev_kfree_skb_any(skb);
65 }
66 } else {
67 for (i = 0; i < htt->rx_ring.size; i++) {
68 skb = htt->rx_ring.netbufs_ring[i];
69 if (!skb)
70 continue;
71
72 rxcb = ATH10K_SKB_RXCB(skb);
73 dma_unmap_single(htt->ar->dev, rxcb->paddr,
74 skb->len + skb_tailroom(skb),
75 DMA_FROM_DEVICE);
76 dev_kfree_skb_any(skb);
77 }
78 }
79
80 htt->rx_ring.fill_cnt = 0;
81 hash_init(htt->rx_ring.skb_table);
82 memset(htt->rx_ring.netbufs_ring, 0,
83 htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
84}
85
86static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
87{
88 struct htt_rx_desc *rx_desc;
89 struct ath10k_skb_rxcb *rxcb;
90 struct sk_buff *skb;
91 dma_addr_t paddr;
92 int ret = 0, idx;
93
94 /* The Full Rx Reorder firmware has no way of telling the host
95 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
96 * To keep things simple make sure ring is always half empty. This
97 * guarantees there'll be no replenishment overruns possible.
98 */
99 BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
100
101 idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
102 while (num > 0) {
103 skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
104 if (!skb) {
105 ret = -ENOMEM;
106 goto fail;
107 }
108
109 if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
110 skb_pull(skb,
111 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
112 skb->data);
113
114 /* Clear rx_desc attention word before posting to Rx ring */
115 rx_desc = (struct htt_rx_desc *)skb->data;
116 rx_desc->attention.flags = __cpu_to_le32(0);
117
118 paddr = dma_map_single(htt->ar->dev, skb->data,
119 skb->len + skb_tailroom(skb),
120 DMA_FROM_DEVICE);
121
122 if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
123 dev_kfree_skb_any(skb);
124 ret = -ENOMEM;
125 goto fail;
126 }
127
128 rxcb = ATH10K_SKB_RXCB(skb);
129 rxcb->paddr = paddr;
130 htt->rx_ring.netbufs_ring[idx] = skb;
131 htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
132 htt->rx_ring.fill_cnt++;
133
134 if (htt->rx_ring.in_ord_rx) {
135 hash_add(htt->rx_ring.skb_table,
136 &ATH10K_SKB_RXCB(skb)->hlist,
137 (u32)paddr);
138 }
139
140 num--;
141 idx++;
142 idx &= htt->rx_ring.size_mask;
143 }
144
145fail:
146 /*
147 * Make sure the rx buffer is updated before available buffer
148 * index to avoid any potential rx ring corruption.
149 */
150 mb();
151 *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
152 return ret;
153}
154
155static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
156{
157 lockdep_assert_held(&htt->rx_ring.lock);
158 return __ath10k_htt_rx_ring_fill_n(htt, num);
159}
160
161static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
162{
163 int ret, num_deficit, num_to_fill;
164
165 /* Refilling the whole RX ring buffer proves to be a bad idea. The
166 * reason is RX may take up significant amount of CPU cycles and starve
167 * other tasks, e.g. TX on an ethernet device while acting as a bridge
168 * with ath10k wlan interface. This ended up with very poor performance
169 * once the host system CPU was overwhelmed with RX on ath10k.
170 *
171 * By limiting the number of refills the replenishing occurs
172 * progressively. This in turns makes use of the fact tasklets are
173 * processed in FIFO order. This means actual RX processing can starve
174 * out refilling. If there's not enough buffers on RX ring FW will not
175 * report RX until it is refilled with enough buffers. This
176 * automatically balances load wrt to CPU power.
177 *
178 * This probably comes at a cost of lower maximum throughput but
179 * improves the average and stability. */
180 spin_lock_bh(&htt->rx_ring.lock);
181 num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
182 num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
183 num_deficit -= num_to_fill;
184 ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
185 if (ret == -ENOMEM) {
186 /*
187 * Failed to fill it to the desired level -
188 * we'll start a timer and try again next time.
189 * As long as enough buffers are left in the ring for
190 * another A-MPDU rx, no special recovery is needed.
191 */
192 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
193 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
194 } else if (num_deficit > 0) {
195 tasklet_schedule(&htt->rx_replenish_task);
196 }
197 spin_unlock_bh(&htt->rx_ring.lock);
198}
199
200static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
201{
202 struct ath10k_htt *htt = (struct ath10k_htt *)arg;
203
204 ath10k_htt_rx_msdu_buff_replenish(htt);
205}
206
207int ath10k_htt_rx_ring_refill(struct ath10k *ar)
208{
209 struct ath10k_htt *htt = &ar->htt;
210 int ret;
211
212 spin_lock_bh(&htt->rx_ring.lock);
213 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
214 htt->rx_ring.fill_cnt));
215 spin_unlock_bh(&htt->rx_ring.lock);
216
217 if (ret)
218 ath10k_htt_rx_ring_free(htt);
219
220 return ret;
221}
222
223void ath10k_htt_rx_free(struct ath10k_htt *htt)
224{
225 del_timer_sync(&htt->rx_ring.refill_retry_timer);
226 tasklet_kill(&htt->rx_replenish_task);
227 tasklet_kill(&htt->txrx_compl_task);
228
229 skb_queue_purge(&htt->tx_compl_q);
230 skb_queue_purge(&htt->rx_compl_q);
231 skb_queue_purge(&htt->rx_in_ord_compl_q);
232 skb_queue_purge(&htt->tx_fetch_ind_q);
233
234 ath10k_htt_rx_ring_free(htt);
235
236 dma_free_coherent(htt->ar->dev,
237 (htt->rx_ring.size *
238 sizeof(htt->rx_ring.paddrs_ring)),
239 htt->rx_ring.paddrs_ring,
240 htt->rx_ring.base_paddr);
241
242 dma_free_coherent(htt->ar->dev,
243 sizeof(*htt->rx_ring.alloc_idx.vaddr),
244 htt->rx_ring.alloc_idx.vaddr,
245 htt->rx_ring.alloc_idx.paddr);
246
247 kfree(htt->rx_ring.netbufs_ring);
248}
249
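/* Pop the oldest filled buffer off the rx ring in software read order and
 * unmap it for CPU access; the caller must hold rx_ring.lock.
 */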
250static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
251{
252 struct ath10k *ar = htt->ar;
253 int idx;
254 struct sk_buff *msdu;
255
256 lockdep_assert_held(&htt->rx_ring.lock);
257
258 if (htt->rx_ring.fill_cnt == 0) {
259 ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
260 return NULL;
261 }
262
263 idx = htt->rx_ring.sw_rd_idx.msdu_payld;
264 msdu = htt->rx_ring.netbufs_ring[idx];
265 htt->rx_ring.netbufs_ring[idx] = NULL;
266 htt->rx_ring.paddrs_ring[idx] = 0;
267
268 idx++;
269 idx &= htt->rx_ring.size_mask;
270 htt->rx_ring.sw_rd_idx.msdu_payld = idx;
271 htt->rx_ring.fill_cnt--;
272
273 dma_unmap_single(htt->ar->dev,
274 ATH10K_SKB_RXCB(msdu)->paddr,
275 msdu->len + skb_tailroom(msdu),
276 DMA_FROM_DEVICE);
277 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
278 msdu->data, msdu->len + skb_tailroom(msdu));
279
280 return msdu;
281}
282
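/* Pop one MPDU's worth of MSDUs off the rx ring and queue them on the amsdu
 * list. Chained buffers carry no rx descriptor of their own and only extend
 * the preceding MSDU's payload.
 */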
283/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
284static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
285 u8 **fw_desc, int *fw_desc_len,
286 struct sk_buff_head *amsdu)
287{
288 struct ath10k *ar = htt->ar;
289 int msdu_len, msdu_chaining = 0;
290 struct sk_buff *msdu;
291 struct htt_rx_desc *rx_desc;
292
293 lockdep_assert_held(&htt->rx_ring.lock);
294
295 for (;;) {
296 int last_msdu, msdu_len_invalid, msdu_chained;
297
298 msdu = ath10k_htt_rx_netbuf_pop(htt);
299 if (!msdu) {
300 __skb_queue_purge(amsdu);
301 return -ENOENT;
302 }
303
304 __skb_queue_tail(amsdu, msdu);
305
306 rx_desc = (struct htt_rx_desc *)msdu->data;
307
308 /* FIXME: we must report msdu payload since this is what caller
309 * expects now */
310 skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
311 skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
312
313 /*
314 * Sanity check - confirm the HW is finished filling in the
315 * rx data.
316 * If the HW and SW are working correctly, then it's guaranteed
317 * that the HW's MAC DMA is done before this point in the SW.
318 * To prevent the case that we handle a stale Rx descriptor,
319 * just assert for now until we have a way to recover.
320 */
321 if (!(__le32_to_cpu(rx_desc->attention.flags)
322 & RX_ATTENTION_FLAGS_MSDU_DONE)) {
323 __skb_queue_purge(amsdu);
324 return -EIO;
325 }
326
327 /*
328 * Copy the FW rx descriptor for this MSDU from the rx
329 * indication message into the MSDU's netbuf. HL uses the
330 * same rx indication message definition as LL, and simply
331 * appends new info (fields from the HW rx desc, and the
332 * MSDU payload itself). So, the offset into the rx
333 * indication message only has to account for the standard
334 * offset of the per-MSDU FW rx desc info within the
335 * message, and how many bytes of the per-MSDU FW rx desc
336 * info have already been consumed. (And the endianness of
337 * the host, since for a big-endian host, the rx ind
338 * message contents, including the per-MSDU rx desc bytes,
339 * were byteswapped during upload.)
340 */
341 if (*fw_desc_len > 0) {
342 rx_desc->fw_desc.info0 = **fw_desc;
343 /*
344 * The target is expected to only provide the basic
345 * per-MSDU rx descriptors. Just to be sure, verify
346 * that the target has not attached extension data
347 * (e.g. LRO flow ID).
348 */
349
350 /* or more, if there's extension data */
351 (*fw_desc)++;
352 (*fw_desc_len)--;
353 } else {
354 /*
355 * When an oversized A-MSDU occurs, the FW loses some of the
356 * MSDU status - in this case the FW descriptors provided
357 * will be fewer than the actual MSDUs inside this MPDU.
358 * Mark the FW descriptors so that the frame is still
359 * delivered to the upper stack if there is no CRC error
360 * for this MPDU.
361 *
362 * FIX THIS - the FW descriptors are actually for
363 * MSDUs in the end of this A-MSDU instead of the
364 * beginning.
365 */
366 rx_desc->fw_desc.info0 = 0;
367 }
368
369 msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
370 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
371 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
372 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
373 RX_MSDU_START_INFO0_MSDU_LENGTH);
374 msdu_chained = rx_desc->frag_info.ring2_more_count;
375
376 if (msdu_len_invalid)
377 msdu_len = 0;
378
379 skb_trim(msdu, 0);
380 skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
381 msdu_len -= msdu->len;
382
383 /* Note: Chained buffers do not contain rx descriptor */
384 while (msdu_chained--) {
385 msdu = ath10k_htt_rx_netbuf_pop(htt);
386 if (!msdu) {
387 __skb_queue_purge(amsdu);
388 return -ENOENT;
389 }
390
391 __skb_queue_tail(amsdu, msdu);
392 skb_trim(msdu, 0);
393 skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
394 msdu_len -= msdu->len;
395 msdu_chaining = 1;
396 }
397
398 last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
399 RX_MSDU_END_INFO0_LAST_MSDU;
400
401 trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
402 sizeof(*rx_desc) - sizeof(u32));
403
404 if (last_msdu)
405 break;
406 }
407
408 if (skb_queue_empty(amsdu))
409 msdu_chaining = -1;
410
411 /*
412 * Don't refill the ring yet.
413 *
414 * First, the elements popped here are still in use - it is not
415 * safe to overwrite them until the matching call to
416 * mpdu_desc_list_next. Second, for efficiency it is preferable to
417 * refill the rx ring with 1 PPDU's worth of rx buffers (something
418 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
419 * (something like 3 buffers). Consequently, we'll rely on the txrx
420 * SW to tell us when it is done pulling all the PPDU's rx buffers
421 * out of the rx ring, and then refill it just once.
422 */
423
424 return msdu_chaining;
425}
426
427static void ath10k_htt_rx_replenish_task(unsigned long ptr)
428{
429 struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
430
431 ath10k_htt_rx_msdu_buff_replenish(htt);
432}
433
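/* In-order (full rx reorder) indications reference buffers by physical
 * address, so they are looked up in skb_table instead of walking the ring.
 */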
434static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
435 u32 paddr)
436{
437 struct ath10k *ar = htt->ar;
438 struct ath10k_skb_rxcb *rxcb;
439 struct sk_buff *msdu;
440
441 lockdep_assert_held(&htt->rx_ring.lock);
442
443 msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
444 if (!msdu)
445 return NULL;
446
447 rxcb = ATH10K_SKB_RXCB(msdu);
448 hash_del(&rxcb->hlist);
449 htt->rx_ring.fill_cnt--;
450
451 dma_unmap_single(htt->ar->dev, rxcb->paddr,
452 msdu->len + skb_tailroom(msdu),
453 DMA_FROM_DEVICE);
454 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
455 msdu->data, msdu->len + skb_tailroom(msdu));
456
457 return msdu;
458}
459
460static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
461 struct htt_rx_in_ord_ind *ev,
462 struct sk_buff_head *list)
463{
464 struct ath10k *ar = htt->ar;
465 struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
466 struct htt_rx_desc *rxd;
467 struct sk_buff *msdu;
468 int msdu_count;
469 bool is_offload;
470 u32 paddr;
471
472 lockdep_assert_held(&htt->rx_ring.lock);
473
474 msdu_count = __le16_to_cpu(ev->msdu_count);
475 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
476
477 while (msdu_count--) {
478 paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
479
480 msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
481 if (!msdu) {
482 __skb_queue_purge(list);
483 return -ENOENT;
484 }
485
486 __skb_queue_tail(list, msdu);
487
488 if (!is_offload) {
489 rxd = (void *)msdu->data;
490
491 trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
492
493 skb_put(msdu, sizeof(*rxd));
494 skb_pull(msdu, sizeof(*rxd));
495 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
496
497 if (!(__le32_to_cpu(rxd->attention.flags) &
498 RX_ATTENTION_FLAGS_MSDU_DONE)) {
499 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
500 return -EIO;
501 }
502 }
503
504 msdu_desc++;
505 }
506
507 return 0;
508}
509
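/* Allocate the rx ring: an array of host skbs, a DMA-coherent ring of their
 * physical addresses shared with the target, and the alloc index word the
 * host updates after each refill.
 */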
510int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
511{
512 struct ath10k *ar = htt->ar;
513 dma_addr_t paddr;
514 void *vaddr;
515 size_t size;
516 struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
517
518 htt->rx_confused = false;
519
520 /* XXX: The fill level could be changed during runtime in response to
521 * the host processing latency. Is this really worth it?
522 */
523 htt->rx_ring.size = HTT_RX_RING_SIZE;
524 htt->rx_ring.size_mask = htt->rx_ring.size - 1;
525 htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
526
527 if (!is_power_of_2(htt->rx_ring.size)) {
528 ath10k_warn(ar, "htt rx ring size is not power of 2\n");
529 return -EINVAL;
530 }
531
532 htt->rx_ring.netbufs_ring =
533 kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
534 GFP_KERNEL);
535 if (!htt->rx_ring.netbufs_ring)
536 goto err_netbuf;
537
538 size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
539
540 vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
541 if (!vaddr)
542 goto err_dma_ring;
543
544 htt->rx_ring.paddrs_ring = vaddr;
545 htt->rx_ring.base_paddr = paddr;
546
547 vaddr = dma_alloc_coherent(htt->ar->dev,
548 sizeof(*htt->rx_ring.alloc_idx.vaddr),
549 &paddr, GFP_KERNEL);
550 if (!vaddr)
551 goto err_dma_idx;
552
553 htt->rx_ring.alloc_idx.vaddr = vaddr;
554 htt->rx_ring.alloc_idx.paddr = paddr;
555 htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
556 *htt->rx_ring.alloc_idx.vaddr = 0;
557
558 /* Initialize the Rx refill retry timer */
559 setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
560
561 spin_lock_init(&htt->rx_ring.lock);
562
563 htt->rx_ring.fill_cnt = 0;
564 htt->rx_ring.sw_rd_idx.msdu_payld = 0;
565 hash_init(htt->rx_ring.skb_table);
566
567 tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
568 (unsigned long)htt);
569
570 skb_queue_head_init(&htt->tx_compl_q);
571 skb_queue_head_init(&htt->rx_compl_q);
572 skb_queue_head_init(&htt->rx_in_ord_compl_q);
573 skb_queue_head_init(&htt->tx_fetch_ind_q);
574
575 tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
576 (unsigned long)htt);
577
578 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
579 htt->rx_ring.size, htt->rx_ring.fill_level);
580 return 0;
581
582err_dma_idx:
583 dma_free_coherent(htt->ar->dev,
584 (htt->rx_ring.size *
585 sizeof(htt->rx_ring.paddrs_ring)),
586 htt->rx_ring.paddrs_ring,
587 htt->rx_ring.base_paddr);
588err_dma_ring:
589 kfree(htt->rx_ring.netbufs_ring);
590err_netbuf:
591 return -ENOMEM;
592}
593
594static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
595 enum htt_rx_mpdu_encrypt_type type)
596{
597 switch (type) {
598 case HTT_RX_MPDU_ENCRYPT_NONE:
599 return 0;
600 case HTT_RX_MPDU_ENCRYPT_WEP40:
601 case HTT_RX_MPDU_ENCRYPT_WEP104:
602 return IEEE80211_WEP_IV_LEN;
603 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
604 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
605 return IEEE80211_TKIP_IV_LEN;
606 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
607 return IEEE80211_CCMP_HDR_LEN;
608 case HTT_RX_MPDU_ENCRYPT_WEP128:
609 case HTT_RX_MPDU_ENCRYPT_WAPI:
610 break;
611 }
612
613 ath10k_warn(ar, "unsupported encryption type %d\n", type);
614 return 0;
615}
616
617#define MICHAEL_MIC_LEN 8
618
619static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
620 enum htt_rx_mpdu_encrypt_type type)
621{
622 switch (type) {
623 case HTT_RX_MPDU_ENCRYPT_NONE:
624 return 0;
625 case HTT_RX_MPDU_ENCRYPT_WEP40:
626 case HTT_RX_MPDU_ENCRYPT_WEP104:
627 return IEEE80211_WEP_ICV_LEN;
628 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
629 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
630 return IEEE80211_TKIP_ICV_LEN;
631 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
632 return IEEE80211_CCMP_MIC_LEN;
633 case HTT_RX_MPDU_ENCRYPT_WEP128:
634 case HTT_RX_MPDU_ENCRYPT_WAPI:
635 break;
636 }
637
638 ath10k_warn(ar, "unsupported encryption type %d\n", type);
639 return 0;
640}
641
642struct amsdu_subframe_hdr {
643 u8 dst[ETH_ALEN];
644 u8 src[ETH_ALEN];
645 __be16 len;
646} __packed;
647
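/* In VHT-SIG-A a Group ID of 0 or 63 denotes a single-user transmission;
 * anything else is MU-MIMO, for which MCS/NSS cannot be recovered here.
 */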
648#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
649
650static void ath10k_htt_rx_h_rates(struct ath10k *ar,
651 struct ieee80211_rx_status *status,
652 struct htt_rx_desc *rxd)
653{
654 struct ieee80211_supported_band *sband;
655 u8 cck, rate, bw, sgi, mcs, nss;
656 u8 preamble = 0;
657 u8 group_id;
658 u32 info1, info2, info3;
659
660 info1 = __le32_to_cpu(rxd->ppdu_start.info1);
661 info2 = __le32_to_cpu(rxd->ppdu_start.info2);
662 info3 = __le32_to_cpu(rxd->ppdu_start.info3);
663
664 preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
665
666 switch (preamble) {
667 case HTT_RX_LEGACY:
668 /* To get legacy rate index band is required. Since band can't
669 * be undefined check if freq is non-zero.
670 */
671 if (!status->freq)
672 return;
673
674 cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
675 rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
676 rate &= ~RX_PPDU_START_RATE_FLAG;
677
678 sband = &ar->mac.sbands[status->band];
679 status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
680 break;
681 case HTT_RX_HT:
682 case HTT_RX_HT_WITH_TXBF:
683 /* HT-SIG - Table 20-11 in info2 and info3 */
684 mcs = info2 & 0x1F;
685 nss = mcs >> 3;
686 bw = (info2 >> 7) & 1;
687 sgi = (info3 >> 7) & 1;
688
689 status->rate_idx = mcs;
690 status->flag |= RX_FLAG_HT;
691 if (sgi)
692 status->flag |= RX_FLAG_SHORT_GI;
693 if (bw)
694 status->flag |= RX_FLAG_40MHZ;
695 break;
696 case HTT_RX_VHT:
697 case HTT_RX_VHT_WITH_TXBF:
698 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
699 TODO check this */
700 bw = info2 & 3;
701 sgi = info3 & 1;
702 group_id = (info2 >> 4) & 0x3F;
703
704 if (GROUP_ID_IS_SU_MIMO(group_id)) {
705 mcs = (info3 >> 4) & 0x0F;
706 nss = ((info2 >> 10) & 0x07) + 1;
707 } else {
708 /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
709 * so it's impossible to decode MCS. Also since
710 * firmware consumes Group Id Management frames host
711 * has no knowledge regarding group/user position
712 * mapping so it's impossible to pick the correct Nsts
713 * from VHT-SIG-A1.
714 *
715 * Bandwidth and SGI are valid so report the rateinfo
716 * on best-effort basis.
717 */
718 mcs = 0;
719 nss = 1;
720 }
721
722 if (mcs > 0x09) {
723 ath10k_warn(ar, "invalid MCS received %u\n", mcs);
724 ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
725 __le32_to_cpu(rxd->attention.flags),
726 __le32_to_cpu(rxd->mpdu_start.info0),
727 __le32_to_cpu(rxd->mpdu_start.info1),
728 __le32_to_cpu(rxd->msdu_start.common.info0),
729 __le32_to_cpu(rxd->msdu_start.common.info1),
730 rxd->ppdu_start.info0,
731 __le32_to_cpu(rxd->ppdu_start.info1),
732 __le32_to_cpu(rxd->ppdu_start.info2),
733 __le32_to_cpu(rxd->ppdu_start.info3),
734 __le32_to_cpu(rxd->ppdu_start.info4));
735
736 ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
737 __le32_to_cpu(rxd->msdu_end.common.info0),
738 __le32_to_cpu(rxd->mpdu_end.info0));
739
740 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
741 "rx desc msdu payload: ",
742 rxd->msdu_payload, 50);
743 }
744
745 status->rate_idx = mcs;
746 status->vht_nss = nss;
747
748 if (sgi)
749 status->flag |= RX_FLAG_SHORT_GI;
750
751 switch (bw) {
752 /* 20MHZ */
753 case 0:
754 break;
755 /* 40MHZ */
756 case 1:
757 status->flag |= RX_FLAG_40MHZ;
758 break;
759 /* 80MHZ */
760 case 2:
761 status->vht_flag |= RX_VHT_FLAG_80MHZ;
762 }
763
764 status->flag |= RX_FLAG_VHT;
765 break;
766 default:
767 break;
768 }
769}
770
771static struct ieee80211_channel *
772ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
773{
774 struct ath10k_peer *peer;
775 struct ath10k_vif *arvif;
776 struct cfg80211_chan_def def;
777 u16 peer_id;
778
779 lockdep_assert_held(&ar->data_lock);
780
781 if (!rxd)
782 return NULL;
783
784 if (rxd->attention.flags &
785 __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
786 return NULL;
787
788 if (!(rxd->msdu_end.common.info0 &
789 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
790 return NULL;
791
792 peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
793 RX_MPDU_START_INFO0_PEER_IDX);
794
795 peer = ath10k_peer_find_by_id(ar, peer_id);
796 if (!peer)
797 return NULL;
798
799 arvif = ath10k_get_arvif(ar, peer->vdev_id);
800 if (WARN_ON_ONCE(!arvif))
801 return NULL;
802
803 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
804 return NULL;
805
806 return def.chan;
807}
808
809static struct ieee80211_channel *
810ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
811{
812 struct ath10k_vif *arvif;
813 struct cfg80211_chan_def def;
814
815 lockdep_assert_held(&ar->data_lock);
816
817 list_for_each_entry(arvif, &ar->arvifs, list) {
818 if (arvif->vdev_id == vdev_id &&
819 ath10k_mac_vif_chan(arvif->vif, &def) == 0)
820 return def.chan;
821 }
822
823 return NULL;
824}
825
826static void
827ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
828 struct ieee80211_chanctx_conf *conf,
829 void *data)
830{
831 struct cfg80211_chan_def *def = data;
832
833 *def = conf->def;
834}
835
836static struct ieee80211_channel *
837ath10k_htt_rx_h_any_channel(struct ath10k *ar)
838{
839 struct cfg80211_chan_def def = {};
840
841 ieee80211_iter_chan_contexts_atomic(ar->hw,
842 ath10k_htt_rx_h_any_chan_iter,
843 &def);
844
845 return def.chan;
846}
847
848static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
849 struct ieee80211_rx_status *status,
850 struct htt_rx_desc *rxd,
851 u32 vdev_id)
852{
853 struct ieee80211_channel *ch;
854
855 spin_lock_bh(&ar->data_lock);
856 ch = ar->scan_channel;
857 if (!ch)
858 ch = ar->rx_channel;
859 if (!ch)
860 ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
861 if (!ch)
862 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
863 if (!ch)
864 ch = ath10k_htt_rx_h_any_channel(ar);
865 if (!ch)
866 ch = ar->tgt_oper_chan;
867 spin_unlock_bh(&ar->data_lock);
868
869 if (!ch)
870 return false;
871
872 status->band = ch->band;
873 status->freq = ch->center_freq;
874
875 return true;
876}
877
878static void ath10k_htt_rx_h_signal(struct ath10k *ar,
879 struct ieee80211_rx_status *status,
880 struct htt_rx_desc *rxd)
881{
882 /* FIXME: Get real NF */
883 status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
884 rxd->ppdu_start.rssi_comb;
885 status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
886}
887
888static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
889 struct ieee80211_rx_status *status,
890 struct htt_rx_desc *rxd)
891{
892 /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
893 * means all prior MSDUs in a PPDU are reported to mac80211 without the
894 * TSF. Is it worth holding frames until end of PPDU is known?
895 *
896 * FIXME: Can we get/compute 64bit TSF?
897 */
898 status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
899 status->flag |= RX_FLAG_MACTIME_END;
900}
901
902static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
903 struct sk_buff_head *amsdu,
904 struct ieee80211_rx_status *status,
905 u32 vdev_id)
906{
907 struct sk_buff *first;
908 struct htt_rx_desc *rxd;
909 bool is_first_ppdu;
910 bool is_last_ppdu;
911
912 if (skb_queue_empty(amsdu))
913 return;
914
915 first = skb_peek(amsdu);
916 rxd = (void *)first->data - sizeof(*rxd);
917
918 is_first_ppdu = !!(rxd->attention.flags &
919 __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
920 is_last_ppdu = !!(rxd->attention.flags &
921 __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
922
923 if (is_first_ppdu) {
924 /* New PPDU starts so clear out the old per-PPDU status. */
925 status->freq = 0;
926 status->rate_idx = 0;
927 status->vht_nss = 0;
928 status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
929 status->flag &= ~(RX_FLAG_HT |
930 RX_FLAG_VHT |
931 RX_FLAG_SHORT_GI |
932 RX_FLAG_40MHZ |
933 RX_FLAG_MACTIME_END);
934 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
935
936 ath10k_htt_rx_h_signal(ar, status, rxd);
937 ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
938 ath10k_htt_rx_h_rates(ar, status, rxd);
939 }
940
941 if (is_last_ppdu)
942 ath10k_htt_rx_h_mactime(ar, status, rxd);
943}
944
945static const char * const tid_to_ac[] = {
946 "BE",
947 "BK",
948 "BK",
949 "BE",
950 "VI",
951 "VI",
952 "VO",
953 "VO",
954};
955
956static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
957{
958 u8 *qc;
959 int tid;
960
961 if (!ieee80211_is_data_qos(hdr->frame_control))
962 return "";
963
964 qc = ieee80211_get_qos_ctl(hdr);
965 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
966 if (tid < 8)
967 snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
968 else
969 snprintf(out, size, "tid %d", tid);
970
971 return out;
972}
973
85f6d7cf
JD
974static void ath10k_process_rx(struct ath10k *ar,
975 struct ieee80211_rx_status *rx_status,
976 struct sk_buff *skb)
977{
978 struct ieee80211_rx_status *status;
979 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
980 char tid[32];
981
982 status = IEEE80211_SKB_RXCB(skb);
983 *status = *rx_status;
984
985 ath10k_dbg(ar, ATH10K_DBG_DATA,
986 "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
987 skb,
988 skb->len,
989 ieee80211_get_SA(hdr),
990 ath10k_get_tid(hdr, tid, sizeof(tid)),
991 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
992 "mcast" : "ucast",
993 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
994 status->flag == 0 ? "legacy" : "",
995 status->flag & RX_FLAG_HT ? "ht" : "",
996 status->flag & RX_FLAG_VHT ? "vht" : "",
997 status->flag & RX_FLAG_40MHZ ? "40" : "",
998 status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
999 status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
1000 status->rate_idx,
1001 status->vht_nss,
1002 status->freq,
1003 status->band, status->flag,
1004 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
1005 !!(status->flag & RX_FLAG_MMIC_ERROR),
1006 !!(status->flag & RX_FLAG_AMSDU_MORE));
1007 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
1008 skb->data, skb->len);
1009 trace_ath10k_rx_hdr(ar, skb->data, skb->len);
1010 trace_ath10k_rx_payload(ar, skb->data, skb->len);
1011
1012 ieee80211_rx(ar->hw, skb);
1013}
1014
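/* Native-wifi decap pads the 802.11 header to a 4-byte boundary unless the
 * firmware advertises ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING.
 */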
1015static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
1016 struct ieee80211_hdr *hdr)
1017{
1018 int len = ieee80211_hdrlen(hdr->frame_control);
1019
1020 if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
1021 ar->fw_features))
1022 len = round_up(len, 4);
1023
1024 return len;
1025}
1026
1027static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1028 struct sk_buff *msdu,
1029 struct ieee80211_rx_status *status,
1030 enum htt_rx_mpdu_encrypt_type enctype,
1031 bool is_decrypted)
1032{
1033 struct ieee80211_hdr *hdr;
1034 struct htt_rx_desc *rxd;
1035 size_t hdr_len;
1036 size_t crypto_len;
1037 bool is_first;
1038 bool is_last;
1039
1040 rxd = (void *)msdu->data - sizeof(*rxd);
1041 is_first = !!(rxd->msdu_end.common.info0 &
1042 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1043 is_last = !!(rxd->msdu_end.common.info0 &
1044 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1045
1046 /* Delivered decapped frame:
1047 * [802.11 header]
1048 * [crypto param] <-- can be trimmed if !fcs_err &&
1049 * !decrypt_err && !peer_idx_invalid
1050 * [amsdu header] <-- only if A-MSDU
1051 * [rfc1042/llc]
1052 * [payload]
1053 * [FCS] <-- at end, needs to be trimmed
1054 */
1055
1056 /* This probably shouldn't happen but warn just in case */
1057 if (unlikely(WARN_ON_ONCE(!is_first)))
1058 return;
1059
1060 /* This probably shouldn't happen but warn just in case */
1061 if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
1062 return;
1063
1064 skb_trim(msdu, msdu->len - FCS_LEN);
1065
1066 /* In most cases this will be true for sniffed frames. It makes sense
1067 * to deliver them as-is without stripping the crypto param. This is
1068 * necessary for software based decryption.
1069 *
1070 * If there's no error then the frame is decrypted. At least that is
1071 * the case for frames that come in via fragmented rx indication.
1072 */
1073 if (!is_decrypted)
1074 return;
1075
1076 /* The payload is decrypted so strip crypto params. Start from tail
1077 * since hdr is used to compute some stuff.
1078 */
1079
1080 hdr = (void *)msdu->data;
1081
1082 /* Tail */
1083 if (status->flag & RX_FLAG_IV_STRIPPED)
1084 skb_trim(msdu, msdu->len -
1085 ath10k_htt_rx_crypto_tail_len(ar, enctype));
1086
1087 /* MMIC */
1088 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1089 !ieee80211_has_morefrags(hdr->frame_control) &&
1090 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1091 skb_trim(msdu, msdu->len - 8);
1092
1093 /* Head */
1094 if (status->flag & RX_FLAG_IV_STRIPPED) {
1095 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1096 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1097
1098 memmove((void *)msdu->data + crypto_len,
1099 (void *)msdu->data, hdr_len);
1100 skb_pull(msdu, crypto_len);
1101 }
1102}
1103
1104static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1105 struct sk_buff *msdu,
1106 struct ieee80211_rx_status *status,
1107 const u8 first_hdr[64])
1108{
1109 struct ieee80211_hdr *hdr;
1110 size_t hdr_len;
1111 u8 da[ETH_ALEN];
1112 u8 sa[ETH_ALEN];
1113
1114 /* Delivered decapped frame:
1115 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
1116 * [rfc1042/llc]
1117 *
1118 * Note: The nwifi header doesn't have QoS Control and is
1119 * (always?) a 3addr frame.
1120 *
1121 * Note2: There's no A-MSDU subframe header. Even if it's part
1122 * of an A-MSDU.
1123 */
1124
1125 /* pull decapped header and copy SA & DA */
1126 if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
1127 ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
1128 /* The QCA99X0 4 address mode pad 2 bytes at the
1129 * beginning of MSDU
1130 */
1131 hdr = (struct ieee80211_hdr *)(msdu->data + 2);
1132 /* The skb length need be extended 2 as the 2 bytes at the tail
1133 * be excluded due to the padding
1134 */
1135 skb_put(msdu, 2);
1136 } else {
1137 hdr = (struct ieee80211_hdr *)(msdu->data);
1138 }
1139
1140 hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
1141 ether_addr_copy(da, ieee80211_get_DA(hdr));
1142 ether_addr_copy(sa, ieee80211_get_SA(hdr));
1143 skb_pull(msdu, hdr_len);
1144
1145 /* push original 802.11 header */
1146 hdr = (struct ieee80211_hdr *)first_hdr;
1147 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1148 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1149
1150 /* original 802.11 header has a different DA and in
1151 * case of 4addr it may also have different SA
1152 */
1153 hdr = (struct ieee80211_hdr *)msdu->data;
1154 ether_addr_copy(ieee80211_get_DA(hdr), da);
1155 ether_addr_copy(ieee80211_get_SA(hdr), sa);
1156}
1157
1158static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1159 struct sk_buff *msdu,
1160 enum htt_rx_mpdu_encrypt_type enctype)
1161{
1162 struct ieee80211_hdr *hdr;
1163 struct htt_rx_desc *rxd;
1164 size_t hdr_len, crypto_len;
1165 void *rfc1042;
1166 bool is_first, is_last, is_amsdu;
1167
1168 rxd = (void *)msdu->data - sizeof(*rxd);
1169 hdr = (void *)rxd->rx_hdr_status;
1170
1171 is_first = !!(rxd->msdu_end.common.info0 &
1172 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1173 is_last = !!(rxd->msdu_end.common.info0 &
1174 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1175 is_amsdu = !(is_first && is_last);
1176
1177 rfc1042 = hdr;
1178
1179 if (is_first) {
1180 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1181 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1182
1183 rfc1042 += round_up(hdr_len, 4) +
1184 round_up(crypto_len, 4);
1185 }
1186
1187 if (is_amsdu)
1188 rfc1042 += sizeof(struct amsdu_subframe_hdr);
1189
1190 return rfc1042;
1191}
1192
1193static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1194 struct sk_buff *msdu,
1195 struct ieee80211_rx_status *status,
1196 const u8 first_hdr[64],
1197 enum htt_rx_mpdu_encrypt_type enctype)
1198{
1199 struct ieee80211_hdr *hdr;
1200 struct ethhdr *eth;
1201 size_t hdr_len;
1202 void *rfc1042;
1203 u8 da[ETH_ALEN];
1204 u8 sa[ETH_ALEN];
1205
1206 /* Delivered decapped frame:
1207 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
1208 * [payload]
1209 */
1210
1211 rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
1212 if (WARN_ON_ONCE(!rfc1042))
1213 return;
1214
1215 /* pull decapped header and copy SA & DA */
1216 eth = (struct ethhdr *)msdu->data;
1217 ether_addr_copy(da, eth->h_dest);
1218 ether_addr_copy(sa, eth->h_source);
1219 skb_pull(msdu, sizeof(struct ethhdr));
1220
1221 /* push rfc1042/llc/snap */
1222 memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
1223 sizeof(struct rfc1042_hdr));
1224
1225 /* push original 802.11 header */
1226 hdr = (struct ieee80211_hdr *)first_hdr;
1227 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1228 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1229
1230 /* original 802.11 header has a different DA and in
1231 * case of 4addr it may also have different SA
1232 */
1233 hdr = (struct ieee80211_hdr *)msdu->data;
1234 ether_addr_copy(ieee80211_get_DA(hdr), da);
1235 ether_addr_copy(ieee80211_get_SA(hdr), sa);
1236}
1237
1238static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1239 struct sk_buff *msdu,
1240 struct ieee80211_rx_status *status,
1241 const u8 first_hdr[64])
1242{
1243 struct ieee80211_hdr *hdr;
1244 size_t hdr_len;
1245
1246 /* Delivered decapped frame:
1247 * [amsdu header] <-- replaced with 802.11 hdr
1248 * [rfc1042/llc]
1249 * [payload]
1250 */
1251
1252 skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
1253
1254 hdr = (struct ieee80211_hdr *)first_hdr;
1255 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1256 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1257}
1258
1259static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1260 struct sk_buff *msdu,
1261 struct ieee80211_rx_status *status,
1262 u8 first_hdr[64],
1263 enum htt_rx_mpdu_encrypt_type enctype,
1264 bool is_decrypted)
1265{
1266 struct htt_rx_desc *rxd;
1267 enum rx_msdu_decap_format decap;
1268
1269 /* First msdu's decapped header:
1270 * [802.11 header] <-- padded to 4 bytes long
1271 * [crypto param] <-- padded to 4 bytes long
1272 * [amsdu header] <-- only if A-MSDU
1273 * [rfc1042/llc]
1274 *
1275 * Other (2nd, 3rd, ..) msdu's decapped header:
1276 * [amsdu header] <-- only if A-MSDU
1277 * [rfc1042/llc]
1278 */
1279
1280 rxd = (void *)msdu->data - sizeof(*rxd);
1281 decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1282 RX_MSDU_START_INFO1_DECAP_FORMAT);
1283
1284 switch (decap) {
1285 case RX_MSDU_DECAP_RAW:
1286 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1287 is_decrypted);
1288 break;
1289 case RX_MSDU_DECAP_NATIVE_WIFI:
1290 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
1291 break;
1292 case RX_MSDU_DECAP_ETHERNET2_DIX:
1293 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1294 break;
1295 case RX_MSDU_DECAP_8023_SNAP_LLC:
1296 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
1297 break;
1298 }
1299}
1300
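/* Map the hardware checksum attention bits to skb->ip_summed: claim
 * CHECKSUM_UNNECESSARY only when both the IP and TCP/UDP checksums were
 * verified by hardware, otherwise leave verification to the stack.
 */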
1301static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
1302{
1303 struct htt_rx_desc *rxd;
1304 u32 flags, info;
1305 bool is_ip4, is_ip6;
1306 bool is_tcp, is_udp;
1307 bool ip_csum_ok, tcpudp_csum_ok;
1308
1309 rxd = (void *)skb->data - sizeof(*rxd);
1310 flags = __le32_to_cpu(rxd->attention.flags);
1311 info = __le32_to_cpu(rxd->msdu_start.common.info1);
1312
1313 is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1314 is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1315 is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1316 is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1317 ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1318 tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1319
1320 if (!is_ip4 && !is_ip6)
1321 return CHECKSUM_NONE;
1322 if (!is_tcp && !is_udp)
1323 return CHECKSUM_NONE;
1324 if (!ip_csum_ok)
1325 return CHECKSUM_NONE;
1326 if (!tcpudp_csum_ok)
1327 return CHECKSUM_NONE;
1328
1329 return CHECKSUM_UNNECESSARY;
1330}
1331
1332static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
1333{
1334 msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
1335}
1336
1337static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1338 struct sk_buff_head *amsdu,
1339 struct ieee80211_rx_status *status)
1340{
1341 struct sk_buff *first;
1342 struct sk_buff *last;
1343 struct sk_buff *msdu;
1344 struct htt_rx_desc *rxd;
1345 struct ieee80211_hdr *hdr;
1346 enum htt_rx_mpdu_encrypt_type enctype;
1347 u8 first_hdr[64];
1348 u8 *qos;
1349 size_t hdr_len;
1350 bool has_fcs_err;
1351 bool has_crypto_err;
1352 bool has_tkip_err;
1353 bool has_peer_idx_invalid;
1354 bool is_decrypted;
1355 bool is_mgmt;
1356 u32 attention;
1357
1358 if (skb_queue_empty(amsdu))
1359 return;
1360
1361 first = skb_peek(amsdu);
1362 rxd = (void *)first->data - sizeof(*rxd);
1363
1364 is_mgmt = !!(rxd->attention.flags &
1365 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1366
1367 enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1368 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1369
1370 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1371 * decapped header. It'll be used for undecapping of each MSDU.
1372 */
1373 hdr = (void *)rxd->rx_hdr_status;
1374 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1375 memcpy(first_hdr, hdr, hdr_len);
1376
1377 /* Each A-MSDU subframe will use the original header as the base and be
1378 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1379 */
1380 hdr = (void *)first_hdr;
1381 qos = ieee80211_get_qos_ctl(hdr);
1382 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1383
1384 /* Some attention flags are valid only in the last MSDU. */
1385 last = skb_peek_tail(amsdu);
1386 rxd = (void *)last->data - sizeof(*rxd);
1387 attention = __le32_to_cpu(rxd->attention.flags);
1388
1389 has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
1390 has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
1391 has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
1392 has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
1393
1394 /* Note: If hardware captures an encrypted frame that it can't decrypt,
1395 * e.g. due to fcs error, missing peer or invalid key data it will
1396 * report the frame as raw.
1397 */
1398 is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
1399 !has_fcs_err &&
1400 !has_crypto_err &&
1401 !has_peer_idx_invalid);
1402
1403 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1404 status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
1405 RX_FLAG_MMIC_ERROR |
1406 RX_FLAG_DECRYPTED |
1407 RX_FLAG_IV_STRIPPED |
1408 RX_FLAG_ONLY_MONITOR |
1409 RX_FLAG_MMIC_STRIPPED);
1410
1411 if (has_fcs_err)
1412 status->flag |= RX_FLAG_FAILED_FCS_CRC;
1413
1414 if (has_tkip_err)
1415 status->flag |= RX_FLAG_MMIC_ERROR;
1416
1417 /* Firmware reports all necessary management frames via WMI already.
1418 * They are not reported to monitor interfaces at all so pass the ones
1419 * coming via HTT to monitor interfaces instead. This simplifies
1420 * matters a lot.
1421 */
1422 if (is_mgmt)
1423 status->flag |= RX_FLAG_ONLY_MONITOR;
1424
1425 if (is_decrypted) {
1426 status->flag |= RX_FLAG_DECRYPTED;
1427
1428 if (likely(!is_mgmt))
1429 status->flag |= RX_FLAG_IV_STRIPPED |
1430 RX_FLAG_MMIC_STRIPPED;
1431}
1432
1433 skb_queue_walk(amsdu, msdu) {
1434 ath10k_htt_rx_h_csum_offload(msdu);
1435 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
1436 is_decrypted);
1437
1438 /* Undecapping involves copying the original 802.11 header back
1439 * to sk_buff. If frame is protected and hardware has decrypted
1440 * it then remove the protected bit.
1441 */
1442 if (!is_decrypted)
1443 continue;
1444 if (is_mgmt)
1445 continue;
1446
1447 hdr = (void *)msdu->data;
1448 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1449 }
1450}
1451
1452static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1453 struct sk_buff_head *amsdu,
1454 struct ieee80211_rx_status *status)
1455{
1456 struct sk_buff *msdu;
1457
1458 while ((msdu = __skb_dequeue(amsdu))) {
1459 /* Setup per-MSDU flags */
1460 if (skb_queue_empty(amsdu))
1461 status->flag &= ~RX_FLAG_AMSDU_MORE;
1462 else
1463 status->flag |= RX_FLAG_AMSDU_MORE;
1464
1465 ath10k_process_rx(ar, status, msdu);
1466 }
1467}
1468
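/* Linearize a chained MSDU by expanding the first skb and copying every
 * remaining fragment into it, so mac80211 receives a single buffer.
 */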
1469static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
1470{
1471 struct sk_buff *skb, *first;
1472 int space;
1473 int total_len = 0;
1474
1475 /* TODO: We might be able to optimize this by using
1476 * skb_try_coalesce or a similar method to
1477 * decrease copying, or maybe get mac80211 to
1478 * provide a way to just receive a list of
1479 * skbs?
1480 */
1481
9aa505d2 1482 first = __skb_dequeue(amsdu);
bfa35368
BG
1483
1484 /* Allocate total length all at once. */
9aa505d2
MK
1485 skb_queue_walk(amsdu, skb)
1486 total_len += skb->len;
bfa35368 1487
9aa505d2 1488 space = total_len - skb_tailroom(first);
bfa35368 1489 if ((space > 0) &&
9aa505d2 1490 (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
bfa35368
BG
1491 /* TODO: bump some rx-oom error stat */
1492 /* put it back together so we can free the
1493 * whole list at once.
1494 */
9aa505d2 1495 __skb_queue_head(amsdu, first);
bfa35368
BG
1496 return -1;
1497 }
1498
1499 /* Walk list again, copying contents into
1500 * msdu_head
1501 */
1502 while ((skb = __skb_dequeue(amsdu))) {
1503 skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1504 skb->len);
1505 dev_kfree_skb_any(skb);
1506 }
1507
1508 __skb_queue_head(amsdu, first);
1509 return 0;
1510}
1511
1512static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1513 struct sk_buff_head *amsdu,
1514 bool chained)
1515{
1516 struct sk_buff *first;
1517 struct htt_rx_desc *rxd;
1518 enum rx_msdu_decap_format decap;
1519
1520 first = skb_peek(amsdu);
1521 rxd = (void *)first->data - sizeof(*rxd);
1522 decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1523 RX_MSDU_START_INFO1_DECAP_FORMAT);
1524
1525 if (!chained)
1526 return;
1527
1528 /* FIXME: Current unchaining logic can only handle simple case of raw
1529 * msdu chaining. If decapping is other than raw the chaining may be
1530 * more complex and this isn't handled by the current code. Don't even
1531 * try re-constructing such frames - it'll be pretty much garbage.
1532 */
1533 if (decap != RX_MSDU_DECAP_RAW ||
1534 skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1535 __skb_queue_purge(amsdu);
1536 return;
1537 }
1538
1539 ath10k_unchain_msdu(amsdu);
1540}
1541
1542static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1543 struct sk_buff_head *amsdu,
1544 struct ieee80211_rx_status *rx_status)
1545{
1546 /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1547 * invalid/dangerous frames.
1548 */
1549
1550 if (!rx_status->freq) {
1551 ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
1552 return false;
1553 }
1554
1555 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1556 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1557 return false;
1558 }
1559
1560 return true;
1561}
1562
1563static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1564 struct sk_buff_head *amsdu,
1565 struct ieee80211_rx_status *rx_status)
1566{
1567 if (skb_queue_empty(amsdu))
1568 return;
1569
1570 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1571 return;
1572
1573 __skb_queue_purge(amsdu);
1574}
1575
1576static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
1577 struct htt_rx_indication *rx)
1578{
1579 struct ath10k *ar = htt->ar;
1580 struct ieee80211_rx_status *rx_status = &htt->rx_status;
1581 struct htt_rx_indication_mpdu_range *mpdu_ranges;
1582 struct sk_buff_head amsdu;
1583 int num_mpdu_ranges;
1584 int fw_desc_len;
1585 u8 *fw_desc;
1586 int i, ret, mpdu_count = 0;
1587
1588 lockdep_assert_held(&htt->rx_ring.lock);
1589
1590 if (htt->rx_confused)
1591 return;
1592
1593 fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
1594 fw_desc = (u8 *)&rx->fw_desc;
1595
1596 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1597 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1598 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1599
1600 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1601 rx, sizeof(*rx) +
1602 (sizeof(struct htt_rx_indication_mpdu_range) *
1603 num_mpdu_ranges));
1604
1605 for (i = 0; i < num_mpdu_ranges; i++)
1606 mpdu_count += mpdu_ranges[i].mpdu_count;
1607
1608 while (mpdu_count--) {
1609 __skb_queue_head_init(&amsdu);
1610 ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
1611 &fw_desc_len, &amsdu);
1612 if (ret < 0) {
1613 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1614 __skb_queue_purge(&amsdu);
1615 /* FIXME: It's probably a good idea to reboot the
1616 * device instead of leaving it inoperable.
1617 */
1618 htt->rx_confused = true;
1619 break;
1620 }
1621
1622 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1623 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1624 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1625 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1626 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1627 }
1628
1629 tasklet_schedule(&htt->rx_replenish_task);
1630}
1631
1632static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1633 struct htt_rx_fragment_indication *frag)
1634{
1635 struct ath10k *ar = htt->ar;
1636 struct ieee80211_rx_status *rx_status = &htt->rx_status;
1637 struct sk_buff_head amsdu;
1638 int ret;
1639 u8 *fw_desc;
1640 int fw_desc_len;
1641
1642 fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
1643 fw_desc = (u8 *)frag->fw_msdu_rx_desc;
1644
1645 __skb_queue_head_init(&amsdu);
1646
1647 spin_lock_bh(&htt->rx_ring.lock);
1648 ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
1649 &amsdu);
1650 spin_unlock_bh(&htt->rx_ring.lock);
1651
1652 tasklet_schedule(&htt->rx_replenish_task);
1653
1654 ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
1655
1656 if (ret) {
1657 ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
1658 ret);
1659 __skb_queue_purge(&amsdu);
1660 return;
1661 }
1662
1663 if (skb_queue_len(&amsdu) != 1) {
1664 ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
1665 __skb_queue_purge(&amsdu);
1666 return;
1667 }
1668
1669 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1670 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1671 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1672 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1673
1674 if (fw_desc_len > 0) {
1675 ath10k_dbg(ar, ATH10K_DBG_HTT,
1676 "expecting more fragmented rx in one indication %d\n",
1677 fw_desc_len);
1678 }
1679}
1680
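/* Translate an HTT data tx completion into ath10k's tx_done and release the
 * corresponding msdu ids via ath10k_txrx_tx_unref().
 */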
1681static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
1682 struct sk_buff *skb)
1683{
1684 struct ath10k_htt *htt = &ar->htt;
1685 struct htt_resp *resp = (struct htt_resp *)skb->data;
1686 struct htt_tx_done tx_done = {};
1687 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1688 __le16 msdu_id;
1689 int i;
1690
1691 switch (status) {
1692 case HTT_DATA_TX_STATUS_NO_ACK:
1693 tx_done.no_ack = true;
1694 break;
1695 case HTT_DATA_TX_STATUS_OK:
1696 tx_done.success = true;
1697 break;
1698 case HTT_DATA_TX_STATUS_DISCARD:
1699 case HTT_DATA_TX_STATUS_POSTPONE:
1700 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1701 tx_done.discard = true;
1702 break;
1703 default:
1704 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1705 tx_done.discard = true;
1706 break;
1707 }
1708
1709 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1710 resp->data_tx_completion.num_msdus);
1711
1712 for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1713 msdu_id = resp->data_tx_completion.msdus[i];
1714 tx_done.msdu_id = __le16_to_cpu(msdu_id);
1715 ath10k_txrx_tx_unref(htt, &tx_done);
1716 }
1717}
1718
aa5b4fbc
MK
1719static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1720{
1721 struct htt_rx_addba *ev = &resp->rx_addba;
1722 struct ath10k_peer *peer;
1723 struct ath10k_vif *arvif;
1724 u16 info0, tid, peer_id;
1725
1726 info0 = __le16_to_cpu(ev->info0);
1727 tid = MS(info0, HTT_RX_BA_INFO0_TID);
1728 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1729
1730 ath10k_dbg(ar, ATH10K_DBG_HTT,
1731 "htt rx addba tid %hu peer_id %hu size %hhu\n",
1732 tid, peer_id, ev->window_size);
1733
1734 spin_lock_bh(&ar->data_lock);
1735 peer = ath10k_peer_find_by_id(ar, peer_id);
1736 if (!peer) {
7aa7a72a 1737 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
aa5b4fbc
MK
1738 peer_id);
1739 spin_unlock_bh(&ar->data_lock);
1740 return;
1741 }
1742
1743 arvif = ath10k_get_arvif(ar, peer->vdev_id);
1744 if (!arvif) {
7aa7a72a 1745 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
aa5b4fbc
MK
1746 peer->vdev_id);
1747 spin_unlock_bh(&ar->data_lock);
1748 return;
1749 }
1750
7aa7a72a 1751 ath10k_dbg(ar, ATH10K_DBG_HTT,
aa5b4fbc
MK
1752 "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1753 peer->addr, tid, ev->window_size);
1754
1755 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1756 spin_unlock_bh(&ar->data_lock);
1757}
1758
1759static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1760{
1761 struct htt_rx_delba *ev = &resp->rx_delba;
1762 struct ath10k_peer *peer;
1763 struct ath10k_vif *arvif;
1764 u16 info0, tid, peer_id;
1765
1766 info0 = __le16_to_cpu(ev->info0);
1767 tid = MS(info0, HTT_RX_BA_INFO0_TID);
1768 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1769
7aa7a72a 1770 ath10k_dbg(ar, ATH10K_DBG_HTT,
aa5b4fbc
MK
1771 "htt rx delba tid %hu peer_id %hu\n",
1772 tid, peer_id);
1773
1774 spin_lock_bh(&ar->data_lock);
1775 peer = ath10k_peer_find_by_id(ar, peer_id);
1776 if (!peer) {
7aa7a72a 1777 ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
aa5b4fbc
MK
1778 peer_id);
1779 spin_unlock_bh(&ar->data_lock);
1780 return;
1781 }
1782
1783 arvif = ath10k_get_arvif(ar, peer->vdev_id);
1784 if (!arvif) {
7aa7a72a 1785 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
aa5b4fbc
MK
1786 peer->vdev_id);
1787 spin_unlock_bh(&ar->data_lock);
1788 return;
1789 }
1790
7aa7a72a 1791 ath10k_dbg(ar, ATH10K_DBG_HTT,
aa5b4fbc
MK
1792 "htt rx stop rx ba session sta %pM tid %hu\n",
1793 peer->addr, tid);
1794
1795 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1796 spin_unlock_bh(&ar->data_lock);
1797}
1798
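/* Dequeue MSDUs from @list into @amsdu until the rx descriptor's
 * LAST_MSDU bit is seen. If the A-MSDU turns out to be incomplete the
 * MSDUs are spliced back onto @list and -EAGAIN is returned.
 */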
c545070e
MK
1799static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1800 struct sk_buff_head *amsdu)
1801{
1802 struct sk_buff *msdu;
1803 struct htt_rx_desc *rxd;
1804
1805 if (skb_queue_empty(list))
1806 return -ENOBUFS;
1807
1808 if (WARN_ON(!skb_queue_empty(amsdu)))
1809 return -EINVAL;
1810
1811 while ((msdu = __skb_dequeue(list))) {
1812 __skb_queue_tail(amsdu, msdu);
1813
1814 rxd = (void *)msdu->data - sizeof(*rxd);
1f5dbfbb 1815 if (rxd->msdu_end.common.info0 &
c545070e
MK
1816 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1817 break;
1818 }
1819
1820 msdu = skb_peek_tail(amsdu);
1821 rxd = (void *)msdu->data - sizeof(*rxd);
1f5dbfbb 1822 if (!(rxd->msdu_end.common.info0 &
c545070e
MK
1823 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1824 skb_queue_splice_init(amsdu, list);
1825 return -EAGAIN;
1826 }
1827
1828 return 0;
1829}
1830
1831static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1832 struct sk_buff *skb)
1833{
1834 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1835
1836 if (!ieee80211_has_protected(hdr->frame_control))
1837 return;
1838
1839 /* Offloaded frames are already decrypted but firmware insists they are
1840 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
1841 * will drop the frame.
1842 */
1843
1844 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1845 status->flag |= RX_FLAG_DECRYPTED |
1846 RX_FLAG_IV_STRIPPED |
1847 RX_FLAG_MMIC_STRIPPED;
1848}
1849
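/* Deliver offloaded MSDUs. These frames carry only a short
 * htt_rx_offload_msdu header instead of a full rx descriptor, so a
 * minimal rx status is built (no signal, channel derived from the vdev)
 * before handing each frame to mac80211.
 */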
1850static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1851 struct sk_buff_head *list)
1852{
1853 struct ath10k_htt *htt = &ar->htt;
1854 struct ieee80211_rx_status *status = &htt->rx_status;
1855 struct htt_rx_offload_msdu *rx;
1856 struct sk_buff *msdu;
1857 size_t offset;
1858
1859 while ((msdu = __skb_dequeue(list))) {
 1864 /* Offloaded frames don't have an Rx descriptor. Instead they carry
 1861 * a short meta information header.
1862 */
1863
1864 rx = (void *)msdu->data;
1865
1866 skb_put(msdu, sizeof(*rx));
1867 skb_pull(msdu, sizeof(*rx));
1868
1869 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
1870 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
1871 dev_kfree_skb_any(msdu);
1872 continue;
1873 }
1874
1875 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
1876
 1877 /* The offloaded rx header length isn't a multiple of 2 or 4, so the
 1878 * actual payload is unaligned. Align the frame, otherwise
 1879 * mac80211 complains. This shouldn't reduce performance much
 1880 * because these offloaded frames are rare.
1881 */
1882 offset = 4 - ((unsigned long)msdu->data & 3);
1883 skb_put(msdu, offset);
1884 memmove(msdu->data + offset, msdu->data, msdu->len);
1885 skb_pull(msdu, offset);
1886
1887 /* FIXME: The frame is NWifi. Re-construct QoS Control
1888 * if possible later.
1889 */
1890
1891 memset(status, 0, sizeof(*status));
1892 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1893
1894 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
500ff9f9 1895 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
c545070e
MK
1896 ath10k_process_rx(ar, status, msdu);
1897 }
1898}
1899
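/* Handle an in-order rx indication from full rx reorder firmware: pop
 * the reported buffers off the rx ring, deliver offloaded frames
 * separately and process the remaining A-MSDUs one by one.
 */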
1900static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1901{
1902 struct ath10k_htt *htt = &ar->htt;
1903 struct htt_resp *resp = (void *)skb->data;
1904 struct ieee80211_rx_status *status = &htt->rx_status;
1905 struct sk_buff_head list;
1906 struct sk_buff_head amsdu;
1907 u16 peer_id;
1908 u16 msdu_count;
1909 u8 vdev_id;
1910 u8 tid;
1911 bool offload;
1912 bool frag;
1913 int ret;
1914
1915 lockdep_assert_held(&htt->rx_ring.lock);
1916
1917 if (htt->rx_confused)
1918 return;
1919
1920 skb_pull(skb, sizeof(resp->hdr));
1921 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
1922
1923 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
1924 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
1925 vdev_id = resp->rx_in_ord_ind.vdev_id;
1926 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
1927 offload = !!(resp->rx_in_ord_ind.info &
1928 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
1929 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
1930
1931 ath10k_dbg(ar, ATH10K_DBG_HTT,
1932 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
1933 vdev_id, peer_id, tid, offload, frag, msdu_count);
1934
1935 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
1936 ath10k_warn(ar, "dropping invalid in order rx indication\n");
1937 return;
1938 }
1939
1940 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
1941 * extracted and processed.
1942 */
1943 __skb_queue_head_init(&list);
1944 ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
1945 if (ret < 0) {
1946 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
1947 htt->rx_confused = true;
1948 return;
1949 }
1950
1951 /* Offloaded frames are very different and need to be handled
1952 * separately.
1953 */
1954 if (offload)
1955 ath10k_htt_rx_h_rx_offload(ar, &list);
1956
1957 while (!skb_queue_empty(&list)) {
1958 __skb_queue_head_init(&amsdu);
1959 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1960 switch (ret) {
1961 case 0:
 1962 /* Note: The in-order indication may report interleaved
 1963 * frames from different PPDUs, so the rx rate reported
 1964 * to mac80211 isn't accurate/reliable. It's still
 1965 * better to report something than nothing though, as it
 1966 * should still give the user an idea about the rx rate.
1967 */
500ff9f9 1968 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
c545070e
MK
1969 ath10k_htt_rx_h_filter(ar, &amsdu, status);
1970 ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
1971 ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1972 break;
1973 case -EAGAIN:
1974 /* fall through */
1975 default:
1976 /* Should not happen. */
1977 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
1978 htt->rx_confused = true;
1979 __skb_queue_purge(&list);
1980 return;
1981 }
1982 }
1983
1984 tasklet_schedule(&htt->rx_replenish_task);
1985}
1986
839ae637
MK
1987static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
1988 const __le32 *resp_ids,
1989 int num_resp_ids)
1990{
1991 int i;
1992 u32 resp_id;
1993
1994 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
1995 num_resp_ids);
1996
1997 for (i = 0; i < num_resp_ids; i++) {
1998 resp_id = le32_to_cpu(resp_ids[i]);
1999
2000 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
2001 resp_id);
2002
2003 /* TODO: free resp_id */
2004 }
2005}
2006
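/* Handle a tx fetch indication (push-pull mode). For each record push
 * up to the requested number of MSDUs/bytes from the matching txq,
 * record what was actually pushed and send a tx fetch response back to
 * the firmware.
 */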
2007static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
2008{
426e10ea
MK
2009 struct ieee80211_hw *hw = ar->hw;
2010 struct ieee80211_txq *txq;
839ae637
MK
2011 struct htt_resp *resp = (struct htt_resp *)skb->data;
2012 struct htt_tx_fetch_record *record;
2013 size_t len;
2014 size_t max_num_bytes;
2015 size_t max_num_msdus;
426e10ea
MK
2016 size_t num_bytes;
2017 size_t num_msdus;
839ae637
MK
2018 const __le32 *resp_ids;
2019 u16 num_records;
2020 u16 num_resp_ids;
2021 u16 peer_id;
2022 u8 tid;
426e10ea 2023 int ret;
839ae637
MK
2024 int i;
2025
2026 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
2027
2028 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
2029 if (unlikely(skb->len < len)) {
2030 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
2031 return;
2032 }
2033
2034 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
2035 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
2036
2037 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
2038 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
2039
2040 if (unlikely(skb->len < len)) {
2041 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
2042 return;
2043 }
2044
2045 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
2046 num_records, num_resp_ids,
2047 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
2048
426e10ea
MK
2049 if (!ar->htt.tx_q_state.enabled) {
2050 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
2051 return;
2052 }
2053
2054 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
2055 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
2056 return;
2057 }
2058
2059 rcu_read_lock();
839ae637
MK
2060
2061 for (i = 0; i < num_records; i++) {
2062 record = &resp->tx_fetch_ind.records[i];
2063 peer_id = MS(le16_to_cpu(record->info),
2064 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
2065 tid = MS(le16_to_cpu(record->info),
2066 HTT_TX_FETCH_RECORD_INFO_TID);
2067 max_num_msdus = le16_to_cpu(record->num_msdus);
2068 max_num_bytes = le32_to_cpu(record->num_bytes);
2069
2070 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
2071 i, peer_id, tid, max_num_msdus, max_num_bytes);
2072
2073 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2074 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2075 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2076 peer_id, tid);
2077 continue;
2078 }
2079
426e10ea
MK
2080 spin_lock_bh(&ar->data_lock);
2081 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2082 spin_unlock_bh(&ar->data_lock);
2083
2084 /* It is okay to release the lock and use txq because RCU read
2085 * lock is held.
2086 */
2087
2088 if (unlikely(!txq)) {
2089 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2090 peer_id, tid);
2091 continue;
2092 }
2093
2094 num_msdus = 0;
2095 num_bytes = 0;
2096
2097 while (num_msdus < max_num_msdus &&
2098 num_bytes < max_num_bytes) {
2099 ret = ath10k_mac_tx_push_txq(hw, txq);
2100 if (ret < 0)
2101 break;
2102
2103 num_msdus++;
2104 num_bytes += ret;
2105 }
2106
2107 record->num_msdus = cpu_to_le16(num_msdus);
2108 record->num_bytes = cpu_to_le32(num_bytes);
2109
2110 ath10k_htt_tx_txq_recalc(hw, txq);
839ae637
MK
2111 }
2112
426e10ea
MK
2113 rcu_read_unlock();
2114
839ae637
MK
2115 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2116 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2117
426e10ea
MK
2118 ret = ath10k_htt_tx_fetch_resp(ar,
2119 resp->tx_fetch_ind.token,
2120 resp->tx_fetch_ind.fetch_seq_num,
2121 resp->tx_fetch_ind.records,
2122 num_records);
2123 if (unlikely(ret)) {
2124 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2125 le32_to_cpu(resp->tx_fetch_ind.token), ret);
2126 /* FIXME: request fw restart */
2127 }
2128
2129 ath10k_htt_tx_txq_sync(ar);
839ae637
MK
2130}
2131
2132static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2133 struct sk_buff *skb)
2134{
2135 const struct htt_resp *resp = (void *)skb->data;
2136 size_t len;
2137 int num_resp_ids;
2138
2139 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2140
2141 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2142 if (unlikely(skb->len < len)) {
2143 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2144 return;
2145 }
2146
2147 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2148 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2149
2150 if (unlikely(skb->len < len)) {
2151 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2152 return;
2153 }
2154
2155 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2156 resp->tx_fetch_confirm.resp_ids,
2157 num_resp_ids);
2158}
2159
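/* Handle a tx mode switch indication: update the host tx queueing state
 * (push vs push-pull and the push threshold) and refresh the per-txq
 * push allowance from each record.
 */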
2160static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2161 struct sk_buff *skb)
2162{
2163 const struct htt_resp *resp = (void *)skb->data;
2164 const struct htt_tx_mode_switch_record *record;
426e10ea
MK
2165 struct ieee80211_txq *txq;
2166 struct ath10k_txq *artxq;
839ae637
MK
2167 size_t len;
2168 size_t num_records;
2169 enum htt_tx_mode_switch_mode mode;
2170 bool enable;
2171 u16 info0;
2172 u16 info1;
2173 u16 threshold;
2174 u16 peer_id;
2175 u8 tid;
2176 int i;
2177
2178 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2179
2180 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2181 if (unlikely(skb->len < len)) {
2182 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2183 return;
2184 }
2185
2186 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2187 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2188
2189 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
 2190 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2191 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2192 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2193
2194 ath10k_dbg(ar, ATH10K_DBG_HTT,
2195 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2196 info0, info1, enable, num_records, mode, threshold);
2197
2198 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2199
2200 if (unlikely(skb->len < len)) {
 2201 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
2202 return;
2203 }
2204
2205 switch (mode) {
2206 case HTT_TX_MODE_SWITCH_PUSH:
2207 case HTT_TX_MODE_SWITCH_PUSH_PULL:
2208 break;
2209 default:
 2210 ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
2211 mode);
2212 return;
2213 }
2214
2215 if (!enable)
2216 return;
2217
426e10ea
MK
2218 ar->htt.tx_q_state.enabled = enable;
2219 ar->htt.tx_q_state.mode = mode;
2220 ar->htt.tx_q_state.num_push_allowed = threshold;
2221
2222 rcu_read_lock();
839ae637
MK
2223
2224 for (i = 0; i < num_records; i++) {
2225 record = &resp->tx_mode_switch_ind.records[i];
2226 info0 = le16_to_cpu(record->info0);
2227 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2228 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2229
2230 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2231 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2232 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2233 peer_id, tid);
2234 continue;
2235 }
2236
426e10ea
MK
2237 spin_lock_bh(&ar->data_lock);
2238 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2239 spin_unlock_bh(&ar->data_lock);
2240
2241 /* It is okay to release the lock and use txq because RCU read
2242 * lock is held.
2243 */
2244
2245 if (unlikely(!txq)) {
2246 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2247 peer_id, tid);
2248 continue;
2249 }
2250
2251 spin_lock_bh(&ar->htt.tx_lock);
2252 artxq = (void *)txq->drv_priv;
2253 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2254 spin_unlock_bh(&ar->htt.tx_lock);
839ae637
MK
2255 }
2256
426e10ea
MK
2257 rcu_read_unlock();
2258
2259 ath10k_mac_tx_push_pending(ar);
839ae637
MK
2260}
2261
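/* Map a firmware phy mode to an ieee80211 band. 2 GHz is the fallback
 * for unknown modes.
 */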
2ce9b25c
RM
2262static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
2263{
2264 enum ieee80211_band band;
2265
2266 switch (phy_mode) {
2267 case MODE_11A:
2268 case MODE_11NA_HT20:
2269 case MODE_11NA_HT40:
2270 case MODE_11AC_VHT20:
2271 case MODE_11AC_VHT40:
2272 case MODE_11AC_VHT80:
2273 band = IEEE80211_BAND_5GHZ;
2274 break;
2275 case MODE_11G:
2276 case MODE_11B:
2277 case MODE_11GONLY:
2278 case MODE_11NG_HT20:
2279 case MODE_11NG_HT40:
2280 case MODE_11AC_VHT20_2G:
2281 case MODE_11AC_VHT40_2G:
2282 case MODE_11AC_VHT80_2G:
2283 default:
2284 band = IEEE80211_BAND_2GHZ;
2285 }
2286
2287 return band;
2288}
2289
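/* Main HTT target-to-host message dispatcher. Rx indications, tx
 * completions and tx fetch indications are queued for the txrx tasklet;
 * everything else is handled inline and the buffer is freed here.
 */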
5e3dd157
KV
2290void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2291{
edb8236d 2292 struct ath10k_htt *htt = &ar->htt;
5e3dd157 2293 struct htt_resp *resp = (struct htt_resp *)skb->data;
8348db29 2294 enum htt_t2h_msg_type type;
5e3dd157
KV
2295
2296 /* confirm alignment */
2297 if (!IS_ALIGNED((unsigned long)skb->data, 4))
7aa7a72a 2298 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
5e3dd157 2299
7aa7a72a 2300 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
5e3dd157 2301 resp->hdr.msg_type);
8348db29
RM
2302
2303 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
 2304 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
2305 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2306 dev_kfree_skb_any(skb);
2307 return;
2308 }
2309 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2310
2311 switch (type) {
5e3dd157
KV
2312 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2313 htt->target_version_major = resp->ver_resp.major;
2314 htt->target_version_minor = resp->ver_resp.minor;
2315 complete(&htt->target_version_received);
2316 break;
2317 }
6c5151a9 2318 case HTT_T2H_MSG_TYPE_RX_IND:
e7827e51 2319 skb_queue_tail(&htt->rx_compl_q, skb);
6c5151a9
MK
2320 tasklet_schedule(&htt->txrx_compl_task);
2321 return;
5e3dd157
KV
2322 case HTT_T2H_MSG_TYPE_PEER_MAP: {
2323 struct htt_peer_map_event ev = {
2324 .vdev_id = resp->peer_map.vdev_id,
2325 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2326 };
2327 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2328 ath10k_peer_map_event(htt, &ev);
2329 break;
2330 }
2331 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2332 struct htt_peer_unmap_event ev = {
2333 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2334 };
2335 ath10k_peer_unmap_event(htt, &ev);
2336 break;
2337 }
2338 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2339 struct htt_tx_done tx_done = {};
2340 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2341
2342 tx_done.msdu_id =
2343 __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2344
2345 switch (status) {
2346 case HTT_MGMT_TX_STATUS_OK:
55314fc2 2347 tx_done.success = true;
5e3dd157
KV
2348 break;
2349 case HTT_MGMT_TX_STATUS_RETRY:
2350 tx_done.no_ack = true;
2351 break;
2352 case HTT_MGMT_TX_STATUS_DROP:
2353 tx_done.discard = true;
2354 break;
2355 }
2356
cac08552
RM
2357 status = ath10k_txrx_tx_unref(htt, &tx_done);
2358 if (!status) {
2359 spin_lock_bh(&htt->tx_lock);
2360 ath10k_htt_tx_mgmt_dec_pending(htt);
2361 spin_unlock_bh(&htt->tx_lock);
2362 }
29946878 2363 ath10k_mac_tx_push_pending(ar);
5e3dd157
KV
2364 break;
2365 }
6c5151a9 2366 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
005fb161 2367 skb_queue_tail(&htt->tx_compl_q, skb);
6c5151a9
MK
2368 tasklet_schedule(&htt->txrx_compl_task);
2369 return;
5e3dd157
KV
2370 case HTT_T2H_MSG_TYPE_SEC_IND: {
2371 struct ath10k *ar = htt->ar;
2372 struct htt_security_indication *ev = &resp->security_indication;
2373
7aa7a72a 2374 ath10k_dbg(ar, ATH10K_DBG_HTT,
5e3dd157
KV
2375 "sec ind peer_id %d unicast %d type %d\n",
2376 __le16_to_cpu(ev->peer_id),
2377 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2378 MS(ev->flags, HTT_SECURITY_TYPE));
2379 complete(&ar->install_key_done);
2380 break;
2381 }
2382 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
7aa7a72a 2383 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
5e3dd157
KV
2384 skb->data, skb->len);
2385 ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
2386 break;
2387 }
2388 case HTT_T2H_MSG_TYPE_TEST:
5e3dd157 2389 break;
5e3dd157 2390 case HTT_T2H_MSG_TYPE_STATS_CONF:
d35a6c18 2391 trace_ath10k_htt_stats(ar, skb->data, skb->len);
a9bf0506
KV
2392 break;
2393 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
708b9bde
MK
 2394 /* Firmware can return tx frames if it's unable to fully
 2395 * process them and suspects the host may be able to fix them.
 2396 * ath10k sends all tx frames as already inspected so this
 2397 * shouldn't happen unless the fw has a bug.
2398 */
7aa7a72a 2399 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
708b9bde 2400 break;
5e3dd157 2401 case HTT_T2H_MSG_TYPE_RX_ADDBA:
aa5b4fbc
MK
2402 ath10k_htt_rx_addba(ar, resp);
2403 break;
5e3dd157 2404 case HTT_T2H_MSG_TYPE_RX_DELBA:
aa5b4fbc
MK
2405 ath10k_htt_rx_delba(ar, resp);
2406 break;
bfdd7937
RM
2407 case HTT_T2H_MSG_TYPE_PKTLOG: {
2408 struct ath10k_pktlog_hdr *hdr =
2409 (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
2410
2411 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2412 sizeof(*hdr) +
2413 __le16_to_cpu(hdr->size));
2414 break;
2415 }
aa5b4fbc
MK
2416 case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2417 /* Ignore this event because mac80211 takes care of Rx
2418 * aggregation reordering.
2419 */
2420 break;
2421 }
c545070e 2422 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
e7827e51 2423 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
c545070e
MK
2424 tasklet_schedule(&htt->txrx_compl_task);
2425 return;
2426 }
2427 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
8348db29 2428 break;
2ce9b25c
RM
2429 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2430 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2431 u32 freq = __le32_to_cpu(resp->chan_change.freq);
2432
2433 ar->tgt_oper_chan =
2434 __ieee80211_get_channel(ar->hw->wiphy, freq);
2435 ath10k_dbg(ar, ATH10K_DBG_HTT,
2436 "htt chan change freq %u phymode %s\n",
2437 freq, ath10k_wmi_phymode_str(phymode));
c545070e 2438 break;
2ce9b25c 2439 }
ccec9038
DL
2440 case HTT_T2H_MSG_TYPE_AGGR_CONF:
2441 break;
721ad3ca 2442 case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
426e10ea
MK
2443 skb_queue_tail(&htt->tx_fetch_ind_q, skb);
2444 tasklet_schedule(&htt->txrx_compl_task);
2445 return;
df94e702 2446 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
839ae637
MK
2447 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2448 break;
df94e702 2449 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
839ae637 2450 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
9b158736
MK
2451 break;
2452 case HTT_T2H_MSG_TYPE_EN_STATS:
5e3dd157 2453 default:
2358a544
MK
2454 ath10k_warn(ar, "htt event (%d) not handled\n",
2455 resp->hdr.msg_type);
7aa7a72a 2456 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
5e3dd157
KV
2457 skb->data, skb->len);
2458 break;
 2459 }
2460
2461 /* Free the indication buffer */
2462 dev_kfree_skb_any(skb);
2463}
3f0f7ed4 2464EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
6c5151a9 2465
afb0bf7f
VN
2466void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2467 struct sk_buff *skb)
2468{
53a5c9bc 2469 trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
afb0bf7f
VN
2470 dev_kfree_skb_any(skb);
2471}
2472EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2473
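/* Tasklet that drains the deferred tx completion, tx fetch and rx
 * indication queues filled by ath10k_htt_t2h_msg_handler(). The rx
 * handlers are called with rx_ring.lock held.
 */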
6c5151a9
MK
2474static void ath10k_htt_txrx_compl_task(unsigned long ptr)
2475{
2476 struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
c545070e 2477 struct ath10k *ar = htt->ar;
d742c969 2478 struct sk_buff_head tx_q;
da6416ca
RM
2479 struct sk_buff_head rx_q;
2480 struct sk_buff_head rx_ind_q;
426e10ea 2481 struct sk_buff_head tx_ind_q;
6c5151a9
MK
2482 struct htt_resp *resp;
2483 struct sk_buff *skb;
d742c969 2484 unsigned long flags;
6c5151a9 2485
d742c969 2486 __skb_queue_head_init(&tx_q);
da6416ca
RM
2487 __skb_queue_head_init(&rx_q);
2488 __skb_queue_head_init(&rx_ind_q);
426e10ea 2489 __skb_queue_head_init(&tx_ind_q);
d742c969
MK
2490
2491 spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
2492 skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
2493 spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);
2494
da6416ca
RM
2495 spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
2496 skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
2497 spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
2498
2499 spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
2500 skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
2501 spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
2502
426e10ea
MK
2503 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2504 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2505 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2506
d742c969 2507 while ((skb = __skb_dequeue(&tx_q))) {
6c5151a9
MK
2508 ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
2509 dev_kfree_skb_any(skb);
2510 }
2511
426e10ea
MK
2512 while ((skb = __skb_dequeue(&tx_ind_q))) {
2513 ath10k_htt_rx_tx_fetch_ind(ar, skb);
2514 dev_kfree_skb_any(skb);
2515 }
2516
29946878
MK
2517 ath10k_mac_tx_push_pending(ar);
2518
da6416ca 2519 while ((skb = __skb_dequeue(&rx_q))) {
6c5151a9 2520 resp = (struct htt_resp *)skb->data;
e7827e51 2521 spin_lock_bh(&htt->rx_ring.lock);
6c5151a9 2522 ath10k_htt_rx_handler(htt, &resp->rx_ind);
e7827e51 2523 spin_unlock_bh(&htt->rx_ring.lock);
6c5151a9
MK
2524 dev_kfree_skb_any(skb);
2525 }
c545070e 2526
da6416ca 2527 while ((skb = __skb_dequeue(&rx_ind_q))) {
e7827e51 2528 spin_lock_bh(&htt->rx_ring.lock);
c545070e 2529 ath10k_htt_rx_in_ord_ind(ar, skb);
e7827e51 2530 spin_unlock_bh(&htt->rx_ring.lock);
c545070e
MK
2531 dev_kfree_skb_any(skb);
2532 }
6c5151a9 2533}