]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/wireless/mwifiex/11n_rxreorder.c
Merge tag 'samsung-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene...
[mirror_ubuntu-bionic-kernel.git] / drivers / net / wireless / mwifiex / 11n_rxreorder.c
1 /*
2 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
3 *
4 * Copyright (C) 2011-2014, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20 #include "decl.h"
21 #include "ioctl.h"
22 #include "util.h"
23 #include "fw.h"
24 #include "main.h"
25 #include "wmm.h"
26 #include "11n.h"
27 #include "11n_rxreorder.h"
28
29 /* This function will dispatch amsdu packet and forward it to kernel/upper
30 * layer.
31 */
32 static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
33 struct sk_buff *skb)
34 {
35 struct rxpd *local_rx_pd = (struct rxpd *)(skb->data);
36 int ret;
37
38 if (le16_to_cpu(local_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
39 struct sk_buff_head list;
40 struct sk_buff *rx_skb;
41
42 __skb_queue_head_init(&list);
43
44 skb_pull(skb, le16_to_cpu(local_rx_pd->rx_pkt_offset));
45 skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
46
47 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
48 priv->wdev->iftype, 0, false);
49
50 while (!skb_queue_empty(&list)) {
51 rx_skb = __skb_dequeue(&list);
52 ret = mwifiex_recv_packet(priv, rx_skb);
53 if (ret == -1)
54 dev_err(priv->adapter->dev,
55 "Rx of A-MSDU failed");
56 }
57 return 0;
58 }
59
60 return -1;
61 }
62
63 /* This function will process the rx packet and forward it to kernel/upper
64 * layer.
65 */
66 static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
67 {
68 int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
69
70 if (!ret)
71 return 0;
72
73 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
74 return mwifiex_handle_uap_rx_forward(priv, payload);
75
76 return mwifiex_process_rx_packet(priv, payload);
77 }
78
/*
 * This function dispatches all packets in the Rx reorder table until the
 * start window.
 *
 * There could be holes in the buffer, which are skipped by the function.
 * Since the buffer is linear, the function uses rotation to simulate
 * circular buffer.
 */
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
					 struct mwifiex_rx_reorder_tbl *tbl,
					 int start_win)
{
	int pkt_to_send, i;
	void *rx_tmp_ptr;
	unsigned long flags;

	/* Number of leading slots to flush: the distance to the new
	 * start_win, capped at the window size; a wrapped/backward
	 * start_win flushes the entire window.
	 */
	pkt_to_send = (start_win > tbl->start_win) ?
		      min((start_win - tbl->start_win), tbl->win_size) :
		      tbl->win_size;

	for (i = 0; i < pkt_to_send; ++i) {
		/* Detach the slot under rx_pkt_lock, but dispatch with the
		 * lock released, since the dispatch path does real work.
		 */
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		rx_tmp_ptr = NULL;
		if (tbl->rx_reorder_ptr[i]) {
			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
			tbl->rx_reorder_ptr[i] = NULL;
		}
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		if (rx_tmp_ptr)
			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer
	 */
	for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {
		tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];
		tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;
	}

	tbl->start_win = start_win;
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
125
/*
 * This function dispatches all packets in the Rx reorder table until
 * a hole is found.
 *
 * The start window is adjusted automatically when a hole is located.
 * Since the buffer is linear, the function uses rotation to simulate
 * circular buffer.
 */
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
			      struct mwifiex_rx_reorder_tbl *tbl)
{
	int i, j, xchg;
	void *rx_tmp_ptr;
	unsigned long flags;

	/* Walk the window from the front and stop at the first empty slot
	 * (a hole).  Each slot is detached under rx_pkt_lock, but the
	 * packet itself is dispatched with the lock released.
	 */
	for (i = 0; i < tbl->win_size; ++i) {
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		if (!tbl->rx_reorder_ptr[i]) {
			spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
			break;
		}
		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
		tbl->rx_reorder_ptr[i] = NULL;
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer
	 */
	if (i > 0) {
		/* Shift the remaining (win_size - i) slots to the front of
		 * the linear buffer.
		 */
		xchg = tbl->win_size - i;
		for (j = 0; j < xchg; ++j) {
			tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j];
			tbl->rx_reorder_ptr[i + j] = NULL;
		}
	}
	/* Advance start_win by the number of dispatched frames, wrapping
	 * at MAX_TID_VALUE (the sequence-number space).
	 */
	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
169
/*
 * This function deletes the Rx reorder table and frees the memory.
 *
 * The function stops the associated timer and dispatches all the
 * pending packets in the Rx reorder table before deletion.
 */
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
			     struct mwifiex_rx_reorder_tbl *tbl)
{
	unsigned long flags;
	int start_win;

	if (!tbl)
		return;

	/* Block new rx processing and, if rx work is currently running,
	 * wait for it to drain so the rx path cannot touch this entry
	 * while it is being torn down.
	 */
	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
	priv->adapter->rx_locked = true;
	if (priv->adapter->rx_processing) {
		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
		flush_workqueue(priv->adapter->rx_workqueue);
	} else {
		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
	}

	/* Advancing start_win by a full window dispatches every pending
	 * packet in the table.
	 */
	start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
	mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);

	/* Ensure the flush timer cannot fire on a freed entry. */
	del_timer_sync(&tbl->timer_context.timer);

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_del(&tbl->list);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	kfree(tbl->rx_reorder_ptr);
	kfree(tbl);

	/* Re-enable rx processing. */
	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
	priv->adapter->rx_locked = false;
	spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);

}
212
213 /*
214 * This function returns the pointer to an entry in Rx reordering
215 * table which matches the given TA/TID pair.
216 */
217 struct mwifiex_rx_reorder_tbl *
218 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
219 {
220 struct mwifiex_rx_reorder_tbl *tbl;
221 unsigned long flags;
222
223 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
224 list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
225 if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
226 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
227 flags);
228 return tbl;
229 }
230 }
231 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
232
233 return NULL;
234 }
235
236 /* This function retrieves the pointer to an entry in Rx reordering
237 * table which matches the given TA and deletes it.
238 */
239 void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
240 {
241 struct mwifiex_rx_reorder_tbl *tbl, *tmp;
242 unsigned long flags;
243
244 if (!ta)
245 return;
246
247 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
248 list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
249 if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
250 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
251 flags);
252 mwifiex_del_rx_reorder_entry(priv, tbl);
253 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
254 }
255 }
256 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
257
258 return;
259 }
260
261 /*
262 * This function finds the last sequence number used in the packets
263 * buffered in Rx reordering table.
264 */
265 static int
266 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
267 {
268 struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
269 struct mwifiex_private *priv = ctx->priv;
270 unsigned long flags;
271 int i;
272
273 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
274 for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
275 if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
276 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
277 flags);
278 return i;
279 }
280 }
281 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
282
283 return -1;
284 }
285
/*
 * This function flushes all the packets in Rx reordering table.
 *
 * The function checks if any packets are currently buffered in the
 * table or not. In case there are packets available, it dispatches
 * them and then dumps the Rx reordering table.
 *
 * Timer callback (armed in mwifiex_11n_rx_reorder_pkt); 'context' is
 * the address of the table's reorder_tmr_cnxt.
 */
static void
mwifiex_flush_data(unsigned long context)
{
	struct reorder_tmr_cnxt *ctx =
		(struct reorder_tmr_cnxt *) context;
	int start_win, seq_num;

	/* Index of the last buffered packet in the window; -1 if none. */
	seq_num = mwifiex_11n_find_last_seq_num(ctx);

	if (seq_num < 0)
		return;

	dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
	/* Move start_win just past the last buffered frame, which
	 * dispatches everything currently held in the table.
	 */
	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
						 start_win);
}
310
311 /*
312 * This function creates an entry in Rx reordering table for the
313 * given TA/TID.
314 *
315 * The function also initializes the entry with sequence number, window
316 * size as well as initializes the timer.
317 *
318 * If the received TA/TID pair is already present, all the packets are
319 * dispatched and the window size is moved until the SSN.
320 */
321 static void
322 mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
323 int tid, int win_size, int seq_num)
324 {
325 int i;
326 struct mwifiex_rx_reorder_tbl *tbl, *new_node;
327 u16 last_seq = 0;
328 unsigned long flags;
329 struct mwifiex_sta_node *node;
330
331 /*
332 * If we get a TID, ta pair which is already present dispatch all the
333 * the packets and move the window size until the ssn
334 */
335 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
336 if (tbl) {
337 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
338 return;
339 }
340 /* if !tbl then create one */
341 new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
342 if (!new_node)
343 return;
344
345 INIT_LIST_HEAD(&new_node->list);
346 new_node->tid = tid;
347 memcpy(new_node->ta, ta, ETH_ALEN);
348 new_node->start_win = seq_num;
349 new_node->init_win = seq_num;
350 new_node->flags = 0;
351
352 if (mwifiex_queuing_ra_based(priv)) {
353 dev_dbg(priv->adapter->dev,
354 "info: AP/ADHOC:last_seq=%d start_win=%d\n",
355 last_seq, new_node->start_win);
356 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
357 node = mwifiex_get_sta_entry(priv, ta);
358 if (node)
359 last_seq = node->rx_seq[tid];
360 }
361 } else {
362 node = mwifiex_get_sta_entry(priv, ta);
363 if (node)
364 last_seq = node->rx_seq[tid];
365 else
366 last_seq = priv->rx_seq[tid];
367 }
368
369 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
370 last_seq >= new_node->start_win) {
371 new_node->start_win = last_seq + 1;
372 new_node->flags |= RXREOR_INIT_WINDOW_SHIFT;
373 }
374
375 new_node->win_size = win_size;
376
377 new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
378 GFP_KERNEL);
379 if (!new_node->rx_reorder_ptr) {
380 kfree((u8 *) new_node);
381 dev_err(priv->adapter->dev,
382 "%s: failed to alloc reorder_ptr\n", __func__);
383 return;
384 }
385
386 new_node->timer_context.ptr = new_node;
387 new_node->timer_context.priv = priv;
388
389 init_timer(&new_node->timer_context.timer);
390 new_node->timer_context.timer.function = mwifiex_flush_data;
391 new_node->timer_context.timer.data =
392 (unsigned long) &new_node->timer_context;
393
394 for (i = 0; i < win_size; ++i)
395 new_node->rx_reorder_ptr[i] = NULL;
396
397 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
398 list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
399 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
400 }
401
402 /*
403 * This function prepares command for adding a BA request.
404 *
405 * Preparation includes -
406 * - Setting command ID and proper size
407 * - Setting add BA request buffer
408 * - Ensuring correct endian-ness
409 */
410 int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
411 {
412 struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;
413
414 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
415 cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
416 memcpy(add_ba_req, data_buf, sizeof(*add_ba_req));
417
418 return 0;
419 }
420
/*
 * This function prepares command for adding a BA response.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting add BA response buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
				  struct host_cmd_ds_command *cmd,
				  struct host_cmd_ds_11n_addba_req
				  *cmd_addba_req)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
	struct mwifiex_sta_node *sta_ptr;
	u32 rx_win_size = priv->add_ba_param.rx_win_size;
	u8 tid;
	int win_size;
	uint16_t block_ack_param_set;

	/* For an 11ac-capable TDLS peer (peer MAC differs from the BSSID),
	 * use the larger 11ac default rx window size.
	 */
	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
	    priv->adapter->is_hw_11ac_capable &&
	    memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
		sta_ptr = mwifiex_get_sta_entry(priv,
						cmd_addba_req->peer_mac_addr);
		if (!sta_ptr) {
			dev_warn(priv->adapter->dev,
				 "BA setup with unknown TDLS peer %pM!\n",
				 cmd_addba_req->peer_mac_addr);
			return -1;
		}
		if (sta_ptr->is_11ac_enabled)
			rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
	}

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
	cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);

	/* Echo the request's identifying fields back in the response;
	 * these are already little-endian in the request, so they are
	 * copied as-is.
	 */
	memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr,
	       ETH_ALEN);
	add_ba_rsp->dialog_token = cmd_addba_req->dialog_token;
	add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo;
	add_ba_rsp->ssn = cmd_addba_req->ssn;

	block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set);
	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
	/* Replace the requested buffer size with our own rx window size */
	block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;

	/* If we don't support AMSDU inside AMPDU, reset the bit */
	if (!priv->add_ba_param.rx_amsdu ||
	    (priv->aggr_prio_tbl[tid].amsdu == BA_STREAM_NOT_ALLOWED))
		block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
	block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
	add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
	/* Read the window size back from the response field (round-trips
	 * through le16) so win_size matches what is actually sent.
	 */
	win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
		    & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
		    >> BLOCKACKPARAM_WINSIZE_POS;
	cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);

	/* Pre-create the rx reorder table for this stream using the
	 * request's SSN as the initial window start.
	 */
	mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
					  tid, win_size,
					  le16_to_cpu(cmd_addba_req->ssn));
	return 0;
}
488
489 /*
490 * This function prepares command for deleting a BA request.
491 *
492 * Preparation includes -
493 * - Setting command ID and proper size
494 * - Setting del BA request buffer
495 * - Ensuring correct endian-ness
496 */
497 int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
498 {
499 struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;
500
501 cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
502 cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
503 memcpy(del_ba, data_buf, sizeof(*del_ba));
504
505 return 0;
506 }
507
/*
 * This function identifies if Rx reordering is needed for a received packet.
 *
 * In case reordering is required, the function will do the reordering
 * before sending it to kernel.
 *
 * The Rx reorder table is checked first with the received TID/TA pair. If
 * not found, the received packet is dispatched immediately. But if found,
 * the packet is reordered and all the packets in the updated Rx reordering
 * table is dispatched until a hole is found.
 *
 * For sequence number less than the starting window, the packet is dropped.
 */
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
			       u16 seq_num, u16 tid,
			       u8 *ta, u8 pkt_type, void *payload)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	int start_win, end_win, win_size;
	u16 pkt_index;
	bool init_window_shift = false;

	/* No BA session for this TID/TA: dispatch immediately (a BAR
	 * carries no payload to forward).
	 */
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (!tbl) {
		if (pkt_type != PKT_TYPE_BAR)
			mwifiex_11n_dispatch_pkt(priv, payload);
		return 0;
	}

	/* A-MSDU on a session that did not negotiate A-MSDU-in-AMPDU:
	 * bypass reordering.
	 */
	if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
		mwifiex_11n_dispatch_pkt(priv, payload);
		return 0;
	}

	start_win = tbl->start_win;
	win_size = tbl->win_size;
	end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
	/* One-shot flag set at table creation when start_win was advanced
	 * past the SSN; used below to detect a peer sequence-number reset.
	 */
	if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
		init_window_shift = true;
		tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
	}
	/* Re-arm the per-table flush timer on every received frame. */
	mod_timer(&tbl->timer_context.timer,
		  jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));

	if (tbl->flags & RXREOR_FORCE_NO_DROP) {
		dev_dbg(priv->adapter->dev,
			"RXREOR_FORCE_NO_DROP when HS is activated\n");
		tbl->flags &= ~RXREOR_FORCE_NO_DROP;
	} else if (init_window_shift && seq_num < start_win &&
		   seq_num >= tbl->init_win) {
		/* Peer restarted its sequence numbering between the initial
		 * window and start_win: pull the window back to seq_num.
		 */
		dev_dbg(priv->adapter->dev,
			"Sender TID sequence number reset %d->%d for SSN %d\n",
			start_win, seq_num, tbl->init_win);
		tbl->start_win = start_win = seq_num;
		end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
	} else {
		/*
		 * If seq_num is less then starting win then ignore and drop
		 * the packet
		 */
		if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
			/* start_win near the top of the sequence space: the
			 * "behind the window" half wraps around zero.
			 */
			if (seq_num >= ((start_win + TWOPOW11) &
					(MAX_TID_VALUE - 1)) &&
			    seq_num < start_win)
				return -1;
		} else if ((seq_num < start_win) ||
			   (seq_num > (start_win + TWOPOW11))) {
			return -1;
		}
	}

	/*
	 * If this packet is a BAR we adjust seq_num as
	 * WinStart = seq_num
	 */
	if (pkt_type == PKT_TYPE_BAR)
		seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);

	/* Frame beyond the current window (both wrapped and unwrapped
	 * window layouts): slide the window forward so seq_num becomes
	 * the new end, dispatching everything that falls out.
	 */
	if (((end_win < start_win) &&
	     (seq_num < start_win) && (seq_num > end_win)) ||
	    ((end_win > start_win) && ((seq_num > end_win) ||
				       (seq_num < start_win)))) {
		end_win = seq_num;
		if (((seq_num - win_size) + 1) >= 0)
			start_win = (end_win - win_size) + 1;
		else
			start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
	}

	if (pkt_type != PKT_TYPE_BAR) {
		/* Slot index within the window, accounting for wrap. */
		if (seq_num >= start_win)
			pkt_index = seq_num - start_win;
		else
			pkt_index = (seq_num+MAX_TID_VALUE) - start_win;

		/* Duplicate frame for an occupied slot: drop it. */
		if (tbl->rx_reorder_ptr[pkt_index])
			return -1;

		tbl->rx_reorder_ptr[pkt_index] = payload;
	}

	/*
	 * Dispatch all packets sequentially from start_win until a
	 * hole is found and adjust the start_win appropriately
	 */
	mwifiex_11n_scan_and_dispatch(priv, tbl);

	return 0;
}
618
619 /*
620 * This function deletes an entry for a given TID/TA pair.
621 *
622 * The TID/TA are taken from del BA event body.
623 */
624 void
625 mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
626 u8 type, int initiator)
627 {
628 struct mwifiex_rx_reorder_tbl *tbl;
629 struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
630 u8 cleanup_rx_reorder_tbl;
631 unsigned long flags;
632
633 if (type == TYPE_DELBA_RECEIVE)
634 cleanup_rx_reorder_tbl = (initiator) ? true : false;
635 else
636 cleanup_rx_reorder_tbl = (initiator) ? false : true;
637
638 dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
639 peer_mac, tid, initiator);
640
641 if (cleanup_rx_reorder_tbl) {
642 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
643 peer_mac);
644 if (!tbl) {
645 dev_dbg(priv->adapter->dev,
646 "event: TID, TA not found in table\n");
647 return;
648 }
649 mwifiex_del_rx_reorder_entry(priv, tbl);
650 } else {
651 ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
652 if (!ptx_tbl) {
653 dev_dbg(priv->adapter->dev,
654 "event: TID, RA not found in table\n");
655 return;
656 }
657
658 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
659 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
660 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
661 }
662 }
663
664 /*
665 * This function handles the command response of an add BA response.
666 *
667 * Handling includes changing the header fields into CPU format and
668 * creating the stream, provided the add BA is accepted.
669 */
670 int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
671 struct host_cmd_ds_command *resp)
672 {
673 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
674 int tid, win_size;
675 struct mwifiex_rx_reorder_tbl *tbl;
676 uint16_t block_ack_param_set;
677
678 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
679
680 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
681 >> BLOCKACKPARAM_TID_POS;
682 /*
683 * Check if we had rejected the ADDBA, if yes then do not create
684 * the stream
685 */
686 if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
687 dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
688 add_ba_rsp->peer_mac_addr, tid);
689
690 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
691 add_ba_rsp->peer_mac_addr);
692 if (tbl)
693 mwifiex_del_rx_reorder_entry(priv, tbl);
694
695 return 0;
696 }
697
698 win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
699 >> BLOCKACKPARAM_WINSIZE_POS;
700
701 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
702 add_ba_rsp->peer_mac_addr);
703 if (tbl) {
704 if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
705 priv->add_ba_param.rx_amsdu &&
706 (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
707 tbl->amsdu = true;
708 else
709 tbl->amsdu = false;
710 }
711
712 dev_dbg(priv->adapter->dev,
713 "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
714 add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
715
716 return 0;
717 }
718
719 /*
720 * This function handles BA stream timeout event by preparing and sending
721 * a command to the firmware.
722 */
723 void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
724 struct host_cmd_ds_11n_batimeout *event)
725 {
726 struct host_cmd_ds_11n_delba delba;
727
728 memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba));
729 memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN);
730
731 delba.del_ba_param_set |=
732 cpu_to_le16((u16) event->tid << DELBA_TID_POS);
733 delba.del_ba_param_set |= cpu_to_le16(
734 (u16) event->origninator << DELBA_INITIATOR_POS);
735 delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
736 mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba, false);
737 }
738
/*
 * This function cleans up the Rx reorder table by deleting all the entries
 * and re-initializing.
 */
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
{
	struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->rx_reorder_tbl_ptr, list) {
		/* Drop the lock for the deletion: the helper dispatches
		 * pending packets and takes this same lock itself when
		 * unlinking the entry.
		 */
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	}
	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	mwifiex_reset_11n_rx_seq_num(priv);
}
760
761 /*
762 * This function updates all rx_reorder_tbl's flags.
763 */
764 void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
765 {
766 struct mwifiex_private *priv;
767 struct mwifiex_rx_reorder_tbl *tbl;
768 unsigned long lock_flags;
769 int i;
770
771 for (i = 0; i < adapter->priv_num; i++) {
772 priv = adapter->priv[i];
773 if (!priv)
774 continue;
775
776 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
777 if (list_empty(&priv->rx_reorder_tbl_ptr)) {
778 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
779 lock_flags);
780 continue;
781 }
782
783 list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
784 tbl->flags = flags;
785 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
786 }
787
788 return;
789 }