1 /******************************************************************************
2 * rtl8712_recv.c
3 *
4 * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
5 * Linux device driver for RTL8192SU
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * Modifications for inclusion into the Linux staging tree are
21 * Copyright(c) 2010 Larry Finger. All rights reserved.
22 *
23 * Contact information:
24 * WLAN FAE <wlanfae@realtek.com>
25 * Larry Finger <Larry.Finger@lwfinger.net>
26 *
27 ******************************************************************************/
28
29 #define _RTL8712_RECV_C_
30
31 #include <linux/if_ether.h>
32 #include <linux/ip.h>
33
34 #include "osdep_service.h"
35 #include "drv_types.h"
36 #include "recv_osdep.h"
37 #include "mlme_osdep.h"
38 #include "ethernet.h"
39 #include "usb_ops.h"
40 #include "wifi.h"
41
42 /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
43 static u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
44
45 /* Ethernet-II snap header (RFC1042 for most EtherTypes) */
46 static u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
47
48 static void recv_tasklet(void *priv);
49
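/*
 * Set up the receive path for the adapter: carve NR_RECVBUFF recv_buf
 * structures out of one 4-byte-aligned allocation, attach per-buffer
 * resources, and pre-allocate NR_PREALLOC_RECV_SKB skbs (reserved so the
 * payload starts RECVBUFF_ALIGN_SZ-aligned) on free_recv_skb_queue.  The
 * recv_tasklet that drains rx_skb_queue is registered here as well.
 */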
50 int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
51 {
52 int i;
53 struct recv_buf *precvbuf;
54 int res = _SUCCESS;
55 addr_t tmpaddr = 0;
56 int alignment = 0;
57 struct sk_buff *pskb = NULL;
58
59 /*init recv_buf*/
60 _init_queue(&precvpriv->free_recv_buf_queue);
61 precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
62 GFP_ATOMIC);
63 if (precvpriv->pallocated_recv_buf == NULL)
64 return _FAIL;
65 precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
66 ((addr_t) (precvpriv->pallocated_recv_buf) & 3);
67 precvbuf = (struct recv_buf *)precvpriv->precv_buf;
68 for (i = 0; i < NR_RECVBUFF; i++) {
69 INIT_LIST_HEAD(&precvbuf->list);
70 spin_lock_init(&precvbuf->recvbuf_lock);
71 res = r8712_os_recvbuf_resource_alloc(padapter, precvbuf);
72 if (res == _FAIL)
73 break;
74 precvbuf->ref_cnt = 0;
75 precvbuf->adapter = padapter;
76 list_add_tail(&precvbuf->list,
77 &(precvpriv->free_recv_buf_queue.queue));
78 precvbuf++;
79 }
80 precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
81 tasklet_init(&precvpriv->recv_tasklet,
82 (void(*)(unsigned long))recv_tasklet,
83 (unsigned long)padapter);
84 skb_queue_head_init(&precvpriv->rx_skb_queue);
85
86 skb_queue_head_init(&precvpriv->free_recv_skb_queue);
87 for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
88 pskb = netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ +
89 RECVBUFF_ALIGN_SZ);
90 if (pskb) {
91 tmpaddr = (addr_t)pskb->data;
92 alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
93 skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
94 skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
95 }
96 pskb = NULL;
97 }
98 return res;
99 }
100
101 void r8712_free_recv_priv(struct recv_priv *precvpriv)
102 {
103 int i;
104 struct recv_buf *precvbuf;
105 struct _adapter *padapter = precvpriv->adapter;
106
107 precvbuf = (struct recv_buf *)precvpriv->precv_buf;
108 for (i = 0; i < NR_RECVBUFF; i++) {
109 r8712_os_recvbuf_resource_free(padapter, precvbuf);
110 precvbuf++;
111 }
112 kfree(precvpriv->pallocated_recv_buf);
113 skb_queue_purge(&precvpriv->rx_skb_queue);
114 if (skb_queue_len(&precvpriv->rx_skb_queue))
115 netdev_warn(padapter->pnetdev, "r8712u: rx_skb_queue not empty\n");
116 skb_queue_purge(&precvpriv->free_recv_skb_queue);
117 if (skb_queue_len(&precvpriv->free_recv_skb_queue))
118 netdev_warn(padapter->pnetdev, "r8712u: free_recv_skb_queue not empty %d\n",
119 skb_queue_len(&precvpriv->free_recv_skb_queue));
120 }
121
122 int r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
123 {
124 precvbuf->transfer_len = 0;
125 precvbuf->len = 0;
126 precvbuf->ref_cnt = 0;
127 if (precvbuf->pbuf) {
128 precvbuf->pdata = precvbuf->pbuf;
129 precvbuf->phead = precvbuf->pbuf;
130 precvbuf->ptail = precvbuf->pbuf;
131 precvbuf->pend = precvbuf->pdata + MAX_RECVBUF_SZ;
132 }
133 return _SUCCESS;
134 }
135
136 int r8712_free_recvframe(union recv_frame *precvframe,
137 struct __queue *pfree_recv_queue)
138 {
139 unsigned long irqL;
140 struct _adapter *padapter = precvframe->u.hdr.adapter;
141 struct recv_priv *precvpriv = &padapter->recvpriv;
142
143 if (precvframe->u.hdr.pkt) {
144 dev_kfree_skb_any(precvframe->u.hdr.pkt);/*free skb by driver*/
145 precvframe->u.hdr.pkt = NULL;
146 }
147 spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
148 list_del_init(&(precvframe->u.hdr.list));
149 list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue);
150 if (padapter != NULL) {
151 if (pfree_recv_queue == &precvpriv->free_recv_queue)
152 precvpriv->free_recvframe_cnt++;
153 }
154 spin_unlock_irqrestore(&pfree_recv_queue->lock, irqL);
155 return _SUCCESS;
156 }
157
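/*
 * Translate the hardware rx descriptor (struct recv_stat) into the driver's
 * rx_pkt_attrib: decryption and CRC-error flags from rxdw0, TCP/IP checksum
 * report bits, MCS rate and HTC flag from rxdw3.
 */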
158 static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
159 struct recv_stat *prxstat)
160 {
161 u16 drvinfo_sz = 0;
162
163 drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
164 drvinfo_sz <<= 3;
165 /*TODO:
166 * Offset 0 */
167 pattrib->bdecrypted = ((le32_to_cpu(prxstat->rxdw0) & BIT(27)) >> 27)
168 ? 0 : 1;
169 pattrib->crc_err = (le32_to_cpu(prxstat->rxdw0) & BIT(14)) >> 14;
170 /*Offset 4*/
171 /*Offset 8*/
172 /*Offset 12*/
173 if (le32_to_cpu(prxstat->rxdw3) & BIT(13)) {
174 pattrib->tcpchk_valid = 1; /* valid */
175 if (le32_to_cpu(prxstat->rxdw3) & BIT(11))
176 pattrib->tcp_chkrpt = 1; /* correct */
177 else
178 pattrib->tcp_chkrpt = 0; /* incorrect */
179 if (le32_to_cpu(prxstat->rxdw3) & BIT(12))
180 pattrib->ip_chkrpt = 1; /* correct */
181 else
182 pattrib->ip_chkrpt = 0; /* incorrect */
183 } else
184 pattrib->tcpchk_valid = 0; /* invalid */
185 pattrib->mcs_rate = (u8)((le32_to_cpu(prxstat->rxdw3)) & 0x3f);
186 pattrib->htc = (u8)((le32_to_cpu(prxstat->rxdw3) >> 14) & 0x1);
187 /*Offset 16*/
188 /*Offset 20*/
189 /*phy_info*/
190 }
191
192 /* Perform defragmentation: pull each later fragment's 802.11 header and
 * IV, trim the accumulated ICV from the first fragment's tail, append the
 * payloads in sequence, free defrag_q and return the rebuilt frame; NULL is
 * returned if the fragment numbers are not consecutive. */
193 static union recv_frame *recvframe_defrag(struct _adapter *adapter,
194 struct __queue *defrag_q)
195 {
196 struct list_head *plist, *phead;
197 u8 wlanhdr_offset;
198 u8 curfragnum;
199 struct recv_frame_hdr *pfhdr, *pnfhdr;
200 union recv_frame *prframe, *pnextrframe;
201 struct __queue *pfree_recv_queue;
202
203 pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
204 phead = &defrag_q->queue;
205 plist = phead->next;
206 prframe = LIST_CONTAINOR(plist, union recv_frame, u);
207 list_del_init(&prframe->u.list);
208 pfhdr = &prframe->u.hdr;
209 curfragnum = 0;
210 if (curfragnum != pfhdr->attrib.frag_num) {
211 /*the first fragment number must be 0
212 *free the whole queue*/
213 r8712_free_recvframe(prframe, pfree_recv_queue);
214 r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
215 return NULL;
216 }
217 curfragnum++;
218 plist = &defrag_q->queue;
219 plist = plist->next;
220 while (end_of_queue_search(phead, plist) == false) {
221 pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
222 pnfhdr = &pnextrframe->u.hdr;
223 /*check the fragment sequence (2nd ~n fragment frame) */
224 if (curfragnum != pnfhdr->attrib.frag_num) {
225 /* the fragment number must increase (after decache)
226 * release the defrag_q & prframe */
227 r8712_free_recvframe(prframe, pfree_recv_queue);
228 r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
229 return NULL;
230 }
231 curfragnum++;
232 /* copy the 2nd~n fragment frame's payload to the first fragment
233 * get the 2nd~last fragment frame's payload */
234 wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
235 recvframe_pull(pnextrframe, wlanhdr_offset);
236 /* append to first fragment frame's tail (if privacy frame,
237 * pull the ICV) */
238 recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);
239 memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);
240 recvframe_put(prframe, pnfhdr->len);
241 pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
242 plist = plist->next;
243 }
244 /* free the defrag_q queue and return the prframe */
245 r8712_free_recvframe_queue(defrag_q, pfree_recv_queue);
246 return prframe;
247 }
248
249 /* Check whether the frame needs defragmentation; if so, queue it on defrag_q */
250 union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
251 union recv_frame *precv_frame)
252 {
253 u8 ismfrag;
254 u8 fragnum;
255 u8 *psta_addr;
256 struct recv_frame_hdr *pfhdr;
257 struct sta_info *psta;
258 struct sta_priv *pstapriv;
259 struct list_head *phead;
260 union recv_frame *prtnframe = NULL;
261 struct __queue *pfree_recv_queue, *pdefrag_q;
262
263 pstapriv = &padapter->stapriv;
264 pfhdr = &precv_frame->u.hdr;
265 pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
266 /* need to define struct of wlan header frame ctrl */
267 ismfrag = pfhdr->attrib.mfrag;
268 fragnum = pfhdr->attrib.frag_num;
269 psta_addr = pfhdr->attrib.ta;
270 psta = r8712_get_stainfo(pstapriv, psta_addr);
271 if (psta == NULL)
272 pdefrag_q = NULL;
273 else
274 pdefrag_q = &psta->sta_recvpriv.defrag_q;
275
276 if ((ismfrag == 0) && (fragnum == 0))
277 prtnframe = precv_frame;/*isn't a fragment frame*/
278 if (ismfrag == 1) {
279 /* 0~(n-1) fragment frame
280 * enqueue to defrag_q */
281 if (pdefrag_q != NULL) {
282 if (fragnum == 0) {
283 /*the first fragment*/
284 if (!list_empty(&pdefrag_q->queue)) {
285 /*free current defrag_q */
286 r8712_free_recvframe_queue(pdefrag_q,
287 pfree_recv_queue);
288 }
289 }
290 /* Then enqueue the 0~(n-1) fragment to the defrag_q */
291 phead = &pdefrag_q->queue;
292 list_add_tail(&pfhdr->list, phead);
293 prtnframe = NULL;
294 } else {
295 /* can't find this ta's defrag_queue, so free this
296 * recv_frame */
297 r8712_free_recvframe(precv_frame, pfree_recv_queue);
298 prtnframe = NULL;
299 }
300
301 }
302 if ((ismfrag == 0) && (fragnum != 0)) {
303 /* the last fragment frame
304 * enqueue the last fragment */
305 if (pdefrag_q != NULL) {
306 phead = &pdefrag_q->queue;
307 list_add_tail(&pfhdr->list, phead);
308 /*call recvframe_defrag to defrag*/
309 precv_frame = recvframe_defrag(padapter, pdefrag_q);
310 prtnframe = precv_frame;
311 } else {
312 /* can't find this ta's defrag_queue, so free this
313 * recv_frame */
314 r8712_free_recvframe(precv_frame, pfree_recv_queue);
315 prtnframe = NULL;
316 }
317 }
318 if ((prtnframe != NULL) && (prtnframe->u.hdr.attrib.privacy)) {
319 /* after defrag we must check tkip mic code */
320 if (r8712_recvframe_chkmic(padapter, prtnframe) == _FAIL) {
321 r8712_free_recvframe(prtnframe, pfree_recv_queue);
322 prtnframe = NULL;
323 }
324 }
325 return prtnframe;
326 }
327
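/*
 * De-aggregate an A-MSDU: strip the 802.11 header and IV, then walk the
 * subframes (DA/SA plus a big-endian length field), copy each payload into
 * its own skb, rewrite the RFC1042/Bridge-Tunnel SNAP header back into an
 * Ethernet header where appropriate, and hand every subframe to netif_rx().
 * The original recv_frame is always released before returning.
 */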
328 static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
329 {
330 int a_len, padding_len;
331 u16 eth_type, nSubframe_Length;
332 u8 nr_subframes, i;
333 unsigned char *data_ptr, *pdata;
334 struct rx_pkt_attrib *pattrib;
335 _pkt *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
336 struct recv_priv *precvpriv = &padapter->recvpriv;
337 struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
338
339 nr_subframes = 0;
340 pattrib = &prframe->u.hdr.attrib;
341 recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
342 if (prframe->u.hdr.attrib.iv_len > 0)
343 recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
344 a_len = prframe->u.hdr.len;
345 pdata = prframe->u.hdr.rx_data;
346 while (a_len > ETH_HLEN) {
347 /* Offset 12 denotes 2 MAC addresses */
348 nSubframe_Length = *((u16 *)(pdata + 12));
349 /* byte-swap: the subframe length field is big-endian */
350 nSubframe_Length = (nSubframe_Length >> 8) +
351 (nSubframe_Length << 8);
352 if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
353 netdev_warn(padapter->pnetdev, "r8712u: nRemain_Length is %d and nSubframe_Length is: %d\n",
354 a_len, nSubframe_Length);
355 goto exit;
356 }
357 /* move the data pointer to the data content */
358 pdata += ETH_HLEN;
359 a_len -= ETH_HLEN;
360 /* Allocate new skb for releasing to upper layer */
361 sub_skb = dev_alloc_skb(nSubframe_Length + 12);
362 if (!sub_skb)
363 break;
364 skb_reserve(sub_skb, 12);
365 data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
366 memcpy(data_ptr, pdata, nSubframe_Length);
367 subframes[nr_subframes++] = sub_skb;
368 if (nr_subframes >= MAX_SUBFRAME_COUNT) {
369 netdev_warn(padapter->pnetdev, "r8712u: ParseSubframe(): Too many Subframes! Packets dropped!\n");
370 break;
371 }
372 pdata += nSubframe_Length;
373 a_len -= nSubframe_Length;
374 if (a_len != 0) {
375 padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & 3);
376 if (padding_len == 4)
377 padding_len = 0;
378 if (a_len < padding_len)
379 goto exit;
380 pdata += padding_len;
381 a_len -= padding_len;
382 }
383 }
384 for (i = 0; i < nr_subframes; i++) {
385 sub_skb = subframes[i];
386 /* convert hdr + possible LLC headers into Ethernet header */
387 eth_type = (sub_skb->data[6] << 8) | sub_skb->data[7];
388 if (sub_skb->len >= 8 &&
389 ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
390 eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
391 !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
392 /* remove RFC1042 or Bridge-Tunnel encapsulation and
393 * replace EtherType */
394 skb_pull(sub_skb, SNAP_SIZE);
395 memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
396 ETH_ALEN);
397 memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
398 ETH_ALEN);
399 } else {
400 u16 len;
401 /* Leave Ethernet header part of hdr and full payload */
402 len = htons(sub_skb->len);
403 memcpy(skb_push(sub_skb, 2), &len, 2);
404 memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
405 ETH_ALEN);
406 memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
407 ETH_ALEN);
408 }
409 /* Indicate the packets to upper layer */
410 if (sub_skb) {
411 sub_skb->protocol =
412 eth_type_trans(sub_skb, padapter->pnetdev);
413 sub_skb->dev = padapter->pnetdev;
414 if ((pattrib->tcpchk_valid == 1) &&
415 (pattrib->tcp_chkrpt == 1)) {
416 sub_skb->ip_summed = CHECKSUM_UNNECESSARY;
417 } else
418 sub_skb->ip_summed = CHECKSUM_NONE;
419 netif_rx(sub_skb);
420 }
421 }
422 exit:
423 prframe->u.hdr.len = 0;
424 r8712_free_recvframe(prframe, pfree_recv_queue);
425 return _SUCCESS;
426 }
427
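/*
 * Handle an rx buffer that carries firmware command/event records instead of
 * a data frame: skip the rx descriptor and driver info, then feed each
 * 8-byte-aligned record to r8712_event_handle() while bit 31 of the record
 * header is set.
 */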
428 void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
429 {
430 uint voffset;
431 u8 *poffset;
432 u16 cmd_len, drvinfo_sz;
433 struct recv_stat *prxstat;
434
435 poffset = (u8 *)prxcmdbuf;
436 voffset = *(uint *)poffset;
437 prxstat = (struct recv_stat *)prxcmdbuf;
438 drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
439 drvinfo_sz <<= 3;
440 poffset += RXDESC_SIZE + drvinfo_sz;
441 do {
442 voffset = *(uint *)poffset;
443 cmd_len = (u16)(le32_to_cpu(voffset) & 0xffff);
444 r8712_event_handle(padapter, (uint *)poffset);
445 poffset += (cmd_len + 8);/*8 bytes alignment*/
446 } while (le32_to_cpu(voffset) & BIT(31));
447
448 }
449
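/*
 * Maintain the A-MPDU rx reorder window (sequence space is modulo 4096).
 * indicate_seq is WinStart and wsize_b is the window size; packets older
 * than WinStart are rejected, a packet equal to WinStart advances the
 * window by one, and a packet beyond WinEnd forces the window forward so
 * that the new packet becomes its last entry.
 */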
450 static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl,
451 u16 seq_num)
452 {
453 u8 wsize = preorder_ctrl->wsize_b;
454 u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) % 4096;
455
456 /* Rx Reorder initialization condition. */
457 if (preorder_ctrl->indicate_seq == 0xffff)
458 preorder_ctrl->indicate_seq = seq_num;
459 /* Drop packets whose SeqNum is smaller than WinStart */
460 if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
461 return false;
462 /*
463 * Sliding window manipulation. Conditions includes:
464 * 1. Incoming SeqNum is equal to WinStart => Window shift 1
465 * 2. Incoming SeqNum is larger than the WinEnd => Window shift N
466 */
467 if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq))
468 preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq +
469 1) % 4096;
470 else if (SN_LESS(wend, seq_num)) {
471 if (seq_num >= (wsize - 1))
472 preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
473 else
474 preorder_ctrl->indicate_seq = 4095 - (wsize -
475 (seq_num + 1)) + 1;
476 }
477 return true;
478 }
479
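/*
 * Insert the frame into the pending reorder queue, keeping it sorted by
 * sequence number; a duplicate sequence number is rejected.
 */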
480 static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
481 union recv_frame *prframe)
482 {
483 struct list_head *phead, *plist;
484 union recv_frame *pnextrframe;
485 struct rx_pkt_attrib *pnextattrib;
486 struct __queue *ppending_recvframe_queue =
487 &preorder_ctrl->pending_recvframe_queue;
488 struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
489
490 phead = &ppending_recvframe_queue->queue;
491 plist = phead->next;
492 while (end_of_queue_search(phead, plist) == false) {
493 pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
494 pnextattrib = &pnextrframe->u.hdr.attrib;
495 if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
496 plist = plist->next;
497 else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
498 return false;
499 else
500 break;
501 }
502 list_del_init(&(prframe->u.hdr.list));
503 list_add_tail(&(prframe->u.hdr.list), plist);
504 return true;
505 }
506
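/*
 * Flush in-order frames from the pending reorder queue up to the first gap,
 * indicating each one (or converting it from A-MSDU first).  With bforced
 * set, the window start is reset to the head frame's sequence number so
 * that at least the head can be delivered.  Returns true while frames are
 * still buffered waiting for a missing sequence number.
 */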
507 int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
508 struct recv_reorder_ctrl *preorder_ctrl,
509 int bforced)
510 {
511 struct list_head *phead, *plist;
512 union recv_frame *prframe;
513 struct rx_pkt_attrib *pattrib;
514 int bPktInBuf = false;
515 struct recv_priv *precvpriv = &padapter->recvpriv;
516 struct __queue *ppending_recvframe_queue =
517 &preorder_ctrl->pending_recvframe_queue;
518
519 phead = &ppending_recvframe_queue->queue;
520 plist = phead->next;
521 /* Handle some conditions for the forced indicate case. */
522 if (bforced == true) {
523 if (list_empty(phead))
524 return true;
525
526 prframe = LIST_CONTAINOR(plist, union recv_frame, u);
527 pattrib = &prframe->u.hdr.attrib;
528 preorder_ctrl->indicate_seq = pattrib->seq_num;
529 }
530 /* Prepare the indication list and do the indication.
531 * Check if there is any packet that needs to be indicated. */
532 while (!list_empty(phead)) {
533 prframe = LIST_CONTAINOR(plist, union recv_frame, u);
534 pattrib = &prframe->u.hdr.attrib;
535 if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
536 plist = plist->next;
537 list_del_init(&(prframe->u.hdr.list));
538 if (SN_EQUAL(preorder_ctrl->indicate_seq,
539 pattrib->seq_num))
540 preorder_ctrl->indicate_seq =
541 (preorder_ctrl->indicate_seq + 1) % 4096;
542 /*indicate this recv_frame*/
543 if (!pattrib->amsdu) {
544 if ((padapter->bDriverStopped == false) &&
545 (padapter->bSurpriseRemoved == false)) {
546 /* indicate this recv_frame */
547 r8712_recv_indicatepkt(padapter,
548 prframe);
549 }
550 } else if (pattrib->amsdu == 1) {
551 if (amsdu_to_msdu(padapter, prframe) !=
552 _SUCCESS)
553 r8712_free_recvframe(prframe,
554 &precvpriv->free_recv_queue);
555 }
556 /* Update local variables. */
557 bPktInBuf = false;
558 } else {
559 bPktInBuf = true;
560 break;
561 }
562 }
563 return bPktInBuf;
564 }
565
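/*
 * Receive-path entry for the reorder machinery: non-QoS, non-A-MSDU frames
 * are indicated directly; everything else is checked against the reorder
 * window, inserted into the pending queue, and any in-order frames are then
 * delivered.  The reorder timer is re-armed while frames remain buffered.
 */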
566 static int recv_indicatepkt_reorder(struct _adapter *padapter,
567 union recv_frame *prframe)
568 {
569 unsigned long irql;
570 struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
571 struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
572 struct __queue *ppending_recvframe_queue =
573 &preorder_ctrl->pending_recvframe_queue;
574
575 if (!pattrib->amsdu) {
576 /* s1. */
577 r8712_wlanhdr_to_ethhdr(prframe);
578 if (pattrib->qos != 1) {
579 if ((padapter->bDriverStopped == false) &&
580 (padapter->bSurpriseRemoved == false)) {
581 r8712_recv_indicatepkt(padapter, prframe);
582 return _SUCCESS;
583 } else
584 return _FAIL;
585 }
586 }
587 spin_lock_irqsave(&ppending_recvframe_queue->lock, irql);
588 /*s2. check if winstart_b(indicate_seq) needs to be updated*/
589 if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num))
590 goto _err_exit;
591 /*s3. Insert all packet into Reorder Queue to maintain its ordering.*/
592 if (!enqueue_reorder_recvframe(preorder_ctrl, prframe))
593 goto _err_exit;
594 /*s4.
595 * Indication process.
596 * After Packet dropping and Sliding Window shifting as above, we can
597 * now just indicate the packets with the SeqNum smaller than latest
598 * WinStart and buffer other packets.
599 *
600 * For Rx Reorder condition:
601 * 1. All packets with SeqNum smaller than WinStart => Indicate
602 * 2. All packets with SeqNum larger than or equal to
603 * WinStart => Buffer it.
604 */
605 if (r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, false) ==
606 true) {
607 mod_timer(&preorder_ctrl->reordering_ctrl_timer,
608 jiffies + msecs_to_jiffies(REORDER_WAIT_TIME));
609 spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
610 } else {
611 spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
612 del_timer(&preorder_ctrl->reordering_ctrl_timer);
613 }
614 return _SUCCESS;
615 _err_exit:
616 spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
617 return _FAIL;
618 }
619
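/*
 * Reorder timer expired: stop waiting for the missing sequence number and
 * force-indicate whatever is pending on this reorder queue.
 */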
620 void r8712_reordering_ctrl_timeout_handler(void *pcontext)
621 {
622 unsigned long irql;
623 struct recv_reorder_ctrl *preorder_ctrl =
624 (struct recv_reorder_ctrl *)pcontext;
625 struct _adapter *padapter = preorder_ctrl->padapter;
626 struct __queue *ppending_recvframe_queue =
627 &preorder_ctrl->pending_recvframe_queue;
628
629 if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
630 return;
631 spin_lock_irqsave(&ppending_recvframe_queue->lock, irql);
632 r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, true);
633 spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
634 }
635
636 static int r8712_process_recv_indicatepkts(struct _adapter *padapter,
637 union recv_frame *prframe)
638 {
639 int retval = _SUCCESS;
640 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
641 struct ht_priv *phtpriv = &pmlmepriv->htpriv;
642
643 if (phtpriv->ht_option == 1) { /*B/G/N Mode*/
644 if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
645 /* including perform A-MPDU Rx Ordering Buffer Control*/
646 if ((padapter->bDriverStopped == false) &&
647 (padapter->bSurpriseRemoved == false))
648 return _FAIL;
649 }
650 } else { /*B/G mode*/
651 retval = r8712_wlanhdr_to_ethhdr(prframe);
652 if (retval != _SUCCESS)
653 return retval;
654 if ((padapter->bDriverStopped == false) &&
655 (padapter->bSurpriseRemoved == false)) {
656 /* indicate this recv_frame */
657 r8712_recv_indicatepkt(padapter, prframe);
658 } else
659 return _FAIL;
660 }
661 return retval;
662 }
663
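/* Map an antenna power reading in dBm (roughly -100..0) to a 0..100 percentage. */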
664 static u8 query_rx_pwr_percentage(s8 antpower)
665 {
666 if ((antpower <= -100) || (antpower >= 20))
667 return 0;
668 else if (antpower >= 0)
669 return 100;
670 else
671 return 100 + antpower;
672 }
673
674 static u8 evm_db2percentage(s8 value)
675 {
676 /*
677 * -33dB~0dB to 0%~99%
678 */
679 s8 ret_val;
680
681 ret_val = value;
682 if (ret_val >= 0)
683 ret_val = 0;
684 if (ret_val <= -33)
685 ret_val = -33;
686 ret_val = -ret_val;
687 ret_val *= 3;
688 if (ret_val == 99)
689 ret_val = 100;
690 return ret_val;
691 }
692
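/*
 * Piecewise-linear mapping of the raw 0..100 signal index onto the
 * percentage reported to userspace, stretching the low end and saturating
 * everything above 50 at 100%.
 */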
693 s32 r8712_signal_scale_mapping(s32 cur_sig)
694 {
695 s32 ret_sig;
696
697 if (cur_sig >= 51 && cur_sig <= 100)
698 ret_sig = 100;
699 else if (cur_sig >= 41 && cur_sig <= 50)
700 ret_sig = 80 + ((cur_sig - 40) * 2);
701 else if (cur_sig >= 31 && cur_sig <= 40)
702 ret_sig = 66 + (cur_sig - 30);
703 else if (cur_sig >= 21 && cur_sig <= 30)
704 ret_sig = 54 + (cur_sig - 20);
705 else if (cur_sig >= 10 && cur_sig <= 20)
706 ret_sig = 42 + (((cur_sig - 10) * 2) / 3);
707 else if (cur_sig >= 5 && cur_sig <= 9)
708 ret_sig = 22 + (((cur_sig - 5) * 3) / 2);
709 else if (cur_sig >= 1 && cur_sig <= 4)
710 ret_sig = 6 + (((cur_sig - 1) * 3) / 2);
711 else
712 ret_sig = cur_sig;
713 return ret_sig;
714 }
715
716 static s32 translate2dbm(struct _adapter *padapter, u8 signal_strength_idx)
717 {
718 s32 signal_power; /* in dBm.*/
719 /* Translate to dBm (x=0.5y-95).*/
720 signal_power = (s32)((signal_strength_idx + 1) >> 1);
721 signal_power -= 95;
722 return signal_power;
723 }
724
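/*
 * Derive signal strength and quality from the PHY status that the hardware
 * appends after the rx descriptor.  CCK frames use the AGC report in
 * struct phy_cck_rx_status; OFDM/HT frames use the per-path gain and EVM
 * fields.  The results are stored in the frame's rx_pkt_attrib.
 */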
725 static void query_rx_phy_status(struct _adapter *padapter,
726 union recv_frame *prframe)
727 {
728 u8 i, max_spatial_stream, evm;
729 struct recv_stat *prxstat = (struct recv_stat *)prframe->u.hdr.rx_head;
730 struct phy_stat *pphy_stat = (struct phy_stat *)(prxstat + 1);
731 u8 *pphy_head = (u8 *)(prxstat + 1);
732 s8 rx_pwr[4], rx_pwr_all;
733 u8 pwdb_all;
734 u32 rssi, total_rssi = 0;
735 u8 bcck_rate = 0, rf_rx_num = 0, cck_highpwr = 0;
736 struct phy_cck_rx_status *pcck_buf;
737 u8 sq;
738
739 /* Record it for next packet processing*/
740 bcck_rate = (prframe->u.hdr.attrib.mcs_rate <= 3 ? 1 : 0);
741 if (bcck_rate) {
742 u8 report;
743
744 /* The CCK driver-info structure is not the same as for OFDM packets. */
745 pcck_buf = (struct phy_cck_rx_status *)pphy_stat;
746 /* (1)Hardware does not provide RSSI for CCK
747 * (2)PWDB, Average PWDB calculated by hardware
748 * (for rate adaptive)
749 */
750 if (!cck_highpwr) {
751 report = pcck_buf->cck_agc_rpt & 0xc0;
752 report >>= 6;
753 switch (report) {
754 /* Modify the RF LNA gain value to -40, -20,
755 * -2, 14 by Jenyu's suggestion
756 * Note: different RF chips have different
757 * LNA gains. */
758 case 0x3:
759 rx_pwr_all = -40 - (pcck_buf->cck_agc_rpt &
760 0x3e);
761 break;
762 case 0x2:
763 rx_pwr_all = -20 - (pcck_buf->cck_agc_rpt &
764 0x3e);
765 break;
766 case 0x1:
767 rx_pwr_all = -2 - (pcck_buf->cck_agc_rpt &
768 0x3e);
769 break;
770 case 0x0:
771 rx_pwr_all = 14 - (pcck_buf->cck_agc_rpt &
772 0x3e);
773 break;
774 }
775 } else {
776 report = ((u8)(le32_to_cpu(pphy_stat->phydw1) >> 8)) &
777 0x60;
778 report >>= 5;
779 switch (report) {
780 case 0x3:
781 rx_pwr_all = -40 - ((pcck_buf->cck_agc_rpt &
782 0x1f) << 1);
783 break;
784 case 0x2:
785 rx_pwr_all = -20 - ((pcck_buf->cck_agc_rpt &
786 0x1f) << 1);
787 break;
788 case 0x1:
789 rx_pwr_all = -2 - ((pcck_buf->cck_agc_rpt &
790 0x1f) << 1);
791 break;
792 case 0x0:
793 rx_pwr_all = 14 - ((pcck_buf->cck_agc_rpt &
794 0x1f) << 1);
795 break;
796 }
797 }
798 pwdb_all = query_rx_pwr_percentage(rx_pwr_all);
799 /* CCK gain is smaller than OFDM/MCS gain,*/
800 /* so we add a gain difference of 6, determined by experience */
801 pwdb_all += 6;
802 if (pwdb_all > 100)
803 pwdb_all = 100;
804 /* modify the offset to make the same gain index with OFDM.*/
805 if (pwdb_all > 34 && pwdb_all <= 42)
806 pwdb_all -= 2;
807 else if (pwdb_all > 26 && pwdb_all <= 34)
808 pwdb_all -= 6;
809 else if (pwdb_all > 14 && pwdb_all <= 26)
810 pwdb_all -= 8;
811 else if (pwdb_all > 4 && pwdb_all <= 14)
812 pwdb_all -= 4;
813 /*
814 * (3) Get Signal Quality (EVM)
815 */
816 if (pwdb_all > 40)
817 sq = 100;
818 else {
819 sq = pcck_buf->sq_rpt;
820 if (pcck_buf->sq_rpt > 64)
821 sq = 0;
822 else if (pcck_buf->sq_rpt < 20)
823 sq = 100;
824 else
825 sq = ((64-sq) * 100) / 44;
826 }
827 prframe->u.hdr.attrib.signal_qual = sq;
828 prframe->u.hdr.attrib.rx_mimo_signal_qual[0] = sq;
829 prframe->u.hdr.attrib.rx_mimo_signal_qual[1] = -1;
830 } else {
831 /* (1)Get RSSI for HT rate */
832 for (i = 0; i < ((padapter->registrypriv.rf_config) &
833 0x0f); i++) {
834 rf_rx_num++;
835 rx_pwr[i] = ((pphy_head[PHY_STAT_GAIN_TRSW_SHT + i]
836 & 0x3F) * 2) - 110;
837 /* Translate dBm to percentage. */
838 rssi = query_rx_pwr_percentage(rx_pwr[i]);
839 total_rssi += rssi;
840 }
841 /* (2)PWDB, Average PWDB calculated by hardware (for
842 * rate adaptive) */
843 rx_pwr_all = (((pphy_head[PHY_STAT_PWDB_ALL_SHT]) >> 1) & 0x7f)
844 - 106;
845 pwdb_all = query_rx_pwr_percentage(rx_pwr_all);
846
847 {
848 /* (3)EVM of HT rate */
849 if (prframe->u.hdr.attrib.htc &&
850 prframe->u.hdr.attrib.mcs_rate >= 20 &&
851 prframe->u.hdr.attrib.mcs_rate <= 27) {
852 /* both spatial stream make sense */
853 max_spatial_stream = 2;
854 } else {
855 /* only spatial stream 1 makes sense */
856 max_spatial_stream = 1;
857 }
858 for (i = 0; i < max_spatial_stream; i++) {
859 evm = evm_db2percentage((pphy_head
860 [PHY_STAT_RXEVM_SHT + i]));/*dbm*/
861 prframe->u.hdr.attrib.signal_qual =
862 (u8)(evm & 0xff);
863 prframe->u.hdr.attrib.rx_mimo_signal_qual[i] =
864 (u8)(evm & 0xff);
865 }
866 }
867 }
868 /* UI BSS List signal strength (in percentage), scaled to 0~100 to look
869 * good in the UI. It is assigned to the BSS List in
870 * GetValueFromBeaconOrProbeRsp(). */
871 if (bcck_rate)
872 prframe->u.hdr.attrib.signal_strength =
873 (u8)r8712_signal_scale_mapping(pwdb_all);
874 else {
875 if (rf_rx_num != 0)
876 prframe->u.hdr.attrib.signal_strength =
877 (u8)(r8712_signal_scale_mapping(total_rssi /=
878 rf_rx_num));
879 }
880 }
881
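/*
 * Feed the frame's EVM-based signal quality into a sliding window of
 * PHY_LINKQUALITY_SLID_WIN_MAX samples and publish the running average in
 * recvpriv.signal.
 */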
882 static void process_link_qual(struct _adapter *padapter,
883 union recv_frame *prframe)
884 {
885 u32 last_evm = 0, tmpVal;
886 struct rx_pkt_attrib *pattrib;
887
888 if (prframe == NULL || padapter == NULL)
889 return;
890 pattrib = &prframe->u.hdr.attrib;
891 if (pattrib->signal_qual != 0) {
892 /*
893 * 1. Record the general EVM to the sliding window.
894 */
895 if (padapter->recvpriv.signal_qual_data.total_num++ >=
896 PHY_LINKQUALITY_SLID_WIN_MAX) {
897 padapter->recvpriv.signal_qual_data.total_num =
898 PHY_LINKQUALITY_SLID_WIN_MAX;
899 last_evm = padapter->recvpriv.signal_qual_data.elements
900 [padapter->recvpriv.signal_qual_data.index];
901 padapter->recvpriv.signal_qual_data.total_val -=
902 last_evm;
903 }
904 padapter->recvpriv.signal_qual_data.total_val +=
905 pattrib->signal_qual;
906 padapter->recvpriv.signal_qual_data.elements[padapter->
907 recvpriv.signal_qual_data.index++] =
908 pattrib->signal_qual;
909 if (padapter->recvpriv.signal_qual_data.index >=
910 PHY_LINKQUALITY_SLID_WIN_MAX)
911 padapter->recvpriv.signal_qual_data.index = 0;
912
913 /* <1> Shown on the UI for the user, in percentage. */
914 tmpVal = padapter->recvpriv.signal_qual_data.total_val /
915 padapter->recvpriv.signal_qual_data.total_num;
916 padapter->recvpriv.signal = (u8)tmpVal;
917 }
918 }
919
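/*
 * Feed the frame's signal strength into a sliding window of
 * PHY_RSSI_SLID_WIN_MAX samples and publish the averaged value, translated
 * to dBm, in recvpriv.rssi.
 */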
920 static void process_rssi(struct _adapter *padapter, union recv_frame *prframe)
921 {
922 u32 last_rssi, tmp_val;
923 struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
924
925 if (padapter->recvpriv.signal_strength_data.total_num++ >=
926 PHY_RSSI_SLID_WIN_MAX) {
927 padapter->recvpriv.signal_strength_data.total_num =
928 PHY_RSSI_SLID_WIN_MAX;
929 last_rssi = padapter->recvpriv.signal_strength_data.elements
930 [padapter->recvpriv.signal_strength_data.index];
931 padapter->recvpriv.signal_strength_data.total_val -= last_rssi;
932 }
933 padapter->recvpriv.signal_strength_data.total_val +=
934 pattrib->signal_strength;
935 padapter->recvpriv.signal_strength_data.elements[padapter->recvpriv.
936 signal_strength_data.index++] =
937 pattrib->signal_strength;
938 if (padapter->recvpriv.signal_strength_data.index >=
939 PHY_RSSI_SLID_WIN_MAX)
940 padapter->recvpriv.signal_strength_data.index = 0;
941 tmp_val = padapter->recvpriv.signal_strength_data.total_val /
942 padapter->recvpriv.signal_strength_data.total_num;
943 padapter->recvpriv.rssi = (s8)translate2dbm(padapter, (u8)tmp_val);
944 }
945
946 static void process_phy_info(struct _adapter *padapter,
947 union recv_frame *prframe)
948 {
949 query_rx_phy_status(padapter, prframe);
950 process_rssi(padapter, prframe);
951 process_link_qual(padapter, prframe);
952 }
953
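/*
 * Per-frame receive path: account MP-mode counters, validate the 802.11
 * header, record PHY statistics, decrypt, reassemble fragments, run port
 * control, and finally indicate the frame (through the reorder logic when
 * 11n is enabled).
 */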
954 int recv_func(struct _adapter *padapter, void *pcontext)
955 {
956 struct rx_pkt_attrib *pattrib;
957 union recv_frame *prframe, *orig_prframe;
958 int retval = _SUCCESS;
959 struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
960 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
961
962 prframe = (union recv_frame *)pcontext;
963 orig_prframe = prframe;
964 pattrib = &prframe->u.hdr.attrib;
965 if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
966 if (pattrib->crc_err == 1)
967 padapter->mppriv.rx_crcerrpktcount++;
968 else
969 padapter->mppriv.rx_pktcount++;
970 if (check_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE) == false) {
971 /* free this recv_frame */
972 r8712_free_recvframe(orig_prframe, pfree_recv_queue);
973 goto _exit_recv_func;
974 }
975 }
976 /* check the frame ctrl field and decache */
977 retval = r8712_validate_recv_frame(padapter, prframe);
978 if (retval != _SUCCESS) {
979 /* free this recv_frame */
980 r8712_free_recvframe(orig_prframe, pfree_recv_queue);
981 goto _exit_recv_func;
982 }
983 process_phy_info(padapter, prframe);
984 prframe = r8712_decryptor(padapter, prframe);
985 if (prframe == NULL) {
986 retval = _FAIL;
987 goto _exit_recv_func;
988 }
989 prframe = r8712_recvframe_chk_defrag(padapter, prframe);
990 if (prframe == NULL)
991 goto _exit_recv_func;
992 prframe = r8712_portctrl(padapter, prframe);
993 if (prframe == NULL) {
994 retval = _FAIL;
995 goto _exit_recv_func;
996 }
997 retval = r8712_process_recv_indicatepkts(padapter, prframe);
998 if (retval != _SUCCESS) {
999 r8712_free_recvframe(orig_prframe, pfree_recv_queue);
1000 goto _exit_recv_func;
1001 }
1002 _exit_recv_func:
1003 return retval;
1004 }
1005
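/*
 * Split one USB rx buffer, which may aggregate several packets, into
 * individual recv_frames.  Each packet consists of an rx descriptor,
 * optional driver info and the payload, with the total rounded up to a
 * 128-byte boundary; every frame is copied (or cloned as a fallback) into
 * its own skb and passed to r8712_recv_entry().
 */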
1006 static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
1007 {
1008 u8 *pbuf, shift_sz = 0;
1009 u8 frag, mf;
1010 uint pkt_len;
1011 u32 transfer_len;
1012 struct recv_stat *prxstat;
1013 u16 pkt_cnt, drvinfo_sz, pkt_offset, tmp_len, alloc_sz;
1014 struct __queue *pfree_recv_queue;
1015 _pkt *pkt_copy = NULL;
1016 union recv_frame *precvframe = NULL;
1017 struct recv_priv *precvpriv = &padapter->recvpriv;
1018
1019 pfree_recv_queue = &(precvpriv->free_recv_queue);
1020 pbuf = pskb->data;
1021 prxstat = (struct recv_stat *)pbuf;
1022 pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
1023 pkt_len = le32_to_cpu(prxstat->rxdw0) & 0x00003fff;
1024 transfer_len = pskb->len;
1025 /* Test throughput with Netgear 3700 (No security) with Chariot 3T3R
1026 * pairs. The packet count will be a big number so that the contained
1027 * packets will affect the Rx reordering. */
1028 if (transfer_len < pkt_len) {
1029 /* In this case, it means the MAX_RECVBUF_SZ is too small to
1030 * get the data from 8712u. */
1031 return _FAIL;
1032 }
1033 do {
1034 prxstat = (struct recv_stat *)pbuf;
1035 pkt_len = le32_to_cpu(prxstat->rxdw0) & 0x00003fff;
1036 /* more fragment bit */
1037 mf = (le32_to_cpu(prxstat->rxdw1) >> 27) & 0x1;
1038 /* fragmentation number */
1039 frag = (le32_to_cpu(prxstat->rxdw2) >> 12) & 0xf;
1040 /* unit is 2^3 = 8 bytes */
1041 drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
1042 drvinfo_sz <<= 3;
1043 if (pkt_len <= 0)
1044 goto _exit_recvbuf2recvframe;
1045 /* QoS data, wireless LAN header length is 26 */
1046 if ((le32_to_cpu(prxstat->rxdw0) >> 23) & 0x01)
1047 shift_sz = 2;
1048 precvframe = r8712_alloc_recvframe(pfree_recv_queue);
1049 if (precvframe == NULL)
1050 goto _exit_recvbuf2recvframe;
1051 INIT_LIST_HEAD(&precvframe->u.hdr.list);
1052 precvframe->u.hdr.precvbuf = NULL; /*can't access the precvbuf*/
1053 precvframe->u.hdr.len = 0;
1054 tmp_len = pkt_len + drvinfo_sz + RXDESC_SIZE;
1055 pkt_offset = (u16)round_up(tmp_len, 128);
1056 /* for the first fragment packet, the driver needs to allocate 1536 +
1057 * drvinfo_sz + RXDESC_SIZE bytes to defragment the packet. */
1058 if ((mf == 1) && (frag == 0))
1059 alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
1060 else
1061 alloc_sz = tmp_len;
1062 /* 2 bytes are for IP header 4-byte alignment in the QoS packet case;
1063 * 4 bytes are for skb->data 4-byte alignment. */
1064 alloc_sz += 6;
1065 pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
1066 if (pkt_copy) {
1067 precvframe->u.hdr.pkt = pkt_copy;
1068 skb_reserve(pkt_copy, 4 - ((addr_t)(pkt_copy->data)
1069 % 4));
1070 skb_reserve(pkt_copy, shift_sz);
1071 memcpy(pkt_copy->data, pbuf, tmp_len);
1072 precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data =
1073 precvframe->u.hdr.rx_tail = pkt_copy->data;
1074 precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz;
1075 } else {
1076 precvframe->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC);
1077 if (!precvframe->u.hdr.pkt)
1078 return _FAIL;
1079 precvframe->u.hdr.rx_head = pbuf;
1080 precvframe->u.hdr.rx_data = pbuf;
1081 precvframe->u.hdr.rx_tail = pbuf;
1082 precvframe->u.hdr.rx_end = pbuf + alloc_sz;
1083 }
1084 recvframe_put(precvframe, tmp_len);
1085 recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE);
1086 /* because of the endian issue, the driver avoids referencing the
1087 * rxstat after calling update_recvframe_attrib_from_recvstat();
1088 */
1089 update_recvframe_attrib_from_recvstat(&precvframe->u.hdr.attrib,
1090 prxstat);
1091 r8712_recv_entry(precvframe);
1092 transfer_len -= pkt_offset;
1093 pbuf += pkt_offset;
1094 pkt_cnt--;
1095 precvframe = NULL;
1096 pkt_copy = NULL;
1097 } while ((transfer_len > 0) && pkt_cnt > 0);
1098 _exit_recvbuf2recvframe:
1099 return _SUCCESS;
1100 }
1101
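/*
 * Receive bottom half: drain rx_skb_queue, convert each buffer into
 * recv_frames, and recycle the skb onto free_recv_skb_queue when it is no
 * longer shared.
 */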
1102 static void recv_tasklet(void *priv)
1103 {
1104 struct sk_buff *pskb;
1105 struct _adapter *padapter = (struct _adapter *)priv;
1106 struct recv_priv *precvpriv = &padapter->recvpriv;
1107
1108 while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
1109 recvbuf2recvframe(padapter, pskb);
1110 skb_reset_tail_pointer(pskb);
1111 pskb->len = 0;
1112 if (!skb_cloned(pskb))
1113 skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
1114 else
1115 consume_skb(pskb);
1116 }
1117 }