]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - drivers/staging/rtl8192e/rtllib_tx.c
Merge tag 'armsoc-cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
[mirror_ubuntu-eoan-kernel.git] / drivers / staging / rtl8192e / rtllib_tx.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25 ******************************************************************************
26
27 Few modifications for Realtek's Wi-Fi drivers by
28 Andrea Merello <andrea.merello@gmail.com>
29
30 A special thanks goes to Realtek for their support !
31
32 ******************************************************************************/
33
34 #include <linux/compiler.h>
35 #include <linux/errno.h>
36 #include <linux/if_arp.h>
37 #include <linux/in6.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/netdevice.h>
43 #include <linux/pci.h>
44 #include <linux/proc_fs.h>
45 #include <linux/skbuff.h>
46 #include <linux/slab.h>
47 #include <linux/tcp.h>
48 #include <linux/types.h>
49 #include <linux/wireless.h>
50 #include <linux/etherdevice.h>
51 #include <linux/uaccess.h>
52 #include <linux/if_vlan.h>
53
54 #include "rtllib.h"
55
56 /* 802.11 Data Frame
57 *
58 *
59 * 802.11 frame_control for data frames - 2 bytes
60 * ,--------------------------------------------------------------------.
61 * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
62 * |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
63 * val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
64 * |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
65 * desc | ver | type | ^-subtype-^ |to |from|more|retry| pwr |more |wep |
66 * | | | x=0 data |DS | DS |frag| | mgm |data | |
67 * | | | x=1 data+ack | | | | | | | |
68 * '--------------------------------------------------------------------'
69 * /\
70 * |
71 * 802.11 Data Frame |
72 * ,--------- 'ctrl' expands to >---'
73 * |
74 * ,--'---,-------------------------------------------------------------.
75 * Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
76 * |------|------|---------|---------|---------|------|---------|------|
77 * Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
78 * | | tion | (BSSID) | | | ence | data | |
79 * `--------------------------------------------------| |------'
80 * Total: 28 non-data bytes `----.----'
81 * |
82 * .- 'Frame data' expands to <---------------------------'
83 * |
84 * V
85 * ,---------------------------------------------------.
86 * Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
87 * |------|------|---------|----------|------|---------|
88 * Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
89 * | DSAP | SSAP | | | | Packet |
90 * | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
91 * `-----------------------------------------| |
92 * Total: 8 non-data bytes `----.----'
93 * |
94 * .- 'IP Packet' expands, if WEP enabled, to <--'
95 * |
96 * V
97 * ,-----------------------.
98 * Bytes | 4 | 0-2296 | 4 |
99 * |-----|-----------|-----|
100 * Desc. | IV | Encrypted | ICV |
101 * | | IP Packet | |
102 * `-----------------------'
103 * Total: 8 non-data bytes
104 *
105 *
106 * 802.3 Ethernet Data Frame
107 *
108 * ,-----------------------------------------.
109 * Bytes | 6 | 6 | 2 | Variable | 4 |
110 * |-------|-------|------|-----------|------|
111 * Desc. | Dest. | Source| Type | IP Packet | fcs |
112 * | MAC | MAC | | | |
113 * `-----------------------------------------'
114 * Total: 18 non-data bytes
115 *
116 * In the event that fragmentation is required, the incoming payload is split
117 * into N parts of size ieee->fts. The first fragment contains the SNAP header
118 * and the remaining packets are just data.
119 *
120 * If encryption is enabled, each fragment payload size is reduced by enough
121 * space to add the prefix and postfix (IV and ICV totalling 8 bytes in
122 * the case of WEP) So if you have 1500 bytes of payload with ieee->fts set to
123 * 500 without encryption it will take 3 frames. With WEP it will take 4 frames
124 * as the payload of each frame is reduced to 492 bytes.
125 *
126 * SKB visualization
127 *
128 * ,- skb->data
129 * |
130 * | ETHERNET HEADER ,-<-- PAYLOAD
131 * | | 14 bytes from skb->data
132 * | 2 bytes for Type --> ,T. | (sizeof ethhdr)
133 * | | | |
134 * |,-Dest.--. ,--Src.---. | | |
135 * | 6 bytes| | 6 bytes | | | |
136 * v | | | | | |
137 * 0 | v 1 | v | v 2
138 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
139 * ^ | ^ | ^ |
140 * | | | | | |
141 * | | | | `T' <---- 2 bytes for Type
142 * | | | |
143 * | | '---SNAP--' <-------- 6 bytes for SNAP
144 * | |
145 * `-IV--' <-------------------- 4 bytes for IV (WEP)
146 *
147 * SNAP HEADER
148 *
149 */
150
/* SNAP OUIs: 00:00:f8 selects 802.1H (bridge-tunnel) encapsulation,
 * 00:00:00 selects RFC 1042 encapsulation; see rtllib_put_snap().
 */
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
153
154 static int rtllib_put_snap(u8 *data, u16 h_proto)
155 {
156 struct rtllib_snap_hdr *snap;
157 u8 *oui;
158
159 snap = (struct rtllib_snap_hdr *)data;
160 snap->dsap = 0xaa;
161 snap->ssap = 0xaa;
162 snap->ctrl = 0x03;
163
164 if (h_proto == 0x8137 || h_proto == 0x80f3)
165 oui = P802_1H_OUI;
166 else
167 oui = RFC1042_OUI;
168 snap->oui[0] = oui[0];
169 snap->oui[1] = oui[1];
170 snap->oui[2] = oui[2];
171
172 *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
173
174 return SNAP_SIZE + sizeof(u16);
175 }
176
/* Encrypt one already-built 802.11 fragment in place using the active
 * TX key's lib80211 crypto ops.
 *
 * @hdr_len: length of the 802.11 header at the start of @frag; the
 *           crypto ops insert their prefix/postfix around the payload
 *           that follows it.
 *
 * Returns 0 on success, -1 when no crypto context is configured or a
 * crypto op fails.
 */
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res;

	/* Active transmit key for this interface. */
	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	if (!(crypt && crypt->ops)) {
		netdev_info(ieee->dev, "=========>%s(), crypt is null\n",
			    __func__);
		return -1;
	}
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
	 */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here.
	 */
	atomic_inc(&crypt->refcnt);	/* pin the crypto context while in use */
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	/* MPDU encryption runs only if MSDU encryption succeeded (or was absent). */
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
			    ieee->dev->name, frag->len);
		return -1;
	}

	return 0;
}
213
214
/* Free a TXB descriptor obtained from rtllib_alloc_txb().
 *
 * Only the descriptor itself is released here; the fragment skbs are
 * consumed (and freed) by whichever TX path transmits them.  kfree()
 * ignores NULL, so the old explicit guard was redundant and is gone.
 */
void rtllib_txb_free(struct rtllib_txb *txb)
{
	kfree(txb);
}
221
222 static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
223 gfp_t gfp_mask)
224 {
225 struct rtllib_txb *txb;
226 int i;
227
228 txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
229 gfp_mask);
230 if (!txb)
231 return NULL;
232
233 memset(txb, 0, sizeof(struct rtllib_txb));
234 txb->nr_frags = nr_frags;
235 txb->frag_size = cpu_to_le16(txb_size);
236
237 for (i = 0; i < nr_frags; i++) {
238 txb->fragments[i] = dev_alloc_skb(txb_size);
239 if (unlikely(!txb->fragments[i])) {
240 i--;
241 break;
242 }
243 memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
244 }
245 if (unlikely(i != nr_frags)) {
246 while (i >= 0)
247 dev_kfree_skb_any(txb->fragments[i--]);
248 kfree(txb);
249 return NULL;
250 }
251 return txb;
252 }
253
254 static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
255 {
256 struct ethhdr *eth;
257 struct iphdr *ip;
258
259 eth = (struct ethhdr *)skb->data;
260 if (eth->h_proto != htons(ETH_P_IP))
261 return 0;
262
263 #ifdef VERBOSE_DEBUG
264 print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE, skb->data,
265 skb->len);
266 #endif
267 ip = ip_hdr(skb);
268 switch (ip->tos & 0xfc) {
269 case 0x20:
270 return 2;
271 case 0x40:
272 return 1;
273 case 0x60:
274 return 3;
275 case 0x80:
276 return 4;
277 case 0xa0:
278 return 5;
279 case 0xc0:
280 return 6;
281 case 0xe0:
282 return 7;
283 default:
284 return 0;
285 }
286 }
287
/* Decide whether this QoS data frame may go out as part of an A-MPDU
 * aggregate and, if so, record the aggregation factor/density in its
 * TX descriptor.  May also kick off the ADDBA handshake for the
 * frame's TID when no admitted block-ack session exists yet.
 */
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	/* No aggregation while scanning... */
	if (rtllib_act_scanning(ieee, false))
		return;

	/* ...or without HT support/enabled... */
	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	/* ...only for QoS data frames... */
	if (!IsQoSDataFrame(skb->data))
		return;
	/* ...to a unicast receiver... */
	if (is_multicast_ether_addr(hdr->addr1))
		return;

	/* ...not for DHCP frames or immediately after (re)association... */
	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;

	/* ...and not when an IOT workaround forbids TX aggregation. */
	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false) {
			/* No admitted BA session: start ADDBA unless keying
			 * is still pending, the frame is DHCP, or ADDBA is
			 * disabled for this TS.
			 */
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
						 KEY_TYPE_NA)) {
				;
			} else if (tcb_desc->bdhcp == 1) {
				;
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (pTxTs->bUsingBa == false) {
			/* Session admitted but not yet used: switch over once
			 * the next sequence number passes the BA start SSN.
			 */
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	/* A forced A-MPDU mode overrides whatever was decided above. */
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
}
361
362 static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
363 struct cb_desc *tcb_desc)
364 {
365 tcb_desc->bUseShortPreamble = false;
366 if (tcb_desc->data_rate == 2)
367 return;
368 else if (ieee->current_network.capability &
369 WLAN_CAPABILITY_SHORT_PREAMBLE)
370 tcb_desc->bUseShortPreamble = true;
371 }
372
373 static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
374 struct cb_desc *tcb_desc)
375 {
376 struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
377
378 tcb_desc->bUseShortGI = false;
379
380 if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
381 return;
382
383 if (pHTInfo->bForcedShortGI) {
384 tcb_desc->bUseShortGI = true;
385 return;
386 }
387
388 if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
389 tcb_desc->bUseShortGI = true;
390 else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
391 tcb_desc->bUseShortGI = true;
392 }
393
394 static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
395 struct cb_desc *tcb_desc)
396 {
397 struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
398
399 tcb_desc->bPacketBW = false;
400
401 if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
402 return;
403
404 if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
405 return;
406
407 if ((tcb_desc->data_rate & 0x80) == 0)
408 return;
409 if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
410 !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
411 tcb_desc->bPacketBW = true;
412 }
413
/* Choose the RTS/CTS protection scheme for this frame.
 *
 * The while(true) loop below is used purely as a break target: each
 * branch that picks a protection mode breaks out; reaching the end of
 * the loop body jumps to NO_PROTECTION instead of iterating.
 */
static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	struct rt_hi_throughput *pHTInfo;

	/* Start from a clean slate on every call. */
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	/* Group-addressed frames are never protected. */
	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data+16))
		return;

	/* Legacy (pre-11n) modes: RTS above the threshold, or RTS+CTS
	 * when the network requests protection.
	 */
	if (ieee->mode < IEEE_N_24G) {
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	}

	pHTInfo = ieee->pHTInfo;

	while (true) {
		/* IOT workaround: some peers need CTS-to-self. */
		if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;
			break;
		} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
			   HT_IOT_ACT_PURE_N_MODE)) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			break;
		}
		/* Network-mandated protection. */
		if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			break;
		}
		/* HT operating-mode protection (mixed/non-member modes). */
		if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
			u8 HTOpMode = pHTInfo->CurrentOpMode;

			if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
						      HTOpMode == 3)) ||
			     (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
		}
		/* Plain RTS threshold. */
		if (skb->len > ieee->rts) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;
			break;
		}
		/* Aggregated frames: rate is recorded but RTS stays off. */
		if (tcb_desc->bAMPDUEnable) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = false;
			break;
		}
		goto NO_PROTECTION;
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->iw_mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}
499
500
/* Propagate the device-wide rate-control overrides into this frame's
 * TX descriptor and pick the RATR table index for infra/ad-hoc modes.
 */
static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	/* NOTE(review): this condition is true unless BOTH overrides are
	 * set; it looks like && ("neither override active") may have been
	 * intended — confirm before changing, the driver has always
	 * shipped with ||.
	 */
	if (!tcb_desc->bTxDisableRateFallBack ||
	    !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}
516
517 static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
518 u8 *dst)
519 {
520 u16 seqnum = 0;
521
522 if (is_multicast_ether_addr(dst))
523 return 0;
524 if (IsQoSDataFrame(skb->data)) {
525 struct tx_ts_record *pTS = NULL;
526
527 if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
528 skb->priority, TX_DIR, true))
529 return 0;
530 seqnum = pTS->TxCurSeq;
531 pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
532 return seqnum;
533 }
534 return 0;
535 }
536
537 static int wme_downgrade_ac(struct sk_buff *skb)
538 {
539 switch (skb->priority) {
540 case 6:
541 case 7:
542 skb->priority = 5; /* VO -> VI */
543 return 0;
544 case 4:
545 case 5:
546 skb->priority = 3; /* VI -> BE */
547 return 0;
548 case 0:
549 case 3:
550 skb->priority = 1; /* BE -> BK */
551 return 0;
552 default:
553 return -1;
554 }
555 }
556
557 static u8 rtllib_current_rate(struct rtllib_device *ieee)
558 {
559 if (ieee->mode & IEEE_MODE_MASK)
560 return ieee->rate;
561
562 if (ieee->HTCurrentOperaRate)
563 return ieee->HTCurrentOperaRate;
564 else
565 return ieee->rate & 0x7F;
566 }
567
/* Core TX entry point: take an 802.3 frame from the stack, classify
 * it, build 802.11 header(s), fragment, optionally encrypt, and hand
 * the resulting TXB either to the softmac queue or straight to the
 * driver's hard_start_xmit.
 *
 * Returns 0 when the skb was consumed (success or silent drop),
 * 1 on allocation failure (queue is stopped, skb NOT freed).
 */
static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	int qos_actived = ieee->current_network.qos_data.active;
	u8 dest[ETH_ALEN];
	u8 src[ETH_ALEN];
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	   IEEE_SOFTMAC_TX_QUEUE)) ||
	   ((!ieee->softmac_data_hard_start_xmit &&
	   (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		netdev_warn(ieee->dev, "No xmit handler.\n");
		goto success;
	}


	if (likely(ieee->raw_tx == 0)) {
		/* An 802.3 frame must at least fit a SNAP header. */
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		ether_addr_copy(dest, skb->data);
		ether_addr_copy(src, skb->data + ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		/* Monitor mode: pass the raw frame through unchanged. */
		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				netdev_warn(ieee->dev,
					    "Could not allocate TXB\n");
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = cpu_to_le16(skb->len);
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
			       skb->len);

			goto success;
		}

		/* Detect DHCP (UDP ports 67/68) and ARP so power save can
		 * be delayed while the link is being established.
		 */
		if (skb->len > 282) {
			if (ETH_P_IP == ether_type) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data+14);
				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					/* Byte 1/3 of the UDP header are the
					 * low bytes of src/dst port.
					 */
					if (((((u8 *)udp)[1] == 68) &&
					   (((u8 *)udp)[3] == 67)) ||
					   ((((u8 *)udp)[1] == 67) &&
					   (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ETH_P_ARP == ether_type) {
				netdev_info(ieee->dev,
					    "=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		/* EAPOL frames go out in the clear during the handshake. */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			netdev_dbg(ieee->dev,
				   "TX: IEEE 802.11 EAPOL frame: %s\n",
				   eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA
			 */
			ether_addr_copy(header.addr1,
					ieee->current_network.bssid);
			ether_addr_copy(header.addr2, src);
			if (IsAmsdu)
				ether_addr_copy(header.addr3,
						ieee->current_network.bssid);
			else
				ether_addr_copy(header.addr3, dest);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID
			 */
			ether_addr_copy(header.addr1, dest);
			ether_addr_copy(header.addr2, src);
			ether_addr_copy(header.addr3,
					ieee->current_network.bssid);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented)
		 */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client verify acm is not set for this ac */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				netdev_info(ieee->dev, "skb->priority = %x\n",
					    skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				netdev_info(ieee->dev, "converted skb->priority = %x\n",
					    skb->priority);
			}
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space.
		 */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix
		 */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment
		 */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.)
		 */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		/* Build each fragment: header, SNAP (first frag only),
		 * payload slice, then optional encryption.
		 */
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				/* Leave room for the IV the crypto op will
				 * insert in front of the payload.
				 */
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)
				   skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control
			 */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			/* Sequence number: per-TID counter (shifted, with the
			 * fragment number in the low bits) for unicast QoS
			 * frames, global counter otherwise.
			 */
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
									 header.addr1));
				frag_hdr->seq_ctl =
					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload
			 */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the appropriate sequence counter (12-bit wrap). */
		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		/* raw_tx: the caller supplies a ready-made 802.11 frame. */
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		       skb->len);
	}

 success:
	/* Fill the first fragment's TX descriptor: rate selection,
	 * aggregation, protection, preamble and bandwidth hints.
	 */
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			/* EAPOL goes out at a robust fixed rate. */
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}


			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = rtllib_current_rate(ieee);

			/* DHCP also gets a slow, reliable rate. */
			if (bdhcp) {
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}


				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += le16_to_cpu(txb->payload_size);
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
988 int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
989 {
990 memset(skb->cb, 0, sizeof(skb->cb));
991 return rtllib_xmit_inter(skb, dev);
992 }
993 EXPORT_SYMBOL(rtllib_xmit);