net/mac80211/tx.c
1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 *
12 * Transmit and frame generation functions.
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/slab.h>
17 #include <linux/skbuff.h>
18 #include <linux/etherdevice.h>
19 #include <linux/bitmap.h>
20 #include <linux/rcupdate.h>
21 #include <net/net_namespace.h>
22 #include <net/ieee80211_radiotap.h>
23 #include <net/cfg80211.h>
24 #include <net/mac80211.h>
25 #include <asm/unaligned.h>
26
27 #include "ieee80211_i.h"
28 #include "ieee80211_led.h"
29 #include "wep.h"
30 #include "wpa.h"
31 #include "wme.h"
32 #include "ieee80211_rate.h"
33
34 #define IEEE80211_TX_OK 0
35 #define IEEE80211_TX_AGAIN 1
36 #define IEEE80211_TX_FRAG_AGAIN 2
37
38 /* misc utils */
39
40 static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdata,
41 struct ieee80211_hdr *hdr)
42 {
43 /* Set the sequence number for this frame. */
44 hdr->seq_ctrl = cpu_to_le16(sdata->sequence);
45
46 /* Increase the sequence number. */
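/* The low four bits of seq_ctrl carry the fragment number, so the
 * counter advances in steps of 0x10 and the IEEE80211_SCTL_SEQ mask
 * keeps it within the 12-bit sequence number field. */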
47 sdata->sequence = (sdata->sequence + 0x10) & IEEE80211_SCTL_SEQ;
48 }
49
50 #ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP
51 static void ieee80211_dump_frame(const char *ifname, const char *title,
52 const struct sk_buff *skb)
53 {
54 const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
55 u16 fc;
56 int hdrlen;
57 DECLARE_MAC_BUF(mac);
58
59 printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len);
60 if (skb->len < 4) {
61 printk("\n");
62 return;
63 }
64
65 fc = le16_to_cpu(hdr->frame_control);
66 hdrlen = ieee80211_get_hdrlen(fc);
67 if (hdrlen > skb->len)
68 hdrlen = skb->len;
69 if (hdrlen >= 4)
70 printk(" FC=0x%04x DUR=0x%04x",
71 fc, le16_to_cpu(hdr->duration_id));
72 if (hdrlen >= 10)
73 printk(" A1=%s", print_mac(mac, hdr->addr1));
74 if (hdrlen >= 16)
75 printk(" A2=%s", print_mac(mac, hdr->addr2));
76 if (hdrlen >= 24)
77 printk(" A3=%s", print_mac(mac, hdr->addr3));
78 if (hdrlen >= 30)
79 printk(" A4=%s", print_mac(mac, hdr->addr4));
80 printk("\n");
81 }
82 #else /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
83 static inline void ieee80211_dump_frame(const char *ifname, const char *title,
84 struct sk_buff *skb)
85 {
86 }
87 #endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
88
89 static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr,
90 int next_frag_len)
91 {
92 int rate, mrate, erp, dur, i;
93 struct ieee80211_rate *txrate = tx->u.tx.rate;
94 struct ieee80211_local *local = tx->local;
95 struct ieee80211_hw_mode *mode = tx->u.tx.mode;
96
97 erp = txrate->flags & IEEE80211_RATE_ERP;
98
99 /*
100 * data and mgmt (except PS Poll):
101 * - during CFP: 32768
102 * - during contention period:
103 * if addr1 is group address: 0
104 * if more fragments = 0 and addr1 is individual address: time to
105 * transmit one ACK plus SIFS
106 * if more fragments = 1 and addr1 is individual address: time to
107 * transmit next fragment plus 2 x ACK plus 3 x SIFS
108 *
109 * IEEE 802.11, 9.6:
110 * - control response frame (CTS or ACK) shall be transmitted using the
111 * same rate as the immediately previous frame in the frame exchange
112 * sequence, if this rate belongs to the PHY mandatory rates, or else
113 * at the highest possible rate belonging to the PHY rates in the
114 * BSSBasicRateSet
115 */
116
117 if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) {
118 /* TODO: These control frames are not currently sent by
119 * 80211.o, but should they be implemented, this function
120 * needs to be updated to support duration field calculation.
121 *
122 * RTS: time needed to transmit pending data/mgmt frame plus
123 * one CTS frame plus one ACK frame plus 3 x SIFS
124 * CTS: duration of immediately previous RTS minus time
125 * required to transmit CTS and its SIFS
126 * ACK: 0 if immediately previous directed data/mgmt had
127 * more=0, with more=1 duration in ACK frame is duration
128 * from previous frame minus time needed to transmit ACK
129 * and its SIFS
130 * PS Poll: BIT(15) | BIT(14) | aid
131 */
132 return 0;
133 }
134
135 /* data/mgmt */
136 if (0 /* FIX: data/mgmt during CFP */)
137 return 32768;
138
139 if (group_addr) /* Group address as the destination - no ACK */
140 return 0;
141
142 /* Individual destination address:
143 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
144 * CTS and ACK frames shall be transmitted using the highest rate in
145 * basic rate set that is less than or equal to the rate of the
146 * immediately previous frame and that is using the same modulation
147 * (CCK or OFDM). If no basic rate set matches with these requirements,
148 * the highest mandatory rate of the PHY that is less than or equal to
149 * the rate of the previous frame is used.
150 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
151 */
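/* Illustrative example (assuming mode->rates is ordered by increasing
 * rate, as the early break below relies on): for a 48 Mbps OFDM txrate
 * with a basic rate set of {1, 2, 5.5, 11, 24} Mbps, the loop picks
 * 24 Mbps, the highest basic rate <= txrate with matching modulation;
 * with a CCK-only basic set it falls back to the highest mandatory
 * OFDM rate <= txrate, which is also 24 Mbps. */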
152 rate = -1;
153 mrate = 10; /* use 1 Mbps if everything fails */
154 for (i = 0; i < mode->num_rates; i++) {
155 struct ieee80211_rate *r = &mode->rates[i];
156 if (r->rate > txrate->rate)
157 break;
158
159 if (IEEE80211_RATE_MODULATION(txrate->flags) !=
160 IEEE80211_RATE_MODULATION(r->flags))
161 continue;
162
163 if (r->flags & IEEE80211_RATE_BASIC)
164 rate = r->rate;
165 else if (r->flags & IEEE80211_RATE_MANDATORY)
166 mrate = r->rate;
167 }
168 if (rate == -1) {
169 /* No matching basic rate found; use highest suitable mandatory
170 * PHY rate */
171 rate = mrate;
172 }
173
174 /* Time needed to transmit ACK
175 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
176 * to closest integer */
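/* For instance, at 1 Mbps with a long preamble this comes to roughly
 * 192 us of preamble/PLCP header + 112 us for the 112 ACK bits
 * + 10 us SIFS, i.e. about 314 us; ieee80211_frame_duration() below
 * accounts for the 4-byte FCS itself, hence len = 10. */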
177
178 dur = ieee80211_frame_duration(local, 10, rate, erp,
179 tx->sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE);
180
181 if (next_frag_len) {
182 /* Frame is fragmented: duration increases with time needed to
183 * transmit next fragment plus ACK and 2 x SIFS. */
184 dur *= 2; /* ACK + SIFS */
185 /* next fragment */
186 dur += ieee80211_frame_duration(local, next_frag_len,
187 txrate->rate, erp,
188 tx->sdata->flags &
189 IEEE80211_SDATA_SHORT_PREAMBLE);
190 }
191
192 return dur;
193 }
194
195 static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local,
196 int queue)
197 {
198 return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]);
199 }
200
201 static inline int __ieee80211_queue_pending(const struct ieee80211_local *local,
202 int queue)
203 {
204 return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]);
205 }
206
207 static inline int is_ieee80211_device(struct net_device *dev,
208 struct net_device *master)
209 {
210 return (wdev_priv(dev->ieee80211_ptr) ==
211 wdev_priv(master->ieee80211_ptr));
212 }
213
214 /* tx handlers */
215
216 static ieee80211_txrx_result
217 ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx)
218 {
219 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
220 struct sk_buff *skb = tx->skb;
221 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
222 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
223 u32 sta_flags;
224
225 if (unlikely(tx->local->sta_scanning != 0) &&
226 ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
227 (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ))
228 return TXRX_DROP;
229
230 if (tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED)
231 return TXRX_CONTINUE;
232
233 sta_flags = tx->sta ? tx->sta->flags : 0;
234
235 if (likely(tx->flags & IEEE80211_TXRXD_TXUNICAST)) {
236 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
237 tx->sdata->type != IEEE80211_IF_TYPE_IBSS &&
238 (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
239 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
240 DECLARE_MAC_BUF(mac);
241 printk(KERN_DEBUG "%s: dropped data frame to not "
242 "associated station %s\n",
243 tx->dev->name, print_mac(mac, hdr->addr1));
244 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
245 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
246 return TXRX_DROP;
247 }
248 } else {
249 if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
250 tx->local->num_sta == 0 &&
251 tx->sdata->type != IEEE80211_IF_TYPE_IBSS)) {
252 /*
253 * No associated STAs - no need to send multicast
254 * frames.
255 */
256 return TXRX_DROP;
257 }
258 return TXRX_CONTINUE;
259 }
260
261 if (unlikely(/* !injected && */ tx->sdata->ieee802_1x &&
262 !(sta_flags & WLAN_STA_AUTHORIZED))) {
263 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
264 DECLARE_MAC_BUF(mac);
265 printk(KERN_DEBUG "%s: dropped frame to %s"
266 " (unauthorized port)\n", tx->dev->name,
267 print_mac(mac, hdr->addr1));
268 #endif
269 I802_DEBUG_INC(tx->local->tx_handlers_drop_unauth_port);
270 return TXRX_DROP;
271 }
272
273 return TXRX_CONTINUE;
274 }
275
276 static ieee80211_txrx_result
277 ieee80211_tx_h_sequence(struct ieee80211_txrx_data *tx)
278 {
279 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
280
281 if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24)
282 ieee80211_include_sequence(tx->sdata, hdr);
283
284 return TXRX_CONTINUE;
285 }
286
287 /* This function is called whenever the AP is about to exceed the maximum limit
288 * of buffered frames for power saving STAs. This situation should not really
289 * happen often during normal operation, so dropping the oldest buffered packet
290 * from each queue should be OK to make some room for new frames. */
291 static void purge_old_ps_buffers(struct ieee80211_local *local)
292 {
293 int total = 0, purged = 0;
294 struct sk_buff *skb;
295 struct ieee80211_sub_if_data *sdata;
296 struct sta_info *sta;
297
298 /*
299 * virtual interfaces are protected by RCU
300 */
301 rcu_read_lock();
302
303 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
304 struct ieee80211_if_ap *ap;
305 if (sdata->dev == local->mdev ||
306 sdata->type != IEEE80211_IF_TYPE_AP)
307 continue;
308 ap = &sdata->u.ap;
309 skb = skb_dequeue(&ap->ps_bc_buf);
310 if (skb) {
311 purged++;
312 dev_kfree_skb(skb);
313 }
314 total += skb_queue_len(&ap->ps_bc_buf);
315 }
316 rcu_read_unlock();
317
318 read_lock_bh(&local->sta_lock);
319 list_for_each_entry(sta, &local->sta_list, list) {
320 skb = skb_dequeue(&sta->ps_tx_buf);
321 if (skb) {
322 purged++;
323 dev_kfree_skb(skb);
324 }
325 total += skb_queue_len(&sta->ps_tx_buf);
326 }
327 read_unlock_bh(&local->sta_lock);
328
329 local->total_ps_buffered = total;
330 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n",
331 wiphy_name(local->hw.wiphy), purged);
332 }
333
334 static inline ieee80211_txrx_result
335 ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx)
336 {
337 /* broadcast/multicast frame */
338 /* If any of the associated stations is in power save mode,
339 * the frame is buffered to be sent after DTIM beacon frame */
340 if ((tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) &&
341 tx->sdata->type != IEEE80211_IF_TYPE_WDS &&
342 tx->sdata->bss && atomic_read(&tx->sdata->bss->num_sta_ps) &&
343 !(tx->fc & IEEE80211_FCTL_ORDER)) {
344 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
345 purge_old_ps_buffers(tx->local);
346 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >=
347 AP_MAX_BC_BUFFER) {
348 if (net_ratelimit()) {
349 printk(KERN_DEBUG "%s: BC TX buffer full - "
350 "dropping the oldest frame\n",
351 tx->dev->name);
352 }
353 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
354 } else
355 tx->local->total_ps_buffered++;
356 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
357 return TXRX_QUEUED;
358 }
359
360 return TXRX_CONTINUE;
361 }
362
363 static inline ieee80211_txrx_result
364 ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
365 {
366 struct sta_info *sta = tx->sta;
367 DECLARE_MAC_BUF(mac);
368
369 if (unlikely(!sta ||
370 ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
371 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
372 return TXRX_CONTINUE;
373
374 if (unlikely((sta->flags & WLAN_STA_PS) && !sta->pspoll)) {
375 struct ieee80211_tx_packet_data *pkt_data;
376 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
377 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
378 "before %d)\n",
379 print_mac(mac, sta->addr), sta->aid,
380 skb_queue_len(&sta->ps_tx_buf));
381 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
382 sta->flags |= WLAN_STA_TIM;
383 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
384 purge_old_ps_buffers(tx->local);
385 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
386 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
387 if (net_ratelimit()) {
388 printk(KERN_DEBUG "%s: STA %s TX "
389 "buffer full - dropping oldest frame\n",
390 tx->dev->name, print_mac(mac, sta->addr));
391 }
392 dev_kfree_skb(old);
393 } else
394 tx->local->total_ps_buffered++;
395 /* Queue frame to be sent after STA sends a PS Poll frame */
396 if (skb_queue_empty(&sta->ps_tx_buf)) {
397 if (tx->local->ops->set_tim)
398 tx->local->ops->set_tim(local_to_hw(tx->local),
399 sta->aid, 1);
400 if (tx->sdata->bss)
401 bss_tim_set(tx->local, tx->sdata->bss, sta->aid);
402 }
403 pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb;
404 pkt_data->jiffies = jiffies;
405 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
406 return TXRX_QUEUED;
407 }
408 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
409 else if (unlikely(sta->flags & WLAN_STA_PS)) {
410 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll "
411 "set -> send frame\n", tx->dev->name,
412 print_mac(mac, sta->addr));
413 }
414 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
415 sta->pspoll = 0;
416
417 return TXRX_CONTINUE;
418 }
419
420
421 static ieee80211_txrx_result
422 ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx)
423 {
424 if (unlikely(tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED))
425 return TXRX_CONTINUE;
426
427 if (tx->flags & IEEE80211_TXRXD_TXUNICAST)
428 return ieee80211_tx_h_unicast_ps_buf(tx);
429 else
430 return ieee80211_tx_h_multicast_ps_buf(tx);
431 }
432
433
434
435
436 static ieee80211_txrx_result
437 ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx)
438 {
439 struct ieee80211_key *key;
440
441 if (unlikely(tx->u.tx.control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
442 tx->key = NULL;
443 else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
444 tx->key = key;
445 else if ((key = rcu_dereference(tx->sdata->default_key)))
446 tx->key = key;
447 else if (tx->sdata->drop_unencrypted &&
448 !(tx->sdata->eapol && ieee80211_is_eapol(tx->skb))) {
449 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
450 return TXRX_DROP;
451 } else {
452 tx->key = NULL;
453 tx->u.tx.control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
454 }
455
456 if (tx->key) {
457 tx->key->tx_rx_count++;
458 /* TODO: add threshold stuff again */
459 }
460
461 return TXRX_CONTINUE;
462 }
463
464 static ieee80211_txrx_result
465 ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
466 {
467 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
468 size_t hdrlen, per_fragm, num_fragm, payload_len, left;
469 struct sk_buff **frags, *first, *frag;
470 int i;
471 u16 seq;
472 u8 *pos;
473 int frag_threshold = tx->local->fragmentation_threshold;
474
475 if (!(tx->flags & IEEE80211_TXRXD_FRAGMENTED))
476 return TXRX_CONTINUE;
477
478 first = tx->skb;
479
480 hdrlen = ieee80211_get_hdrlen(tx->fc);
481 payload_len = first->len - hdrlen;
482 per_fragm = frag_threshold - hdrlen - FCS_LEN;
483 num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
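/* Worked example with illustrative numbers: frag_threshold = 256 and
 * a 24-byte header give per_fragm = 256 - 24 - 4 = 228 bytes, so a
 * 1472-byte payload becomes DIV_ROUND_UP(1472, 228) = 7 fragments:
 * six carrying 228 bytes each (the first staying in the original skb)
 * and a final one carrying the remaining 104 bytes. */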
484
485 frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
486 if (!frags)
487 goto fail;
488
489 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
490 seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
491 pos = first->data + hdrlen + per_fragm;
492 left = payload_len - per_fragm;
493 for (i = 0; i < num_fragm - 1; i++) {
494 struct ieee80211_hdr *fhdr;
495 size_t copylen;
496
497 if (left <= 0)
498 goto fail;
499
500 /* reserve enough extra head and tail room for possible
501 * encryption */
502 frag = frags[i] =
503 dev_alloc_skb(tx->local->tx_headroom +
504 frag_threshold +
505 IEEE80211_ENCRYPT_HEADROOM +
506 IEEE80211_ENCRYPT_TAILROOM);
507 if (!frag)
508 goto fail;
509 /* Make sure that all fragments use the same priority so
510 * that they end up using the same TX queue */
511 frag->priority = first->priority;
512 skb_reserve(frag, tx->local->tx_headroom +
513 IEEE80211_ENCRYPT_HEADROOM);
514 fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
515 memcpy(fhdr, first->data, hdrlen);
516 if (i == num_fragm - 2)
517 fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS);
518 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
519 copylen = left > per_fragm ? per_fragm : left;
520 memcpy(skb_put(frag, copylen), pos, copylen);
521
522 pos += copylen;
523 left -= copylen;
524 }
525 skb_trim(first, hdrlen + per_fragm);
526
527 tx->u.tx.num_extra_frag = num_fragm - 1;
528 tx->u.tx.extra_frag = frags;
529
530 return TXRX_CONTINUE;
531
532 fail:
533 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name);
534 if (frags) {
535 for (i = 0; i < num_fragm - 1; i++)
536 if (frags[i])
537 dev_kfree_skb(frags[i]);
538 kfree(frags);
539 }
540 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
541 return TXRX_DROP;
542 }
543
544 static ieee80211_txrx_result
545 ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx)
546 {
547 if (!tx->key)
548 return TXRX_CONTINUE;
549
550 switch (tx->key->conf.alg) {
551 case ALG_WEP:
552 return ieee80211_crypto_wep_encrypt(tx);
553 case ALG_TKIP:
554 return ieee80211_crypto_tkip_encrypt(tx);
555 case ALG_CCMP:
556 return ieee80211_crypto_ccmp_encrypt(tx);
557 case ALG_NONE:
558 return TXRX_CONTINUE;
559 }
560
561 /* not reached */
562 WARN_ON(1);
563 return TXRX_DROP;
564 }
565
566 static ieee80211_txrx_result
567 ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx)
568 {
569 struct rate_control_extra extra;
570
571 memset(&extra, 0, sizeof(extra));
572 extra.mode = tx->u.tx.mode;
573 extra.ethertype = tx->ethertype;
574
575 tx->u.tx.rate = rate_control_get_rate(tx->local, tx->dev, tx->skb,
576 &extra);
577 if (unlikely(extra.probe != NULL)) {
578 tx->u.tx.control->flags |= IEEE80211_TXCTL_RATE_CTRL_PROBE;
579 tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG;
580 tx->u.tx.control->alt_retry_rate = tx->u.tx.rate->val;
581 tx->u.tx.rate = extra.probe;
582 } else {
583 tx->u.tx.control->alt_retry_rate = -1;
584 }
585 if (!tx->u.tx.rate)
586 return TXRX_DROP;
587 if (tx->u.tx.mode->mode == MODE_IEEE80211G &&
588 (tx->sdata->flags & IEEE80211_SDATA_USE_PROTECTION) &&
589 (tx->flags & IEEE80211_TXRXD_FRAGMENTED) && extra.nonerp) {
590 tx->u.tx.last_frag_rate = tx->u.tx.rate;
591 if (extra.probe)
592 tx->flags &= ~IEEE80211_TXRXD_TXPROBE_LAST_FRAG;
593 else
594 tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG;
595 tx->u.tx.rate = extra.nonerp;
596 tx->u.tx.control->rate = extra.nonerp;
597 tx->u.tx.control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE;
598 } else {
599 tx->u.tx.last_frag_rate = tx->u.tx.rate;
600 tx->u.tx.control->rate = tx->u.tx.rate;
601 }
602 tx->u.tx.control->tx_rate = tx->u.tx.rate->val;
603
604 return TXRX_CONTINUE;
605 }
606
607 static ieee80211_txrx_result
608 ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
609 {
610 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
611 u16 fc = le16_to_cpu(hdr->frame_control);
612 u16 dur;
613 struct ieee80211_tx_control *control = tx->u.tx.control;
614 struct ieee80211_hw_mode *mode = tx->u.tx.mode;
615
616 if (!is_multicast_ether_addr(hdr->addr1)) {
617 if (tx->skb->len + FCS_LEN > tx->local->rts_threshold &&
618 tx->local->rts_threshold < IEEE80211_MAX_RTS_THRESHOLD) {
619 control->flags |= IEEE80211_TXCTL_USE_RTS_CTS;
620 control->flags |= IEEE80211_TXCTL_LONG_RETRY_LIMIT;
621 control->retry_limit =
622 tx->local->long_retry_limit;
623 } else {
624 control->retry_limit =
625 tx->local->short_retry_limit;
626 }
627 } else {
628 control->retry_limit = 1;
629 }
630
631 if (tx->flags & IEEE80211_TXRXD_FRAGMENTED) {
632 /* Do not use multiple retry rates when sending fragmented
633 * frames.
634 * TODO: The last fragment could still use multiple retry
635 * rates. */
636 control->alt_retry_rate = -1;
637 }
638
639 /* Use CTS protection for unicast frames sent using extended rates if
640 * there are associated non-ERP stations and RTS/CTS is not configured
641 * for the frame. */
642 if (mode->mode == MODE_IEEE80211G &&
643 (tx->u.tx.rate->flags & IEEE80211_RATE_ERP) &&
644 (tx->flags & IEEE80211_TXRXD_TXUNICAST) &&
645 (tx->sdata->flags & IEEE80211_SDATA_USE_PROTECTION) &&
646 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS))
647 control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT;
648
649 /* Transmit data frames using short preambles if the driver supports
650 * short preambles at the selected rate and short preambles are
651 * available on the network at the current point in time. */
652 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
653 (tx->u.tx.rate->flags & IEEE80211_RATE_PREAMBLE2) &&
654 (tx->sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE) &&
655 (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) {
656 tx->u.tx.control->tx_rate = tx->u.tx.rate->val2;
657 }
658
659 /* Setup duration field for the first fragment of the frame. Duration
660 * for remaining fragments will be updated when they are being sent
661 * to low-level driver in ieee80211_tx(). */
662 dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1),
663 (tx->flags & IEEE80211_TXRXD_FRAGMENTED) ?
664 tx->u.tx.extra_frag[0]->len : 0);
665 hdr->duration_id = cpu_to_le16(dur);
666
667 if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) ||
668 (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) {
669 struct ieee80211_rate *rate;
670
671 /* Do not use multiple retry rates when using RTS/CTS */
672 control->alt_retry_rate = -1;
673
674 /* Use min(data rate, max base rate) as CTS/RTS rate */
675 rate = tx->u.tx.rate;
676 while (rate > mode->rates &&
677 !(rate->flags & IEEE80211_RATE_BASIC))
678 rate--;
679
680 control->rts_cts_rate = rate->val;
681 control->rts_rate = rate;
682 }
683
684 if (tx->sta) {
685 tx->sta->tx_packets++;
686 tx->sta->tx_fragments++;
687 tx->sta->tx_bytes += tx->skb->len;
688 if (tx->u.tx.extra_frag) {
689 int i;
690 tx->sta->tx_fragments += tx->u.tx.num_extra_frag;
691 for (i = 0; i < tx->u.tx.num_extra_frag; i++) {
692 tx->sta->tx_bytes +=
693 tx->u.tx.extra_frag[i]->len;
694 }
695 }
696 }
697
698 /*
699 * Tell hardware to not encrypt when we had sw crypto.
700 * Because we use the same flag to internally indicate that
701 * no (software) encryption should be done, we have to set it
702 * after all crypto handlers.
703 */
704 if (tx->key && !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
705 tx->u.tx.control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
706
707 return TXRX_CONTINUE;
708 }
709
710 static ieee80211_txrx_result
711 ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx)
712 {
713 struct ieee80211_local *local = tx->local;
714 struct ieee80211_hw_mode *mode = tx->u.tx.mode;
715 struct sk_buff *skb = tx->skb;
716 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
717 u32 load = 0, hdrtime;
718
719 /* TODO: this could be part of tx_status handling, so that the number
720 * of retries would be known; TX rate should in that case be stored
721 * somewhere with the packet */
722
723 /* Estimate total channel use caused by this frame */
724
725 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
726 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
727
728 if (mode->mode == MODE_IEEE80211A ||
729 (mode->mode == MODE_IEEE80211G &&
730 tx->u.tx.rate->flags & IEEE80211_RATE_ERP))
731 hdrtime = CHAN_UTIL_HDR_SHORT;
732 else
733 hdrtime = CHAN_UTIL_HDR_LONG;
734
735 load = hdrtime;
736 if (!is_multicast_ether_addr(hdr->addr1))
737 load += hdrtime;
738
739 if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
740 load += 2 * hdrtime;
741 else if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
742 load += hdrtime;
743
744 load += skb->len * tx->u.tx.rate->rate_inv;
745
746 if (tx->u.tx.extra_frag) {
747 int i;
748 for (i = 0; i < tx->u.tx.num_extra_frag; i++) {
749 load += 2 * hdrtime;
750 load += tx->u.tx.extra_frag[i]->len *
751 tx->u.tx.rate->rate_inv;
752 }
753 }
754
755 /* Divide channel_use by 8 to avoid wrapping around the counter */
756 load >>= CHAN_UTIL_SHIFT;
757 local->channel_use_raw += load;
758 if (tx->sta)
759 tx->sta->channel_use_raw += load;
760 tx->sdata->channel_use_raw += load;
761
762 return TXRX_CONTINUE;
763 }
764
765 /* TODO: implement register/unregister functions for adding TX/RX handlers
766 * into ordered list */
767
768 ieee80211_tx_handler ieee80211_tx_handlers[] =
769 {
770 ieee80211_tx_h_check_assoc,
771 ieee80211_tx_h_sequence,
772 ieee80211_tx_h_ps_buf,
773 ieee80211_tx_h_select_key,
774 ieee80211_tx_h_michael_mic_add,
775 ieee80211_tx_h_fragment,
776 ieee80211_tx_h_encrypt,
777 ieee80211_tx_h_rate_ctrl,
778 ieee80211_tx_h_misc,
779 ieee80211_tx_h_load_stats,
780 NULL
781 };
782
783 /* actual transmit path */
784
785 /*
786 * deal with packet injection down monitor interface
787 * with Radiotap Header -- only called for monitor mode interface
788 */
789 static ieee80211_txrx_result
790 __ieee80211_parse_tx_radiotap(
791 struct ieee80211_txrx_data *tx,
792 struct sk_buff *skb, struct ieee80211_tx_control *control)
793 {
794 /*
795 * this is the moment to interpret and discard the radiotap header that
796 * must be at the start of the packet injected in Monitor mode
797 *
798 * Need to take some care with endian-ness since radiotap
799 * args are little-endian
800 */
801
802 struct ieee80211_radiotap_iterator iterator;
803 struct ieee80211_radiotap_header *rthdr =
804 (struct ieee80211_radiotap_header *) skb->data;
805 struct ieee80211_hw_mode *mode = tx->local->hw.conf.mode;
806 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
807
808 /*
809 * default control situation for all injected packets
810 * FIXME: this does not suit all usage cases, expand to allow control
811 */
812
813 control->retry_limit = 1; /* no retry */
814 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS |
815 IEEE80211_TXCTL_USE_CTS_PROTECT);
816 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT |
817 IEEE80211_TXCTL_NO_ACK;
818 control->antenna_sel_tx = 0; /* default to default antenna */
819
820 /*
821 * for every radiotap entry that is present
822 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
823 * entries present, or -EINVAL on error)
824 */
825
826 while (!ret) {
827 int i, target_rate;
828
829 ret = ieee80211_radiotap_iterator_next(&iterator);
830
831 if (ret)
832 continue;
833
834 /* see if this argument is something we can use */
835 switch (iterator.this_arg_index) {
836 /*
837 * You must take care when dereferencing iterator.this_arg
838 * for multibyte types... the pointer is not aligned. Use
839 * get_unaligned((type *)iterator.this_arg) to dereference
840 * iterator.this_arg for type "type" safely on all arches.
841 */
842 case IEEE80211_RADIOTAP_RATE:
843 /*
844 * radiotap rate u8 is in 500kbps units eg, 0x02=1Mbps
845 * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps
846 */
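/* e.g. a radiotap rate byte of 0x0c (6 Mbps) gives target_rate = 60;
 * the loop then ends up on the highest supported rate <= 60
 * (assuming mode->rates is ordered by increasing rate). */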
847 target_rate = (*iterator.this_arg) * 5;
848 for (i = 0; i < mode->num_rates; i++) {
849 struct ieee80211_rate *r = &mode->rates[i];
850
851 if (r->rate > target_rate)
852 continue;
853
854 control->rate = r;
855
856 if (r->flags & IEEE80211_RATE_PREAMBLE2)
857 control->tx_rate = r->val2;
858 else
859 control->tx_rate = r->val;
860
861 /* end on exact match */
862 if (r->rate == target_rate)
863 i = mode->num_rates;
864 }
865 break;
866
867 case IEEE80211_RADIOTAP_ANTENNA:
868 /*
869 * radiotap uses 0 for 1st ant, mac80211 is 1 for
870 * 1st ant
871 */
872 control->antenna_sel_tx = (*iterator.this_arg) + 1;
873 break;
874
875 case IEEE80211_RADIOTAP_DBM_TX_POWER:
876 control->power_level = *iterator.this_arg;
877 break;
878
879 case IEEE80211_RADIOTAP_FLAGS:
880 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
881 /*
882 * this indicates that the skb we have been
883 * handed has the 32-bit FCS CRC at the end...
884 * we should react to that by snipping it off
885 * because it will be recomputed and added
886 * on transmission
887 */
888 if (skb->len < (iterator.max_length + FCS_LEN))
889 return TXRX_DROP;
890
891 skb_trim(skb, skb->len - FCS_LEN);
892 }
893 break;
894
895 default:
896 break;
897 }
898 }
899
900 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
901 return TXRX_DROP;
902
903 /*
904 * remove the radiotap header
905 * iterator->max_length was sanity-checked against
906 * skb->len by iterator init
907 */
908 skb_pull(skb, iterator.max_length);
909
910 return TXRX_CONTINUE;
911 }
912
913 static inline ieee80211_txrx_result
914 __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
915 struct sk_buff *skb,
916 struct net_device *dev,
917 struct ieee80211_tx_control *control)
918 {
919 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
920 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
921 struct ieee80211_sub_if_data *sdata;
922 ieee80211_txrx_result res = TXRX_CONTINUE;
923
924 int hdrlen;
925
926 memset(tx, 0, sizeof(*tx));
927 tx->skb = skb;
928 tx->dev = dev; /* use original interface */
929 tx->local = local;
930 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev);
931
932 /*
933 * set defaults for things that can be set by
934 * injected radiotap headers
935 */
936 control->power_level = local->hw.conf.power_level;
937 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
938
939 /* process and remove the injection radiotap header */
940 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
941 if (unlikely(sdata->type == IEEE80211_IF_TYPE_MNTR)) {
942 if (__ieee80211_parse_tx_radiotap(tx, skb, control) ==
943 TXRX_DROP) {
944 return TXRX_DROP;
945 }
946 /*
947 * the radiotap header has been removed by this point and control
948 * has been filled in with whatever we could use, so point hdr at
949 * the actual ieee80211 header now
950 */
951 hdr = (struct ieee80211_hdr *) skb->data;
952 res = TXRX_QUEUED; /* indication it was monitor packet */
953 }
954
955 tx->sta = sta_info_get(local, hdr->addr1);
956 tx->fc = le16_to_cpu(hdr->frame_control);
957 tx->u.tx.control = control;
958 if (is_multicast_ether_addr(hdr->addr1)) {
959 tx->flags &= ~IEEE80211_TXRXD_TXUNICAST;
960 control->flags |= IEEE80211_TXCTL_NO_ACK;
961 } else {
962 tx->flags |= IEEE80211_TXRXD_TXUNICAST;
963 control->flags &= ~IEEE80211_TXCTL_NO_ACK;
964 }
965 if (local->fragmentation_threshold < IEEE80211_MAX_FRAG_THRESHOLD &&
966 (tx->flags & IEEE80211_TXRXD_TXUNICAST) &&
967 skb->len + FCS_LEN > local->fragmentation_threshold &&
968 !local->ops->set_frag_threshold)
969 tx->flags |= IEEE80211_TXRXD_FRAGMENTED;
970 else
971 tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED;
972 if (!tx->sta)
973 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK;
974 else if (tx->sta->clear_dst_mask) {
975 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK;
976 tx->sta->clear_dst_mask = 0;
977 }
978 hdrlen = ieee80211_get_hdrlen(tx->fc);
979 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
980 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
981 tx->ethertype = (pos[0] << 8) | pos[1];
982 }
983 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT;
984
985 return res;
986 }
987
988 /* Device in tx->dev has a reference added; use dev_put(tx->dev) when
989 * finished with it. */
990 static inline int ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
991 struct sk_buff *skb,
992 struct net_device *mdev,
993 struct ieee80211_tx_control *control)
994 {
995 struct ieee80211_tx_packet_data *pkt_data;
996 struct net_device *dev;
997
998 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
999 dev = dev_get_by_index(&init_net, pkt_data->ifindex);
1000 if (unlikely(dev && !is_ieee80211_device(dev, mdev))) {
1001 dev_put(dev);
1002 dev = NULL;
1003 }
1004 if (unlikely(!dev))
1005 return -ENODEV;
1006 __ieee80211_tx_prepare(tx, skb, dev, control);
1007 return 0;
1008 }
1009
1010 static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1011 struct ieee80211_txrx_data *tx)
1012 {
1013 struct ieee80211_tx_control *control = tx->u.tx.control;
1014 int ret, i;
1015
1016 if (!ieee80211_qdisc_installed(local->mdev) &&
1017 __ieee80211_queue_stopped(local, 0)) {
1018 netif_stop_queue(local->mdev);
1019 return IEEE80211_TX_AGAIN;
1020 }
1021 if (skb) {
1022 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1023 "TX to low-level driver", skb);
1024 ret = local->ops->tx(local_to_hw(local), skb, control);
1025 if (ret)
1026 return IEEE80211_TX_AGAIN;
1027 local->mdev->trans_start = jiffies;
1028 ieee80211_led_tx(local, 1);
1029 }
1030 if (tx->u.tx.extra_frag) {
1031 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS |
1032 IEEE80211_TXCTL_USE_CTS_PROTECT |
1033 IEEE80211_TXCTL_CLEAR_DST_MASK |
1034 IEEE80211_TXCTL_FIRST_FRAGMENT);
1035 for (i = 0; i < tx->u.tx.num_extra_frag; i++) {
1036 if (!tx->u.tx.extra_frag[i])
1037 continue;
1038 if (__ieee80211_queue_stopped(local, control->queue))
1039 return IEEE80211_TX_FRAG_AGAIN;
1040 if (i == tx->u.tx.num_extra_frag - 1) {
1041 control->tx_rate = tx->u.tx.last_frag_hwrate;
1042 control->rate = tx->u.tx.last_frag_rate;
1043 if (tx->flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG)
1044 control->flags |=
1045 IEEE80211_TXCTL_RATE_CTRL_PROBE;
1046 else
1047 control->flags &=
1048 ~IEEE80211_TXCTL_RATE_CTRL_PROBE;
1049 }
1050
1051 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1052 "TX to low-level driver",
1053 tx->u.tx.extra_frag[i]);
1054 ret = local->ops->tx(local_to_hw(local),
1055 tx->u.tx.extra_frag[i],
1056 control);
1057 if (ret)
1058 return IEEE80211_TX_FRAG_AGAIN;
1059 local->mdev->trans_start = jiffies;
1060 ieee80211_led_tx(local, 1);
1061 tx->u.tx.extra_frag[i] = NULL;
1062 }
1063 kfree(tx->u.tx.extra_frag);
1064 tx->u.tx.extra_frag = NULL;
1065 }
1066 return IEEE80211_TX_OK;
1067 }
1068
1069 static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1070 struct ieee80211_tx_control *control)
1071 {
1072 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1073 struct sta_info *sta;
1074 ieee80211_tx_handler *handler;
1075 struct ieee80211_txrx_data tx;
1076 ieee80211_txrx_result res = TXRX_DROP, res_prepare;
1077 int ret, i;
1078
1079 WARN_ON(__ieee80211_queue_pending(local, control->queue));
1080
1081 if (unlikely(skb->len < 10)) {
1082 dev_kfree_skb(skb);
1083 return 0;
1084 }
1085
1086 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control);
1087
1088 if (res_prepare == TXRX_DROP) {
1089 dev_kfree_skb(skb);
1090 return 0;
1091 }
1092
1093 /*
1094 * key references are protected using RCU and this requires that
1095 * we are in a read-side RCU section during transmit processing
1096 */
1097 rcu_read_lock();
1098
1099 sta = tx.sta;
1100 tx.u.tx.mode = local->hw.conf.mode;
1101
1102 if (res_prepare == TXRX_QUEUED) { /* if it was an injected packet */
1103 res = TXRX_CONTINUE;
1104 } else {
1105 for (handler = local->tx_handlers; *handler != NULL;
1106 handler++) {
1107 res = (*handler)(&tx);
1108 if (res != TXRX_CONTINUE)
1109 break;
1110 }
1111 }
1112
1113 skb = tx.skb; /* handlers are allowed to change skb */
1114
1115 if (sta)
1116 sta_info_put(sta);
1117
1118 if (unlikely(res == TXRX_DROP)) {
1119 I802_DEBUG_INC(local->tx_handlers_drop);
1120 goto drop;
1121 }
1122
1123 if (unlikely(res == TXRX_QUEUED)) {
1124 I802_DEBUG_INC(local->tx_handlers_queued);
1125 rcu_read_unlock();
1126 return 0;
1127 }
1128
1129 if (tx.u.tx.extra_frag) {
1130 for (i = 0; i < tx.u.tx.num_extra_frag; i++) {
1131 int next_len, dur;
1132 struct ieee80211_hdr *hdr =
1133 (struct ieee80211_hdr *)
1134 tx.u.tx.extra_frag[i]->data;
1135
1136 if (i + 1 < tx.u.tx.num_extra_frag) {
1137 next_len = tx.u.tx.extra_frag[i + 1]->len;
1138 } else {
1139 next_len = 0;
1140 tx.u.tx.rate = tx.u.tx.last_frag_rate;
1141 tx.u.tx.last_frag_hwrate = tx.u.tx.rate->val;
1142 }
1143 dur = ieee80211_duration(&tx, 0, next_len);
1144 hdr->duration_id = cpu_to_le16(dur);
1145 }
1146 }
1147
1148 retry:
1149 ret = __ieee80211_tx(local, skb, &tx);
1150 if (ret) {
1151 struct ieee80211_tx_stored_packet *store =
1152 &local->pending_packet[control->queue];
1153
1154 if (ret == IEEE80211_TX_FRAG_AGAIN)
1155 skb = NULL;
1156 set_bit(IEEE80211_LINK_STATE_PENDING,
1157 &local->state[control->queue]);
1158 smp_mb();
1159 /* When the driver runs out of buffers while sending
1160 * fragments and calls ieee80211_stop_queue, there is
1161 * a small window between the IEEE80211_LINK_STATE_XOFF and
1162 * IEEE80211_LINK_STATE_PENDING flags being set. If a buffer
1163 * becomes available in that window (i.e. the driver calls
1164 * ieee80211_wake_queue), ieee80211_tx could end up being
1165 * called with IEEE80211_LINK_STATE_PENDING set. Prevent this
1166 * by continuing to transmit here whenever that situation may
1167 * have occurred. */
1168 if (!__ieee80211_queue_stopped(local, control->queue)) {
1169 clear_bit(IEEE80211_LINK_STATE_PENDING,
1170 &local->state[control->queue]);
1171 goto retry;
1172 }
1173 memcpy(&store->control, control,
1174 sizeof(struct ieee80211_tx_control));
1175 store->skb = skb;
1176 store->extra_frag = tx.u.tx.extra_frag;
1177 store->num_extra_frag = tx.u.tx.num_extra_frag;
1178 store->last_frag_hwrate = tx.u.tx.last_frag_hwrate;
1179 store->last_frag_rate = tx.u.tx.last_frag_rate;
1180 store->last_frag_rate_ctrl_probe =
1181 !!(tx.flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG);
1182 }
1183 rcu_read_unlock();
1184 return 0;
1185
1186 drop:
1187 if (skb)
1188 dev_kfree_skb(skb);
1189 for (i = 0; i < tx.u.tx.num_extra_frag; i++)
1190 if (tx.u.tx.extra_frag[i])
1191 dev_kfree_skb(tx.u.tx.extra_frag[i]);
1192 kfree(tx.u.tx.extra_frag);
1193 rcu_read_unlock();
1194 return 0;
1195 }
1196
1197 /* device xmit handlers */
1198
1199 int ieee80211_master_start_xmit(struct sk_buff *skb,
1200 struct net_device *dev)
1201 {
1202 struct ieee80211_tx_control control;
1203 struct ieee80211_tx_packet_data *pkt_data;
1204 struct net_device *odev = NULL;
1205 struct ieee80211_sub_if_data *osdata;
1206 int headroom;
1207 int ret;
1208
1209 /*
1210 * copy control out of the skb so other people can use skb->cb
1211 */
1212 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1213 memset(&control, 0, sizeof(struct ieee80211_tx_control));
1214
1215 if (pkt_data->ifindex)
1216 odev = dev_get_by_index(&init_net, pkt_data->ifindex);
1217 if (unlikely(odev && !is_ieee80211_device(odev, dev))) {
1218 dev_put(odev);
1219 odev = NULL;
1220 }
1221 if (unlikely(!odev)) {
1222 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1223 printk(KERN_DEBUG "%s: Discarded packet with nonexistent "
1224 "originating device\n", dev->name);
1225 #endif
1226 dev_kfree_skb(skb);
1227 return 0;
1228 }
1229 osdata = IEEE80211_DEV_TO_SUB_IF(odev);
1230
1231 headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM;
1232 if (skb_headroom(skb) < headroom) {
1233 if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
1234 dev_kfree_skb(skb);
1235 dev_put(odev);
1236 return 0;
1237 }
1238 }
1239
1240 control.ifindex = odev->ifindex;
1241 control.type = osdata->type;
1242 if (pkt_data->flags & IEEE80211_TXPD_REQ_TX_STATUS)
1243 control.flags |= IEEE80211_TXCTL_REQ_TX_STATUS;
1244 if (pkt_data->flags & IEEE80211_TXPD_DO_NOT_ENCRYPT)
1245 control.flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
1246 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE)
1247 control.flags |= IEEE80211_TXCTL_REQUEUE;
1248 control.queue = pkt_data->queue;
1249
1250 ret = ieee80211_tx(odev, skb, &control);
1251 dev_put(odev);
1252
1253 return ret;
1254 }
1255
1256 int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1257 struct net_device *dev)
1258 {
1259 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1260 struct ieee80211_tx_packet_data *pkt_data;
1261 struct ieee80211_radiotap_header *prthdr =
1262 (struct ieee80211_radiotap_header *)skb->data;
1263 u16 len_rthdr;
1264
1265 /* check for not even having the fixed radiotap header part */
1266 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
1267 goto fail; /* too short to be possibly valid */
1268
1269 /* is it a header version we can trust to find length from? */
1270 if (unlikely(prthdr->it_version))
1271 goto fail; /* only version 0 is supported */
1272
1273 /* then there must be a radiotap header with a length we can use */
1274 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1275
1276 /* does the skb contain enough to deliver on the alleged length? */
1277 if (unlikely(skb->len < len_rthdr))
1278 goto fail; /* skb too short for claimed rt header extent */
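/* A minimal injected frame therefore starts with the 8-byte fixed
 * radiotap header (it_version = 0, it_len = 8, it_present = 0)
 * followed immediately by a fully formed 802.11 frame. */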
1279
1280 skb->dev = local->mdev;
1281
1282 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1283 memset(pkt_data, 0, sizeof(*pkt_data));
1284 /* needed because we set skb device to master */
1285 pkt_data->ifindex = dev->ifindex;
1286
1287 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT;
1288
1289 /*
1290 * fix up the pointers accounting for the radiotap
1291 * header still being in there. We are being given
1292 * a precooked IEEE80211 header so no need for
1293 * normal processing
1294 */
1295 skb_set_mac_header(skb, len_rthdr);
1296 /*
1297 * these are just fixed to the end of the rt area since we
1298 * don't have any better information and at this point, nobody cares
1299 */
1300 skb_set_network_header(skb, len_rthdr);
1301 skb_set_transport_header(skb, len_rthdr);
1302
1303 /* pass the radiotap header up to the next stage intact */
1304 dev_queue_xmit(skb);
1305 return NETDEV_TX_OK;
1306
1307 fail:
1308 dev_kfree_skb(skb);
1309 return NETDEV_TX_OK; /* meaning, we dealt with the skb */
1310 }
1311
1312 /**
1313 * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type
1314 * subinterfaces (wlan#, WDS, and VLAN interfaces)
1315 * @skb: packet to be sent
1316 * @dev: incoming interface
1317 *
1318 * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will
1319 * not be freed, and caller is responsible for either retrying later or freeing
1320 * skb).
1321 *
1322 * This function takes in an Ethernet header and encapsulates it with suitable
1323 * IEEE 802.11 header based on which interface the packet is coming in. The
1324 * encapsulated packet will then be passed to master interface, wlan#.11, for
1325 * transmission (through low-level driver).
1326 */
1327 int ieee80211_subif_start_xmit(struct sk_buff *skb,
1328 struct net_device *dev)
1329 {
1330 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1331 struct ieee80211_tx_packet_data *pkt_data;
1332 struct ieee80211_sub_if_data *sdata;
1333 int ret = 1, head_need;
1334 u16 ethertype, hdrlen, fc;
1335 struct ieee80211_hdr hdr;
1336 const u8 *encaps_data;
1337 int encaps_len, skip_header_bytes;
1338 int nh_pos, h_pos;
1339 struct sta_info *sta;
1340
1341 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1342 if (unlikely(skb->len < ETH_HLEN)) {
1343 printk(KERN_DEBUG "%s: short skb (len=%d)\n",
1344 dev->name, skb->len);
1345 ret = 0;
1346 goto fail;
1347 }
1348
1349 nh_pos = skb_network_header(skb) - skb->data;
1350 h_pos = skb_transport_header(skb) - skb->data;
1351
1352 /* convert Ethernet header to proper 802.11 header (based on
1353 * operation mode) */
1354 ethertype = (skb->data[12] << 8) | skb->data[13];
1355 /* TODO: handling for 802.1x authorized/unauthorized port */
1356 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
1357
1358 switch (sdata->type) {
1359 case IEEE80211_IF_TYPE_AP:
1360 case IEEE80211_IF_TYPE_VLAN:
1361 fc |= IEEE80211_FCTL_FROMDS;
1362 /* DA BSSID SA */
1363 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1364 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1365 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1366 hdrlen = 24;
1367 break;
1368 case IEEE80211_IF_TYPE_WDS:
1369 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
1370 /* RA TA DA SA */
1371 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1372 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1373 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1374 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1375 hdrlen = 30;
1376 break;
1377 case IEEE80211_IF_TYPE_STA:
1378 fc |= IEEE80211_FCTL_TODS;
1379 /* BSSID SA DA */
1380 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN);
1381 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1382 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1383 hdrlen = 24;
1384 break;
1385 case IEEE80211_IF_TYPE_IBSS:
1386 /* DA SA BSSID */
1387 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1388 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1389 memcpy(hdr.addr3, sdata->u.sta.bssid, ETH_ALEN);
1390 hdrlen = 24;
1391 break;
1392 default:
1393 ret = 0;
1394 goto fail;
1395 }
1396
1397 /* receiver is QoS enabled, use a QoS type frame */
1398 sta = sta_info_get(local, hdr.addr1);
1399 if (sta) {
1400 if (sta->flags & WLAN_STA_WME) {
1401 fc |= IEEE80211_STYPE_QOS_DATA;
1402 hdrlen += 2;
1403 }
1404 sta_info_put(sta);
1405 }
1406
1407 hdr.frame_control = cpu_to_le16(fc);
1408 hdr.duration_id = 0;
1409 hdr.seq_ctrl = 0;
1410
1411 skip_header_bytes = ETH_HLEN;
1412 if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
1413 encaps_data = bridge_tunnel_header;
1414 encaps_len = sizeof(bridge_tunnel_header);
1415 skip_header_bytes -= 2;
1416 } else if (ethertype >= 0x600) {
1417 encaps_data = rfc1042_header;
1418 encaps_len = sizeof(rfc1042_header);
1419 skip_header_bytes -= 2;
1420 } else {
1421 encaps_data = NULL;
1422 encaps_len = 0;
1423 }
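/* For a plain IPv4 frame (ethertype 0x0800 >= 0x600) this prepends the
 * 6-byte RFC 1042 SNAP header AA AA 03 00 00 00; together with the
 * original ethertype left in place by the reduced skip_header_bytes,
 * the payload ends up with the usual 8-byte LLC/SNAP encapsulation. */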
1424
1425 skb_pull(skb, skip_header_bytes);
1426 nh_pos -= skip_header_bytes;
1427 h_pos -= skip_header_bytes;
1428
1429 /* TODO: implement support for fragments so that there is no need to
1430 * reallocate and copy payload; it might be enough to support one
1431 * extra fragment that would be copied in the beginning of the frame
1432 * data.. anyway, it would be nice to include this into skb structure
1433 * somehow
1434 *
1435 * There are few options for this:
1436 * use skb->cb as an extra space for 802.11 header
1437 * allocate new buffer if not enough headroom
1438 * make sure that there is enough headroom in every skb by increasing
1439 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
1440 * alloc_skb() (net/core/skbuff.c)
1441 */
1442 head_need = hdrlen + encaps_len + local->tx_headroom;
1443 head_need -= skb_headroom(skb);
1444
1445 /* We are going to modify skb data, so make a copy of it if happens to
1446 * be cloned. This could happen, e.g., with Linux bridge code passing
1447 * us broadcast frames. */
1448
1449 if (head_need > 0 || skb_cloned(skb)) {
1450 #if 0
1451 printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes "
1452 "of headroom\n", dev->name, head_need);
1453 #endif
1454
1455 if (skb_cloned(skb))
1456 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1457 else
1458 I802_DEBUG_INC(local->tx_expand_skb_head);
1459 /* Since we have to reallocate the buffer, make sure that there
1460 * is enough room for possible WEP IV/ICV and TKIP (8 bytes
1461 * before payload and 12 after). */
1462 if (pskb_expand_head(skb, (head_need > 0 ? head_need + 8 : 8),
1463 12, GFP_ATOMIC)) {
1464 printk(KERN_DEBUG "%s: failed to reallocate TX buffer"
1465 "\n", dev->name);
1466 goto fail;
1467 }
1468 }
1469
1470 if (encaps_data) {
1471 memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
1472 nh_pos += encaps_len;
1473 h_pos += encaps_len;
1474 }
1475
1476 if (fc & IEEE80211_STYPE_QOS_DATA) {
1477 __le16 *qos_control;
1478
1479 qos_control = (__le16*) skb_push(skb, 2);
1480 memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
1481 /*
1482 * Maybe we could actually set some fields here, for now just
1483 * initialise to zero to indicate no special operation.
1484 */
1485 *qos_control = 0;
1486 } else
1487 memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
1488
1489 nh_pos += hdrlen;
1490 h_pos += hdrlen;
1491
1492 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1493 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data));
1494 pkt_data->ifindex = dev->ifindex;
1495
1496 skb->dev = local->mdev;
1497 dev->stats.tx_packets++;
1498 dev->stats.tx_bytes += skb->len;
1499
1500 /* Update skb pointers to various headers since this modified frame
1501 * is going to go through Linux networking code that may potentially
1502 * need things like pointer to IP header. */
1503 skb_set_mac_header(skb, 0);
1504 skb_set_network_header(skb, nh_pos);
1505 skb_set_transport_header(skb, h_pos);
1506
1507 dev->trans_start = jiffies;
1508 dev_queue_xmit(skb);
1509
1510 return 0;
1511
1512 fail:
1513 if (!ret)
1514 dev_kfree_skb(skb);
1515
1516 return ret;
1517 }
1518
1519 /*
1520 * This is the transmit routine for the 802.11 type interfaces
1521 * called by upper layers of the linux networking
1522 * stack when it has a frame to transmit
1523 */
1524 int ieee80211_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
1525 {
1526 struct ieee80211_sub_if_data *sdata;
1527 struct ieee80211_tx_packet_data *pkt_data;
1528 struct ieee80211_hdr *hdr;
1529 u16 fc;
1530
1531 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1532
1533 if (skb->len < 10) {
1534 dev_kfree_skb(skb);
1535 return 0;
1536 }
1537
1538 if (skb_headroom(skb) < sdata->local->tx_headroom) {
1539 if (pskb_expand_head(skb, sdata->local->tx_headroom,
1540 0, GFP_ATOMIC)) {
1541 dev_kfree_skb(skb);
1542 return 0;
1543 }
1544 }
1545
1546 hdr = (struct ieee80211_hdr *) skb->data;
1547 fc = le16_to_cpu(hdr->frame_control);
1548
1549 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb;
1550 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data));
1551 pkt_data->ifindex = sdata->dev->ifindex;
1552
1553 skb->priority = 20; /* use hardcoded priority for mgmt TX queue */
1554 skb->dev = sdata->local->mdev;
1555
1556 /*
1557 * We're using the protocol field of the frame control header
1558 * to request TX callback for hostapd. BIT(1) is checked.
1559 */
1560 if ((fc & BIT(1)) == BIT(1)) {
1561 pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS;
1562 fc &= ~BIT(1);
1563 hdr->frame_control = cpu_to_le16(fc);
1564 }
1565
1566 if (!(fc & IEEE80211_FCTL_PROTECTED))
1567 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT;
1568
1569 dev->stats.tx_packets++;
1570 dev->stats.tx_bytes += skb->len;
1571
1572 dev_queue_xmit(skb);
1573
1574 return 0;
1575 }
1576
1577 /* helper functions for pending packets for when queues are stopped */
1578
1579 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
1580 {
1581 int i, j;
1582 struct ieee80211_tx_stored_packet *store;
1583
1584 for (i = 0; i < local->hw.queues; i++) {
1585 if (!__ieee80211_queue_pending(local, i))
1586 continue;
1587 store = &local->pending_packet[i];
1588 kfree_skb(store->skb);
1589 for (j = 0; j < store->num_extra_frag; j++)
1590 kfree_skb(store->extra_frag[j]);
1591 kfree(store->extra_frag);
1592 clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]);
1593 }
1594 }
1595
1596 void ieee80211_tx_pending(unsigned long data)
1597 {
1598 struct ieee80211_local *local = (struct ieee80211_local *)data;
1599 struct net_device *dev = local->mdev;
1600 struct ieee80211_tx_stored_packet *store;
1601 struct ieee80211_txrx_data tx;
1602 int i, ret, reschedule = 0;
1603
1604 netif_tx_lock_bh(dev);
1605 for (i = 0; i < local->hw.queues; i++) {
1606 if (__ieee80211_queue_stopped(local, i))
1607 continue;
1608 if (!__ieee80211_queue_pending(local, i)) {
1609 reschedule = 1;
1610 continue;
1611 }
1612 store = &local->pending_packet[i];
1613 tx.u.tx.control = &store->control;
1614 tx.u.tx.extra_frag = store->extra_frag;
1615 tx.u.tx.num_extra_frag = store->num_extra_frag;
1616 tx.u.tx.last_frag_hwrate = store->last_frag_hwrate;
1617 tx.u.tx.last_frag_rate = store->last_frag_rate;
1618 tx.flags = 0;
1619 if (store->last_frag_rate_ctrl_probe)
1620 tx.flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG;
1621 ret = __ieee80211_tx(local, store->skb, &tx);
1622 if (ret) {
1623 if (ret == IEEE80211_TX_FRAG_AGAIN)
1624 store->skb = NULL;
1625 } else {
1626 clear_bit(IEEE80211_LINK_STATE_PENDING,
1627 &local->state[i]);
1628 reschedule = 1;
1629 }
1630 }
1631 netif_tx_unlock_bh(dev);
1632 if (reschedule) {
1633 if (!ieee80211_qdisc_installed(dev)) {
1634 if (!__ieee80211_queue_stopped(local, 0))
1635 netif_wake_queue(dev);
1636 } else
1637 netif_schedule(dev);
1638 }
1639 }
1640
1641 /* functions for drivers to get certain frames */
1642
1643 static void ieee80211_beacon_add_tim(struct ieee80211_local *local,
1644 struct ieee80211_if_ap *bss,
1645 struct sk_buff *skb)
1646 {
1647 u8 *pos, *tim;
1648 int aid0 = 0;
1649 int i, have_bits = 0, n1, n2;
1650
1651 /* Generate bitmap for TIM only if there are any STAs in power save
1652 * mode. */
1653 read_lock_bh(&local->sta_lock);
1654 if (atomic_read(&bss->num_sta_ps) > 0)
1655 /* in the hope that this is faster than
1656 * checking byte-for-byte */
1657 have_bits = !bitmap_empty((unsigned long*)bss->tim,
1658 IEEE80211_MAX_AID+1);
1659
1660 if (bss->dtim_count == 0)
1661 bss->dtim_count = bss->dtim_period - 1;
1662 else
1663 bss->dtim_count--;
1664
1665 tim = pos = (u8 *) skb_put(skb, 6);
1666 *pos++ = WLAN_EID_TIM;
1667 *pos++ = 4;
1668 *pos++ = bss->dtim_count;
1669 *pos++ = bss->dtim_period;
1670
1671 if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
1672 aid0 = 1;
1673
1674 if (have_bits) {
1675 /* Find largest even number N1 so that bits numbered 1 through
1676 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
1677 * (N2 + 1) x 8 through 2007 are 0. */
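/* Example: if only AIDs 17 and 23 have buffered frames, octet 2 of
 * the bitmap (AIDs 16-23) is the only non-zero octet, so n1 = 2 and
 * n2 = 2, the partial virtual bitmap is that single octet, and
 * tim[1] becomes n2 - n1 + 4 = 4. */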
1678 n1 = 0;
1679 for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
1680 if (bss->tim[i]) {
1681 n1 = i & 0xfe;
1682 break;
1683 }
1684 }
1685 n2 = n1;
1686 for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
1687 if (bss->tim[i]) {
1688 n2 = i;
1689 break;
1690 }
1691 }
1692
1693 /* Bitmap control */
1694 *pos++ = n1 | aid0;
1695 /* Part Virt Bitmap */
1696 memcpy(pos, bss->tim + n1, n2 - n1 + 1);
1697
1698 tim[1] = n2 - n1 + 4;
1699 skb_put(skb, n2 - n1);
1700 } else {
1701 *pos++ = aid0; /* Bitmap control */
1702 *pos++ = 0; /* Part Virt Bitmap */
1703 }
1704 read_unlock_bh(&local->sta_lock);
1705 }
1706
1707 struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, int if_id,
1708 struct ieee80211_tx_control *control)
1709 {
1710 struct ieee80211_local *local = hw_to_local(hw);
1711 struct sk_buff *skb;
1712 struct net_device *bdev;
1713 struct ieee80211_sub_if_data *sdata = NULL;
1714 struct ieee80211_if_ap *ap = NULL;
1715 struct ieee80211_rate *rate;
1716 struct rate_control_extra extra;
1717 u8 *b_head, *b_tail;
1718 int bh_len, bt_len;
1719
1720 bdev = dev_get_by_index(&init_net, if_id);
1721 if (bdev) {
1722 sdata = IEEE80211_DEV_TO_SUB_IF(bdev);
1723 ap = &sdata->u.ap;
1724 dev_put(bdev);
1725 }
1726
1727 if (!ap || sdata->type != IEEE80211_IF_TYPE_AP ||
1728 !ap->beacon_head) {
1729 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1730 if (net_ratelimit())
1731 printk(KERN_DEBUG "no beacon data avail for idx=%d "
1732 "(%s)\n", if_id, bdev ? bdev->name : "N/A");
1733 #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
1734 return NULL;
1735 }
1736
1737 /* Assume we are generating the normal beacon locally */
1738 b_head = ap->beacon_head;
1739 b_tail = ap->beacon_tail;
1740 bh_len = ap->beacon_head_len;
1741 bt_len = ap->beacon_tail_len;
1742
1743 skb = dev_alloc_skb(local->tx_headroom +
1744 bh_len + bt_len + 256 /* maximum TIM len */);
1745 if (!skb)
1746 return NULL;
1747
1748 skb_reserve(skb, local->tx_headroom);
1749 memcpy(skb_put(skb, bh_len), b_head, bh_len);
1750
1751 ieee80211_include_sequence(sdata, (struct ieee80211_hdr *)skb->data);
1752
1753 ieee80211_beacon_add_tim(local, ap, skb);
1754
1755 if (b_tail) {
1756 memcpy(skb_put(skb, bt_len), b_tail, bt_len);
1757 }
1758
1759 if (control) {
1760 memset(&extra, 0, sizeof(extra));
1761 extra.mode = local->oper_hw_mode;
1762
1763 rate = rate_control_get_rate(local, local->mdev, skb, &extra);
1764 if (!rate) {
1765 if (net_ratelimit()) {
1766 printk(KERN_DEBUG "%s: ieee80211_beacon_get: no rate "
1767 "found\n", wiphy_name(local->hw.wiphy));
1768 }
1769 dev_kfree_skb(skb);
1770 return NULL;
1771 }
1772
1773 control->tx_rate =
1774 ((sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE) &&
1775 (rate->flags & IEEE80211_RATE_PREAMBLE2)) ?
1776 rate->val2 : rate->val;
1777 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1778 control->power_level = local->hw.conf.power_level;
1779 control->flags |= IEEE80211_TXCTL_NO_ACK;
1780 control->retry_limit = 1;
1781 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK;
1782 }
1783
1784 ap->num_beacons++;
1785 return skb;
1786 }
1787 EXPORT_SYMBOL(ieee80211_beacon_get);
1788
1789 void ieee80211_rts_get(struct ieee80211_hw *hw, int if_id,
1790 const void *frame, size_t frame_len,
1791 const struct ieee80211_tx_control *frame_txctl,
1792 struct ieee80211_rts *rts)
1793 {
1794 const struct ieee80211_hdr *hdr = frame;
1795 u16 fctl;
1796
1797 fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS;
1798 rts->frame_control = cpu_to_le16(fctl);
1799 rts->duration = ieee80211_rts_duration(hw, if_id, frame_len, frame_txctl);
1800 memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
1801 memcpy(rts->ta, hdr->addr2, sizeof(rts->ta));
1802 }
1803 EXPORT_SYMBOL(ieee80211_rts_get);
1804
1805 void ieee80211_ctstoself_get(struct ieee80211_hw *hw, int if_id,
1806 const void *frame, size_t frame_len,
1807 const struct ieee80211_tx_control *frame_txctl,
1808 struct ieee80211_cts *cts)
1809 {
1810 const struct ieee80211_hdr *hdr = frame;
1811 u16 fctl;
1812
1813 fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS;
1814 cts->frame_control = cpu_to_le16(fctl);
1815 cts->duration = ieee80211_ctstoself_duration(hw, if_id, frame_len, frame_txctl);
1816 memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
1817 }
1818 EXPORT_SYMBOL(ieee80211_ctstoself_get);
1819
1820 struct sk_buff *
1821 ieee80211_get_buffered_bc(struct ieee80211_hw *hw, int if_id,
1822 struct ieee80211_tx_control *control)
1823 {
1824 struct ieee80211_local *local = hw_to_local(hw);
1825 struct sk_buff *skb;
1826 struct sta_info *sta;
1827 ieee80211_tx_handler *handler;
1828 struct ieee80211_txrx_data tx;
1829 ieee80211_txrx_result res = TXRX_DROP;
1830 struct net_device *bdev;
1831 struct ieee80211_sub_if_data *sdata;
1832 struct ieee80211_if_ap *bss = NULL;
1833
1834 bdev = dev_get_by_index(&init_net, if_id);
1835 if (bdev) {
1836 sdata = IEEE80211_DEV_TO_SUB_IF(bdev);
1837 bss = &sdata->u.ap;
1838 dev_put(bdev);
1839 }
1840 if (!bss || sdata->type != IEEE80211_IF_TYPE_AP || !bss->beacon_head)
1841 return NULL;
1842
1843 if (bss->dtim_count != 0)
1844 return NULL; /* send buffered bc/mc only after DTIM beacon */
1845 memset(control, 0, sizeof(*control));
1846 while (1) {
1847 skb = skb_dequeue(&bss->ps_bc_buf);
1848 if (!skb)
1849 return NULL;
1850 local->total_ps_buffered--;
1851
1852 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) {
1853 struct ieee80211_hdr *hdr =
1854 (struct ieee80211_hdr *) skb->data;
1855 /* more buffered multicast/broadcast frames ==> set
1856 * MoreData flag in IEEE 802.11 header to inform PS
1857 * STAs */
1858 hdr->frame_control |=
1859 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1860 }
1861
1862 if (ieee80211_tx_prepare(&tx, skb, local->mdev, control) == 0)
1863 break;
1864 dev_kfree_skb_any(skb);
1865 }
1866 sta = tx.sta;
1867 tx.flags |= IEEE80211_TXRXD_TXPS_BUFFERED;
1868 tx.u.tx.mode = local->hw.conf.mode;
1869
1870 for (handler = local->tx_handlers; *handler != NULL; handler++) {
1871 res = (*handler)(&tx);
1872 if (res == TXRX_DROP || res == TXRX_QUEUED)
1873 break;
1874 }
1875 dev_put(tx.dev);
1876 skb = tx.skb; /* handlers are allowed to change skb */
1877
1878 if (res == TXRX_DROP) {
1879 I802_DEBUG_INC(local->tx_handlers_drop);
1880 dev_kfree_skb(skb);
1881 skb = NULL;
1882 } else if (res == TXRX_QUEUED) {
1883 I802_DEBUG_INC(local->tx_handlers_queued);
1884 skb = NULL;
1885 }
1886
1887 if (sta)
1888 sta_info_put(sta);
1889
1890 return skb;
1891 }
1892 EXPORT_SYMBOL(ieee80211_get_buffered_bc);