/*
        Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
        Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
        Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00lib
        Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct sk_buff *skb;
        struct skb_frame_desc *skbdesc;
        unsigned int frame_size;
        unsigned int head_size = 0;
        unsigned int tail_size = 0;

        /*
         * The frame size includes the descriptor size, because the
         * hardware receives the frame directly into the skbuffer.
         */
        frame_size = entry->queue->data_size + entry->queue->desc_size;

        /*
         * The payload should be aligned to a 4-byte boundary; this
         * means we need at least 3 bytes for moving the frame
         * into the correct offset.
         */
        head_size = 4;

        /*
         * For IV/EIV/ICV assembly we must make sure there are
         * at least 8 bytes available in the headroom for the IV/EIV
         * and 8 bytes of tailroom for the ICV data.
         */
        if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
                head_size += 8;
                tail_size += 8;
        }

        /*
         * Allocate skbuffer.
         */
        skb = dev_alloc_skb(frame_size + head_size + tail_size);
        if (!skb)
                return NULL;

        /*
         * Reserve the requested headroom and set the frame size; the
         * remaining tail_size bytes stay available as tailroom.
         */
        skb_reserve(skb, head_size);
        skb_put(skb, frame_size);

        /*
         * Populate skbdesc.
         */
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
                skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
                                                  skb->data,
                                                  skb->len,
                                                  DMA_FROM_DEVICE);
                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
        }

        return skb;
}
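
/*
 * For illustration (grounded in the sizing logic above): on a device
 * with hardware crypto support, head_size is 4 + 8 and tail_size is 8,
 * so the allocated buffer is laid out as
 *
 *   | 12 bytes headroom | desc_size + data_size frame | 8 bytes tailroom |
 *
 * where skb->data points at the frame, the headroom absorbs alignment
 * shifts and IV/EIV insertion, and the tailroom absorbs the ICV.
 */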

void rt2x00queue_map_txskb(struct queue_entry *entry)
{
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        skbdesc->skb_dma =
            dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
                                 DMA_FROM_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
        } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
                                 DMA_TO_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
        if (!entry->skb)
                return;

        rt2x00queue_unmap_skb(entry);
        dev_kfree_skb_any(entry->skb);
        entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
        unsigned int frame_length = skb->len;
        unsigned int align = ALIGN_SIZE(skb, 0);

        if (!align)
                return;

        skb_push(skb, align);
        memmove(skb->data, skb->data + align, frame_length);
        skb_trim(skb, frame_length);
}

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
        unsigned int frame_length = skb->len;
        unsigned int align = ALIGN_SIZE(skb, header_length);

        if (!align)
                return;

        skb_push(skb, align);
        memmove(skb->data, skb->data + align, frame_length);
        skb_trim(skb, frame_length);
}
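
/*
 * Worked example, assuming ALIGN_SIZE() yields the offset of
 * skb->data + header_length from a 4-byte boundary: if skb->data ends
 * in 0x...2, ALIGN_SIZE(skb, 0) is 2, so the frame is pushed 2 bytes
 * into the headroom, memmove()d to the new skb->data and trimmed back
 * to its original length, leaving it 4-byte aligned.
 */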

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
        unsigned int payload_length = skb->len - header_length;
        unsigned int header_align = ALIGN_SIZE(skb, 0);
        unsigned int payload_align = ALIGN_SIZE(skb, header_length);
        unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

        /*
         * Adjust the header alignment if the payload needs to be moved more
         * than the header.
         */
        if (payload_align > header_align)
                header_align += 4;

        /* There is nothing to do if no alignment is needed */
        if (!header_align)
                return;

        /* Reserve the amount of space needed in front of the frame */
        skb_push(skb, header_align);

        /*
         * Move the header.
         */
        memmove(skb->data, skb->data + header_align, header_length);

        /* Move the payload, if present and if required */
        if (payload_length && payload_align)
                memmove(skb->data + header_length + l2pad,
                        skb->data + header_length + l2pad + payload_align,
                        payload_length);

        /* Trim the skb to the correct size */
        skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
        /*
         * L2 padding is only present if the skb contains more than just the
         * IEEE 802.11 header.
         */
        unsigned int l2pad = (skb->len > header_length) ?
                                L2PAD_SIZE(header_length) : 0;

        if (!l2pad)
                return;

        memmove(skb->data + l2pad, skb->data, header_length);
        skb_pull(skb, l2pad);
}
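
/*
 * Worked example, assuming L2PAD_SIZE() returns the padding that brings
 * the header length up to a 4-byte boundary: for a 26-byte QoS data
 * header, L2PAD_SIZE(26) is 2, so 2 bytes of padding sit between header
 * and payload on the TX path, and rt2x00queue_remove_l2pad() undoes
 * this on RX by moving the header forward over the pad.
 */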

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
                                                 struct txentry_desc *txdesc)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
        unsigned long irqflags;

        if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
                return;

        __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

        if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
                return;

        /*
         * The hardware is not able to insert a sequence number. Assign a
         * software generated one here.
         *
         * This is wrong because beacons are not getting sequence
         * numbers assigned properly.
         *
         * A secondary problem exists for drivers that cannot toggle
         * sequence counting per-frame, since those will override the
         * sequence counter given by mac80211.
         */
        spin_lock_irqsave(&intf->seqlock, irqflags);

        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
                intf->seqno += 0x10;
        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

        spin_unlock_irqrestore(&intf->seqlock, irqflags);
}
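
/*
 * Note on the 0x10 increment above: in the IEEE 802.11 sequence control
 * field the fragment number occupies bits 0-3 and the sequence number
 * bits 4-15, so adding 0x10 to intf->seqno advances the sequence number
 * by one while leaving the fragment bits untouched.
 */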

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
                                                  struct txentry_desc *txdesc,
                                                  const struct rt2x00_rate *hwrate)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        unsigned int data_length;
        unsigned int duration;
        unsigned int residual;

        /*
         * Determine the IFS priority with which this frame should be sent.
         * Set ifs to IFS_SIFS when this is not the first fragment,
         * or when this fragment came after RTS/CTS.
         */
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
                txdesc->u.plcp.ifs = IFS_BACKOFF;
        else
                txdesc->u.plcp.ifs = IFS_SIFS;

        /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
        data_length = entry->skb->len + 4;
        data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

        /*
         * PLCP setup
         * Length calculation depends on OFDM/CCK rate.
         */
        txdesc->u.plcp.signal = hwrate->plcp;
        txdesc->u.plcp.service = 0x04;

        if (hwrate->flags & DEV_RATE_OFDM) {
                txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
                txdesc->u.plcp.length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
                 */
                residual = GET_DURATION_RES(data_length, hwrate->bitrate);
                duration = GET_DURATION(data_length, hwrate->bitrate);

                if (residual != 0) {
                        duration++;

                        /*
                         * Check if we need to set the Length Extension
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
                                txdesc->u.plcp.service |= 0x80;
                }

                txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
                txdesc->u.plcp.length_low = duration & 0xff;

                /*
                 * When preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                        txdesc->u.plcp.signal |= 0x08;
        }
}
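
/*
 * Worked CCK example, assuming GET_DURATION()/GET_DURATION_RES() divide
 * length * 8 * 10 by the bitrate in 100 kbit/s units (as elsewhere in
 * rt2x00): a 58-byte data_length at 11 Mbit/s (bitrate == 110) gives
 * 4640 / 110 = 42 us with a residual of 20, so the duration is rounded
 * up to 43 us and, since the residual is <= 30, the Length Extension
 * bit (0x80) is set in the SERVICE field.
 */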

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                                             struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        struct ieee80211_rate *rate;
        const struct rt2x00_rate *hwrate = NULL;

        memset(txdesc, 0, sizeof(*txdesc));

        /*
         * Header and frame information.
         */
        txdesc->length = entry->skb->len;
        txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

        /*
         * Check whether this frame is to be acked.
         */
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);

        /*
         * Check if this is an RTS/CTS frame
         */
        if (ieee80211_is_rts(hdr->frame_control) ||
            ieee80211_is_cts(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                if (ieee80211_is_rts(hdr->frame_control))
                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
                else
                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
                if (tx_info->control.rts_cts_rate_idx >= 0)
                        rate =
                            ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
        }

        /*
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.rates[0].count - 1;
        if (txdesc->retry_limit >= rt2x00dev->long_retry)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

        /*
         * Check if more fragments are pending
         */
        if (ieee80211_has_morefrags(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
        }

        /*
         * Check if more frames (!= fragments) are pending
         */
        if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);

        /*
         * Beacons and probe responses require the tsf timestamp
         * to be inserted into the frame.
         */
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

        if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
            !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

        /*
         * Determine rate modulation.
         */
        if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
                txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
        else if (txrate->flags & IEEE80211_TX_RC_MCS)
                txdesc->rate_mode = RATE_MODE_HT_MIX;
        else {
                rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
                hwrate = rt2x00_get_rate(rate->hw_value);
                if (hwrate->flags & DEV_RATE_OFDM)
                        txdesc->rate_mode = RATE_MODE_OFDM;
                else
                        txdesc->rate_mode = RATE_MODE_CCK;
        }

        /*
         * Apply TX descriptor handling by components
         */
        rt2x00crypto_create_tx_descriptor(entry, txdesc);
        rt2x00queue_create_tx_descriptor_seq(entry, txdesc);

        if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
                rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
        else
                rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
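
/*
 * Note that hwrate is only resolved for legacy CCK/OFDM rates above;
 * for greenfield or MCS rates it stays NULL, so the HT descriptor path
 * is expected to dereference it only for legacy fallback rates.
 */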

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
                                     struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        /*
         * This should not happen, we already checked the entry
         * was ours. When the hardware disagrees there has been
         * a queue corruption!
         */
        if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
                     rt2x00dev->ops->lib->get_entry_state(entry))) {
                ERROR(rt2x00dev,
                      "Corrupt queue %d, accessing entry which is not ours.\n"
                      "Please file bug report to %s.\n",
                      entry->queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Add the requested extra tx headroom in front of the skb.
         */
        skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
        memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

        /*
         * Call the driver's write_tx_data function, if it exists.
         */
        if (rt2x00dev->ops->lib->write_tx_data)
                rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

        /*
         * Map the skb to DMA.
         */
        if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
                rt2x00queue_map_txskb(entry);

        return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
                                            struct txentry_desc *txdesc)
{
        struct data_queue *queue = entry->queue;

        queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

        /*
         * All processing on the frame has been completed, this means
         * it is now ready to be dumped to userspace through debugfs.
         */
        rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
                                      struct txentry_desc *txdesc)
{
        /*
         * Check if we need to kick the queue. There are however a few rules:
         * 1) Don't kick unless this is the last frame in a burst.
         *    When the burst flag is set, this frame is always followed
         *    by another frame which is in some way related to it.
         *    This is true for fragments, RTS or CTS-to-self frames.
         * 2) Rule 1 can be broken when the available entries
         *    in the queue are fewer than a certain threshold.
         */
        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
                queue->rt2x00dev->ops->lib->kick_queue(queue);
}
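
/*
 * rt2x00queue_threshold() is assumed here to report whether the number
 * of free entries has dropped below queue->threshold (one tenth of the
 * queue size, see rt2x00queue_alloc_entries() below), so a nearly full
 * queue is kicked even in the middle of a burst.
 */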

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
                               bool local)
{
        struct ieee80211_tx_info *tx_info;
        struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
        struct txentry_desc txdesc;
        struct skb_frame_desc *skbdesc;
        u8 rate_idx, rate_flags;

        if (unlikely(rt2x00queue_full(queue)))
                return -ENOBUFS;

        if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
                                      &entry->flags))) {
                ERROR(queue->rt2x00dev,
                      "Arrived at non-free entry in the non-full queue %d.\n"
                      "Please file bug report to %s.\n",
                      queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Copy all TX descriptor information into txdesc;
         * after that we are free to use the skb->cb array
         * for our information.
         */
        entry->skb = skb;
        rt2x00queue_create_tx_descriptor(entry, &txdesc);

        /*
         * All information is retrieved from the skb->cb array,
         * now we should claim ownership of the driver part of that
         * array, preserving the bitrate index and flags.
         */
        tx_info = IEEE80211_SKB_CB(skb);
        rate_idx = tx_info->control.rates[0].idx;
        rate_flags = tx_info->control.rates[0].flags;
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;
        skbdesc->tx_rate_idx = rate_idx;
        skbdesc->tx_rate_flags = rate_flags;

        if (local)
                skbdesc->flags |= SKBDESC_NOT_MAC80211;

        /*
         * When hardware encryption is supported, and this frame
         * is to be encrypted, we should strip the IV/EIV data from
         * the frame so we can provide it to the driver separately.
         */
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
                if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
                        rt2x00crypto_tx_copy_iv(skb, &txdesc);
                else
                        rt2x00crypto_tx_remove_iv(skb, &txdesc);
        }

        /*
         * When DMA allocation is required we should guarantee to the
         * driver that the DMA is aligned to a 4-byte boundary.
         * However, some drivers require L2 padding to pad the payload
         * rather than the header. This could be a requirement for
         * PCI and USB devices, while header alignment is only valid
         * for PCI devices.
         */
        if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
                rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
        else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
                rt2x00queue_align_frame(entry->skb);

        /*
         * It is possible that the queue was corrupted and this
         * call failed. Since we always return NETDEV_TX_OK to mac80211,
         * this frame will simply be dropped.
         */
        if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
                clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                entry->skb = NULL;
                return -EIO;
        }

        set_bit(ENTRY_DATA_PENDING, &entry->flags);

        rt2x00queue_index_inc(queue, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);
        rt2x00queue_kick_tx_queue(queue, &txdesc);

        return 0;
}
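
/*
 * To summarize the TX path above: claim a free entry, build the TX
 * descriptor from the skb->cb data, strip or copy the IV for hardware
 * crypto, align or L2-pad the frame, hand the data to the driver via
 * rt2x00queue_write_tx_data(), then write the descriptor and finally
 * kick the queue.
 */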

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
                             struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);

        if (unlikely(!intf->beacon))
                return -ENOBUFS;

        mutex_lock(&intf->beacon_skb_mutex);

        /*
         * Clean up the beacon skb.
         */
        rt2x00queue_free_skb(intf->beacon);

        /*
         * Clear beacon (single bssid devices don't need to clear the beacon
         * since the beacon queue will get stopped anyway).
         */
        if (rt2x00dev->ops->lib->clear_beacon)
                rt2x00dev->ops->lib->clear_beacon(intf->beacon);

        mutex_unlock(&intf->beacon_skb_mutex);

        return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
                                     struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);
        struct skb_frame_desc *skbdesc;
        struct txentry_desc txdesc;

        if (unlikely(!intf->beacon))
                return -ENOBUFS;

        /*
         * Clean up the beacon skb.
         */
        rt2x00queue_free_skb(intf->beacon);

        intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
        if (!intf->beacon->skb)
                return -ENOMEM;

        /*
         * Copy all TX descriptor information into txdesc;
         * after that we are free to use the skb->cb array
         * for our information.
         */
        rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

        /*
         * Fill in skb descriptor
         */
        skbdesc = get_skb_frame_desc(intf->beacon->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = intf->beacon;

        /*
         * Send beacon to hardware.
         */
        rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

        return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
                              struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);
        int ret;

        mutex_lock(&intf->beacon_skb_mutex);
        ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
        mutex_unlock(&intf->beacon_skb_mutex);

        return ret;
}

void rt2x00queue_for_each_entry(struct data_queue *queue,
                                enum queue_index start,
                                enum queue_index end,
                                void (*fn)(struct queue_entry *entry))
{
        unsigned long irqflags;
        unsigned int index_start;
        unsigned int index_end;
        unsigned int i;

        if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index range (%d - %d)\n",
                      start, end);
                return;
        }

        /*
         * Only protect the range we are going to loop over;
         * if during our loop an extra entry is set to pending
         * it should not be kicked during this run, since it
         * is part of another TX operation.
         */
        spin_lock_irqsave(&queue->index_lock, irqflags);
        index_start = queue->index[start];
        index_end = queue->index[end];
        spin_unlock_irqrestore(&queue->index_lock, irqflags);

        /*
         * Start from the TX done pointer, this guarantees that we will
         * send out all frames in the correct order.
         */
        if (index_start < index_end) {
                for (i = index_start; i < index_end; i++)
                        fn(&queue->entries[i]);
        } else {
                for (i = index_start; i < queue->limit; i++)
                        fn(&queue->entries[i]);

                for (i = 0; i < index_end; i++)
                        fn(&queue->entries[i]);
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
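
/*
 * Worked example of the wrap-around handling above: with queue->limit
 * of 8, index_start of 6 and index_end of 2, fn() is called for
 * entries 6 and 7 and then for entries 0 and 1, preserving submission
 * order across the ring buffer boundary.
 */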

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
{
        struct queue_entry *entry;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Entry requested from invalid index type (%d)\n", index);
                return NULL;
        }

        spin_lock_irqsave(&queue->index_lock, irqflags);

        entry = &queue->entries[queue->index[index]];

        spin_unlock_irqrestore(&queue->index_lock, irqflags);

        return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                ERROR(queue->rt2x00dev,
                      "Index change on invalid index type (%d)\n", index);
                return;
        }

        spin_lock_irqsave(&queue->index_lock, irqflags);

        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;

        queue->last_action[index] = jiffies;

        if (index == Q_INDEX) {
                queue->length++;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
        }

        spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
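
/*
 * Q_INDEX acts as the producer index (incrementing it grows
 * queue->length) while Q_INDEX_DONE is the consumer index (incrementing
 * it shrinks the length and bumps the lifetime frame count), so the
 * entries between Q_INDEX_DONE and Q_INDEX are exactly those still in
 * flight.
 */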

void rt2x00queue_pause_queue(struct data_queue *queue)
{
        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            !test_bit(QUEUE_STARTED, &queue->flags) ||
            test_and_set_bit(QUEUE_PAUSED, &queue->flags))
                return;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                /*
                 * For TX queues, we have to disable the queue
                 * inside mac80211.
                 */
                ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            !test_bit(QUEUE_STARTED, &queue->flags) ||
            !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
                return;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                /*
                 * For TX queues, we have to enable the queue
                 * inside mac80211.
                 */
                ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
                break;
        case QID_RX:
                /*
                 * For RX we need to kick the queue now in order to
                 * receive frames.
                 */
                queue->rt2x00dev->ops->lib->kick_queue(queue);
                /* fall through */
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
        mutex_lock(&queue->status_lock);

        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
                mutex_unlock(&queue->status_lock);
                return;
        }

        set_bit(QUEUE_PAUSED, &queue->flags);

        queue->rt2x00dev->ops->lib->start_queue(queue);

        rt2x00queue_unpause_queue(queue);

        mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
        mutex_lock(&queue->status_lock);

        if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
                mutex_unlock(&queue->status_lock);
                return;
        }

        rt2x00queue_pause_queue(queue);

        queue->rt2x00dev->ops->lib->stop_queue(queue);

        mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
        unsigned int i;
        bool started;
        bool tx_queue =
                (queue->qid == QID_AC_VO) ||
                (queue->qid == QID_AC_VI) ||
                (queue->qid == QID_AC_BE) ||
                (queue->qid == QID_AC_BK);

        mutex_lock(&queue->status_lock);

        /*
         * If the queue has been started, we must stop it temporarily
         * to prevent any new frames from being queued on the device. If
         * we are not dropping the pending frames, the queue must
         * only be stopped in the software and not the hardware,
         * otherwise the queue will never become empty on its own.
         */
        started = test_bit(QUEUE_STARTED, &queue->flags);
        if (started) {
                /*
                 * Pause the queue.
                 */
                rt2x00queue_pause_queue(queue);

                /*
                 * If we are not supposed to drop any pending
                 * frames, this means we must force a start (=kick)
                 * to the queue to make sure the hardware will
                 * start transmitting.
                 */
                if (!drop && tx_queue)
                        queue->rt2x00dev->ops->lib->kick_queue(queue);
        }

        /*
         * Check if the driver supports flushing; we can only guarantee
         * full support for flushing if the driver is able
         * to cancel all pending frames (drop = true).
         */
        if (drop && queue->rt2x00dev->ops->lib->flush_queue)
                queue->rt2x00dev->ops->lib->flush_queue(queue);

        /*
         * When we don't want to drop any frames, or when
         * the driver doesn't fully flush the queue correctly,
         * we must wait for the queue to become empty
         * (at most 100 * 10 ms = 1 second).
         */
        for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
                msleep(10);

        /*
         * The queue flush has failed...
         */
        if (unlikely(!rt2x00queue_empty(queue)))
                WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

        /*
         * Restore the queue to the previous status.
         */
        if (started)
                rt2x00queue_unpause_queue(queue);

        mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        /*
         * rt2x00queue_start_queue will call ieee80211_wake_queue
         * for each queue after it has been properly initialized.
         */
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_start_queue(queue);

        rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        /*
         * rt2x00queue_stop_queue will call ieee80211_stop_queue
         * as well, but we are completely shutting everything down
         * now, so it is much safer to stop all TX queues at once,
         * and use rt2x00queue_stop_queue for cleaning up.
         */
        ieee80211_stop_queues(rt2x00dev->hw);

        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_stop_queue(queue);

        rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
        struct data_queue *queue;

        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_flush_queue(queue, drop);

        rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
        unsigned long irqflags;
        unsigned int i;

        spin_lock_irqsave(&queue->index_lock, irqflags);

        queue->count = 0;
        queue->length = 0;

        for (i = 0; i < Q_INDEX_MAX; i++) {
                queue->index[i] = 0;
                queue->last_action[i] = jiffies;
        }

        spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int i;

        queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);

                for (i = 0; i < queue->limit; i++)
                        rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
        }
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
                                     const struct data_queue_desc *qdesc)
{
        struct queue_entry *entries;
        unsigned int entry_size;
        unsigned int i;

        rt2x00queue_reset(queue);

        queue->limit = qdesc->entry_num;
        queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
        queue->data_size = qdesc->data_size;
        queue->desc_size = qdesc->desc_size;

        /*
         * Allocate all queue entries.
         */
        entry_size = sizeof(*entries) + qdesc->priv_size;
        entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
        (((char *)(__base)) + ((__limit) * (__esize)) + \
            ((__index) * (__psize)))

        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
                entries[i].queue = queue;
                entries[i].skb = NULL;
                entries[i].entry_idx = i;
                entries[i].priv_data =
                        QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
                                                sizeof(*entries), qdesc->priv_size);
        }

#undef QUEUE_ENTRY_PRIV_OFFSET

        queue->entries = entries;

        return 0;
}
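
/*
 * Illustration of the allocation above: kcalloc() reserves
 * limit * (sizeof(struct queue_entry) + priv_size) bytes, laid out as
 * all queue_entry structures first, followed by all driver private
 * blocks. With limit = 4 and priv_size = 16, entry 2's priv_data would
 * start at base + 4 * sizeof(struct queue_entry) + 2 * 16.
 */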

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                rt2x00queue_free_skb(&queue->entries[i]);
        }
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
        unsigned int i;
        struct sk_buff *skb;

        for (i = 0; i < queue->limit; i++) {
                skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
                if (!skb)
                        return -ENOMEM;
                queue->entries[i].skb = skb;
        }

        return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
        if (status)
                goto exit;

        tx_queue_for_each(rt2x00dev, queue) {
                status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
        if (status)
                goto exit;

        if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
                status = rt2x00queue_alloc_entries(rt2x00dev->atim,
                                                   rt2x00dev->ops->atim);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
        if (status)
                goto exit;

        return 0;

exit:
        ERROR(rt2x00dev, "Queue entries allocation failed.\n");

        rt2x00queue_uninitialize(rt2x00dev);

        return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        rt2x00queue_free_skbs(rt2x00dev->rx);

        queue_for_each(rt2x00dev, queue) {
                kfree(queue->entries);
                queue->entries = NULL;
        }
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
{
        mutex_init(&queue->status_lock);
        spin_lock_init(&queue->index_lock);

        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
        queue->txop = 0;
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
                !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

        /*
         * We need the following queues:
         * RX: 1
         * TX: ops->tx_queues
         * Beacon: 1
         * Atim: 1 (if required)
         */
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

        queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                ERROR(rt2x00dev, "Queue allocation failed.\n");
                return -ENOMEM;
        }

        /*
         * Initialize pointers.
         */
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
        rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

        /*
         * Initialize queue parameters.
         * RX: qid = QID_RX
         * TX: qid = QID_AC_VO + index
         * TX: cw_min: 2^5 = 32.
         * TX: cw_max: 2^10 = 1024.
         * BCN: qid = QID_BEACON
         * ATIM: qid = QID_ATIM
         */
        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

        qid = QID_AC_VO;
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);

        rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
        if (req_atim)
                rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

        return 0;
}
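
/*
 * Example layout of the queue array allocated above for a driver with
 * 4 TX queues and an ATIM queue (data_queues = 2 + 4 + 1 = 7):
 *
 *   queue[0]     RX
 *   queue[1..4]  TX (QID_AC_VO, QID_AC_VI, QID_AC_BE, QID_AC_BK)
 *   queue[5]     beacon
 *   queue[6]     ATIM
 */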

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rx);
        rt2x00dev->rx = NULL;
        rt2x00dev->tx = NULL;
        rt2x00dev->bcn = NULL;
}