/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
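
/*
 * Worked example of the RX buffer sizing above (illustrative numbers, not
 * taken from any particular chipset): with data_size = 2432 and
 * desc_size = 32, frame_size is 2464 bytes.  Alignment always adds
 * head_size = 4, and hardware crypto support raises that to 12 with
 * tail_size = 8, so dev_alloc_skb() is asked for 2484 bytes.  After
 * skb_reserve(skb, 12) and skb_put(skb, 2464) the skb has 12 bytes of
 * headroom for IV/EIV insertion and 8 bytes of tailroom for the ICV.
 */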

void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
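
/*
 * Worked example of the alignment math above (hypothetical address):
 * ALIGN_SIZE() evaluates the low two bits of skb->data + header, i.e. the
 * offset within a 4-byte word.  If skb->data sits at an address ending in
 * ...0x2, rt2x00queue_align_frame() computes align = 2, pushes the data
 * pointer back by 2 bytes and memmove()s the frame down, so the frame now
 * starts on a 4-byte boundary while skb->len is unchanged.
 */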

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
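
/*
 * Worked example of L2 padding (assuming the usual definition
 * L2PAD_SIZE(hdrlen) == (-(hdrlen) & 3)): a QoS data header is 26 bytes,
 * so l2pad = -26 & 3 = 2 and the payload is moved to offset 28, which is
 * 4-byte aligned.  For a plain 24-byte header l2pad is 0 and only the
 * header move (if any) is performed.
 */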

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int l2pad = L2PAD_SIZE(header_length);

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
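
/*
 * Why seqno advances by 0x10: in the 802.11 Sequence Control field the low
 * 4 bits are the fragment number and bits 4-15 the sequence number, so
 * adding 0x10 bumps the sequence number by exactly one.  Masking seq_ctrl
 * with IEEE80211_SCTL_FRAG (0x000f) keeps the fragment number and clears
 * the old sequence number before the new one is OR'ed in.
 */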

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}
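
/*
 * Worked example of the CCK duration math (assuming GET_DURATION expands
 * to (size * 8 * 10) / bitrate with bitrate in 100 kbit/s units): a
 * 96-byte frame at 11 Mbit/s (bitrate == 110) gives 96 * 80 = 7680, so
 * duration = 7680 / 110 = 69 with residual 90; the residual is non-zero
 * so duration is rounded up to 70, and since 90 > 30 the length-extension
 * bit stays clear.  On the OFDM path the 12-bit byte count is simply
 * split: data_length = 1058 yields length_high = 16 and length_low = 34
 * (16 * 64 + 34 == 1058).
 */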

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for a frame that has been
	 * injected through a monitor interface. The latter is needed for
	 * testing a monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
		rt2x00queue_map_txskb(rt2x00dev, entry->skb);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
				      struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
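
/*
 * Example of rule 2 above (threshold value taken from
 * rt2x00queue_alloc_entries() below, where it is set to one tenth of the
 * queue size): a 32-entry queue has threshold 4, so once fewer than 4
 * entries remain free the queue is kicked even in the middle of a burst,
 * trading burst efficiency for making room before the queue fills up.
 */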

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(entry, &txdesc);

	return 0;
}
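
/*
 * A sketch of the TX path this function implements (a summary of the code
 * above, not additional behaviour): build the txentry_desc from skb->cb,
 * then claim the driver part of skb->cb, strip or copy the IV when the
 * hardware encrypts, align or L2-pad the frame, hand the data to the
 * driver and DMA-map it, advance Q_INDEX, write the hardware descriptor,
 * and finally kick the queue when the burst ends or space runs low.
 */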

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware and enable beacon generation.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
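
/*
 * Mapping example (relying on the qid enum starting with the TX access
 * categories at 0): QID_AC_BE..QID_AC_VO index rt2x00dev->tx[] directly,
 * which is why "queue < rt2x00dev->ops->tx_queues" works as a bounds
 * check, while QID_RX, QID_BEACON and QID_ATIM are resolved explicitly
 * to rt2x00dev->rx, bcn[0] and bcn[1].
 */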

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	if (index == Q_INDEX) {
		queue->length++;
		queue->last_index = jiffies;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
		queue->last_index_done = jiffies;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
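
/*
 * Worked example of the circular indexing (illustrative sizes): with
 * limit = 32 and index[Q_INDEX] = 31, the increment wraps the producer
 * index back to 0.  Q_INDEX chases frames handed to the hardware and
 * Q_INDEX_DONE chases completed ones, so the bookkeeping above keeps
 * queue->length equal to the number of frames currently in flight.
 */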

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	queue->last_index = jiffies;
	queue->last_index_done = jiffies;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
			if (queue->qid == QID_RX)
				rt2x00queue_index_inc(queue, Q_INDEX);
		}
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
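
/*
 * Memory layout example for QUEUE_ENTRY_PRIV_OFFSET (hypothetical sizes):
 * the single kcalloc() above packs all queue_entry structs first and all
 * driver-private blobs after them.  With limit = 4, sizeof(*entries) = 64
 * and priv_size = 32, entry 2's private data lives at
 * base + 4 * 64 + 2 * 32 = base + 320, so one allocation serves both
 * arrays without per-entry kmalloc() calls.
 */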

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}
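
/*
 * Layout example for the queue array allocated above (hypothetical
 * configuration): with ops->tx_queues = 4 and an ATIM queue required,
 * data_queues = 7 and the array reads
 *   queue[0] = RX, queue[1..4] = AC_BE/AC_BK/AC_VI/AC_VO,
 *   queue[5] = beacon, queue[6] = ATIM,
 * which matches the rx/tx/bcn pointers initialized from the same base.
 */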

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}