// SPDX-License-Identifier: GPL-2.0-or-later
/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for the ICV data as tailroom.
	 */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Make sure the frame has the requested number of bytes
	 * available in its head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
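
/*
 * Illustrative RX buffer layout as produced above on a device with
 * hardware crypto support (head_size = 4 + 8, tail_size = 8); a sketch
 * for orientation, not part of the original driver:
 *
 *   |<- 12 bytes headroom ->|<-- frame_size data -->|<- 8 bytes tailroom ->|
 *
 * The headroom leaves room for IV/EIV insertion plus 4-byte realignment,
 * the tailroom for the ICV.
 */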

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	rt2x00lib_dmadone(entry);
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
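
/*
 * Worked example for rt2x00queue_align_frame(), assuming the usual
 * ALIGN_SIZE() definition from the rt2x00 headers
 * (((unsigned long)(skb->data + header)) & 3, with header == 0 here):
 * if skb->data ends in 0x...6, align == 2, so skb_push(skb, 2) rewinds
 * to an aligned address, the memmove() slides the frame down two bytes,
 * and skb_trim() restores the original length. The frame now starts on
 * a 4-byte boundary.
 */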

/*
 * H/W needs L2 padding between the header and the payload if the
 * header size is not 4-byte aligned.
 */
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	skb_push(skb, l2pad);
	memmove(skb->data, skb->data + l2pad, hdr_len);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, hdr_len);
	skb_pull(skb, l2pad);
}
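
/*
 * L2 padding example, assuming the L2PAD_SIZE(hdrlen) == (-(hdrlen) & 3)
 * definition from the rt2x00 headers: a 26-byte QoS data header gives
 * l2pad == 2, so rt2x00queue_insert_l2pad() pushes 2 bytes and moves the
 * header down, leaving a 2-byte gap that realigns the payload to a
 * 4-byte boundary; rt2x00queue_remove_l2pad() performs the inverse move
 * before the frame is handed back.
 */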

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS) and
		 * management frames. To work around the problem let's
		 * generate the seqno in software, except for beacons which
		 * are transmitted periodically by H/W, hence hardware has
		 * to assign the seqno for them.
		 */
		if (ieee80211_is_beacon(hdr->frame_control)) {
			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
			/* H/W will generate sequence number */
			return;
		}

		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}
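
/*
 * Note on the 0x10 increment above: in the 802.11 Sequence Control
 * field the low four bits (IEEE80211_SCTL_FRAG) hold the fragment
 * number and bits 4-15 hold the sequence number, so adding 0x10
 * advances the sequence number by exactly one while the mask/or pair
 * preserves the fragment bits of the outgoing frame.
 */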

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
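
/*
 * Worked CCK example, assuming GET_DURATION()/GET_DURATION_RES() from
 * the rt2x00 headers compute (len * 8 * 10) / bitrate and its remainder,
 * with hwrate->bitrate in units of 100 kbit/s: a 1534-byte frame at
 * 11 Mbps (bitrate == 110) gives duration = 122720 / 110 = 1115 us with
 * residual = 70, so the duration is rounded up to 1116 us; the residual
 * is above 30, so the 11 Mbps Length Extension bit stays clear.
 */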

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;
	u8 density = 0;

	if (sta) {
		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->ht_cap.ampdu_density;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
	 * mcs rate to be used
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings zero. */
		return;
	}

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
	}

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which in some way is related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out but just don't track
	 * it in our bar list. And as a result we will report it to mac80211
	 * back as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list
	 * such that we can use RCU for less overhead in the RX path, since
	 * sending BARs and processing the corresponding BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}
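
/*
 * Note on the list above: bar_list follows the usual RCU list pattern.
 * This TX path is the writer and serializes additions with
 * bar_list_lock; the RX side is expected to walk the list under
 * rcu_read_lock() (e.g. with list_for_each_entry_rcu()) when matching
 * incoming BlockAcks. That lookup lives elsewhere in rt2x00lib; this
 * comment only sketches the locking contract implied by
 * list_add_tail_rcu().
 */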

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(),
	 * so we do this under queue->tx_lock. Bottom halves were already
	 * disabled before the ieee80211_xmit() call.
	 */
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

	spin_unlock(&queue->tx_lock);
	return ret;
}
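
/*
 * Quick reference for the TX submission sequence implemented above
 * (purely descriptive, mirroring the code):
 *
 *   1. rt2x00queue_create_tx_descriptor() - snapshot mac80211 tx info
 *   2. IV/EIV strip or copy               - for hardware encryption
 *   3. L2 padding or frame alignment      - satisfy DMA requirements
 *   4. claim a queue entry                - under queue->tx_lock
 *   5. rt2x00queue_write_tx_data()        - headroom, driver data, DMA map
 *   6. rt2x00queue_write_tx_descriptor()  - hand descriptor to the driver
 *   7. rt2x00queue_kick_tx_queue()        - start transmission if needed
 */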

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
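
/*
 * Wrap-around example for rt2x00queue_for_each_entry(): with
 * queue->limit == 8, index_start == 6 and index_end == 2 the ring has
 * wrapped, so the else-branch visits entries 6, 7, 0, 1 in that order,
 * preserving submission order across the wrap.
 */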

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
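
/*
 * The index bookkeeping above turns queue->entries[] into a circular
 * buffer: Q_INDEX advances when a frame is queued (queue->length grows)
 * and Q_INDEX_DONE advances when the frame completes (length shrinks,
 * the lifetime counter queue->count grows); both wrap at queue->limit.
 */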

static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * to the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if the driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), queue->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
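
/*
 * Memory layout produced by rt2x00queue_alloc_entries(), shown here for
 * queue->limit == 4: one kcalloc() region holds all entry structs first
 * and all driver-private blocks after them,
 *
 *   [entry0][entry1][entry2][entry3][priv0][priv1][priv2][priv3]
 *
 * which is exactly what QUEUE_ENTRY_PRIV_OFFSET() computes: skip
 * limit * sizeof(entry) bytes, then index * priv_size into the
 * private area.
 */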

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00queue_free_skb(&queue->entries[i]);
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	if (status)
		goto exit;

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);

	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}
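
/*
 * Example: a TX queue with limit == 64 gets threshold ==
 * DIV_ROUND_UP(64, 10) == 7. Assuming the rt2x00queue_threshold()
 * helper from the queue header (available entries < threshold), the
 * queue counts as congested once fewer than 7 free entries remain;
 * rt2x00queue_write_tx_frame() then pauses it and
 * rt2x00queue_kick_tx_queue() kicks bursts out early.
 */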

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
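
/*
 * Resulting queue array layout, for a device with four TX queues and
 * an ATIM queue (data_queues == 2 + 4 + 1 == 7):
 *
 *   queue[0]     RX
 *   queue[1..4]  TX (QID_AC_VO, QID_AC_VI, QID_AC_BE, QID_AC_BK)
 *   queue[5]     beacon
 *   queue[6]     ATIM
 *
 * rt2x00queue_free() below only needs to kfree() rt2x00dev->rx because
 * the whole array came from the single kcalloc() above.
 */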

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}