// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common code for mac80211 Prism54 drivers
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 *
 * Based on:
 * - the islsm (softmac prism54) driver, which is:
 *   Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
 * - stlc45xx driver
 *   Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
 */

#include <linux/export.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>

#include <net/mac80211.h>

#include "p54.h"
#include "lmac.h"

#ifdef P54_MM_DEBUG
static void p54_dump_tx_queue(struct p54_common *priv)
{
	unsigned long flags;
	struct ieee80211_tx_info *info;
	struct p54_tx_info *range;
	struct sk_buff *skb;
	struct p54_hdr *hdr;
	unsigned int i = 0;
	u32 prev_addr;
	u32 largest_hole = 0, free;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	wiphy_debug(priv->hw->wiphy, "/ --- tx queue dump (%d entries) ---\n",
		    skb_queue_len(&priv->tx_queue));

	prev_addr = priv->rx_start;
	skb_queue_walk(&priv->tx_queue, skb) {
		info = IEEE80211_SKB_CB(skb);
		range = (void *) info->rate_driver_data;
		hdr = (void *) skb->data;

		free = range->start_addr - prev_addr;
		wiphy_debug(priv->hw->wiphy,
			    "| [%02d] => [skb:%p skb_len:0x%04x "
			    "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} "
			    "mem:{start:%04x end:%04x, free:%d}]\n",
			    i++, skb, skb->len,
			    le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len),
			    le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type),
			    range->start_addr, range->end_addr, free);

		prev_addr = range->end_addr;
		largest_hole = max(largest_hole, free);
	}
	free = priv->rx_end - prev_addr;
	largest_hole = max(largest_hole, free);
	wiphy_debug(priv->hw->wiphy,
		    "\\ --- [free: %d], largest free block: %d ---\n",
		    free, largest_hole);
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
}
#endif /* P54_MM_DEBUG */

/*
 * So, the firmware is somewhat stupid and doesn't know where in its
 * memory incoming data should go. By poking around in the firmware, we
 * can find some unused memory to upload our packets to. However, data
 * that we want the card to TX needs to stay intact until the card has
 * told us that it is done with it. This function finds empty places we
 * can upload to and marks allocated areas as reserved if necessary.
 * p54_find_and_unlink_skb or p54_free_skb frees allocated areas.
 */
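/*
 * Rough sketch of the device memory this allocator manages
 * (illustrative only, addresses grow to the right):
 *
 *   rx_start                                              rx_end
 *      |--[frame A]---hole---[frame B]--------hole-----------|
 *
 * Every queued frame owns [range->start_addr, range->end_addr); a new
 * frame is placed into the first hole that can hold priv->headroom +
 * skb->len + priv->tailroom (rounded up to a multiple of four bytes).
 */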
static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
{
	struct sk_buff *entry, *target_skb = NULL;
	struct ieee80211_tx_info *info;
	struct p54_tx_info *range;
	struct p54_hdr *data = (void *) skb->data;
	unsigned long flags;
	u32 last_addr = priv->rx_start;
	u32 target_addr = priv->rx_start;
	u16 len = priv->headroom + skb->len + priv->tailroom + 3;

	info = IEEE80211_SKB_CB(skb);
	range = (void *) info->rate_driver_data;
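	/*
	 * Round the block up to a multiple of four bytes: the "+ 3" in
	 * the initializer of len above plus the mask below.
	 */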
	len = (range->extra_len + len) & ~0x3;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) {
		/*
		 * The tx_queue is now really full.
		 *
		 * TODO: check if the device has crashed and reset it.
		 */
		spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
		return -EBUSY;
	}

	skb_queue_walk(&priv->tx_queue, entry) {
		u32 hole_size;
		info = IEEE80211_SKB_CB(entry);
		range = (void *) info->rate_driver_data;
		hole_size = range->start_addr - last_addr;

		if (!target_skb && hole_size >= len) {
			target_skb = entry->prev;
			hole_size -= len;
			target_addr = last_addr;
			break;
		}
		last_addr = range->end_addr;
	}
	if (unlikely(!target_skb)) {
		if (priv->rx_end - last_addr >= len) {
			target_skb = skb_peek_tail(&priv->tx_queue);
			if (target_skb) {
				info = IEEE80211_SKB_CB(target_skb);
				range = (void *)info->rate_driver_data;
				target_addr = range->end_addr;
			}
		} else {
			spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
			return -ENOSPC;
		}
	}

	info = IEEE80211_SKB_CB(skb);
	range = (void *) info->rate_driver_data;
	range->start_addr = target_addr;
	range->end_addr = target_addr + len;
	data->req_id = cpu_to_le32(target_addr + priv->headroom);
	if (IS_DATA_FRAME(skb) &&
	    unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON))
		priv->beacon_req_id = data->req_id;

	__skb_queue_after(&priv->tx_queue, target_skb, skb);
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
	return 0;
}

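/*
 * Overview of the tx path in this file: frames handed to p54_tx() are
 * queued on tx_pending first; p54_tx_pending() then tries to carve out
 * device memory for the head of that queue and, on success, passes the
 * frame to the driver's tx() callback. If no memory is available, the
 * frame is put back and retried from p54_wake_queues() once a completed
 * frame has freed up room.
 */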
static void p54_tx_pending(struct p54_common *priv)
{
	struct sk_buff *skb;
	int ret;

	skb = skb_dequeue(&priv->tx_pending);
	if (unlikely(!skb))
		return;

	ret = p54_assign_address(priv, skb);
	if (unlikely(ret))
		skb_queue_head(&priv->tx_pending, skb);
	else
		priv->tx(priv->hw, skb);
}

static void p54_wake_queues(struct p54_common *priv)
{
	unsigned long flags;
	unsigned int i;

	if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
		return;

	p54_tx_pending(priv);

	spin_lock_irqsave(&priv->tx_stats_lock, flags);
	for (i = 0; i < priv->hw->queues; i++) {
		if (priv->tx_stats[i + P54_QUEUE_DATA].len <
		    priv->tx_stats[i + P54_QUEUE_DATA].limit)
			ieee80211_wake_queue(priv->hw, i);
	}
	spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
}

static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
				       struct sk_buff *skb,
				       const u16 p54_queue)
{
	struct p54_tx_queue_stats *queue;
	unsigned long flags;

	if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
		return -EINVAL;

	queue = &priv->tx_stats[p54_queue];

	spin_lock_irqsave(&priv->tx_stats_lock, flags);
	if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) {
		spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
		return -ENOSPC;
	}

	queue->len++;
	queue->count++;

	if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) {
		u16 ac_queue = p54_queue - P54_QUEUE_DATA;
		ieee80211_stop_queue(priv->hw, ac_queue);
	}

	spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
	return 0;
}

static void p54_tx_qos_accounting_free(struct p54_common *priv,
				       struct sk_buff *skb)
{
	if (IS_DATA_FRAME(skb)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->tx_stats_lock, flags);
		priv->tx_stats[GET_HW_QUEUE(skb)].len--;
		spin_unlock_irqrestore(&priv->tx_stats_lock, flags);

		if (unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) {
			if (priv->beacon_req_id == GET_REQ_ID(skb)) {
				/* this beacon set is no longer active */
				priv->beacon_req_id = 0;
			}
			complete(&priv->beacon_comp);
		}
	}
	p54_wake_queues(priv);
}

void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	struct p54_common *priv = dev->priv;
	if (unlikely(!skb))
		return;

	skb_unlink(skb, &priv->tx_queue);
	p54_tx_qos_accounting_free(priv, skb);
	ieee80211_free_txskb(dev, skb);
}
EXPORT_SYMBOL_GPL(p54_free_skb);

static struct sk_buff *p54_find_and_unlink_skb(struct p54_common *priv,
					       const __le32 req_id)
{
	struct sk_buff *entry;
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk(&priv->tx_queue, entry) {
		struct p54_hdr *hdr = (struct p54_hdr *) entry->data;

		if (hdr->req_id == req_id) {
			__skb_unlink(entry, &priv->tx_queue);
			spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
			p54_tx_qos_accounting_free(priv, entry);
			return entry;
		}
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
	return NULL;
}

void p54_tx(struct p54_common *priv, struct sk_buff *skb)
{
	skb_queue_tail(&priv->tx_pending, skb);
	p54_tx_pending(priv);
}

static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
{
	if (priv->rxhw != 5) {
		return ((rssi * priv->cur_rssi->mul) / 64 +
			priv->cur_rssi->add) / 4;
	} else {
		/*
		 * TODO: find the correct formula
		 */
		return rssi / 2 - 110;
	}
}

/*
 * Even if the firmware is capable of dealing with incoming traffic
 * while dozing, we have to be prepared in case mac80211 uses PS-POLL
 * to retrieve outstanding frames from our AP.
 * (see comment in net/mac80211/mlme.c @ line 1993)
 */
static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *) skb->data;
	struct ieee80211_tim_ie *tim_ie;
	u8 *tim;
	u8 tim_len;
	bool new_psm;

	/* only beacons have a TIM IE */
	if (!ieee80211_is_beacon(hdr->frame_control))
		return;

	if (!priv->aid)
		return;

	/* only consider beacons from the associated BSSID */
	if (!ether_addr_equal_64bits(hdr->addr3, priv->bssid))
		return;

	tim = p54_find_ie(skb, WLAN_EID_TIM);
	if (!tim)
		return;

	tim_len = tim[1];
	tim_ie = (struct ieee80211_tim_ie *) &tim[2];

	new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid);
	if (new_psm != priv->powersave_override) {
		priv->powersave_override = new_psm;
		p54_set_ps(priv);
	}
}

static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
{
	struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data;
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	u16 freq = le16_to_cpu(hdr->freq);
	size_t header_len = sizeof(*hdr);
	u32 tsf32;
	u8 rate = hdr->rate & 0xf;

	/*
	 * If the device is in an unspecified state we have to
	 * ignore all data frames. Else we could end up with a
	 * nasty crash.
	 */
	if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
		return 0;

	if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD)))
		return 0;

	if (hdr->decrypt_status == P54_DECRYPT_OK)
		rx_status->flag |= RX_FLAG_DECRYPTED;
	if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) ||
	    (hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP))
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
	if (hdr->rate & 0x10)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
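	/*
	 * The firmware rate index apparently counts the four CCK rates
	 * first and the OFDM rates after them; 5 GHz has no CCK rates,
	 * hence the offset below.
	 */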
	if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
		rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
	else
		rx_status->rate_idx = rate;

	rx_status->freq = freq;
	rx_status->band = priv->hw->conf.chandef.chan->band;
	rx_status->antenna = hdr->antenna;

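	/*
	 * The device reports only the low 32 bits of its TSF, so keep a
	 * software copy of the high word and bump it whenever the
	 * reported value wraps; mactime below is the full 64-bit stamp.
	 */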
	tsf32 = le32_to_cpu(hdr->tsf32);
	if (tsf32 < priv->tsf_low32)
		priv->tsf_high32++;
	rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
	priv->tsf_low32 = tsf32;

	/* LMAC API Page 10/29 - s_lm_data_in - clock
	 * "usec accurate timestamp of hardware clock
	 * at end of frame (before OFDM SIFS EOF padding"
	 */
	rx_status->flag |= RX_FLAG_MACTIME_END;

	if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
		header_len += hdr->align[0];

	skb_pull(skb, header_len);
	skb_trim(skb, le16_to_cpu(hdr->len));
	if (unlikely(priv->hw->conf.flags & IEEE80211_CONF_PS))
		p54_pspoll_workaround(priv, skb);

	ieee80211_rx_irqsafe(priv->hw, skb);

	ieee80211_queue_delayed_work(priv->hw, &priv->work,
			   msecs_to_jiffies(P54_STATISTICS_UPDATE));

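	/* the skb now belongs to mac80211, tell p54_rx() not to reuse it */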
	return -1;
}

static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
{
	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
	struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data;
	struct ieee80211_tx_info *info;
	struct p54_hdr *entry_hdr;
	struct p54_tx_data *entry_data;
	struct sk_buff *entry;
	unsigned int pad = 0, frame_len;
	int count, idx;

	entry = p54_find_and_unlink_skb(priv, hdr->req_id);
	if (unlikely(!entry))
		return;

	frame_len = entry->len;
	info = IEEE80211_SKB_CB(entry);
	entry_hdr = (struct p54_hdr *) entry->data;
	entry_data = (struct p54_tx_data *) entry_hdr->data;
	priv->stats.dot11ACKFailureCount += payload->tries - 1;

	/*
	 * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are
	 * generated by the driver. Therefore tx_status is bogus
	 * and we don't want to confuse the mac80211 stack.
	 */
	if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) {
		dev_kfree_skb_any(entry);
		return;
	}

	/*
	 * Clear manually, ieee80211_tx_info_clear_status would
	 * clear the counts too and we need them.
	 */
	memset(&info->status.ack_signal, 0,
	       sizeof(struct ieee80211_tx_info) -
	       offsetof(struct ieee80211_tx_info, status.ack_signal));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
			      status.ack_signal) != 20);

	if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
		pad = entry_data->align[0];

	/* walk through the rates array and adjust the counts */
	count = payload->tries;
	for (idx = 0; idx < 4; idx++) {
		if (count >= info->status.rates[idx].count) {
			count -= info->status.rates[idx].count;
		} else if (count > 0) {
			info->status.rates[idx].count = count;
			count = 0;
		} else {
			info->status.rates[idx].idx = -1;
			info->status.rates[idx].count = 0;
		}
	}

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	     !(payload->status & P54_TX_FAILED))
		info->flags |= IEEE80211_TX_STAT_ACK;
	if (payload->status & P54_TX_PSM_CANCELLED)
		info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	info->status.ack_signal = p54_rssi_to_dbm(priv,
						  (int)payload->ack_rssi);

	/* Undo all changes to the frame. */
	switch (entry_data->key_type) {
	case P54_CRYPTO_TKIPMICHAEL: {
		u8 *iv = (u8 *)(entry_data->align + pad +
				entry_data->crypt_offset);

		/* Restore the original TKIP IV. */
		iv[2] = iv[0];
		iv[0] = iv[1];
		iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */

		frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */
		break;
	}
	case P54_CRYPTO_AESCCMP:
		frame_len -= 8; /* remove CCMP_MIC */
		break;
	case P54_CRYPTO_WEP:
		frame_len -= 4; /* remove WEP_ICV */
		break;
	}

	skb_trim(entry, frame_len);
	skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
	ieee80211_tx_status_irqsafe(priv->hw, entry);
}

static void p54_rx_eeprom_readback(struct p54_common *priv,
				   struct sk_buff *skb)
{
	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
	struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data;
	struct sk_buff *tmp;

	if (!priv->eeprom)
		return;

	if (priv->fw_var >= 0x509) {
		memcpy(priv->eeprom, eeprom->v2.data,
		       le16_to_cpu(eeprom->v2.len));
	} else {
		memcpy(priv->eeprom, eeprom->v1.data,
		       le16_to_cpu(eeprom->v1.len));
	}

	priv->eeprom = NULL;
	tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
	dev_kfree_skb_any(tmp);
	complete(&priv->eeprom_comp);
}

static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
{
	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
	struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
	struct sk_buff *tmp;
	struct ieee80211_channel *chan;
	unsigned int i, rssi, tx, cca, dtime, dtotal, dcca, dtx, drssi, unit;
	u32 tsf32;

	if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
		return;

	tsf32 = le32_to_cpu(stats->tsf32);
	if (tsf32 < priv->tsf_low32)
		priv->tsf_high32++;
	priv->tsf_low32 = tsf32;

	priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail);
	priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success);
	priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs);

	priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise));

	/*
	 * STSW450X LMAC API page 26 - 3.8 Statistics
	 * "The exact measurement period can be derived from the
	 * timestamp member".
	 */
	dtime = tsf32 - priv->survey_raw.timestamp;

	/*
	 * STSW450X LMAC API page 26 - 3.8.1 Noise histogram
	 * The LMAC samples RSSI, CCA and transmit state at regular
	 * periods (typically 8 times per 1k [as in 1024] usec).
	 */
	cca = le32_to_cpu(stats->sample_cca);
	tx = le32_to_cpu(stats->sample_tx);
	rssi = 0;
	for (i = 0; i < ARRAY_SIZE(stats->sample_noise); i++)
		rssi += le32_to_cpu(stats->sample_noise[i]);

	dcca = cca - priv->survey_raw.cached_cca;
	drssi = rssi - priv->survey_raw.cached_rssi;
	dtx = tx - priv->survey_raw.cached_tx;
	dtotal = dcca + drssi + dtx;

	/*
	 * Update the statistics when more than a second has passed
	 * since the last call, or when an update is badly needed.
	 */
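	/*
	 * dtime usec have passed and dtotal samples were taken in that
	 * window, so each sample accounts for roughly dtime / dtotal
	 * usec of air time ("unit" below).
	 */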
	if (dtotal && (priv->update_stats || dtime >= USEC_PER_SEC) &&
	    dtime >= dtotal) {
		priv->survey_raw.timestamp = tsf32;
		priv->update_stats = false;
		unit = dtime / dtotal;

		if (dcca) {
			priv->survey_raw.cca += dcca * unit;
			priv->survey_raw.cached_cca = cca;
		}
		if (dtx) {
			priv->survey_raw.tx += dtx * unit;
			priv->survey_raw.cached_tx = tx;
		}
		if (drssi) {
			priv->survey_raw.rssi += drssi * unit;
			priv->survey_raw.cached_rssi = rssi;
		}

		/* 1024 usec / 8 times = 128 usec / time */
		if (!(priv->phy_ps || priv->phy_idle))
			priv->survey_raw.active += dtotal * unit;
		else
			priv->survey_raw.active += (dcca + dtx) * unit;
	}

	chan = priv->curchan;
	if (chan) {
		struct survey_info *survey = &priv->survey[chan->hw_value];
		survey->noise = clamp(priv->noise, -128, 127);
		survey->time = priv->survey_raw.active;
		survey->time_tx = priv->survey_raw.tx;
		survey->time_busy = priv->survey_raw.tx +
			priv->survey_raw.cca;
		do_div(survey->time, 1024);
		do_div(survey->time_tx, 1024);
		do_div(survey->time_busy, 1024);
	}

	tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
	dev_kfree_skb_any(tmp);
	complete(&priv->stat_comp);
}

static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
{
	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
	struct p54_trap *trap = (struct p54_trap *) hdr->data;
	u16 event = le16_to_cpu(trap->event);
	u16 freq = le16_to_cpu(trap->frequency);

	switch (event) {
	case P54_TRAP_BEACON_TX:
		break;
	case P54_TRAP_RADAR:
		wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq);
		break;
	case P54_TRAP_NO_BEACON:
		if (priv->vif)
			ieee80211_beacon_loss(priv->vif);
		break;
	case P54_TRAP_SCAN:
		break;
	case P54_TRAP_TBTT:
		break;
	case P54_TRAP_TIMER:
		break;
	case P54_TRAP_FAA_RADIO_OFF:
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		break;
	case P54_TRAP_FAA_RADIO_ON:
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, false);
		break;
	default:
		wiphy_info(priv->hw->wiphy, "received event:%x freq:%d\n",
			   event, freq);
		break;
	}
}

static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb)
{
	struct p54_hdr *hdr = (struct p54_hdr *) skb->data;

	switch (le16_to_cpu(hdr->type)) {
	case P54_CONTROL_TYPE_TXDONE:
		p54_rx_frame_sent(priv, skb);
		break;
	case P54_CONTROL_TYPE_TRAP:
		p54_rx_trap(priv, skb);
		break;
	case P54_CONTROL_TYPE_BBP:
		break;
	case P54_CONTROL_TYPE_STAT_READBACK:
		p54_rx_stats(priv, skb);
		break;
	case P54_CONTROL_TYPE_EEPROM_READBACK:
		p54_rx_eeprom_readback(priv, skb);
		break;
	default:
		wiphy_debug(priv->hw->wiphy,
			    "not handling 0x%02x type control frame\n",
			    le16_to_cpu(hdr->type));
		break;
	}
	return 0;
}

/* returns zero if skb can be reused */
int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	struct p54_common *priv = dev->priv;
	u16 type = le16_to_cpu(*((__le16 *)skb->data));

	if (type & P54_HDR_FLAG_CONTROL)
		return p54_rx_control(priv, skb);
	else
		return p54_rx_data(priv, skb);
}
EXPORT_SYMBOL_GPL(p54_rx);

static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
				struct ieee80211_tx_info *info,
				struct ieee80211_sta *sta,
				u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
				bool *burst_possible)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_data_qos(hdr->frame_control))
		*burst_possible = true;
	else
		*burst_possible = false;

	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;

	if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)
		*flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;

	if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		*flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;

	*queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA;

	switch (priv->mode) {
	case NL80211_IFTYPE_MONITOR:
		/*
		 * We have to set P54_HDR_FLAG_DATA_OUT_PROMISC for
		 * every frame in promiscuous/monitor mode.
		 * see STSW45x0C LMAC API - page 12.
		 */
		*aid = 0;
		*flags |= P54_HDR_FLAG_DATA_OUT_PROMISC;
		break;
	case NL80211_IFTYPE_STATION:
		*aid = 1;
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
		if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
			*aid = 0;
			*queue = P54_QUEUE_CAB;
			return;
		}

		if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) {
			if (ieee80211_is_probe_resp(hdr->frame_control)) {
				*aid = 0;
				*flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP |
					  P54_HDR_FLAG_DATA_OUT_NOCANCEL;
				return;
			} else if (ieee80211_is_beacon(hdr->frame_control)) {
				*aid = 0;

				if (info->flags & IEEE80211_TX_CTL_INJECTED) {
					/*
					 * Injecting beacons on top of an AP
					 * is not a good idea... nevertheless,
					 * it should be doable.
					 */

					return;
				}

				*flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP;
				*queue = P54_QUEUE_BEACON;
				*extra_len = IEEE80211_MAX_TIM_LEN;
				return;
			}
		}

		if (sta)
			*aid = sta->aid;
		break;
	}
}

static u8 p54_convert_algo(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return P54_CRYPTO_WEP;
	case WLAN_CIPHER_SUITE_TKIP:
		return P54_CRYPTO_TKIPMICHAEL;
	case WLAN_CIPHER_SUITE_CCMP:
		return P54_CRYPTO_AESCCMP;
	default:
		return 0;
	}
}

void p54_tx_80211(struct ieee80211_hw *dev,
		  struct ieee80211_tx_control *control,
		  struct sk_buff *skb)
{
	struct p54_common *priv = dev->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct p54_tx_info *p54info;
	struct p54_hdr *hdr;
	struct p54_tx_data *txhdr;
	unsigned int padding, len, extra_len = 0;
	int i, j, ridx;
	u16 hdr_flags = 0, aid = 0;
	u8 rate, queue = 0, crypt_offset = 0;
	u8 cts_rate = 0x20;
	u8 rc_flags;
	u8 calculated_tries[4];
	u8 nrates = 0, nremaining = 8;
	bool burst_allowed = false;

	p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
			    &hdr_flags, &aid, &burst_allowed);

	if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
		ieee80211_free_txskb(dev, skb);
		return;
	}

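	/*
	 * Extra pad bytes so that the p54 headers pushed below start on
	 * a 32-bit boundary; the amount is recorded in txhdr->align[0].
	 */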
	padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
	len = skb->len;

	if (info->control.hw_key) {
		crypt_offset = ieee80211_get_hdrlen_from_skb(skb);
		if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			u8 *iv = (u8 *)(skb->data + crypt_offset);
			/*
			 * The firmware expects the IV in this
			 * special format.
			 */
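			/*
			 * mac80211 hands us [TSC1, WEPSeed, TSC0]; the
			 * swap below turns that into [TSC0, TSC1, 0].
			 * It is undone again in p54_rx_frame_sent().
			 */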
			iv[1] = iv[0];
			iv[0] = iv[2];
			iv[2] = 0;
		}
	}

	txhdr = skb_push(skb, sizeof(*txhdr) + padding);
	hdr = skb_push(skb, sizeof(*hdr));

	if (padding)
		hdr_flags |= P54_HDR_FLAG_DATA_ALIGN;
	hdr->type = cpu_to_le16(aid);
	hdr->rts_tries = info->control.rates[0].count;

	/*
	 * we register the rates in perfect order, and
	 * RTS/CTS won't happen on 5 GHz
	 */
	cts_rate = info->control.rts_cts_rate_idx;

	memset(&txhdr->rateset, 0, sizeof(txhdr->rateset));

	/* see how many rates got used */
	for (i = 0; i < dev->max_rates; i++) {
		if (info->control.rates[i].idx < 0)
			break;
		nrates++;
	}

	/* limit tries to 8/nrates per rate */
	for (i = 0; i < nrates; i++) {
		/*
		 * The magic expression here is equivalent to 8/nrates for
		 * all values that matter, but avoids division and jumps.
		 * Note that nrates can only take the values 1 through 4.
		 */
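		/*
		 * For reference, ((15 >> nrates) | 1) + 1 evaluates to
		 * 8, 4, 2, 2 for nrates = 1, 2, 3, 4.
		 */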
		calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1,
					    info->control.rates[i].count);
		nremaining -= calculated_tries[i];
	}

	/* if there are tries left, distribute from back to front */
	for (i = nrates - 1; nremaining > 0 && i >= 0; i--) {
		int tmp = info->control.rates[i].count - calculated_tries[i];

		if (tmp <= 0)
			continue;
		/* RC requested more tries at this rate */

		tmp = min_t(int, tmp, nremaining);
		calculated_tries[i] += tmp;
		nremaining -= tmp;
	}

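	/*
	 * Each rateset entry below is the firmware rate index plus flag
	 * bits: 0x10 short preamble, 0x20 CTS-to-self, 0x40 RTS/CTS.
	 */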
	ridx = 0;
	for (i = 0; i < nrates && ridx < 8; i++) {
		/* we register the rates in perfect order */
		rate = info->control.rates[i].idx;
		if (info->band == NL80211_BAND_5GHZ)
			rate += 4;

		/* store the count we actually calculated for TX status */
		info->control.rates[i].count = calculated_tries[i];

		rc_flags = info->control.rates[i].flags;
		if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) {
			rate |= 0x10;
			cts_rate |= 0x10;
		}
		if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			burst_allowed = false;
			rate |= 0x40;
		} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			rate |= 0x20;
			burst_allowed = false;
		}
		for (j = 0; j < calculated_tries[i] && ridx < 8; j++) {
			txhdr->rateset[ridx] = rate;
			ridx++;
		}
	}

	if (burst_allowed)
		hdr_flags |= P54_HDR_FLAG_DATA_OUT_BURST;

	/* TODO: enable bursting */
	hdr->flags = cpu_to_le16(hdr_flags);
	hdr->tries = ridx;
	txhdr->rts_rate_idx = 0;
	if (info->control.hw_key) {
		txhdr->key_type = p54_convert_algo(info->control.hw_key->cipher);
		txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
		memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
		if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			/* reserve space for the MIC key */
			len += 8;
			skb_put_data(skb,
				     &(info->control.hw_key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]),
				     8);
		}
		/* reserve some space for ICV */
		len += info->control.hw_key->icv_len;
		skb_put_zero(skb, info->control.hw_key->icv_len);
	} else {
		txhdr->key_type = 0;
		txhdr->key_len = 0;
	}
	txhdr->crypt_offset = crypt_offset;
	txhdr->hw_queue = queue;
	txhdr->backlog = priv->tx_stats[queue].len - 1;
	memset(txhdr->durations, 0, sizeof(txhdr->durations));
	txhdr->tx_antenna = 2 & priv->tx_diversity_mask;
	if (priv->rxhw == 5) {
		txhdr->longbow.cts_rate = cts_rate;
		txhdr->longbow.output_power = cpu_to_le16(priv->output_power);
	} else {
		txhdr->normal.output_power = priv->output_power;
		txhdr->normal.cts_rate = cts_rate;
	}
	if (padding)
		txhdr->align[0] = padding;

	hdr->len = cpu_to_le16(len);
	/* modifies skb->cb and with it info, so must be last! */
	p54info = (void *) info->rate_driver_data;
	p54info->extra_len = extra_len;

	p54_tx(priv, skb);
}