1
2 /*
3 * This file is part of wl1271
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
53
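/* Number of attempts at booting the firmware before giving up */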
54 #define WL1271_BOOT_RETRIES 3
55
58 static char *fwlog_param;
59 static int bug_on_recovery = -1;
60 static int no_recovery = -1;
61
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
70 {
71 int ret;
72
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 return -EINVAL;
75
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 return 0;
78
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 return 0;
81
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
83 if (ret < 0)
84 return ret;
85
86 wl1271_info("Association completed.");
87 return 0;
88 }
89
90 static int wl1271_reg_notify(struct wiphy *wiphy,
91 struct regulatory_request *request)
92 {
93 struct ieee80211_supported_band *band;
94 struct ieee80211_channel *ch;
95 int i;
96 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 struct wl1271 *wl = hw->priv;
98
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
103 continue;
104
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
108
109 }
110
111 if (likely(wl->state == WLCORE_STATE_ON))
112 wlcore_regdomain_config(wl);
113
114 return 0;
115 }
116
117 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
118 bool enable)
119 {
120 int ret = 0;
121
122 /* we should hold wl->mutex */
123 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
124 if (ret < 0)
125 goto out;
126
127 if (enable)
128 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
129 else
130 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
131 out:
132 return ret;
133 }
134
135 /*
136 * this function is being called when the rx_streaming interval
137  * has been changed or rx_streaming should be disabled
138 */
139 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
140 {
141 int ret = 0;
142 int period = wl->conf.rx_streaming.interval;
143
144 /* don't reconfigure if rx_streaming is disabled */
145 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
146 goto out;
147
148 /* reconfigure/disable according to new streaming_period */
149 if (period &&
150 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
151 (wl->conf.rx_streaming.always ||
152 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 ret = wl1271_set_rx_streaming(wl, wlvif, true);
154 else {
155 ret = wl1271_set_rx_streaming(wl, wlvif, false);
156 /* don't cancel_work_sync since we might deadlock */
157 del_timer_sync(&wlvif->rx_streaming_timer);
158 }
159 out:
160 return ret;
161 }
162
163 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
164 {
165 int ret;
166 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
167 rx_streaming_enable_work);
168 struct wl1271 *wl = wlvif->wl;
169
170 mutex_lock(&wl->mutex);
171
172 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
173 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
174 (!wl->conf.rx_streaming.always &&
175 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
176 goto out;
177
178 if (!wl->conf.rx_streaming.interval)
179 goto out;
180
181 ret = wl1271_ps_elp_wakeup(wl);
182 if (ret < 0)
183 goto out;
184
185 ret = wl1271_set_rx_streaming(wl, wlvif, true);
186 if (ret < 0)
187 goto out_sleep;
188
189 /* stop it after some time of inactivity */
190 mod_timer(&wlvif->rx_streaming_timer,
191 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
192
193 out_sleep:
194 wl1271_ps_elp_sleep(wl);
195 out:
196 mutex_unlock(&wl->mutex);
197 }
198
199 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
200 {
201 int ret;
202 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
203 rx_streaming_disable_work);
204 struct wl1271 *wl = wlvif->wl;
205
206 mutex_lock(&wl->mutex);
207
208 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
209 goto out;
210
211 ret = wl1271_ps_elp_wakeup(wl);
212 if (ret < 0)
213 goto out;
214
215 ret = wl1271_set_rx_streaming(wl, wlvif, false);
216 if (ret)
217 goto out_sleep;
218
219 out_sleep:
220 wl1271_ps_elp_sleep(wl);
221 out:
222 mutex_unlock(&wl->mutex);
223 }
224
225 static void wl1271_rx_streaming_timer(unsigned long data)
226 {
227 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
228 struct wl1271 *wl = wlvif->wl;
229 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
230 }
231
232 /* wl->mutex must be taken */
233 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
234 {
235 /* if the watchdog is not armed, don't do anything */
236 if (wl->tx_allocated_blocks == 0)
237 return;
238
239 cancel_delayed_work(&wl->tx_watchdog_work);
240 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
241 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
242 }
243
244 static void wl12xx_tx_watchdog_work(struct work_struct *work)
245 {
246 struct delayed_work *dwork;
247 struct wl1271 *wl;
248
249 dwork = container_of(work, struct delayed_work, work);
250 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
251
252 mutex_lock(&wl->mutex);
253
254 if (unlikely(wl->state != WLCORE_STATE_ON))
255 goto out;
256
257 /* Tx went out in the meantime - everything is ok */
258 if (unlikely(wl->tx_allocated_blocks == 0))
259 goto out;
260
261 /*
262 * if a ROC is in progress, we might not have any Tx for a long
263 * time (e.g. pending Tx on the non-ROC channels)
264 */
265 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
266 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
267 wl->conf.tx.tx_watchdog_timeout);
268 wl12xx_rearm_tx_watchdog_locked(wl);
269 goto out;
270 }
271
272 /*
273 * if a scan is in progress, we might not have any Tx for a long
274 * time
275 */
276 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
277 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
278 wl->conf.tx.tx_watchdog_timeout);
279 wl12xx_rearm_tx_watchdog_locked(wl);
280 goto out;
281 }
282
283 /*
284 * AP might cache a frame for a long time for a sleeping station,
285 * so rearm the timer if there's an AP interface with stations. If
286  * Tx is genuinely stuck we will hopefully discover it when all
287 * stations are removed due to inactivity.
288 */
289 if (wl->active_sta_count) {
290 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
291 " %d stations",
292 wl->conf.tx.tx_watchdog_timeout,
293 wl->active_sta_count);
294 wl12xx_rearm_tx_watchdog_locked(wl);
295 goto out;
296 }
297
298 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
299 wl->conf.tx.tx_watchdog_timeout);
300 wl12xx_queue_recovery_work(wl);
301
302 out:
303 mutex_unlock(&wl->mutex);
304 }
305
306 static void wlcore_adjust_conf(struct wl1271 *wl)
307 {
308 /* Adjust settings according to optional module parameters */
309
310 if (fwlog_param) {
311 if (!strcmp(fwlog_param, "continuous")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 } else if (!strcmp(fwlog_param, "ondemand")) {
314 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
315 } else if (!strcmp(fwlog_param, "dbgpins")) {
316 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
318 } else if (!strcmp(fwlog_param, "disable")) {
319 wl->conf.fwlog.mem_blocks = 0;
320 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
321 } else {
322 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
323 }
324 }
325
326 if (bug_on_recovery != -1)
327 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
328
329 if (no_recovery != -1)
330 wl->conf.recovery.no_recovery = (u8) no_recovery;
331 }
332
333 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
334 struct wl12xx_vif *wlvif,
335 u8 hlid, u8 tx_pkts)
336 {
337 bool fw_ps, single_sta;
338
339 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
340 single_sta = (wl->active_sta_count == 1);
341
342 /*
343  * Wake up from high level PS if the STA is asleep with too few
344 * packets in FW or if the STA is awake.
345 */
346 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
347 wl12xx_ps_link_end(wl, wlvif, hlid);
348
349 /*
350 * Start high-level PS if the STA is asleep with enough blocks in FW.
351 * Make an exception if this is the only connected station. In this
352 * case FW-memory congestion is not a problem.
353 */
354 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 wl12xx_ps_link_start(wl, wlvif, hlid, true);
356 }
357
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status_2 *status)
361 {
362 struct wl1271_link *lnk;
363 u32 cur_fw_ps_map;
364 u8 hlid, cnt;
365
366 /* TODO: also use link_fast_bitmap here */
367
368 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
369 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
370 wl1271_debug(DEBUG_PSM,
371 "link ps prev 0x%x cur 0x%x changed 0x%x",
372 wl->ap_fw_ps_map, cur_fw_ps_map,
373 wl->ap_fw_ps_map ^ cur_fw_ps_map);
374
375 wl->ap_fw_ps_map = cur_fw_ps_map;
376 }
377
378 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
379 lnk = &wl->links[hlid];
380 cnt = status->counters.tx_lnk_free_pkts[hlid] -
381 lnk->prev_freed_pkts;
382
383 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
384 lnk->allocated_pkts -= cnt;
385
386 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
387 lnk->allocated_pkts);
388 }
389 }
390
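/*
 * Read the raw FW status block and refresh the driver state derived from
 * it: per-queue freed-packet counters, total TX block accounting (re-arming
 * or cancelling the TX watchdog accordingly), per-link PS state for AP
 * interfaces and the host/chipset time offset.
 */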
391 static int wlcore_fw_status(struct wl1271 *wl,
392 struct wl_fw_status_1 *status_1,
393 struct wl_fw_status_2 *status_2)
394 {
395 struct wl12xx_vif *wlvif;
396 struct timespec ts;
397 u32 old_tx_blk_count = wl->tx_blocks_available;
398 int avail, freed_blocks;
399 int i;
400 size_t status_len;
401 int ret;
402
403 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
404 sizeof(*status_2) + wl->fw_status_priv_len;
405
406 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
407 status_len, false);
408 if (ret < 0)
409 return ret;
410
411 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
412 "drv_rx_counter = %d, tx_results_counter = %d)",
413 status_1->intr,
414 status_1->fw_rx_counter,
415 status_1->drv_rx_counter,
416 status_1->tx_results_counter);
417
418 for (i = 0; i < NUM_TX_QUEUES; i++) {
419 /* prevent wrap-around in freed-packets counter */
420 wl->tx_allocated_pkts[i] -=
421 (status_2->counters.tx_released_pkts[i] -
422 wl->tx_pkts_freed[i]) & 0xff;
423
424 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
425 }
426
427 /* prevent wrap-around in total blocks counter */
428 if (likely(wl->tx_blocks_freed <=
429 le32_to_cpu(status_2->total_released_blks)))
430 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
431 wl->tx_blocks_freed;
432 else
433 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
434 le32_to_cpu(status_2->total_released_blks);
435
436 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
437
438 wl->tx_allocated_blocks -= freed_blocks;
439
440 /*
441 * If the FW freed some blocks:
442 * If we still have allocated blocks - re-arm the timer, Tx is
443 * not stuck. Otherwise, cancel the timer (no Tx currently).
444 */
445 if (freed_blocks) {
446 if (wl->tx_allocated_blocks)
447 wl12xx_rearm_tx_watchdog_locked(wl);
448 else
449 cancel_delayed_work(&wl->tx_watchdog_work);
450 }
451
452 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
453
454 /*
455 * The FW might change the total number of TX memblocks before
456 * we get a notification about blocks being released. Thus, the
457 * available blocks calculation might yield a temporary result
458 * which is lower than the actual available blocks. Keeping in
459 * mind that only blocks that were allocated can be moved from
460 * TX to RX, tx_blocks_available should never decrease here.
461 */
462 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
463 avail);
464
465 /* if more blocks are available now, tx work can be scheduled */
466 if (wl->tx_blocks_available > old_tx_blk_count)
467 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
468
469 /* for AP update num of allocated TX blocks per link and ps status */
470 wl12xx_for_each_wlvif_ap(wl, wlvif) {
471 wl12xx_irq_update_links_status(wl, wlvif, status_2);
472 }
473
474 /* update the host-chipset time offset */
475 getnstimeofday(&ts);
476 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
477 (s64)le32_to_cpu(status_2->fw_localtime);
478
479 return 0;
480 }
481
482 static void wl1271_flush_deferred_work(struct wl1271 *wl)
483 {
484 struct sk_buff *skb;
485
486 /* Pass all received frames to the network stack */
487 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
488 ieee80211_rx_ni(wl->hw, skb);
489
490 /* Return sent skbs to the network stack */
491 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
492 ieee80211_tx_status_ni(wl->hw, skb);
493 }
494
495 static void wl1271_netstack_work(struct work_struct *work)
496 {
497 struct wl1271 *wl =
498 container_of(work, struct wl1271, netstack_work);
499
500 do {
501 wl1271_flush_deferred_work(wl);
502 } while (skb_queue_len(&wl->deferred_rx_queue));
503 }
504
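/*
 * Upper bound on status-processing iterations per threaded IRQ invocation;
 * reduced to a single pass when the edge-triggered IRQ platform quirk is
 * set (see wlcore_irq_locked() below).
 */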
505 #define WL1271_IRQ_MAX_LOOPS 256
506
507 static int wlcore_irq_locked(struct wl1271 *wl)
508 {
509 int ret = 0;
510 u32 intr;
511 int loopcount = WL1271_IRQ_MAX_LOOPS;
512 bool done = false;
513 unsigned int defer_count;
514 unsigned long flags;
515
516 /*
517 * In case edge triggered interrupt must be used, we cannot iterate
518 * more than once without introducing race conditions with the hardirq.
519 */
520 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
521 loopcount = 1;
522
523 wl1271_debug(DEBUG_IRQ, "IRQ work");
524
525 if (unlikely(wl->state != WLCORE_STATE_ON))
526 goto out;
527
528 ret = wl1271_ps_elp_wakeup(wl);
529 if (ret < 0)
530 goto out;
531
532 while (!done && loopcount--) {
533 /*
534 * In order to avoid a race with the hardirq, clear the flag
535 * before acknowledging the chip. Since the mutex is held,
536 * wl1271_ps_elp_wakeup cannot be called concurrently.
537 */
538 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
539 smp_mb__after_clear_bit();
540
541 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
542 if (ret < 0)
543 goto out;
544
545 wlcore_hw_tx_immediate_compl(wl);
546
547 intr = le32_to_cpu(wl->fw_status_1->intr);
548 intr &= WLCORE_ALL_INTR_MASK;
549 if (!intr) {
550 done = true;
551 continue;
552 }
553
554 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
555 wl1271_error("HW watchdog interrupt received! starting recovery.");
556 wl->watchdog_recovery = true;
557 ret = -EIO;
558
559 /* restarting the chip. ignore any other interrupt. */
560 goto out;
561 }
562
563 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
564 wl1271_error("SW watchdog interrupt received! "
565 "starting recovery.");
566 wl->watchdog_recovery = true;
567 ret = -EIO;
568
569 /* restarting the chip. ignore any other interrupt. */
570 goto out;
571 }
572
573 if (likely(intr & WL1271_ACX_INTR_DATA)) {
574 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
575
576 ret = wlcore_rx(wl, wl->fw_status_1);
577 if (ret < 0)
578 goto out;
579
580 /* Check if any tx blocks were freed */
581 spin_lock_irqsave(&wl->wl_lock, flags);
582 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
583 wl1271_tx_total_queue_count(wl) > 0) {
584 spin_unlock_irqrestore(&wl->wl_lock, flags);
585 /*
586 * In order to avoid starvation of the TX path,
587 * call the work function directly.
588 */
589 ret = wlcore_tx_work_locked(wl);
590 if (ret < 0)
591 goto out;
592 } else {
593 spin_unlock_irqrestore(&wl->wl_lock, flags);
594 }
595
596 /* check for tx results */
597 ret = wlcore_hw_tx_delayed_compl(wl);
598 if (ret < 0)
599 goto out;
600
601 /* Make sure the deferred queues don't get too long */
602 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
603 skb_queue_len(&wl->deferred_rx_queue);
604 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
605 wl1271_flush_deferred_work(wl);
606 }
607
608 if (intr & WL1271_ACX_INTR_EVENT_A) {
609 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
610 ret = wl1271_event_handle(wl, 0);
611 if (ret < 0)
612 goto out;
613 }
614
615 if (intr & WL1271_ACX_INTR_EVENT_B) {
616 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
617 ret = wl1271_event_handle(wl, 1);
618 if (ret < 0)
619 goto out;
620 }
621
622 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
623 wl1271_debug(DEBUG_IRQ,
624 "WL1271_ACX_INTR_INIT_COMPLETE");
625
626 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
627 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
628 }
629
630 wl1271_ps_elp_sleep(wl);
631
632 out:
633 return ret;
634 }
635
636 static irqreturn_t wlcore_irq(int irq, void *cookie)
637 {
638 int ret;
639 unsigned long flags;
640 struct wl1271 *wl = cookie;
641
642 /* TX might be handled here, avoid redundant work */
643 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
644 cancel_work_sync(&wl->tx_work);
645
646 mutex_lock(&wl->mutex);
647
648 ret = wlcore_irq_locked(wl);
649 if (ret)
650 wl12xx_queue_recovery_work(wl);
651
652 spin_lock_irqsave(&wl->wl_lock, flags);
653 /* In case TX was not handled here, queue TX work */
654 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
655 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
656 wl1271_tx_total_queue_count(wl) > 0)
657 ieee80211_queue_work(wl->hw, &wl->tx_work);
658 spin_unlock_irqrestore(&wl->wl_lock, flags);
659
660 mutex_unlock(&wl->mutex);
661
662 return IRQ_HANDLED;
663 }
664
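/* Iterator context used by wl12xx_get_vif_count() to count active vifs */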
665 struct vif_counter_data {
666 u8 counter;
667
668 struct ieee80211_vif *cur_vif;
669 bool cur_vif_running;
670 };
671
672 static void wl12xx_vif_count_iter(void *data, u8 *mac,
673 struct ieee80211_vif *vif)
674 {
675 struct vif_counter_data *counter = data;
676
677 counter->counter++;
678 if (counter->cur_vif == vif)
679 counter->cur_vif_running = true;
680 }
681
682 /* caller must not hold wl->mutex, as it might deadlock */
683 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
684 struct ieee80211_vif *cur_vif,
685 struct vif_counter_data *data)
686 {
687 memset(data, 0, sizeof(*data));
688 data->cur_vif = cur_vif;
689
690 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
691 wl12xx_vif_count_iter, data);
692 }
693
694 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
695 {
696 const struct firmware *fw;
697 const char *fw_name;
698 enum wl12xx_fw_type fw_type;
699 int ret;
700
701 if (plt) {
702 fw_type = WL12XX_FW_TYPE_PLT;
703 fw_name = wl->plt_fw_name;
704 } else {
705 /*
706 * we can't call wl12xx_get_vif_count() here because
707 * wl->mutex is taken, so use the cached last_vif_count value
708 */
709 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
710 fw_type = WL12XX_FW_TYPE_MULTI;
711 fw_name = wl->mr_fw_name;
712 } else {
713 fw_type = WL12XX_FW_TYPE_NORMAL;
714 fw_name = wl->sr_fw_name;
715 }
716 }
717
718 if (wl->fw_type == fw_type)
719 return 0;
720
721 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
722
723 ret = request_firmware(&fw, fw_name, wl->dev);
724
725 if (ret < 0) {
726 wl1271_error("could not get firmware %s: %d", fw_name, ret);
727 return ret;
728 }
729
730 if (fw->size % 4) {
731 wl1271_error("firmware size is not multiple of 32 bits: %zu",
732 fw->size);
733 ret = -EILSEQ;
734 goto out;
735 }
736
737 vfree(wl->fw);
738 wl->fw_type = WL12XX_FW_TYPE_NONE;
739 wl->fw_len = fw->size;
740 wl->fw = vmalloc(wl->fw_len);
741
742 if (!wl->fw) {
743 wl1271_error("could not allocate memory for the firmware");
744 ret = -ENOMEM;
745 goto out;
746 }
747
748 memcpy(wl->fw, fw->data, wl->fw_len);
749 ret = 0;
750 wl->fw_type = fw_type;
751 out:
752 release_firmware(fw);
753
754 return ret;
755 }
756
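/*
 * Schedule a full FW recovery. Interrupts are disabled and the state moves
 * to RESTARTING, so further calls made while a recovery is already in
 * flight are ignored.
 */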
757 void wl12xx_queue_recovery_work(struct wl1271 *wl)
758 {
759 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
760
761 /* Avoid a recursive recovery */
762 if (wl->state == WLCORE_STATE_ON) {
763 wl->state = WLCORE_STATE_RESTARTING;
764 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
765 wlcore_disable_interrupts_nosync(wl);
766 ieee80211_queue_work(wl->hw, &wl->recovery_work);
767 }
768 }
769
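/*
 * Append one memory block's worth of FW log data to the host log buffer
 * (consumed by the sysfs fwlog entry). Returns the number of bytes copied;
 * a return value of 0 ends the caller's block-walking loop.
 */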
770 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
771 {
772 size_t len = 0;
773
774         /* The FW log is a length-value list, find where the log ends */
775 while (len < maxlen) {
776 if (memblock[len] == 0)
777 break;
778 if (len + memblock[len] + 1 > maxlen)
779 break;
780 len += memblock[len] + 1;
781 }
782
783 /* Make sure we have enough room */
784 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
785
786 /* Fill the FW log file, consumed by the sysfs fwlog entry */
787 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
788 wl->fwlog_size += len;
789
790 return len;
791 }
792
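/*
 * In continuous mode the last log memory block's "next" pointer holds this
 * sentinel value instead of a real address; it terminates the walk in
 * wl12xx_read_fwlog_panic().
 */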
793 #define WLCORE_FW_LOG_END 0x2000000
794
795 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
796 {
797 u32 addr;
798 u32 offset;
799 u32 end_of_log;
800 u8 *block;
801 int ret;
802
803 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
804 (wl->conf.fwlog.mem_blocks == 0))
805 return;
806
807 wl1271_info("Reading FW panic log");
808
809 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
810 if (!block)
811 return;
812
813 /*
814 * Make sure the chip is awake and the logger isn't active.
815          * Do not send a stop fwlog command if the fw is hung or if
816 * dbgpins are used (due to some fw bug).
817 */
818 if (wl1271_ps_elp_wakeup(wl))
819 goto out;
820 if (!wl->watchdog_recovery &&
821 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
822 wl12xx_cmd_stop_fwlog(wl);
823
824 /* Read the first memory block address */
825 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
826 if (ret < 0)
827 goto out;
828
829 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
830 if (!addr)
831 goto out;
832
833 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
834 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
835 end_of_log = WLCORE_FW_LOG_END;
836 } else {
837 offset = sizeof(addr);
838 end_of_log = addr;
839 }
840
841 /* Traverse the memory blocks linked list */
842 do {
843 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
844 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
845 false);
846 if (ret < 0)
847 goto out;
848
849 /*
850 * Memory blocks are linked to one another. The first 4 bytes
851 * of each memory block hold the hardware address of the next
852 * one. The last memory block points to the first one in
853 * on demand mode and is equal to 0x2000000 in continuous mode.
854 */
855 addr = le32_to_cpup((__le32 *)block);
856 if (!wl12xx_copy_fwlog(wl, block + offset,
857 WL12XX_HW_BLOCK_SIZE - offset))
858 break;
859 } while (addr && (addr != end_of_log));
860
861 wake_up_interruptible(&wl->fwlog_waitq);
862
863 out:
864 kfree(block);
865 }
866
867 static void wlcore_print_recovery(struct wl1271 *wl)
868 {
869 u32 pc = 0;
870 u32 hint_sts = 0;
871 int ret;
872
873 wl1271_info("Hardware recovery in progress. FW ver: %s",
874 wl->chip.fw_ver_str);
875
876 /* change partitions momentarily so we can read the FW pc */
877 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
878 if (ret < 0)
879 return;
880
881 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
882 if (ret < 0)
883 return;
884
885 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
886 if (ret < 0)
887 return;
888
889 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
890 pc, hint_sts, ++wl->recovery_count);
891
892 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
893 }
894
895
896 static void wl1271_recovery_work(struct work_struct *work)
897 {
898 struct wl1271 *wl =
899 container_of(work, struct wl1271, recovery_work);
900 struct wl12xx_vif *wlvif;
901 struct ieee80211_vif *vif;
902
903 mutex_lock(&wl->mutex);
904
905 if (wl->state == WLCORE_STATE_OFF || wl->plt)
906 goto out_unlock;
907
908 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
909 wl12xx_read_fwlog_panic(wl);
910 wlcore_print_recovery(wl);
911 }
912
913 BUG_ON(wl->conf.recovery.bug_on_recovery &&
914 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
915
916 if (wl->conf.recovery.no_recovery) {
917 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
918 goto out_unlock;
919 }
920
921 /*
922 * Advance security sequence number to overcome potential progress
923          * in the firmware during recovery. This doesn't hurt if the network is
924 * not encrypted.
925 */
926 wl12xx_for_each_wlvif(wl, wlvif) {
927 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
928 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
929 wlvif->tx_security_seq +=
930 WL1271_TX_SQN_POST_RECOVERY_PADDING;
931 }
932
933 /* Prevent spurious TX during FW restart */
934 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
935
936 /* reboot the chipset */
937 while (!list_empty(&wl->wlvif_list)) {
938 wlvif = list_first_entry(&wl->wlvif_list,
939 struct wl12xx_vif, list);
940 vif = wl12xx_wlvif_to_vif(wlvif);
941 __wl1271_op_remove_interface(wl, vif, false);
942 }
943
944 wlcore_op_stop_locked(wl);
945
946 ieee80211_restart_hw(wl->hw);
947
948 /*
949          * It's safe to enable TX now - the queues are stopped after a request
950 * to restart the HW.
951 */
952 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
953
954 out_unlock:
955 wl->watchdog_recovery = false;
956 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
957 mutex_unlock(&wl->mutex);
958 }
959
960 static int wlcore_fw_wakeup(struct wl1271 *wl)
961 {
962 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
963 }
964
965 static int wl1271_setup(struct wl1271 *wl)
966 {
967 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
968 sizeof(*wl->fw_status_2) +
969 wl->fw_status_priv_len, GFP_KERNEL);
970 if (!wl->fw_status_1)
971 return -ENOMEM;
972
973 wl->fw_status_2 = (struct wl_fw_status_2 *)
974 (((u8 *) wl->fw_status_1) +
975 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
976
977 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
978 if (!wl->tx_res_if) {
979 kfree(wl->fw_status_1);
980 return -ENOMEM;
981 }
982
983 return 0;
984 }
985
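/*
 * Power the chip on, reset and initialize the IO layer, select the boot
 * partition and wake the FW out of ELP. If a step after power-up fails,
 * the chip is powered back off.
 */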
986 static int wl12xx_set_power_on(struct wl1271 *wl)
987 {
988 int ret;
989
990 msleep(WL1271_PRE_POWER_ON_SLEEP);
991 ret = wl1271_power_on(wl);
992 if (ret < 0)
993 goto out;
994 msleep(WL1271_POWER_ON_SLEEP);
995 wl1271_io_reset(wl);
996 wl1271_io_init(wl);
997
998 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
999 if (ret < 0)
1000 goto fail;
1001
1002 /* ELP module wake up */
1003 ret = wlcore_fw_wakeup(wl);
1004 if (ret < 0)
1005 goto fail;
1006
1007 out:
1008 return ret;
1009
1010 fail:
1011 wl1271_power_off(wl);
1012 return ret;
1013 }
1014
1015 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1016 {
1017 int ret = 0;
1018
1019 ret = wl12xx_set_power_on(wl);
1020 if (ret < 0)
1021 goto out;
1022
1023 /*
1024 * For wl127x based devices we could use the default block
1025 * size (512 bytes), but due to a bug in the sdio driver, we
1026 * need to set it explicitly after the chip is powered on. To
1027 * simplify the code and since the performance impact is
1028 * negligible, we use the same block size for all different
1029 * chip types.
1030 *
1031 * Check if the bus supports blocksize alignment and, if it
1032 * doesn't, make sure we don't have the quirk.
1033 */
1034 if (!wl1271_set_block_size(wl))
1035 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1036
1037 /* TODO: make sure the lower driver has set things up correctly */
1038
1039 ret = wl1271_setup(wl);
1040 if (ret < 0)
1041 goto out;
1042
1043 ret = wl12xx_fetch_firmware(wl, plt);
1044 if (ret < 0)
1045 goto out;
1046
1047 out:
1048 return ret;
1049 }
1050
1051 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1052 {
1053 int retries = WL1271_BOOT_RETRIES;
1054 struct wiphy *wiphy = wl->hw->wiphy;
1055
1056 static const char* const PLT_MODE[] = {
1057 "PLT_OFF",
1058 "PLT_ON",
1059 "PLT_FEM_DETECT"
1060 };
1061
1062 int ret;
1063
1064 mutex_lock(&wl->mutex);
1065
1066 wl1271_notice("power up");
1067
1068 if (wl->state != WLCORE_STATE_OFF) {
1069 wl1271_error("cannot go into PLT state because not "
1070 "in off state: %d", wl->state);
1071 ret = -EBUSY;
1072 goto out;
1073 }
1074
1075 /* Indicate to lower levels that we are now in PLT mode */
1076 wl->plt = true;
1077 wl->plt_mode = plt_mode;
1078
1079 while (retries) {
1080 retries--;
1081 ret = wl12xx_chip_wakeup(wl, true);
1082 if (ret < 0)
1083 goto power_off;
1084
1085 ret = wl->ops->plt_init(wl);
1086 if (ret < 0)
1087 goto power_off;
1088
1089 wl->state = WLCORE_STATE_ON;
1090 wl1271_notice("firmware booted in PLT mode %s (%s)",
1091 PLT_MODE[plt_mode],
1092 wl->chip.fw_ver_str);
1093
1094 /* update hw/fw version info in wiphy struct */
1095 wiphy->hw_version = wl->chip.id;
1096 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1097 sizeof(wiphy->fw_version));
1098
1099 goto out;
1100
1101 power_off:
1102 wl1271_power_off(wl);
1103 }
1104
1105 wl->plt = false;
1106 wl->plt_mode = PLT_OFF;
1107
1108 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1109 WL1271_BOOT_RETRIES);
1110 out:
1111 mutex_unlock(&wl->mutex);
1112
1113 return ret;
1114 }
1115
1116 int wl1271_plt_stop(struct wl1271 *wl)
1117 {
1118 int ret = 0;
1119
1120 wl1271_notice("power down");
1121
1122 /*
1123 * Interrupts must be disabled before setting the state to OFF.
1124 * Otherwise, the interrupt handler might be called and exit without
1125 * reading the interrupt status.
1126 */
1127 wlcore_disable_interrupts(wl);
1128 mutex_lock(&wl->mutex);
1129 if (!wl->plt) {
1130 mutex_unlock(&wl->mutex);
1131
1132 /*
1133 * This will not necessarily enable interrupts as interrupts
1134 * may have been disabled when op_stop was called. It will,
1135 * however, balance the above call to disable_interrupts().
1136 */
1137 wlcore_enable_interrupts(wl);
1138
1139 wl1271_error("cannot power down because not in PLT "
1140 "state: %d", wl->state);
1141 ret = -EBUSY;
1142 goto out;
1143 }
1144
1145 mutex_unlock(&wl->mutex);
1146
1147 wl1271_flush_deferred_work(wl);
1148 cancel_work_sync(&wl->netstack_work);
1149 cancel_work_sync(&wl->recovery_work);
1150 cancel_delayed_work_sync(&wl->elp_work);
1151 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1152
1153 mutex_lock(&wl->mutex);
1154 wl1271_power_off(wl);
1155 wl->flags = 0;
1156 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1157 wl->state = WLCORE_STATE_OFF;
1158 wl->plt = false;
1159 wl->plt_mode = PLT_OFF;
1160 wl->rx_counter = 0;
1161 mutex_unlock(&wl->mutex);
1162
1163 out:
1164 return ret;
1165 }
1166
1167 static void wl1271_op_tx(struct ieee80211_hw *hw,
1168 struct ieee80211_tx_control *control,
1169 struct sk_buff *skb)
1170 {
1171 struct wl1271 *wl = hw->priv;
1172 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1173 struct ieee80211_vif *vif = info->control.vif;
1174 struct wl12xx_vif *wlvif = NULL;
1175 unsigned long flags;
1176 int q, mapping;
1177 u8 hlid;
1178
1179 if (vif)
1180 wlvif = wl12xx_vif_to_data(vif);
1181
1182 mapping = skb_get_queue_mapping(skb);
1183 q = wl1271_tx_get_queue(mapping);
1184
1185 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1186
1187 spin_lock_irqsave(&wl->wl_lock, flags);
1188
1189 /*
1190 * drop the packet if the link is invalid or the queue is stopped
1191 * for any reason but watermark. Watermark is a "soft"-stop so we
1192 * allow these packets through.
1193 */
1194 if (hlid == WL12XX_INVALID_LINK_ID ||
1195 (wlvif && !test_bit(hlid, wlvif->links_map)) ||
1196 (wlcore_is_queue_stopped(wl, q) &&
1197 !wlcore_is_queue_stopped_by_reason(wl, q,
1198 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1199 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1200 ieee80211_free_txskb(hw, skb);
1201 goto out;
1202 }
1203
1204 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1205 hlid, q, skb->len);
1206 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1207
1208 wl->tx_queue_count[q]++;
1209
1210 /*
1211          * The workqueue is slow to process the tx_queue and we need to stop
1212 * the queue here, otherwise the queue will get too long.
1213 */
1214 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1215 !wlcore_is_queue_stopped_by_reason(wl, q,
1216 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1217 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1218 wlcore_stop_queue_locked(wl, q,
1219 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1220 }
1221
1222 /*
1223 * The chip specific setup must run before the first TX packet -
1224 * before that, the tx_work will not be initialized!
1225 */
1226
1227 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1228 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1229 ieee80211_queue_work(wl->hw, &wl->tx_work);
1230
1231 out:
1232 spin_unlock_irqrestore(&wl->wl_lock, flags);
1233 }
1234
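/*
 * Push the preallocated dummy packet towards the FW; it is requested when
 * the FW runs low on RX memory blocks. The TX path is run directly unless
 * the FW TX is busy, in which case the threaded IRQ handler schedules the
 * TX work later.
 */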
1235 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1236 {
1237 unsigned long flags;
1238 int q;
1239
1240 /* no need to queue a new dummy packet if one is already pending */
1241 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1242 return 0;
1243
1244 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1245
1246 spin_lock_irqsave(&wl->wl_lock, flags);
1247 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1248 wl->tx_queue_count[q]++;
1249 spin_unlock_irqrestore(&wl->wl_lock, flags);
1250
1251 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1252 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1253 return wlcore_tx_work_locked(wl);
1254
1255 /*
1256 * If the FW TX is busy, TX work will be scheduled by the threaded
1257 * interrupt handler function
1258 */
1259 return 0;
1260 }
1261
1262 /*
1263 * The size of the dummy packet should be at least 1400 bytes. However, in
1264  * order to minimize the number of bus transactions, aligning it to 512-byte
1265  * boundaries could be beneficial, performance-wise
1266 */
1267 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1268
1269 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1270 {
1271 struct sk_buff *skb;
1272 struct ieee80211_hdr_3addr *hdr;
1273 unsigned int dummy_packet_size;
1274
1275 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1276 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1277
1278 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1279 if (!skb) {
1280 wl1271_warning("Failed to allocate a dummy packet skb");
1281 return NULL;
1282 }
1283
1284 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1285
1286 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1287 memset(hdr, 0, sizeof(*hdr));
1288 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1289 IEEE80211_STYPE_NULLFUNC |
1290 IEEE80211_FCTL_TODS);
1291
1292 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1293
1294 /* Dummy packets require the TID to be management */
1295 skb->priority = WL1271_TID_MGMT;
1296
1297 /* Initialize all fields that might be used */
1298 skb_set_queue_mapping(skb, 0);
1299 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1300
1301 return skb;
1302 }
1303
1304
1305 #ifdef CONFIG_PM
1306 static int
1307 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1308 {
1309 int num_fields = 0, in_field = 0, fields_size = 0;
1310 int i, pattern_len = 0;
1311
1312 if (!p->mask) {
1313 wl1271_warning("No mask in WoWLAN pattern");
1314 return -EINVAL;
1315 }
1316
1317 /*
1318 * The pattern is broken up into segments of bytes at different offsets
1319 * that need to be checked by the FW filter. Each segment is called
1320 * a field in the FW API. We verify that the total number of fields
1321 * required for this pattern won't exceed FW limits (8)
1322          * and that the total fields buffer won't exceed the FW limit.
1323 * Note that if there's a pattern which crosses Ethernet/IP header
1324 * boundary a new field is required.
1325 */
1326 for (i = 0; i < p->pattern_len; i++) {
1327 if (test_bit(i, (unsigned long *)p->mask)) {
1328 if (!in_field) {
1329 in_field = 1;
1330 pattern_len = 1;
1331 } else {
1332 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1333 num_fields++;
1334 fields_size += pattern_len +
1335 RX_FILTER_FIELD_OVERHEAD;
1336 pattern_len = 1;
1337 } else
1338 pattern_len++;
1339 }
1340 } else {
1341 if (in_field) {
1342 in_field = 0;
1343 fields_size += pattern_len +
1344 RX_FILTER_FIELD_OVERHEAD;
1345 num_fields++;
1346 }
1347 }
1348 }
1349
1350 if (in_field) {
1351 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1352 num_fields++;
1353 }
1354
1355 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1356 wl1271_warning("RX Filter too complex. Too many segments");
1357 return -EINVAL;
1358 }
1359
1360 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1361 wl1271_warning("RX filter pattern is too big");
1362 return -E2BIG;
1363 }
1364
1365 return 0;
1366 }
1367
1368 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1369 {
1370 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1371 }
1372
1373 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1374 {
1375 int i;
1376
1377 if (filter == NULL)
1378 return;
1379
1380 for (i = 0; i < filter->num_fields; i++)
1381 kfree(filter->fields[i].pattern);
1382
1383 kfree(filter);
1384 }
1385
1386 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1387 u16 offset, u8 flags,
1388 u8 *pattern, u8 len)
1389 {
1390 struct wl12xx_rx_filter_field *field;
1391
1392 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1393 wl1271_warning("Max fields per RX filter. can't alloc another");
1394 return -EINVAL;
1395 }
1396
1397 field = &filter->fields[filter->num_fields];
1398
1399 field->pattern = kzalloc(len, GFP_KERNEL);
1400 if (!field->pattern) {
1401 wl1271_warning("Failed to allocate RX filter pattern");
1402 return -ENOMEM;
1403 }
1404
1405 filter->num_fields++;
1406
1407 field->offset = cpu_to_le16(offset);
1408 field->flags = flags;
1409 field->len = len;
1410 memcpy(field->pattern, pattern, len);
1411
1412 return 0;
1413 }
1414
1415 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1416 {
1417 int i, fields_size = 0;
1418
1419 for (i = 0; i < filter->num_fields; i++)
1420 fields_size += filter->fields[i].len +
1421 sizeof(struct wl12xx_rx_filter_field) -
1422 sizeof(u8 *);
1423
1424 return fields_size;
1425 }
1426
1427 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1428 u8 *buf)
1429 {
1430 int i;
1431 struct wl12xx_rx_filter_field *field;
1432
1433 for (i = 0; i < filter->num_fields; i++) {
1434 field = (struct wl12xx_rx_filter_field *)buf;
1435
1436 field->offset = filter->fields[i].offset;
1437 field->flags = filter->fields[i].flags;
1438 field->len = filter->fields[i].len;
1439
1440 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1441 buf += sizeof(struct wl12xx_rx_filter_field) -
1442 sizeof(u8 *) + field->len;
1443 }
1444 }
1445
1446 /*
1447 * Allocates an RX filter returned through f
1448 * which needs to be freed using rx_filter_free()
1449 */
1450 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1451 struct cfg80211_wowlan_trig_pkt_pattern *p,
1452 struct wl12xx_rx_filter **f)
1453 {
1454 int i, j, ret = 0;
1455 struct wl12xx_rx_filter *filter;
1456 u16 offset;
1457 u8 flags, len;
1458
1459 filter = wl1271_rx_filter_alloc();
1460 if (!filter) {
1461 wl1271_warning("Failed to alloc rx filter");
1462 ret = -ENOMEM;
1463 goto err;
1464 }
1465
1466 i = 0;
1467 while (i < p->pattern_len) {
1468 if (!test_bit(i, (unsigned long *)p->mask)) {
1469 i++;
1470 continue;
1471 }
1472
1473 for (j = i; j < p->pattern_len; j++) {
1474 if (!test_bit(j, (unsigned long *)p->mask))
1475 break;
1476
1477 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1478 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1479 break;
1480 }
1481
1482 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1483 offset = i;
1484 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1485 } else {
1486 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1487 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1488 }
1489
1490 len = j - i;
1491
1492 ret = wl1271_rx_filter_alloc_field(filter,
1493 offset,
1494 flags,
1495 &p->pattern[i], len);
1496 if (ret)
1497 goto err;
1498
1499 i = j;
1500 }
1501
1502 filter->action = FILTER_SIGNAL;
1503
1504 *f = filter;
1505 return 0;
1506
1507 err:
1508 wl1271_rx_filter_free(filter);
1509 *f = NULL;
1510
1511 return ret;
1512 }
1513
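/*
 * Program the FW RX filters from the WoWLAN configuration. Without
 * patterns (or with wow->any) all filters are cleared and the default
 * action signals the host; otherwise every pattern is validated, converted
 * into an RX filter and enabled, and the default action becomes drop.
 */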
1514 static int wl1271_configure_wowlan(struct wl1271 *wl,
1515 struct cfg80211_wowlan *wow)
1516 {
1517 int i, ret;
1518
1519 if (!wow || wow->any || !wow->n_patterns) {
1520 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1521 FILTER_SIGNAL);
1522 if (ret)
1523 goto out;
1524
1525 ret = wl1271_rx_filter_clear_all(wl);
1526 if (ret)
1527 goto out;
1528
1529 return 0;
1530 }
1531
1532 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1533 return -EINVAL;
1534
1535 /* Validate all incoming patterns before clearing current FW state */
1536 for (i = 0; i < wow->n_patterns; i++) {
1537 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1538 if (ret) {
1539 wl1271_warning("Bad wowlan pattern %d", i);
1540 return ret;
1541 }
1542 }
1543
1544 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1545 if (ret)
1546 goto out;
1547
1548 ret = wl1271_rx_filter_clear_all(wl);
1549 if (ret)
1550 goto out;
1551
1552 /* Translate WoWLAN patterns into filters */
1553 for (i = 0; i < wow->n_patterns; i++) {
1554 struct cfg80211_wowlan_trig_pkt_pattern *p;
1555 struct wl12xx_rx_filter *filter = NULL;
1556
1557 p = &wow->patterns[i];
1558
1559 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1560 if (ret) {
1561 wl1271_warning("Failed to create an RX filter from "
1562 "wowlan pattern %d", i);
1563 goto out;
1564 }
1565
1566 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1567
1568 wl1271_rx_filter_free(filter);
1569 if (ret)
1570 goto out;
1571 }
1572
1573 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1574
1575 out:
1576 return ret;
1577 }
1578
1579 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1580 struct wl12xx_vif *wlvif,
1581 struct cfg80211_wowlan *wow)
1582 {
1583 int ret = 0;
1584
1585 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1586 goto out;
1587
1588 ret = wl1271_ps_elp_wakeup(wl);
1589 if (ret < 0)
1590 goto out;
1591
1592 ret = wl1271_configure_wowlan(wl, wow);
1593 if (ret < 0)
1594 goto out_sleep;
1595
1596 if ((wl->conf.conn.suspend_wake_up_event ==
1597 wl->conf.conn.wake_up_event) &&
1598 (wl->conf.conn.suspend_listen_interval ==
1599 wl->conf.conn.listen_interval))
1600 goto out_sleep;
1601
1602 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1603 wl->conf.conn.suspend_wake_up_event,
1604 wl->conf.conn.suspend_listen_interval);
1605
1606 if (ret < 0)
1607 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1608
1609 out_sleep:
1610 wl1271_ps_elp_sleep(wl);
1611 out:
1612 return ret;
1613
1614 }
1615
1616 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1617 struct wl12xx_vif *wlvif)
1618 {
1619 int ret = 0;
1620
1621 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1622 goto out;
1623
1624 ret = wl1271_ps_elp_wakeup(wl);
1625 if (ret < 0)
1626 goto out;
1627
1628 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1629
1630 wl1271_ps_elp_sleep(wl);
1631 out:
1632 return ret;
1633
1634 }
1635
1636 static int wl1271_configure_suspend(struct wl1271 *wl,
1637 struct wl12xx_vif *wlvif,
1638 struct cfg80211_wowlan *wow)
1639 {
1640 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1641 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1642 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1643 return wl1271_configure_suspend_ap(wl, wlvif);
1644 return 0;
1645 }
1646
1647 static void wl1271_configure_resume(struct wl1271 *wl,
1648 struct wl12xx_vif *wlvif)
1649 {
1650 int ret = 0;
1651 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1652 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1653
1654 if ((!is_ap) && (!is_sta))
1655 return;
1656
1657 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1658 return;
1659
1660 ret = wl1271_ps_elp_wakeup(wl);
1661 if (ret < 0)
1662 return;
1663
1664 if (is_sta) {
1665 wl1271_configure_wowlan(wl, NULL);
1666
1667 if ((wl->conf.conn.suspend_wake_up_event ==
1668 wl->conf.conn.wake_up_event) &&
1669 (wl->conf.conn.suspend_listen_interval ==
1670 wl->conf.conn.listen_interval))
1671 goto out_sleep;
1672
1673 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1674 wl->conf.conn.wake_up_event,
1675 wl->conf.conn.listen_interval);
1676
1677 if (ret < 0)
1678 wl1271_error("resume: wake up conditions failed: %d",
1679 ret);
1680
1681 } else if (is_ap) {
1682 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1683 }
1684
1685 out_sleep:
1686 wl1271_ps_elp_sleep(wl);
1687 }
1688
1689 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1690 struct cfg80211_wowlan *wow)
1691 {
1692 struct wl1271 *wl = hw->priv;
1693 struct wl12xx_vif *wlvif;
1694 int ret;
1695
1696 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1697 WARN_ON(!wow);
1698
1699 /* we want to perform the recovery before suspending */
1700 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1701 wl1271_warning("postponing suspend to perform recovery");
1702 return -EBUSY;
1703 }
1704
1705 wl1271_tx_flush(wl);
1706
1707 mutex_lock(&wl->mutex);
1708 wl->wow_enabled = true;
1709 wl12xx_for_each_wlvif(wl, wlvif) {
1710 ret = wl1271_configure_suspend(wl, wlvif, wow);
1711 if (ret < 0) {
1712 mutex_unlock(&wl->mutex);
1713 wl1271_warning("couldn't prepare device to suspend");
1714 return ret;
1715 }
1716 }
1717 mutex_unlock(&wl->mutex);
1718 /* flush any remaining work */
1719 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1720
1721 /*
1722 * disable and re-enable interrupts in order to flush
1723 * the threaded_irq
1724 */
1725 wlcore_disable_interrupts(wl);
1726
1727 /*
1728 * set suspended flag to avoid triggering a new threaded_irq
1729 * work. no need for spinlock as interrupts are disabled.
1730 */
1731 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1732
1733 wlcore_enable_interrupts(wl);
1734 flush_work(&wl->tx_work);
1735 flush_delayed_work(&wl->elp_work);
1736
1737 return 0;
1738 }
1739
1740 static int wl1271_op_resume(struct ieee80211_hw *hw)
1741 {
1742 struct wl1271 *wl = hw->priv;
1743 struct wl12xx_vif *wlvif;
1744 unsigned long flags;
1745 bool run_irq_work = false, pending_recovery;
1746 int ret;
1747
1748 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1749 wl->wow_enabled);
1750 WARN_ON(!wl->wow_enabled);
1751
1752 /*
1753 * re-enable irq_work enqueuing, and call irq_work directly if
1754 * there is a pending work.
1755 */
1756 spin_lock_irqsave(&wl->wl_lock, flags);
1757 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1758 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1759 run_irq_work = true;
1760 spin_unlock_irqrestore(&wl->wl_lock, flags);
1761
1762 mutex_lock(&wl->mutex);
1763
1764 /* test the recovery flag before calling any SDIO functions */
1765 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1766 &wl->flags);
1767
1768 if (run_irq_work) {
1769 wl1271_debug(DEBUG_MAC80211,
1770 "run postponed irq_work directly");
1771
1772 /* don't talk to the HW if recovery is pending */
1773 if (!pending_recovery) {
1774 ret = wlcore_irq_locked(wl);
1775 if (ret)
1776 wl12xx_queue_recovery_work(wl);
1777 }
1778
1779 wlcore_enable_interrupts(wl);
1780 }
1781
1782 if (pending_recovery) {
1783 wl1271_warning("queuing forgotten recovery on resume");
1784 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1785 goto out;
1786 }
1787
1788 wl12xx_for_each_wlvif(wl, wlvif) {
1789 wl1271_configure_resume(wl, wlvif);
1790 }
1791
1792 out:
1793 wl->wow_enabled = false;
1794 mutex_unlock(&wl->mutex);
1795
1796 return 0;
1797 }
1798 #endif
1799
1800 static int wl1271_op_start(struct ieee80211_hw *hw)
1801 {
1802 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1803
1804 /*
1805 * We have to delay the booting of the hardware because
1806 * we need to know the local MAC address before downloading and
1807 * initializing the firmware. The MAC address cannot be changed
1808 * after boot, and without the proper MAC address, the firmware
1809 * will not function properly.
1810 *
1811 * The MAC address is first known when the corresponding interface
1812 * is added. That is where we will initialize the hardware.
1813 */
1814
1815 return 0;
1816 }
1817
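/*
 * Power the chip off and reset the driver state. Must be called with
 * wl->mutex held; the mutex is dropped temporarily while pending work is
 * cancelled and interrupts are synchronized.
 */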
1818 static void wlcore_op_stop_locked(struct wl1271 *wl)
1819 {
1820 int i;
1821
1822 if (wl->state == WLCORE_STATE_OFF) {
1823 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1824 &wl->flags))
1825 wlcore_enable_interrupts(wl);
1826
1827 return;
1828 }
1829
1830 /*
1831 * this must be before the cancel_work calls below, so that the work
1832 * functions don't perform further work.
1833 */
1834 wl->state = WLCORE_STATE_OFF;
1835
1836 /*
1837 * Use the nosync variant to disable interrupts, so the mutex could be
1838 * held while doing so without deadlocking.
1839 */
1840 wlcore_disable_interrupts_nosync(wl);
1841
1842 mutex_unlock(&wl->mutex);
1843
1844 wlcore_synchronize_interrupts(wl);
1845 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1846 cancel_work_sync(&wl->recovery_work);
1847 wl1271_flush_deferred_work(wl);
1848 cancel_delayed_work_sync(&wl->scan_complete_work);
1849 cancel_work_sync(&wl->netstack_work);
1850 cancel_work_sync(&wl->tx_work);
1851 cancel_delayed_work_sync(&wl->elp_work);
1852 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1853
1854 /* let's notify MAC80211 about the remaining pending TX frames */
1855 wl12xx_tx_reset(wl);
1856 mutex_lock(&wl->mutex);
1857
1858 wl1271_power_off(wl);
1859 /*
1860 * In case a recovery was scheduled, interrupts were disabled to avoid
1861 * an interrupt storm. Now that the power is down, it is safe to
1862 * re-enable interrupts to balance the disable depth
1863 */
1864 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1865 wlcore_enable_interrupts(wl);
1866
1867 wl->band = IEEE80211_BAND_2GHZ;
1868
1869 wl->rx_counter = 0;
1870 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1871 wl->channel_type = NL80211_CHAN_NO_HT;
1872 wl->tx_blocks_available = 0;
1873 wl->tx_allocated_blocks = 0;
1874 wl->tx_results_count = 0;
1875 wl->tx_packets_count = 0;
1876 wl->time_offset = 0;
1877 wl->ap_fw_ps_map = 0;
1878 wl->ap_ps_map = 0;
1879 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1880 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1881 memset(wl->links_map, 0, sizeof(wl->links_map));
1882 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1883 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1884 wl->active_sta_count = 0;
1885
1886 /* The system link is always allocated */
1887 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1888
1889 /*
1890 * this is performed after the cancel_work calls and the associated
1891 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1892 * get executed before all these vars have been reset.
1893 */
1894 wl->flags = 0;
1895
1896 wl->tx_blocks_freed = 0;
1897
1898 for (i = 0; i < NUM_TX_QUEUES; i++) {
1899 wl->tx_pkts_freed[i] = 0;
1900 wl->tx_allocated_pkts[i] = 0;
1901 }
1902
1903 wl1271_debugfs_reset(wl);
1904
1905 kfree(wl->fw_status_1);
1906 wl->fw_status_1 = NULL;
1907 wl->fw_status_2 = NULL;
1908 kfree(wl->tx_res_if);
1909 wl->tx_res_if = NULL;
1910 kfree(wl->target_mem_map);
1911 wl->target_mem_map = NULL;
1912
1913 /*
1914 * FW channels must be re-calibrated after recovery,
1915 * clear the last Reg-Domain channel configuration.
1916 */
1917 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1918 }
1919
1920 static void wlcore_op_stop(struct ieee80211_hw *hw)
1921 {
1922 struct wl1271 *wl = hw->priv;
1923
1924 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1925
1926 mutex_lock(&wl->mutex);
1927
1928 wlcore_op_stop_locked(wl);
1929
1930 mutex_unlock(&wl->mutex);
1931 }
1932
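/*
 * Delayed work: if the channel switch is still marked as in progress when
 * this fires, report the switch as failed to mac80211 and tell the FW to
 * stop it.
 */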
1933 static void wlcore_channel_switch_work(struct work_struct *work)
1934 {
1935 struct delayed_work *dwork;
1936 struct wl1271 *wl;
1937 struct ieee80211_vif *vif;
1938 struct wl12xx_vif *wlvif;
1939 int ret;
1940
1941 dwork = container_of(work, struct delayed_work, work);
1942 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1943 wl = wlvif->wl;
1944
1945 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1946
1947 mutex_lock(&wl->mutex);
1948
1949 if (unlikely(wl->state != WLCORE_STATE_ON))
1950 goto out;
1951
1952 /* check the channel switch is still ongoing */
1953 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1954 goto out;
1955
1956 vif = wl12xx_wlvif_to_vif(wlvif);
1957 ieee80211_chswitch_done(vif, false);
1958
1959 ret = wl1271_ps_elp_wakeup(wl);
1960 if (ret < 0)
1961 goto out;
1962
1963 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1964
1965 wl1271_ps_elp_sleep(wl);
1966 out:
1967 mutex_unlock(&wl->mutex);
1968 }
1969
1970 static void wlcore_connection_loss_work(struct work_struct *work)
1971 {
1972 struct delayed_work *dwork;
1973 struct wl1271 *wl;
1974 struct ieee80211_vif *vif;
1975 struct wl12xx_vif *wlvif;
1976
1977 dwork = container_of(work, struct delayed_work, work);
1978 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1979 wl = wlvif->wl;
1980
1981 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1982
1983 mutex_lock(&wl->mutex);
1984
1985 if (unlikely(wl->state != WLCORE_STATE_ON))
1986 goto out;
1987
1988 /* Call mac80211 connection loss */
1989 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1990 goto out;
1991
1992 vif = wl12xx_wlvif_to_vif(wlvif);
1993 ieee80211_connection_loss(vif);
1994 out:
1995 mutex_unlock(&wl->mutex);
1996 }
1997
1998 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1999 {
2000 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2001 WL12XX_MAX_RATE_POLICIES);
2002 if (policy >= WL12XX_MAX_RATE_POLICIES)
2003 return -EBUSY;
2004
2005 __set_bit(policy, wl->rate_policies_map);
2006 *idx = policy;
2007 return 0;
2008 }
2009
2010 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2011 {
2012 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2013 return;
2014
2015 __clear_bit(*idx, wl->rate_policies_map);
2016 *idx = WL12XX_MAX_RATE_POLICIES;
2017 }
2018
2019 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2020 {
2021 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2022 WLCORE_MAX_KLV_TEMPLATES);
2023 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2024 return -EBUSY;
2025
2026 __set_bit(policy, wl->klv_templates_map);
2027 *idx = policy;
2028 return 0;
2029 }
2030
2031 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2032 {
2033 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2034 return;
2035
2036 __clear_bit(*idx, wl->klv_templates_map);
2037 *idx = WLCORE_MAX_KLV_TEMPLATES;
2038 }
2039
2040 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2041 {
2042 switch (wlvif->bss_type) {
2043 case BSS_TYPE_AP_BSS:
2044 if (wlvif->p2p)
2045 return WL1271_ROLE_P2P_GO;
2046 else
2047 return WL1271_ROLE_AP;
2048
2049 case BSS_TYPE_STA_BSS:
2050 if (wlvif->p2p)
2051 return WL1271_ROLE_P2P_CL;
2052 else
2053 return WL1271_ROLE_STA;
2054
2055 case BSS_TYPE_IBSS:
2056 return WL1271_ROLE_IBSS;
2057
2058 default:
2059 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2060 }
2061 return WL12XX_INVALID_ROLE_TYPE;
2062 }
2063
2064 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2065 {
2066 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2067 int i;
2068
2069 /* clear everything but the persistent data */
2070 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2071
2072 switch (ieee80211_vif_type_p2p(vif)) {
2073 case NL80211_IFTYPE_P2P_CLIENT:
2074 wlvif->p2p = 1;
2075 /* fall-through */
2076 case NL80211_IFTYPE_STATION:
2077 wlvif->bss_type = BSS_TYPE_STA_BSS;
2078 break;
2079 case NL80211_IFTYPE_ADHOC:
2080 wlvif->bss_type = BSS_TYPE_IBSS;
2081 break;
2082 case NL80211_IFTYPE_P2P_GO:
2083 wlvif->p2p = 1;
2084 /* fall-through */
2085 case NL80211_IFTYPE_AP:
2086 wlvif->bss_type = BSS_TYPE_AP_BSS;
2087 break;
2088 default:
2089 wlvif->bss_type = MAX_BSS_TYPE;
2090 return -EOPNOTSUPP;
2091 }
2092
2093 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2094 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2095 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2096
2097 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2098 wlvif->bss_type == BSS_TYPE_IBSS) {
2099 /* init sta/ibss data */
2100 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2101 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2102 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2103 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2104 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2105 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2106 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2107 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2108 } else {
2109 /* init ap data */
2110 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2111 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2112 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2113 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2114 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2115 wl12xx_allocate_rate_policy(wl,
2116 &wlvif->ap.ucast_rate_idx[i]);
2117 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2118 /*
2119 * TODO: check if basic_rate shouldn't be
2120 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2121 * instead (the same thing for STA above).
2122 */
2123 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2124 /* TODO: this seems to be used only for STA, check it */
2125 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2126 }
2127
2128 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2129 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2130 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2131
2132 /*
2133 * mac80211 configures some values globally, while we treat them
2134 * per-interface. thus, on init, we have to copy them from wl
2135 */
2136 wlvif->band = wl->band;
2137 wlvif->channel = wl->channel;
2138 wlvif->power_level = wl->power_level;
2139 wlvif->channel_type = wl->channel_type;
2140
2141 INIT_WORK(&wlvif->rx_streaming_enable_work,
2142 wl1271_rx_streaming_enable_work);
2143 INIT_WORK(&wlvif->rx_streaming_disable_work,
2144 wl1271_rx_streaming_disable_work);
2145 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2146 wlcore_channel_switch_work);
2147 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2148 wlcore_connection_loss_work);
2149 INIT_LIST_HEAD(&wlvif->list);
2150
2151 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2152 (unsigned long) wlvif);
2153 return 0;
2154 }
2155
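/*
 * Power up the chip, boot the firmware and run the HW init sequence,
 * retrying up to WL1271_BOOT_RETRIES times. On success the driver state
 * moves to WLCORE_STATE_ON; returns whether the boot succeeded.
 */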
2156 static bool wl12xx_init_fw(struct wl1271 *wl)
2157 {
2158 int retries = WL1271_BOOT_RETRIES;
2159 bool booted = false;
2160 struct wiphy *wiphy = wl->hw->wiphy;
2161 int ret;
2162
2163 while (retries) {
2164 retries--;
2165 ret = wl12xx_chip_wakeup(wl, false);
2166 if (ret < 0)
2167 goto power_off;
2168
2169 ret = wl->ops->boot(wl);
2170 if (ret < 0)
2171 goto power_off;
2172
2173 ret = wl1271_hw_init(wl);
2174 if (ret < 0)
2175 goto irq_disable;
2176
2177 booted = true;
2178 break;
2179
2180 irq_disable:
2181 mutex_unlock(&wl->mutex);
2182 /* Unlocking the mutex in the middle of handling is
2183 inherently unsafe. In this case we deem it safe to do,
2184 because we need to let any possibly pending IRQ out of
2185 the system (and while we are WLCORE_STATE_OFF the IRQ
2186 work function will not do anything.) Also, any other
2187 possible concurrent operations will fail due to the
2188 current state, hence the wl1271 struct should be safe. */
2189 wlcore_disable_interrupts(wl);
2190 wl1271_flush_deferred_work(wl);
2191 cancel_work_sync(&wl->netstack_work);
2192 mutex_lock(&wl->mutex);
2193 power_off:
2194 wl1271_power_off(wl);
2195 }
2196
2197 if (!booted) {
2198 wl1271_error("firmware boot failed despite %d retries",
2199 WL1271_BOOT_RETRIES);
2200 goto out;
2201 }
2202
2203 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2204
2205 /* update hw/fw version info in wiphy struct */
2206 wiphy->hw_version = wl->chip.id;
2207 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2208 sizeof(wiphy->fw_version));
2209
2210 /*
2211 * Now we know if 11a is supported (info from the NVS), so disable
2212 * 11a channels if not supported
2213 */
2214 if (!wl->enable_11a)
2215 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2216
2217 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2218 wl->enable_11a ? "" : "not ");
2219
2220 wl->state = WLCORE_STATE_ON;
2221 out:
2222 return booted;
2223 }
2224
2225 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2226 {
2227 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2228 }
2229
2230 /*
2231 * Check whether a fw switch (i.e. moving from one loaded
2232 * fw to another) is needed. This function is also responsible
2233 * for updating wl->last_vif_count, so it must be called before
2234 * loading a non-plt fw, so that the correct (single-role/multi-role)
2235 * fw will be used.
2236 */
2237 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2238 struct vif_counter_data vif_counter_data,
2239 bool add)
2240 {
2241 enum wl12xx_fw_type current_fw = wl->fw_type;
2242 u8 vif_count = vif_counter_data.counter;
2243
2244 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2245 return false;
2246
2247 /* increase the vif count if this is a new vif */
2248 if (add && !vif_counter_data.cur_vif_running)
2249 vif_count++;
2250
2251 wl->last_vif_count = vif_count;
2252
2253 /* no need for fw change if the device is OFF */
2254 if (wl->state == WLCORE_STATE_OFF)
2255 return false;
2256
2257 /* no need for fw change if a single fw is used */
2258 if (!wl->mr_fw_name)
2259 return false;
2260
2261 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2262 return true;
2263 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2264 return true;
2265
2266 return false;
2267 }
2268
2269 /*
2270 * Enter "forced psm". Make sure the sta is in psm against the ap,
2271 * to make the fw switch a bit more disconnection-persistent.
2272 */
2273 static void wl12xx_force_active_psm(struct wl1271 *wl)
2274 {
2275 struct wl12xx_vif *wlvif;
2276
2277 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2278 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2279 }
2280 }
2281
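/*
 * mac80211 add_interface callback: initialize the per-vif data, trigger an
 * intended recovery when a fw type switch is needed, boot the fw if it isn't
 * up yet, and enable the matching fw role.
 */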
2282 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2283 struct ieee80211_vif *vif)
2284 {
2285 struct wl1271 *wl = hw->priv;
2286 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2287 struct vif_counter_data vif_count;
2288 int ret = 0;
2289 u8 role_type;
2290 bool booted = false;
2291
2292 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2293 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2294
2295 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2296 ieee80211_vif_type_p2p(vif), vif->addr);
2297
2298 wl12xx_get_vif_count(hw, vif, &vif_count);
2299
2300 mutex_lock(&wl->mutex);
2301 ret = wl1271_ps_elp_wakeup(wl);
2302 if (ret < 0)
2303 goto out_unlock;
2304
2305 /*
2306 	 * in some rare HW recovery corner cases it's possible to get here
2307 	 * before __wl1271_op_remove_interface has completed, so
2308 * opt out if that is the case.
2309 */
2310 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2311 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2312 ret = -EBUSY;
2313 goto out;
2314 }
2315
2316
2317 ret = wl12xx_init_vif_data(wl, vif);
2318 if (ret < 0)
2319 goto out;
2320
2321 wlvif->wl = wl;
2322 role_type = wl12xx_get_role_type(wl, wlvif);
2323 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2324 ret = -EINVAL;
2325 goto out;
2326 }
2327
2328 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2329 wl12xx_force_active_psm(wl);
2330 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2331 mutex_unlock(&wl->mutex);
2332 wl1271_recovery_work(&wl->recovery_work);
2333 return 0;
2334 }
2335
2336 /*
2337 	 * TODO: once the nvs issue is solved, move this block
2338 	 * to start(), and make sure the driver is ON here.
2339 */
2340 if (wl->state == WLCORE_STATE_OFF) {
2341 /*
2342 * we still need this in order to configure the fw
2343 * while uploading the nvs
2344 */
2345 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2346
2347 booted = wl12xx_init_fw(wl);
2348 if (!booted) {
2349 ret = -EINVAL;
2350 goto out;
2351 }
2352 }
2353
2354 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2355 role_type, &wlvif->role_id);
2356 if (ret < 0)
2357 goto out;
2358
2359 ret = wl1271_init_vif_specific(wl, vif);
2360 if (ret < 0)
2361 goto out;
2362
2363 list_add(&wlvif->list, &wl->wlvif_list);
2364 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2365
2366 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2367 wl->ap_count++;
2368 else
2369 wl->sta_count++;
2370 out:
2371 wl1271_ps_elp_sleep(wl);
2372 out_unlock:
2373 mutex_unlock(&wl->mutex);
2374
2375 return ret;
2376 }
2377
2378 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2379 struct ieee80211_vif *vif,
2380 bool reset_tx_queues)
2381 {
2382 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2383 int i, ret;
2384 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2385
2386 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2387
2388 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2389 return;
2390
2391 /* because of hardware recovery, we may get here twice */
2392 if (wl->state == WLCORE_STATE_OFF)
2393 return;
2394
2395 wl1271_info("down");
2396
2397 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2398 wl->scan_wlvif == wlvif) {
2399 /*
2400 * Rearm the tx watchdog just before idling scan. This
2401 * prevents just-finished scans from triggering the watchdog
2402 */
2403 wl12xx_rearm_tx_watchdog_locked(wl);
2404
2405 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2406 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2407 wl->scan_wlvif = NULL;
2408 wl->scan.req = NULL;
2409 ieee80211_scan_completed(wl->hw, true);
2410 }
2411
2412 if (wl->sched_vif == wlvif) {
2413 ieee80211_sched_scan_stopped(wl->hw);
2414 wl->sched_vif = NULL;
2415 }
2416
2417 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2418 /* disable active roles */
2419 ret = wl1271_ps_elp_wakeup(wl);
2420 if (ret < 0)
2421 goto deinit;
2422
2423 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2424 wlvif->bss_type == BSS_TYPE_IBSS) {
2425 if (wl12xx_dev_role_started(wlvif))
2426 wl12xx_stop_dev(wl, wlvif);
2427 }
2428
2429 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2430 if (ret < 0)
2431 goto deinit;
2432
2433 wl1271_ps_elp_sleep(wl);
2434 }
2435 deinit:
2436 /* clear all hlids (except system_hlid) */
2437 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2438
2439 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2440 wlvif->bss_type == BSS_TYPE_IBSS) {
2441 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2442 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2443 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2444 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2445 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2446 } else {
2447 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2448 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2449 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2450 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2451 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2452 wl12xx_free_rate_policy(wl,
2453 &wlvif->ap.ucast_rate_idx[i]);
2454 wl1271_free_ap_keys(wl, wlvif);
2455 }
2456
2457 dev_kfree_skb(wlvif->probereq);
2458 wlvif->probereq = NULL;
2459 wl12xx_tx_reset_wlvif(wl, wlvif);
2460 if (wl->last_wlvif == wlvif)
2461 wl->last_wlvif = NULL;
2462 list_del(&wlvif->list);
2463 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2464 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2465 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2466
2467 if (is_ap)
2468 wl->ap_count--;
2469 else
2470 wl->sta_count--;
2471
2472 /*
2473 	 * Last AP went down while stations remain: configure sleep auth
2474 	 * according to the STA setting. Don't do this on unintended recovery.
2475 */
2476 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2477 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2478 goto unlock;
2479
2480 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2481 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2482 /* Configure for power according to debugfs */
2483 if (sta_auth != WL1271_PSM_ILLEGAL)
2484 wl1271_acx_sleep_auth(wl, sta_auth);
2485 /* Configure for ELP power saving */
2486 else
2487 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2488 }
2489
2490 unlock:
2491 mutex_unlock(&wl->mutex);
2492
2493 del_timer_sync(&wlvif->rx_streaming_timer);
2494 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2495 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2496 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2497
2498 mutex_lock(&wl->mutex);
2499 }
2500
2501 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2502 struct ieee80211_vif *vif)
2503 {
2504 struct wl1271 *wl = hw->priv;
2505 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2506 struct wl12xx_vif *iter;
2507 struct vif_counter_data vif_count;
2508
2509 wl12xx_get_vif_count(hw, vif, &vif_count);
2510 mutex_lock(&wl->mutex);
2511
2512 if (wl->state == WLCORE_STATE_OFF ||
2513 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2514 goto out;
2515
2516 /*
2517 * wl->vif can be null here if someone shuts down the interface
2518 * just when hardware recovery has been started.
2519 */
2520 wl12xx_for_each_wlvif(wl, iter) {
2521 if (iter != wlvif)
2522 continue;
2523
2524 __wl1271_op_remove_interface(wl, vif, true);
2525 break;
2526 }
2527 WARN_ON(iter != wlvif);
2528 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2529 wl12xx_force_active_psm(wl);
2530 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2531 wl12xx_queue_recovery_work(wl);
2532 }
2533 out:
2534 mutex_unlock(&wl->mutex);
2535 }
2536
2537 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2538 struct ieee80211_vif *vif,
2539 enum nl80211_iftype new_type, bool p2p)
2540 {
2541 struct wl1271 *wl = hw->priv;
2542 int ret;
2543
2544 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2545 wl1271_op_remove_interface(hw, vif);
2546
2547 vif->type = new_type;
2548 vif->p2p = p2p;
2549 ret = wl1271_op_add_interface(hw, vif);
2550
2551 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2552 return ret;
2553 }
2554
2555 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2556 {
2557 int ret;
2558 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2559
2560 /*
2561 	 * One of the side effects of the JOIN command is that it clears
2562 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2563 * to a WPA/WPA2 access point will therefore kill the data-path.
2564 * Currently the only valid scenario for JOIN during association
2565 * is on roaming, in which case we will also be given new keys.
2566 * Keep the below message for now, unless it starts bothering
2567 * users who really like to roam a lot :)
2568 */
2569 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2570 wl1271_info("JOIN while associated.");
2571
2572 /* clear encryption type */
2573 wlvif->encryption_type = KEY_NONE;
2574
2575 if (is_ibss)
2576 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2577 else {
2578 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2579 /*
2580 * TODO: this is an ugly workaround for wl12xx fw
2581 * bug - we are not able to tx/rx after the first
2582 * start_sta, so make dummy start+stop calls,
2583 * and then call start_sta again.
2584 * this should be fixed in the fw.
2585 */
2586 wl12xx_cmd_role_start_sta(wl, wlvif);
2587 wl12xx_cmd_role_stop_sta(wl, wlvif);
2588 }
2589
2590 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2591 }
2592
2593 return ret;
2594 }
2595
2596 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2597 int offset)
2598 {
2599 u8 ssid_len;
2600 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2601 skb->len - offset);
2602
2603 if (!ptr) {
2604 wl1271_error("No SSID in IEs!");
2605 return -ENOENT;
2606 }
2607
2608 ssid_len = ptr[1];
2609 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2610 wl1271_error("SSID is too long!");
2611 return -EINVAL;
2612 }
2613
2614 wlvif->ssid_len = ssid_len;
2615 memcpy(wlvif->ssid, ptr+2, ssid_len);
2616 return 0;
2617 }
2618
2619 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2620 {
2621 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2622 struct sk_buff *skb;
2623 int ieoffset;
2624
2625 /* we currently only support setting the ssid from the ap probe req */
2626 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2627 return -EINVAL;
2628
2629 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2630 if (!skb)
2631 return -EINVAL;
2632
2633 ieoffset = offsetof(struct ieee80211_mgmt,
2634 u.probe_req.variable);
2635 wl1271_ssid_set(wlvif, skb, ieoffset);
2636 dev_kfree_skb(skb);
2637
2638 return 0;
2639 }
2640
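/*
 * Apply the association parameters (aid, beacon interval, wmm) and set up
 * the fw connection maintenance features: ps-poll and probe-req templates,
 * the connection monitor and the keep-alive mechanism.
 */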
2641 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2642 struct ieee80211_bss_conf *bss_conf,
2643 u32 sta_rate_set)
2644 {
2645 int ieoffset;
2646 int ret;
2647
2648 wlvif->aid = bss_conf->aid;
2649 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2650 wlvif->beacon_int = bss_conf->beacon_int;
2651 wlvif->wmm_enabled = bss_conf->qos;
2652
2653 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2654
2655 /*
2656 * with wl1271, we don't need to update the
2657 * beacon_int and dtim_period, because the firmware
2658 	 * updates them by itself when the first beacon is
2659 * received after a join.
2660 */
2661 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2662 if (ret < 0)
2663 return ret;
2664
2665 /*
2666 * Get a template for hardware connection maintenance
2667 */
2668 dev_kfree_skb(wlvif->probereq);
2669 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2670 wlvif,
2671 NULL);
2672 ieoffset = offsetof(struct ieee80211_mgmt,
2673 u.probe_req.variable);
2674 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2675
2676 /* enable the connection monitoring feature */
2677 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2678 if (ret < 0)
2679 return ret;
2680
2681 /*
2682 	 * The join command disables the keep-alive mode, shuts down its process,
2683 	 * and also clears the template config, so we need to reset it all after
2684 * the join. The acx_aid starts the keep-alive process, and the order
2685 * of the commands below is relevant.
2686 */
2687 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2688 if (ret < 0)
2689 return ret;
2690
2691 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2692 if (ret < 0)
2693 return ret;
2694
2695 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2696 if (ret < 0)
2697 return ret;
2698
2699 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2700 wlvif->sta.klv_template_id,
2701 ACX_KEEP_ALIVE_TPL_VALID);
2702 if (ret < 0)
2703 return ret;
2704
2705 /*
2706 * The default fw psm configuration is AUTO, while mac80211 default
2707 * setting is off (ACTIVE), so sync the fw with the correct value.
2708 */
2709 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2710 if (ret < 0)
2711 return ret;
2712
2713 if (sta_rate_set) {
2714 wlvif->rate_set =
2715 wl1271_tx_enabled_rates_get(wl,
2716 sta_rate_set,
2717 wlvif->band);
2718 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2719 if (ret < 0)
2720 return ret;
2721 }
2722
2723 return ret;
2724 }
2725
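/*
 * Tear down the association state: free the probe-req template, disable the
 * connection monitor and keep-alive, stop any pending channel switch and
 * reset the TX security counters.
 */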
2726 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2727 {
2728 int ret;
2729 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2730
2731 	/* make sure we are associated (sta) */
2732 if (sta &&
2733 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2734 return false;
2735
2736 /* make sure we are joined (ibss) */
2737 if (!sta &&
2738 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2739 return false;
2740
2741 if (sta) {
2742 /* use defaults when not associated */
2743 wlvif->aid = 0;
2744
2745 /* free probe-request template */
2746 dev_kfree_skb(wlvif->probereq);
2747 wlvif->probereq = NULL;
2748
2749 /* disable connection monitor features */
2750 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2751 if (ret < 0)
2752 return ret;
2753
2754 /* Disable the keep-alive feature */
2755 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2756 if (ret < 0)
2757 return ret;
2758 }
2759
2760 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2761 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2762
2763 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2764 ieee80211_chswitch_done(vif, false);
2765 cancel_delayed_work(&wlvif->channel_switch_work);
2766 }
2767
2768 /* invalidate keep-alive template */
2769 wl1271_acx_keep_alive_config(wl, wlvif,
2770 wlvif->sta.klv_template_id,
2771 ACX_KEEP_ALIVE_TPL_INVALID);
2772
2773 /* reset TX security counters on a clean disconnect */
2774 wlvif->tx_security_last_seq_lsb = 0;
2775 wlvif->tx_security_seq = 0;
2776
2777 return 0;
2778 }
2779
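/* Revert the basic and active rate sets to the default rates of the vif's current band. */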
2780 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2781 {
2782 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2783 wlvif->rate_set = wlvif->basic_rate_set;
2784 }
2785
2786 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2787 struct ieee80211_conf *conf, u32 changed)
2788 {
2789 int ret;
2790
2791 if (conf->power_level != wlvif->power_level) {
2792 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2793 if (ret < 0)
2794 return ret;
2795
2796 wlvif->power_level = conf->power_level;
2797 }
2798
2799 return 0;
2800 }
2801
2802 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2803 {
2804 struct wl1271 *wl = hw->priv;
2805 struct wl12xx_vif *wlvif;
2806 struct ieee80211_conf *conf = &hw->conf;
2807 int ret = 0;
2808
2809 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2810 " changed 0x%x",
2811 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2812 conf->power_level,
2813 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2814 changed);
2815
2816 mutex_lock(&wl->mutex);
2817
2818 if (changed & IEEE80211_CONF_CHANGE_POWER)
2819 wl->power_level = conf->power_level;
2820
2821 if (unlikely(wl->state != WLCORE_STATE_ON))
2822 goto out;
2823
2824 ret = wl1271_ps_elp_wakeup(wl);
2825 if (ret < 0)
2826 goto out;
2827
2828 /* configure each interface */
2829 wl12xx_for_each_wlvif(wl, wlvif) {
2830 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2831 if (ret < 0)
2832 goto out_sleep;
2833 }
2834
2835 out_sleep:
2836 wl1271_ps_elp_sleep(wl);
2837
2838 out:
2839 mutex_unlock(&wl->mutex);
2840
2841 return ret;
2842 }
2843
2844 struct wl1271_filter_params {
2845 bool enabled;
2846 int mc_list_length;
2847 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2848 };
2849
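/*
 * mac80211 prepare_multicast callback: snapshot the multicast list into a
 * wl1271_filter_params struct, returned as an opaque u64 cookie that is
 * handed back to configure_filter().
 */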
2850 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2851 struct netdev_hw_addr_list *mc_list)
2852 {
2853 struct wl1271_filter_params *fp;
2854 struct netdev_hw_addr *ha;
2855
2856 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2857 if (!fp) {
2858 wl1271_error("Out of memory setting filters.");
2859 return 0;
2860 }
2861
2862 /* update multicast filtering parameters */
2863 fp->mc_list_length = 0;
2864 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2865 fp->enabled = false;
2866 } else {
2867 fp->enabled = true;
2868 netdev_hw_addr_list_for_each(ha, mc_list) {
2869 memcpy(fp->mc_list[fp->mc_list_length],
2870 ha->addr, ETH_ALEN);
2871 fp->mc_list_length++;
2872 }
2873 }
2874
2875 return (u64)(unsigned long)fp;
2876 }
2877
2878 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2879 FIF_ALLMULTI | \
2880 FIF_FCSFAIL | \
2881 FIF_BCN_PRBRESP_PROMISC | \
2882 FIF_CONTROL | \
2883 FIF_OTHER_BSS)
2884
2885 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2886 unsigned int changed,
2887 unsigned int *total, u64 multicast)
2888 {
2889 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2890 struct wl1271 *wl = hw->priv;
2891 struct wl12xx_vif *wlvif;
2892
2893 int ret;
2894
2895 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2896 " total %x", changed, *total);
2897
2898 mutex_lock(&wl->mutex);
2899
2900 *total &= WL1271_SUPPORTED_FILTERS;
2901 changed &= WL1271_SUPPORTED_FILTERS;
2902
2903 if (unlikely(wl->state != WLCORE_STATE_ON))
2904 goto out;
2905
2906 ret = wl1271_ps_elp_wakeup(wl);
2907 if (ret < 0)
2908 goto out;
2909
2910 wl12xx_for_each_wlvif(wl, wlvif) {
2911 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2912 if (*total & FIF_ALLMULTI)
2913 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2914 false,
2915 NULL, 0);
2916 else if (fp)
2917 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2918 fp->enabled,
2919 fp->mc_list,
2920 fp->mc_list_length);
2921 if (ret < 0)
2922 goto out_sleep;
2923 }
2924 }
2925
2926 /*
2927 	 * the fw doesn't provide an api to configure the filters. instead,
2928 	 * the filter configuration is derived from the active roles / ROC
2929 	 * state.
2930 */
2931
2932 out_sleep:
2933 wl1271_ps_elp_sleep(wl);
2934
2935 out:
2936 mutex_unlock(&wl->mutex);
2937 kfree(fp);
2938 }
2939
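/*
 * Remember a key that was set before the AP role was started; the recorded
 * keys are pushed to the fw later by wl1271_ap_init_hwenc().
 */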
2940 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2941 u8 id, u8 key_type, u8 key_size,
2942 const u8 *key, u8 hlid, u32 tx_seq_32,
2943 u16 tx_seq_16)
2944 {
2945 struct wl1271_ap_key *ap_key;
2946 int i;
2947
2948 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2949
2950 if (key_size > MAX_KEY_SIZE)
2951 return -EINVAL;
2952
2953 /*
2954 * Find next free entry in ap_keys. Also check we are not replacing
2955 * an existing key.
2956 */
2957 for (i = 0; i < MAX_NUM_KEYS; i++) {
2958 if (wlvif->ap.recorded_keys[i] == NULL)
2959 break;
2960
2961 if (wlvif->ap.recorded_keys[i]->id == id) {
2962 wl1271_warning("trying to record key replacement");
2963 return -EINVAL;
2964 }
2965 }
2966
2967 if (i == MAX_NUM_KEYS)
2968 return -EBUSY;
2969
2970 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2971 if (!ap_key)
2972 return -ENOMEM;
2973
2974 ap_key->id = id;
2975 ap_key->key_type = key_type;
2976 ap_key->key_size = key_size;
2977 memcpy(ap_key->key, key, key_size);
2978 ap_key->hlid = hlid;
2979 ap_key->tx_seq_32 = tx_seq_32;
2980 ap_key->tx_seq_16 = tx_seq_16;
2981
2982 wlvif->ap.recorded_keys[i] = ap_key;
2983 return 0;
2984 }
2985
2986 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2987 {
2988 int i;
2989
2990 for (i = 0; i < MAX_NUM_KEYS; i++) {
2991 kfree(wlvif->ap.recorded_keys[i]);
2992 wlvif->ap.recorded_keys[i] = NULL;
2993 }
2994 }
2995
2996 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2997 {
2998 int i, ret = 0;
2999 struct wl1271_ap_key *key;
3000 bool wep_key_added = false;
3001
3002 for (i = 0; i < MAX_NUM_KEYS; i++) {
3003 u8 hlid;
3004 if (wlvif->ap.recorded_keys[i] == NULL)
3005 break;
3006
3007 key = wlvif->ap.recorded_keys[i];
3008 hlid = key->hlid;
3009 if (hlid == WL12XX_INVALID_LINK_ID)
3010 hlid = wlvif->ap.bcast_hlid;
3011
3012 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3013 key->id, key->key_type,
3014 key->key_size, key->key,
3015 hlid, key->tx_seq_32,
3016 key->tx_seq_16);
3017 if (ret < 0)
3018 goto out;
3019
3020 if (key->key_type == KEY_WEP)
3021 wep_key_added = true;
3022 }
3023
3024 if (wep_key_added) {
3025 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3026 wlvif->ap.bcast_hlid);
3027 if (ret < 0)
3028 goto out;
3029 }
3030
3031 out:
3032 wl1271_free_ap_keys(wl, wlvif);
3033 return ret;
3034 }
3035
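/*
 * Program a key into the fw, handling the AP case (per-hlid or broadcast,
 * possibly recorded for later if the AP isn't started yet) and the STA case
 * (per-address) separately.
 */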
3036 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3037 u16 action, u8 id, u8 key_type,
3038 u8 key_size, const u8 *key, u32 tx_seq_32,
3039 u16 tx_seq_16, struct ieee80211_sta *sta)
3040 {
3041 int ret;
3042 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3043
3044 if (is_ap) {
3045 struct wl1271_station *wl_sta;
3046 u8 hlid;
3047
3048 if (sta) {
3049 wl_sta = (struct wl1271_station *)sta->drv_priv;
3050 hlid = wl_sta->hlid;
3051 } else {
3052 hlid = wlvif->ap.bcast_hlid;
3053 }
3054
3055 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3056 /*
3057 * We do not support removing keys after AP shutdown.
3058 * Pretend we do to make mac80211 happy.
3059 */
3060 if (action != KEY_ADD_OR_REPLACE)
3061 return 0;
3062
3063 ret = wl1271_record_ap_key(wl, wlvif, id,
3064 key_type, key_size,
3065 key, hlid, tx_seq_32,
3066 tx_seq_16);
3067 } else {
3068 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3069 id, key_type, key_size,
3070 key, hlid, tx_seq_32,
3071 tx_seq_16);
3072 }
3073
3074 if (ret < 0)
3075 return ret;
3076 } else {
3077 const u8 *addr;
3078 static const u8 bcast_addr[ETH_ALEN] = {
3079 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3080 };
3081
3082 addr = sta ? sta->addr : bcast_addr;
3083
3084 if (is_zero_ether_addr(addr)) {
3085 			/* We don't support TX-only encryption */
3086 return -EOPNOTSUPP;
3087 }
3088
3089 		/* The wl1271 does not allow removing unicast keys - they
3090 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3091 		   request silently, as we don't want mac80211 to emit
3092 		   an error message. */
3093 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3094 return 0;
3095
3096 /* don't remove key if hlid was already deleted */
3097 if (action == KEY_REMOVE &&
3098 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3099 return 0;
3100
3101 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3102 id, key_type, key_size,
3103 key, addr, tx_seq_32,
3104 tx_seq_16);
3105 if (ret < 0)
3106 return ret;
3107
3108 /* the default WEP key needs to be configured at least once */
3109 if (key_type == KEY_WEP) {
3110 ret = wl12xx_cmd_set_default_wep_key(wl,
3111 wlvif->default_key,
3112 wlvif->sta.hlid);
3113 if (ret < 0)
3114 return ret;
3115 }
3116 }
3117
3118 return 0;
3119 }
3120
3121 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3122 struct ieee80211_vif *vif,
3123 struct ieee80211_sta *sta,
3124 struct ieee80211_key_conf *key_conf)
3125 {
3126 struct wl1271 *wl = hw->priv;
3127 int ret;
3128 bool might_change_spare =
3129 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3130 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3131
3132 if (might_change_spare) {
3133 /*
3134 * stop the queues and flush to ensure the next packets are
3135 * in sync with FW spare block accounting
3136 */
3137 mutex_lock(&wl->mutex);
3138 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3139 mutex_unlock(&wl->mutex);
3140
3141 wl1271_tx_flush(wl);
3142 }
3143
3144 mutex_lock(&wl->mutex);
3145
3146 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3147 ret = -EAGAIN;
3148 goto out_wake_queues;
3149 }
3150
3151 ret = wl1271_ps_elp_wakeup(wl);
3152 if (ret < 0)
3153 goto out_wake_queues;
3154
3155 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3156
3157 wl1271_ps_elp_sleep(wl);
3158
3159 out_wake_queues:
3160 if (might_change_spare)
3161 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3162
3163 mutex_unlock(&wl->mutex);
3164
3165 return ret;
3166 }
3167
3168 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3169 struct ieee80211_vif *vif,
3170 struct ieee80211_sta *sta,
3171 struct ieee80211_key_conf *key_conf)
3172 {
3173 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3174 int ret;
3175 u32 tx_seq_32 = 0;
3176 u16 tx_seq_16 = 0;
3177 u8 key_type;
3178
3179 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3180
3181 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3182 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3183 key_conf->cipher, key_conf->keyidx,
3184 key_conf->keylen, key_conf->flags);
3185 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3186
3187 switch (key_conf->cipher) {
3188 case WLAN_CIPHER_SUITE_WEP40:
3189 case WLAN_CIPHER_SUITE_WEP104:
3190 key_type = KEY_WEP;
3191
3192 key_conf->hw_key_idx = key_conf->keyidx;
3193 break;
3194 case WLAN_CIPHER_SUITE_TKIP:
3195 key_type = KEY_TKIP;
3196
3197 key_conf->hw_key_idx = key_conf->keyidx;
3198 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3199 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3200 break;
3201 case WLAN_CIPHER_SUITE_CCMP:
3202 key_type = KEY_AES;
3203
3204 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3205 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3206 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3207 break;
3208 case WL1271_CIPHER_SUITE_GEM:
3209 key_type = KEY_GEM;
3210 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3211 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3212 break;
3213 default:
3214 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3215
3216 return -EOPNOTSUPP;
3217 }
3218
3219 switch (cmd) {
3220 case SET_KEY:
3221 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3222 key_conf->keyidx, key_type,
3223 key_conf->keylen, key_conf->key,
3224 tx_seq_32, tx_seq_16, sta);
3225 if (ret < 0) {
3226 wl1271_error("Could not add or replace key");
3227 return ret;
3228 }
3229
3230 /*
3231 		 * reconfigure the arp response if the unicast (or common)
3232 		 * encryption key type has changed
3233 */
3234 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3235 (sta || key_type == KEY_WEP) &&
3236 wlvif->encryption_type != key_type) {
3237 wlvif->encryption_type = key_type;
3238 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3239 if (ret < 0) {
3240 wl1271_warning("build arp rsp failed: %d", ret);
3241 return ret;
3242 }
3243 }
3244 break;
3245
3246 case DISABLE_KEY:
3247 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3248 key_conf->keyidx, key_type,
3249 key_conf->keylen, key_conf->key,
3250 0, 0, sta);
3251 if (ret < 0) {
3252 wl1271_error("Could not remove key");
3253 return ret;
3254 }
3255 break;
3256
3257 default:
3258 wl1271_error("Unsupported key cmd 0x%x", cmd);
3259 return -EOPNOTSUPP;
3260 }
3261
3262 return ret;
3263 }
3264 EXPORT_SYMBOL_GPL(wlcore_set_key);
3265
3266 void wlcore_regdomain_config(struct wl1271 *wl)
3267 {
3268 int ret;
3269
3270 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3271 return;
3272
3273 mutex_lock(&wl->mutex);
3274 ret = wl1271_ps_elp_wakeup(wl);
3275 if (ret < 0)
3276 goto out;
3277
3278 ret = wlcore_cmd_regdomain_config_locked(wl);
3279 if (ret < 0) {
3280 wl12xx_queue_recovery_work(wl);
3281 goto out;
3282 }
3283
3284 wl1271_ps_elp_sleep(wl);
3285 out:
3286 mutex_unlock(&wl->mutex);
3287 }
3288
3289 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3290 struct ieee80211_vif *vif,
3291 struct cfg80211_scan_request *req)
3292 {
3293 struct wl1271 *wl = hw->priv;
3294 int ret;
3295 u8 *ssid = NULL;
3296 size_t len = 0;
3297
3298 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3299
3300 if (req->n_ssids) {
3301 ssid = req->ssids[0].ssid;
3302 len = req->ssids[0].ssid_len;
3303 }
3304
3305 mutex_lock(&wl->mutex);
3306
3307 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3308 /*
3309 * We cannot return -EBUSY here because cfg80211 will expect
3310 * a call to ieee80211_scan_completed if we do - in this case
3311 * there won't be any call.
3312 */
3313 ret = -EAGAIN;
3314 goto out;
3315 }
3316
3317 ret = wl1271_ps_elp_wakeup(wl);
3318 if (ret < 0)
3319 goto out;
3320
3321 /* fail if there is any role in ROC */
3322 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3323 /* don't allow scanning right now */
3324 ret = -EBUSY;
3325 goto out_sleep;
3326 }
3327
3328 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3329 out_sleep:
3330 wl1271_ps_elp_sleep(wl);
3331 out:
3332 mutex_unlock(&wl->mutex);
3333
3334 return ret;
3335 }
3336
3337 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3338 struct ieee80211_vif *vif)
3339 {
3340 struct wl1271 *wl = hw->priv;
3341 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3342 int ret;
3343
3344 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3345
3346 mutex_lock(&wl->mutex);
3347
3348 if (unlikely(wl->state != WLCORE_STATE_ON))
3349 goto out;
3350
3351 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3352 goto out;
3353
3354 ret = wl1271_ps_elp_wakeup(wl);
3355 if (ret < 0)
3356 goto out;
3357
3358 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3359 ret = wl->ops->scan_stop(wl, wlvif);
3360 if (ret < 0)
3361 goto out_sleep;
3362 }
3363
3364 /*
3365 * Rearm the tx watchdog just before idling scan. This
3366 * prevents just-finished scans from triggering the watchdog
3367 */
3368 wl12xx_rearm_tx_watchdog_locked(wl);
3369
3370 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3371 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3372 wl->scan_wlvif = NULL;
3373 wl->scan.req = NULL;
3374 ieee80211_scan_completed(wl->hw, true);
3375
3376 out_sleep:
3377 wl1271_ps_elp_sleep(wl);
3378 out:
3379 mutex_unlock(&wl->mutex);
3380
3381 cancel_delayed_work_sync(&wl->scan_complete_work);
3382 }
3383
3384 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3385 struct ieee80211_vif *vif,
3386 struct cfg80211_sched_scan_request *req,
3387 struct ieee80211_sched_scan_ies *ies)
3388 {
3389 struct wl1271 *wl = hw->priv;
3390 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3391 int ret;
3392
3393 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3394
3395 mutex_lock(&wl->mutex);
3396
3397 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3398 ret = -EAGAIN;
3399 goto out;
3400 }
3401
3402 ret = wl1271_ps_elp_wakeup(wl);
3403 if (ret < 0)
3404 goto out;
3405
3406 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3407 if (ret < 0)
3408 goto out_sleep;
3409
3410 wl->sched_vif = wlvif;
3411
3412 out_sleep:
3413 wl1271_ps_elp_sleep(wl);
3414 out:
3415 mutex_unlock(&wl->mutex);
3416 return ret;
3417 }
3418
3419 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3420 struct ieee80211_vif *vif)
3421 {
3422 struct wl1271 *wl = hw->priv;
3423 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3424 int ret;
3425
3426 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3427
3428 mutex_lock(&wl->mutex);
3429
3430 if (unlikely(wl->state != WLCORE_STATE_ON))
3431 goto out;
3432
3433 ret = wl1271_ps_elp_wakeup(wl);
3434 if (ret < 0)
3435 goto out;
3436
3437 wl->ops->sched_scan_stop(wl, wlvif);
3438
3439 wl1271_ps_elp_sleep(wl);
3440 out:
3441 mutex_unlock(&wl->mutex);
3442 }
3443
3444 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3445 {
3446 struct wl1271 *wl = hw->priv;
3447 int ret = 0;
3448
3449 mutex_lock(&wl->mutex);
3450
3451 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3452 ret = -EAGAIN;
3453 goto out;
3454 }
3455
3456 ret = wl1271_ps_elp_wakeup(wl);
3457 if (ret < 0)
3458 goto out;
3459
3460 ret = wl1271_acx_frag_threshold(wl, value);
3461 if (ret < 0)
3462 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3463
3464 wl1271_ps_elp_sleep(wl);
3465
3466 out:
3467 mutex_unlock(&wl->mutex);
3468
3469 return ret;
3470 }
3471
3472 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3473 {
3474 struct wl1271 *wl = hw->priv;
3475 struct wl12xx_vif *wlvif;
3476 int ret = 0;
3477
3478 mutex_lock(&wl->mutex);
3479
3480 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3481 ret = -EAGAIN;
3482 goto out;
3483 }
3484
3485 ret = wl1271_ps_elp_wakeup(wl);
3486 if (ret < 0)
3487 goto out;
3488
3489 wl12xx_for_each_wlvif(wl, wlvif) {
3490 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3491 if (ret < 0)
3492 wl1271_warning("set rts threshold failed: %d", ret);
3493 }
3494 wl1271_ps_elp_sleep(wl);
3495
3496 out:
3497 mutex_unlock(&wl->mutex);
3498
3499 return ret;
3500 }
3501
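/*
 * Strip an IE (by element id) from a template skb, shifting the remaining
 * IEs down and trimming the skb accordingly.
 */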
3502 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3503 {
3504 int len;
3505 const u8 *next, *end = skb->data + skb->len;
3506 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3507 skb->len - ieoffset);
3508 if (!ie)
3509 return;
3510 len = ie[1] + 2;
3511 next = ie + len;
3512 memmove(ie, next, end - next);
3513 skb_trim(skb, skb->len - len);
3514 }
3515
3516 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3517 unsigned int oui, u8 oui_type,
3518 int ieoffset)
3519 {
3520 int len;
3521 const u8 *next, *end = skb->data + skb->len;
3522 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3523 skb->data + ieoffset,
3524 skb->len - ieoffset);
3525 if (!ie)
3526 return;
3527 len = ie[1] + 2;
3528 next = ie + len;
3529 memmove(ie, next, end - next);
3530 skb_trim(skb, skb->len - len);
3531 }
3532
3533 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3534 struct ieee80211_vif *vif)
3535 {
3536 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3537 struct sk_buff *skb;
3538 int ret;
3539
3540 skb = ieee80211_proberesp_get(wl->hw, vif);
3541 if (!skb)
3542 return -EOPNOTSUPP;
3543
3544 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3545 CMD_TEMPL_AP_PROBE_RESPONSE,
3546 skb->data,
3547 skb->len, 0,
3548 rates);
3549 dev_kfree_skb(skb);
3550
3551 if (ret < 0)
3552 goto out;
3553
3554 wl1271_debug(DEBUG_AP, "probe response updated");
3555 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3556
3557 out:
3558 return ret;
3559 }
3560
3561 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3562 struct ieee80211_vif *vif,
3563 u8 *probe_rsp_data,
3564 size_t probe_rsp_len,
3565 u32 rates)
3566 {
3567 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3568 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3569 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3570 int ssid_ie_offset, ie_offset, templ_len;
3571 const u8 *ptr;
3572
3573 /* no need to change probe response if the SSID is set correctly */
3574 if (wlvif->ssid_len > 0)
3575 return wl1271_cmd_template_set(wl, wlvif->role_id,
3576 CMD_TEMPL_AP_PROBE_RESPONSE,
3577 probe_rsp_data,
3578 probe_rsp_len, 0,
3579 rates);
3580
3581 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3582 wl1271_error("probe_rsp template too big");
3583 return -EINVAL;
3584 }
3585
3586 /* start searching from IE offset */
3587 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3588
3589 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3590 probe_rsp_len - ie_offset);
3591 if (!ptr) {
3592 wl1271_error("No SSID in beacon!");
3593 return -EINVAL;
3594 }
3595
3596 ssid_ie_offset = ptr - probe_rsp_data;
3597 ptr += (ptr[1] + 2);
3598
3599 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3600
3601 /* insert SSID from bss_conf */
3602 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3603 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3604 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3605 bss_conf->ssid, bss_conf->ssid_len);
3606 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3607
3608 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3609 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3610 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3611
3612 return wl1271_cmd_template_set(wl, wlvif->role_id,
3613 CMD_TEMPL_AP_PROBE_RESPONSE,
3614 probe_rsp_templ,
3615 templ_len, 0,
3616 rates);
3617 }
3618
3619 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3620 struct ieee80211_vif *vif,
3621 struct ieee80211_bss_conf *bss_conf,
3622 u32 changed)
3623 {
3624 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3625 int ret = 0;
3626
3627 if (changed & BSS_CHANGED_ERP_SLOT) {
3628 if (bss_conf->use_short_slot)
3629 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3630 else
3631 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3632 if (ret < 0) {
3633 wl1271_warning("Set slot time failed %d", ret);
3634 goto out;
3635 }
3636 }
3637
3638 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3639 if (bss_conf->use_short_preamble)
3640 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3641 else
3642 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3643 }
3644
3645 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3646 if (bss_conf->use_cts_prot)
3647 ret = wl1271_acx_cts_protect(wl, wlvif,
3648 CTSPROTECT_ENABLE);
3649 else
3650 ret = wl1271_acx_cts_protect(wl, wlvif,
3651 CTSPROTECT_DISABLE);
3652 if (ret < 0) {
3653 wl1271_warning("Set ctsprotect failed %d", ret);
3654 goto out;
3655 }
3656 }
3657
3658 out:
3659 return ret;
3660 }
3661
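/*
 * Upload the beacon template to the fw and, unless usermode already set a
 * probe response explicitly, derive a probe response template from the
 * beacon (with the TIM and P2P IEs removed).
 */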
3662 static int wlcore_set_beacon_template(struct wl1271 *wl,
3663 struct ieee80211_vif *vif,
3664 bool is_ap)
3665 {
3666 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3667 struct ieee80211_hdr *hdr;
3668 u32 min_rate;
3669 int ret;
3670 int ieoffset = offsetof(struct ieee80211_mgmt,
3671 u.beacon.variable);
3672 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3673 u16 tmpl_id;
3674
3675 if (!beacon) {
3676 ret = -EINVAL;
3677 goto out;
3678 }
3679
3680 wl1271_debug(DEBUG_MASTER, "beacon updated");
3681
3682 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3683 if (ret < 0) {
3684 dev_kfree_skb(beacon);
3685 goto out;
3686 }
3687 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3688 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3689 CMD_TEMPL_BEACON;
3690 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3691 beacon->data,
3692 beacon->len, 0,
3693 min_rate);
3694 if (ret < 0) {
3695 dev_kfree_skb(beacon);
3696 goto out;
3697 }
3698
3699 wlvif->wmm_enabled =
3700 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3701 WLAN_OUI_TYPE_MICROSOFT_WMM,
3702 beacon->data + ieoffset,
3703 beacon->len - ieoffset);
3704
3705 /*
3706 * In case we already have a probe-resp beacon set explicitly
3707 * by usermode, don't use the beacon data.
3708 */
3709 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3710 goto end_bcn;
3711
3712 /* remove TIM ie from probe response */
3713 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3714
3715 /*
3716 * remove p2p ie from probe response.
3717 	 * the fw responds to probe requests that don't include
3718 	 * the p2p ie. probe requests with a p2p ie will be passed up
3719 	 * and answered by the supplicant (the spec
3720 * forbids including the p2p ie when responding to probe
3721 * requests that didn't include it).
3722 */
3723 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3724 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3725
3726 hdr = (struct ieee80211_hdr *) beacon->data;
3727 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3728 IEEE80211_STYPE_PROBE_RESP);
3729 if (is_ap)
3730 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3731 beacon->data,
3732 beacon->len,
3733 min_rate);
3734 else
3735 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3736 CMD_TEMPL_PROBE_RESPONSE,
3737 beacon->data,
3738 beacon->len, 0,
3739 min_rate);
3740 end_bcn:
3741 dev_kfree_skb(beacon);
3742 if (ret < 0)
3743 goto out;
3744
3745 out:
3746 return ret;
3747 }
3748
3749 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3750 struct ieee80211_vif *vif,
3751 struct ieee80211_bss_conf *bss_conf,
3752 u32 changed)
3753 {
3754 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3755 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3756 int ret = 0;
3757
3758 if (changed & BSS_CHANGED_BEACON_INT) {
3759 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3760 bss_conf->beacon_int);
3761
3762 wlvif->beacon_int = bss_conf->beacon_int;
3763 }
3764
3765 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3766 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3767
3768 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3769 }
3770
3771 if (changed & BSS_CHANGED_BEACON) {
3772 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3773 if (ret < 0)
3774 goto out;
3775 }
3776
3777 out:
3778 if (ret != 0)
3779 wl1271_error("beacon info change failed: %d", ret);
3780 return ret;
3781 }
3782
3783 /* AP mode changes */
3784 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3785 struct ieee80211_vif *vif,
3786 struct ieee80211_bss_conf *bss_conf,
3787 u32 changed)
3788 {
3789 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3790 int ret = 0;
3791
3792 if (changed & BSS_CHANGED_BASIC_RATES) {
3793 u32 rates = bss_conf->basic_rates;
3794
3795 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3796 wlvif->band);
3797 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3798 wlvif->basic_rate_set);
3799
3800 ret = wl1271_init_ap_rates(wl, wlvif);
3801 if (ret < 0) {
3802 wl1271_error("AP rate policy change failed %d", ret);
3803 goto out;
3804 }
3805
3806 ret = wl1271_ap_init_templates(wl, vif);
3807 if (ret < 0)
3808 goto out;
3809
3810 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3811 if (ret < 0)
3812 goto out;
3813
3814 ret = wlcore_set_beacon_template(wl, vif, true);
3815 if (ret < 0)
3816 goto out;
3817 }
3818
3819 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3820 if (ret < 0)
3821 goto out;
3822
3823 if (changed & BSS_CHANGED_BEACON_ENABLED) {
3824 if (bss_conf->enable_beacon) {
3825 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3826 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3827 if (ret < 0)
3828 goto out;
3829
3830 ret = wl1271_ap_init_hwenc(wl, wlvif);
3831 if (ret < 0)
3832 goto out;
3833
3834 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3835 wl1271_debug(DEBUG_AP, "started AP");
3836 }
3837 } else {
3838 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3839 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3840 if (ret < 0)
3841 goto out;
3842
3843 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3844 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3845 &wlvif->flags);
3846 wl1271_debug(DEBUG_AP, "stopped AP");
3847 }
3848 }
3849 }
3850
3851 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3852 if (ret < 0)
3853 goto out;
3854
3855 /* Handle HT information change */
3856 if ((changed & BSS_CHANGED_HT) &&
3857 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3858 ret = wl1271_acx_set_ht_information(wl, wlvif,
3859 bss_conf->ht_operation_mode);
3860 if (ret < 0) {
3861 wl1271_warning("Set ht information failed %d", ret);
3862 goto out;
3863 }
3864 }
3865
3866 out:
3867 return;
3868 }
3869
3870 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3871 struct ieee80211_bss_conf *bss_conf,
3872 u32 sta_rate_set)
3873 {
3874 u32 rates;
3875 int ret;
3876
3877 wl1271_debug(DEBUG_MAC80211,
3878 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3879 bss_conf->bssid, bss_conf->aid,
3880 bss_conf->beacon_int,
3881 bss_conf->basic_rates, sta_rate_set);
3882
3883 wlvif->beacon_int = bss_conf->beacon_int;
3884 rates = bss_conf->basic_rates;
3885 wlvif->basic_rate_set =
3886 wl1271_tx_enabled_rates_get(wl, rates,
3887 wlvif->band);
3888 wlvif->basic_rate =
3889 wl1271_tx_min_rate_get(wl,
3890 wlvif->basic_rate_set);
3891
3892 if (sta_rate_set)
3893 wlvif->rate_set =
3894 wl1271_tx_enabled_rates_get(wl,
3895 sta_rate_set,
3896 wlvif->band);
3897
3898 /* we only support sched_scan while not connected */
3899 if (wl->sched_vif == wlvif)
3900 wl->ops->sched_scan_stop(wl, wlvif);
3901
3902 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3903 if (ret < 0)
3904 return ret;
3905
3906 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3907 if (ret < 0)
3908 return ret;
3909
3910 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
3911 if (ret < 0)
3912 return ret;
3913
3914 wlcore_set_ssid(wl, wlvif);
3915
3916 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
3917
3918 return 0;
3919 }
3920
3921 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3922 {
3923 int ret;
3924
3925 /* revert back to minimum rates for the current band */
3926 wl1271_set_band_rate(wl, wlvif);
3927 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3928
3929 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3930 if (ret < 0)
3931 return ret;
3932
3933 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3934 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
3935 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
3936 if (ret < 0)
3937 return ret;
3938 }
3939
3940 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
3941 return 0;
3942 }
3943 /* STA/IBSS mode changes */
3944 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3945 struct ieee80211_vif *vif,
3946 struct ieee80211_bss_conf *bss_conf,
3947 u32 changed)
3948 {
3949 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3950 bool do_join = false;
3951 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3952 bool ibss_joined = false;
3953 u32 sta_rate_set = 0;
3954 int ret;
3955 struct ieee80211_sta *sta;
3956 bool sta_exists = false;
3957 struct ieee80211_sta_ht_cap sta_ht_cap;
3958
3959 if (is_ibss) {
3960 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3961 changed);
3962 if (ret < 0)
3963 goto out;
3964 }
3965
3966 if (changed & BSS_CHANGED_IBSS) {
3967 if (bss_conf->ibss_joined) {
3968 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3969 ibss_joined = true;
3970 } else {
3971 wlcore_unset_assoc(wl, wlvif);
3972 wl12xx_cmd_role_stop_sta(wl, wlvif);
3973 }
3974 }
3975
3976 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3977 do_join = true;
3978
3979 /* Need to update the SSID (for filtering etc) */
3980 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3981 do_join = true;
3982
3983 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3984 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3985 bss_conf->enable_beacon ? "enabled" : "disabled");
3986
3987 do_join = true;
3988 }
3989
3990 if (changed & BSS_CHANGED_CQM) {
3991 bool enable = false;
3992 if (bss_conf->cqm_rssi_thold)
3993 enable = true;
3994 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3995 bss_conf->cqm_rssi_thold,
3996 bss_conf->cqm_rssi_hyst);
3997 if (ret < 0)
3998 goto out;
3999 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4000 }
4001
4002 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4003 BSS_CHANGED_ASSOC)) {
4004 rcu_read_lock();
4005 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4006 if (sta) {
4007 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4008
4009 /* save the supp_rates of the ap */
4010 sta_rate_set = sta->supp_rates[wlvif->band];
4011 if (sta->ht_cap.ht_supported)
4012 sta_rate_set |=
4013 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4014 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4015 sta_ht_cap = sta->ht_cap;
4016 sta_exists = true;
4017 }
4018
4019 rcu_read_unlock();
4020 }
4021
4022 if (changed & BSS_CHANGED_BSSID) {
4023 if (!is_zero_ether_addr(bss_conf->bssid)) {
4024 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4025 sta_rate_set);
4026 if (ret < 0)
4027 goto out;
4028
4029 /* Need to update the BSSID (for filtering etc) */
4030 do_join = true;
4031 } else {
4032 ret = wlcore_clear_bssid(wl, wlvif);
4033 if (ret < 0)
4034 goto out;
4035 }
4036 }
4037
4038 if (changed & BSS_CHANGED_IBSS) {
4039 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4040 bss_conf->ibss_joined);
4041
4042 if (bss_conf->ibss_joined) {
4043 u32 rates = bss_conf->basic_rates;
4044 wlvif->basic_rate_set =
4045 wl1271_tx_enabled_rates_get(wl, rates,
4046 wlvif->band);
4047 wlvif->basic_rate =
4048 wl1271_tx_min_rate_get(wl,
4049 wlvif->basic_rate_set);
4050
4051 /* by default, use 11b + OFDM rates */
4052 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4053 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4054 if (ret < 0)
4055 goto out;
4056 }
4057 }
4058
4059 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4060 if (ret < 0)
4061 goto out;
4062
4063 if (do_join) {
4064 ret = wlcore_join(wl, wlvif);
4065 if (ret < 0) {
4066 wl1271_warning("cmd join failed %d", ret);
4067 goto out;
4068 }
4069 }
4070
4071 if (changed & BSS_CHANGED_ASSOC) {
4072 if (bss_conf->assoc) {
4073 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4074 sta_rate_set);
4075 if (ret < 0)
4076 goto out;
4077
4078 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4079 wl12xx_set_authorized(wl, wlvif);
4080 } else {
4081 wlcore_unset_assoc(wl, wlvif);
4082 }
4083 }
4084
4085 if (changed & BSS_CHANGED_PS) {
4086 if ((bss_conf->ps) &&
4087 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4088 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4089 int ps_mode;
4090 char *ps_mode_str;
4091
4092 if (wl->conf.conn.forced_ps) {
4093 ps_mode = STATION_POWER_SAVE_MODE;
4094 ps_mode_str = "forced";
4095 } else {
4096 ps_mode = STATION_AUTO_PS_MODE;
4097 ps_mode_str = "auto";
4098 }
4099
4100 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4101
4102 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4103 if (ret < 0)
4104 wl1271_warning("enter %s ps failed %d",
4105 ps_mode_str, ret);
4106 } else if (!bss_conf->ps &&
4107 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4108 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4109
4110 ret = wl1271_ps_set_mode(wl, wlvif,
4111 STATION_ACTIVE_MODE);
4112 if (ret < 0)
4113 wl1271_warning("exit auto ps failed %d", ret);
4114 }
4115 }
4116
4117 /* Handle new association with HT. Do this after join. */
4118 if (sta_exists &&
4119 (changed & BSS_CHANGED_HT)) {
4120 bool enabled =
4121 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4122
4123 ret = wl1271_acx_set_ht_capabilities(wl,
4124 &sta_ht_cap,
4125 enabled,
4126 wlvif->sta.hlid);
4127 if (ret < 0) {
4128 wl1271_warning("Set ht cap failed %d", ret);
4129 goto out;
4130
4131 }
4132
4133 if (enabled) {
4134 ret = wl1271_acx_set_ht_information(wl, wlvif,
4135 bss_conf->ht_operation_mode);
4136 if (ret < 0) {
4137 wl1271_warning("Set ht information failed %d",
4138 ret);
4139 goto out;
4140 }
4141 }
4142 }
4143
4144 /* Handle arp filtering. Done after join. */
4145 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4146 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4147 __be32 addr = bss_conf->arp_addr_list[0];
4148 wlvif->sta.qos = bss_conf->qos;
4149 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4150
4151 if (bss_conf->arp_addr_cnt == 1 &&
4152 bss_conf->arp_filter_enabled) {
4153 wlvif->ip_addr = addr;
4154 /*
4155 * The template should have been configured only upon
4156 * association. However, it seems that the correct IP
4157 * isn't being set (when sending), so we have to
4158 * reconfigure the template upon every IP change.
4159 */
4160 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4161 if (ret < 0) {
4162 wl1271_warning("build arp rsp failed: %d", ret);
4163 goto out;
4164 }
4165
4166 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4167 (ACX_ARP_FILTER_ARP_FILTERING |
4168 ACX_ARP_FILTER_AUTO_ARP),
4169 addr);
4170 } else {
4171 wlvif->ip_addr = 0;
4172 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4173 }
4174
4175 if (ret < 0)
4176 goto out;
4177 }
4178
4179 out:
4180 return;
4181 }
4182
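/*
 * mac80211 bss_info_changed handler: flush TX when AP beaconing is being
 * disabled, cancel pending connection-loss work when the association state
 * changes, then dispatch to the AP or STA specific handler while holding
 * wl->mutex with the chip woken up from ELP.
 */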
4183 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4184 struct ieee80211_vif *vif,
4185 struct ieee80211_bss_conf *bss_conf,
4186 u32 changed)
4187 {
4188 struct wl1271 *wl = hw->priv;
4189 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4190 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4191 int ret;
4192
4193 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4194 wlvif->role_id, (int)changed);
4195
4196 /*
4197 * make sure to cancel pending disconnections if our association
4198 * state changed
4199 */
4200 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4201 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4202
4203 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4204 !bss_conf->enable_beacon)
4205 wl1271_tx_flush(wl);
4206
4207 mutex_lock(&wl->mutex);
4208
4209 if (unlikely(wl->state != WLCORE_STATE_ON))
4210 goto out;
4211
4212 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4213 goto out;
4214
4215 ret = wl1271_ps_elp_wakeup(wl);
4216 if (ret < 0)
4217 goto out;
4218
4219 if (is_ap)
4220 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4221 else
4222 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4223
4224 wl1271_ps_elp_sleep(wl);
4225
4226 out:
4227 mutex_unlock(&wl->mutex);
4228 }
4229
4230 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4231 struct ieee80211_chanctx_conf *ctx)
4232 {
4233 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4234 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4235 cfg80211_get_chandef_type(&ctx->def));
4236 return 0;
4237 }
4238
4239 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4240 struct ieee80211_chanctx_conf *ctx)
4241 {
4242 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4243 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4244 cfg80211_get_chandef_type(&ctx->def));
4245 }
4246
4247 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4248 struct ieee80211_chanctx_conf *ctx,
4249 u32 changed)
4250 {
4251 wl1271_debug(DEBUG_MAC80211,
4252 "mac80211 change chanctx %d (type %d) changed 0x%x",
4253 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4254 cfg80211_get_chandef_type(&ctx->def), changed);
4255 }
4256
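/*
 * Record the assigned channel context (band, channel and channel type) on
 * the vif and refresh the default per-band rates accordingly.
 */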
4257 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4258 struct ieee80211_vif *vif,
4259 struct ieee80211_chanctx_conf *ctx)
4260 {
4261 struct wl1271 *wl = hw->priv;
4262 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4263 int channel = ieee80211_frequency_to_channel(
4264 ctx->def.chan->center_freq);
4265
4266 wl1271_debug(DEBUG_MAC80211,
4267 "mac80211 assign chanctx (role %d) %d (type %d)",
4268 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4269
4270 mutex_lock(&wl->mutex);
4271
4272 wlvif->band = ctx->def.chan->band;
4273 wlvif->channel = channel;
4274 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4275
4276 /* update default rates according to the band */
4277 wl1271_set_band_rate(wl, wlvif);
4278
4279 mutex_unlock(&wl->mutex);
4280
4281 return 0;
4282 }
4283
4284 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4285 struct ieee80211_vif *vif,
4286 struct ieee80211_chanctx_conf *ctx)
4287 {
4288 struct wl1271 *wl = hw->priv;
4289 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4290
4291 wl1271_debug(DEBUG_MAC80211,
4292 "mac80211 unassign chanctx (role %d) %d (type %d)",
4293 wlvif->role_id,
4294 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4295 cfg80211_get_chandef_type(&ctx->def));
4296
4297 wl1271_tx_flush(wl);
4298 }
4299
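/*
 * Configure one TX queue: program the EDCA AC parameters (CW min/max, AIFS,
 * TXOP) and the matching TID configuration, using the U-APSD trigger scheme
 * when requested by mac80211.
 */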
4300 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4301 struct ieee80211_vif *vif, u16 queue,
4302 const struct ieee80211_tx_queue_params *params)
4303 {
4304 struct wl1271 *wl = hw->priv;
4305 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4306 u8 ps_scheme;
4307 int ret = 0;
4308
4309 mutex_lock(&wl->mutex);
4310
4311 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4312
4313 if (params->uapsd)
4314 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4315 else
4316 ps_scheme = CONF_PS_SCHEME_LEGACY;
4317
4318 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4319 goto out;
4320
4321 ret = wl1271_ps_elp_wakeup(wl);
4322 if (ret < 0)
4323 goto out;
4324
4325 /*
4326 * mac80211 reports the txop in units of 32us, while the
4327 * firmware expects microseconds, hence the << 5 below
4328 */
4329 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4330 params->cw_min, params->cw_max,
4331 params->aifs, params->txop << 5);
4332 if (ret < 0)
4333 goto out_sleep;
4334
4335 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4336 CONF_CHANNEL_TYPE_EDCF,
4337 wl1271_tx_get_queue(queue),
4338 ps_scheme, CONF_ACK_POLICY_LEGACY,
4339 0, 0);
4340
4341 out_sleep:
4342 wl1271_ps_elp_sleep(wl);
4343
4344 out:
4345 mutex_unlock(&wl->mutex);
4346
4347 return ret;
4348 }
4349
4350 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4351 struct ieee80211_vif *vif)
4352 {
4353
4354 struct wl1271 *wl = hw->priv;
4355 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4356 u64 mactime = ULLONG_MAX;
4357 int ret;
4358
4359 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4360
4361 mutex_lock(&wl->mutex);
4362
4363 if (unlikely(wl->state != WLCORE_STATE_ON))
4364 goto out;
4365
4366 ret = wl1271_ps_elp_wakeup(wl);
4367 if (ret < 0)
4368 goto out;
4369
4370 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4371 if (ret < 0)
4372 goto out_sleep;
4373
4374 out_sleep:
4375 wl1271_ps_elp_sleep(wl);
4376
4377 out:
4378 mutex_unlock(&wl->mutex);
4379 return mactime;
4380 }
4381
4382 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4383 struct survey_info *survey)
4384 {
4385 struct ieee80211_conf *conf = &hw->conf;
4386
4387 if (idx != 0)
4388 return -ENOENT;
4389
4390 survey->channel = conf->channel;
4391 survey->filled = 0;
4392 return 0;
4393 }
4394
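/*
 * Allocate a host link ID (HLID) for a new AP-mode station, record its MAC
 * address in the links table and bump the active station count.
 */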
4395 static int wl1271_allocate_sta(struct wl1271 *wl,
4396 struct wl12xx_vif *wlvif,
4397 struct ieee80211_sta *sta)
4398 {
4399 struct wl1271_station *wl_sta;
4400 int ret;
4401
4402
4403 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4404 wl1271_warning("could not allocate HLID - too much stations");
4405 return -EBUSY;
4406 }
4407
4408 wl_sta = (struct wl1271_station *)sta->drv_priv;
4409 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4410 if (ret < 0) {
4411 wl1271_warning("could not allocate HLID - too many links");
4412 return -EBUSY;
4413 }
4414
4415 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4416 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4417 wl->active_sta_count++;
4418 return 0;
4419 }
4420
4421 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4422 {
4423 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4424 return;
4425
4426 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4427 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4428 wl->links[hlid].ba_bitmap = 0;
4429 __clear_bit(hlid, &wl->ap_ps_map);
4430 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4431 wl12xx_free_link(wl, wlvif, &hlid);
4432 wl->active_sta_count--;
4433
4434 /*
4435 * rearm the tx watchdog when the last STA is freed - give the FW a
4436 * chance to return STA-buffered packets before complaining.
4437 */
4438 if (wl->active_sta_count == 0)
4439 wl12xx_rearm_tx_watchdog_locked(wl);
4440 }
4441
4442 static int wl12xx_sta_add(struct wl1271 *wl,
4443 struct wl12xx_vif *wlvif,
4444 struct ieee80211_sta *sta)
4445 {
4446 struct wl1271_station *wl_sta;
4447 int ret = 0;
4448 u8 hlid;
4449
4450 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4451
4452 ret = wl1271_allocate_sta(wl, wlvif, sta);
4453 if (ret < 0)
4454 return ret;
4455
4456 wl_sta = (struct wl1271_station *)sta->drv_priv;
4457 hlid = wl_sta->hlid;
4458
4459 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4460 if (ret < 0)
4461 wl1271_free_sta(wl, wlvif, hlid);
4462
4463 return ret;
4464 }
4465
4466 static int wl12xx_sta_remove(struct wl1271 *wl,
4467 struct wl12xx_vif *wlvif,
4468 struct ieee80211_sta *sta)
4469 {
4470 struct wl1271_station *wl_sta;
4471 int ret = 0, id;
4472
4473 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4474
4475 wl_sta = (struct wl1271_station *)sta->drv_priv;
4476 id = wl_sta->hlid;
4477 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4478 return -EINVAL;
4479
4480 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4481 if (ret < 0)
4482 return ret;
4483
4484 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4485 return ret;
4486 }
4487
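/*
 * Start a ROC on this vif's role, but only if no other ROC is currently
 * active and the role ID is valid.
 */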
4488 static void wlcore_roc_if_possible(struct wl1271 *wl,
4489 struct wl12xx_vif *wlvif)
4490 {
4491 if (find_first_bit(wl->roc_map,
4492 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4493 return;
4494
4495 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4496 return;
4497
4498 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4499 }
4500
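/*
 * Track stations that are in the middle of connecting: the first such
 * station triggers a ROC so the device stays on channel during the
 * connection, and the last one to finish releases it again.
 */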
4501 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4502 struct wl12xx_vif *wlvif,
4503 struct wl1271_station *wl_sta,
4504 bool in_connection)
4505 {
4506 if (in_connection) {
4507 if (WARN_ON(wl_sta->in_connection))
4508 return;
4509 wl_sta->in_connection = true;
4510 if (!wlvif->inconn_count++)
4511 wlcore_roc_if_possible(wl, wlvif);
4512 } else {
4513 if (!wl_sta->in_connection)
4514 return;
4515
4516 wl_sta->in_connection = false;
4517 wlvif->inconn_count--;
4518 if (WARN_ON(wlvif->inconn_count < 0))
4519 return;
4520
4521 if (!wlvif->inconn_count)
4522 if (test_bit(wlvif->role_id, wl->roc_map))
4523 wl12xx_croc(wl, wlvif->role_id);
4524 }
4525 }
4526
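/*
 * Map mac80211 station state transitions onto firmware commands: in AP mode
 * add/remove the peer and authorize it (peer state + HT capabilities); in
 * STA mode send the authorized state and manage ROC/CROC around the
 * connection attempt.
 */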
4527 static int wl12xx_update_sta_state(struct wl1271 *wl,
4528 struct wl12xx_vif *wlvif,
4529 struct ieee80211_sta *sta,
4530 enum ieee80211_sta_state old_state,
4531 enum ieee80211_sta_state new_state)
4532 {
4533 struct wl1271_station *wl_sta;
4534 u8 hlid;
4535 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4536 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4537 int ret;
4538
4539 wl_sta = (struct wl1271_station *)sta->drv_priv;
4540 hlid = wl_sta->hlid;
4541
4542 /* Add station (AP mode) */
4543 if (is_ap &&
4544 old_state == IEEE80211_STA_NOTEXIST &&
4545 new_state == IEEE80211_STA_NONE) {
4546 ret = wl12xx_sta_add(wl, wlvif, sta);
4547 if (ret)
4548 return ret;
4549
4550 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4551 }
4552
4553 /* Remove station (AP mode) */
4554 if (is_ap &&
4555 old_state == IEEE80211_STA_NONE &&
4556 new_state == IEEE80211_STA_NOTEXIST) {
4557 /* must not fail */
4558 wl12xx_sta_remove(wl, wlvif, sta);
4559
4560 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4561 }
4562
4563 /* Authorize station (AP mode) */
4564 if (is_ap &&
4565 new_state == IEEE80211_STA_AUTHORIZED) {
4566 ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
4567 if (ret < 0)
4568 return ret;
4569
4570 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4571 hlid);
4572 if (ret)
4573 return ret;
4574
4575 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4576 }
4577
4578 /* Authorize station */
4579 if (is_sta &&
4580 new_state == IEEE80211_STA_AUTHORIZED) {
4581 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4582 ret = wl12xx_set_authorized(wl, wlvif);
4583 if (ret)
4584 return ret;
4585 }
4586
4587 if (is_sta &&
4588 old_state == IEEE80211_STA_AUTHORIZED &&
4589 new_state == IEEE80211_STA_ASSOC) {
4590 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4591 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4592 }
4593
4594 /* clear ROCs on failure or authorization */
4595 if (is_sta &&
4596 (new_state == IEEE80211_STA_AUTHORIZED ||
4597 new_state == IEEE80211_STA_NOTEXIST)) {
4598 if (test_bit(wlvif->role_id, wl->roc_map))
4599 wl12xx_croc(wl, wlvif->role_id);
4600 }
4601
4602 if (is_sta &&
4603 old_state == IEEE80211_STA_NOTEXIST &&
4604 new_state == IEEE80211_STA_NONE) {
4605 if (find_first_bit(wl->roc_map,
4606 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4607 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4608 wl12xx_roc(wl, wlvif, wlvif->role_id,
4609 wlvif->band, wlvif->channel);
4610 }
4611 }
4612 return 0;
4613 }
4614
4615 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4616 struct ieee80211_vif *vif,
4617 struct ieee80211_sta *sta,
4618 enum ieee80211_sta_state old_state,
4619 enum ieee80211_sta_state new_state)
4620 {
4621 struct wl1271 *wl = hw->priv;
4622 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4623 int ret;
4624
4625 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4626 sta->aid, old_state, new_state);
4627
4628 mutex_lock(&wl->mutex);
4629
4630 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4631 ret = -EBUSY;
4632 goto out;
4633 }
4634
4635 ret = wl1271_ps_elp_wakeup(wl);
4636 if (ret < 0)
4637 goto out;
4638
4639 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4640
4641 wl1271_ps_elp_sleep(wl);
4642 out:
4643 mutex_unlock(&wl->mutex);
4644 if (new_state < old_state)
4645 return 0;
4646 return ret;
4647 }
4648
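/*
 * Handle RX BlockAck session setup/teardown for a given TID. Only RX
 * sessions are driven from the host; TX aggregation is handled by the
 * firmware, so TX actions are rejected here.
 */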
4649 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4650 struct ieee80211_vif *vif,
4651 enum ieee80211_ampdu_mlme_action action,
4652 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4653 u8 buf_size)
4654 {
4655 struct wl1271 *wl = hw->priv;
4656 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4657 int ret;
4658 u8 hlid, *ba_bitmap;
4659
4660 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4661 tid);
4662
4663 /* sanity check - the fields in FW are only 8 bits wide */
4664 if (WARN_ON(tid > 0xFF))
4665 return -ENOTSUPP;
4666
4667 mutex_lock(&wl->mutex);
4668
4669 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4670 ret = -EAGAIN;
4671 goto out;
4672 }
4673
4674 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4675 hlid = wlvif->sta.hlid;
4676 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4677 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4678 struct wl1271_station *wl_sta;
4679
4680 wl_sta = (struct wl1271_station *)sta->drv_priv;
4681 hlid = wl_sta->hlid;
4682 ba_bitmap = &wl->links[hlid].ba_bitmap;
4683 } else {
4684 ret = -EINVAL;
4685 goto out;
4686 }
4687
4688 ret = wl1271_ps_elp_wakeup(wl);
4689 if (ret < 0)
4690 goto out;
4691
4692 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4693 tid, action);
4694
4695 switch (action) {
4696 case IEEE80211_AMPDU_RX_START:
4697 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4698 ret = -ENOTSUPP;
4699 break;
4700 }
4701
4702 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4703 ret = -EBUSY;
4704 wl1271_error("exceeded max RX BA sessions");
4705 break;
4706 }
4707
4708 if (*ba_bitmap & BIT(tid)) {
4709 ret = -EINVAL;
4710 wl1271_error("cannot enable RX BA session on active "
4711 "tid: %d", tid);
4712 break;
4713 }
4714
4715 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4716 hlid);
4717 if (!ret) {
4718 *ba_bitmap |= BIT(tid);
4719 wl->ba_rx_session_count++;
4720 }
4721 break;
4722
4723 case IEEE80211_AMPDU_RX_STOP:
4724 if (!(*ba_bitmap & BIT(tid))) {
4725 /*
4726 * this happens on reconfig - so only output a debug
4727 * message for now, and don't fail the function.
4728 */
4729 wl1271_debug(DEBUG_MAC80211,
4730 "no active RX BA session on tid: %d",
4731 tid);
4732 ret = 0;
4733 break;
4734 }
4735
4736 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4737 hlid);
4738 if (!ret) {
4739 *ba_bitmap &= ~BIT(tid);
4740 wl->ba_rx_session_count--;
4741 }
4742 break;
4743
4744 /*
4745 * The BA initiator session is managed by the FW independently.
4746 * Fall through here on purpose for all TX AMPDU commands.
4747 */
4748 case IEEE80211_AMPDU_TX_START:
4749 case IEEE80211_AMPDU_TX_STOP:
4750 case IEEE80211_AMPDU_TX_OPERATIONAL:
4751 ret = -EINVAL;
4752 break;
4753
4754 default:
4755 wl1271_error("Incorrect ampdu action id=%x\n", action);
4756 ret = -EINVAL;
4757 }
4758
4759 wl1271_ps_elp_sleep(wl);
4760
4761 out:
4762 mutex_unlock(&wl->mutex);
4763
4764 return ret;
4765 }
4766
4767 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4768 struct ieee80211_vif *vif,
4769 const struct cfg80211_bitrate_mask *mask)
4770 {
4771 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4772 struct wl1271 *wl = hw->priv;
4773 int i, ret = 0;
4774
4775 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4776 mask->control[NL80211_BAND_2GHZ].legacy,
4777 mask->control[NL80211_BAND_5GHZ].legacy);
4778
4779 mutex_lock(&wl->mutex);
4780
4781 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4782 wlvif->bitrate_masks[i] =
4783 wl1271_tx_enabled_rates_get(wl,
4784 mask->control[i].legacy,
4785 i);
4786
4787 if (unlikely(wl->state != WLCORE_STATE_ON))
4788 goto out;
4789
4790 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4791 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4792
4793 ret = wl1271_ps_elp_wakeup(wl);
4794 if (ret < 0)
4795 goto out;
4796
4797 wl1271_set_band_rate(wl, wlvif);
4798 wlvif->basic_rate =
4799 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4800 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4801
4802 wl1271_ps_elp_sleep(wl);
4803 }
4804 out:
4805 mutex_unlock(&wl->mutex);
4806
4807 return ret;
4808 }
4809
4810 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4811 struct ieee80211_channel_switch *ch_switch)
4812 {
4813 struct wl1271 *wl = hw->priv;
4814 struct wl12xx_vif *wlvif;
4815 int ret;
4816
4817 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4818
4819 wl1271_tx_flush(wl);
4820
4821 mutex_lock(&wl->mutex);
4822
4823 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4824 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4825 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4826 ieee80211_chswitch_done(vif, false);
4827 }
4828 goto out;
4829 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4830 goto out;
4831 }
4832
4833 ret = wl1271_ps_elp_wakeup(wl);
4834 if (ret < 0)
4835 goto out;
4836
4837 /* TODO: change mac80211 to pass vif as param */
4838 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4839 unsigned long delay_usec;
4840
4841 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4842 if (ret)
4843 goto out_sleep;
4844
4845 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4846
4847 /* indicate failure 5 seconds after channel switch time */
4848 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4849 ch_switch->count;
4850 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4851 usecs_to_jiffies(delay_usec) +
4852 msecs_to_jiffies(5000));
4853 }
4854
4855 out_sleep:
4856 wl1271_ps_elp_sleep(wl);
4857
4858 out:
4859 mutex_unlock(&wl->mutex);
4860 }
4861
4862 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4863 {
4864 struct wl1271 *wl = hw->priv;
4865
4866 wl1271_tx_flush(wl);
4867 }
4868
4869 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4870 struct ieee80211_vif *vif,
4871 struct ieee80211_channel *chan,
4872 int duration)
4873 {
4874 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4875 struct wl1271 *wl = hw->priv;
4876 int channel, ret = 0;
4877
4878 channel = ieee80211_frequency_to_channel(chan->center_freq);
4879
4880 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4881 channel, wlvif->role_id);
4882
4883 mutex_lock(&wl->mutex);
4884
4885 if (unlikely(wl->state != WLCORE_STATE_ON))
4886 goto out;
4887
4888 /* return EBUSY if we can't ROC right now */
4889 if (WARN_ON(wl->roc_vif ||
4890 find_first_bit(wl->roc_map,
4891 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4892 ret = -EBUSY;
4893 goto out;
4894 }
4895
4896 ret = wl1271_ps_elp_wakeup(wl);
4897 if (ret < 0)
4898 goto out;
4899
4900 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
4901 if (ret < 0)
4902 goto out_sleep;
4903
4904 wl->roc_vif = vif;
4905 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4906 msecs_to_jiffies(duration));
4907 out_sleep:
4908 wl1271_ps_elp_sleep(wl);
4909 out:
4910 mutex_unlock(&wl->mutex);
4911 return ret;
4912 }
4913
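/*
 * Complete a pending ROC by stopping the device role; returns 0 immediately
 * if no ROC is in progress. The caller holds wl->mutex and has woken the
 * chip.
 */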
4914 static int __wlcore_roc_completed(struct wl1271 *wl)
4915 {
4916 struct wl12xx_vif *wlvif;
4917 int ret;
4918
4919 /* already completed */
4920 if (unlikely(!wl->roc_vif))
4921 return 0;
4922
4923 wlvif = wl12xx_vif_to_data(wl->roc_vif);
4924
4925 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4926 return -EBUSY;
4927
4928 ret = wl12xx_stop_dev(wl, wlvif);
4929 if (ret < 0)
4930 return ret;
4931
4932 wl->roc_vif = NULL;
4933
4934 return 0;
4935 }
4936
4937 static int wlcore_roc_completed(struct wl1271 *wl)
4938 {
4939 int ret;
4940
4941 wl1271_debug(DEBUG_MAC80211, "roc complete");
4942
4943 mutex_lock(&wl->mutex);
4944
4945 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4946 ret = -EBUSY;
4947 goto out;
4948 }
4949
4950 ret = wl1271_ps_elp_wakeup(wl);
4951 if (ret < 0)
4952 goto out;
4953
4954 ret = __wlcore_roc_completed(wl);
4955
4956 wl1271_ps_elp_sleep(wl);
4957 out:
4958 mutex_unlock(&wl->mutex);
4959
4960 return ret;
4961 }
4962
4963 static void wlcore_roc_complete_work(struct work_struct *work)
4964 {
4965 struct delayed_work *dwork;
4966 struct wl1271 *wl;
4967 int ret;
4968
4969 dwork = container_of(work, struct delayed_work, work);
4970 wl = container_of(dwork, struct wl1271, roc_complete_work);
4971
4972 ret = wlcore_roc_completed(wl);
4973 if (!ret)
4974 ieee80211_remain_on_channel_expired(wl->hw);
4975 }
4976
4977 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
4978 {
4979 struct wl1271 *wl = hw->priv;
4980
4981 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
4982
4983 /* TODO: per-vif */
4984 wl1271_tx_flush(wl);
4985
4986 /*
4987 * we can't just flush_work here, because it might deadlock
4988 * (as we might get called from the same workqueue)
4989 */
4990 cancel_delayed_work_sync(&wl->roc_complete_work);
4991 wlcore_roc_completed(wl);
4992
4993 return 0;
4994 }
4995
4996 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
4997 struct ieee80211_vif *vif,
4998 struct ieee80211_sta *sta,
4999 u32 changed)
5000 {
5001 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5002 struct wl1271 *wl = hw->priv;
5003
5004 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5005 }
5006
5007 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5008 {
5009 struct wl1271 *wl = hw->priv;
5010 bool ret = false;
5011
5012 mutex_lock(&wl->mutex);
5013
5014 if (unlikely(wl->state != WLCORE_STATE_ON))
5015 goto out;
5016
5017 /* packets are considered pending if in the TX queue or the FW */
5018 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5019 out:
5020 mutex_unlock(&wl->mutex);
5021
5022 return ret;
5023 }
5024
5025 /* can't be const, mac80211 writes to this */
5026 static struct ieee80211_rate wl1271_rates[] = {
5027 { .bitrate = 10,
5028 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5029 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5030 { .bitrate = 20,
5031 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5032 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5033 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5034 { .bitrate = 55,
5035 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5036 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5037 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5038 { .bitrate = 110,
5039 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5040 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5041 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5042 { .bitrate = 60,
5043 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5044 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5045 { .bitrate = 90,
5046 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5047 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5048 { .bitrate = 120,
5049 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5050 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5051 { .bitrate = 180,
5052 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5053 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5054 { .bitrate = 240,
5055 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5056 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5057 { .bitrate = 360,
5058 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5059 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5060 { .bitrate = 480,
5061 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5062 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5063 { .bitrate = 540,
5064 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5065 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5066 };
5067
5068 /* can't be const, mac80211 writes to this */
5069 static struct ieee80211_channel wl1271_channels[] = {
5070 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
5071 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
5072 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
5073 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
5074 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
5075 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
5076 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
5077 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
5078 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
5079 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
5080 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
5081 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
5082 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
5083 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
5084 };
5085
5086 /* can't be const, mac80211 writes to this */
5087 static struct ieee80211_supported_band wl1271_band_2ghz = {
5088 .channels = wl1271_channels,
5089 .n_channels = ARRAY_SIZE(wl1271_channels),
5090 .bitrates = wl1271_rates,
5091 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5092 };
5093
5094 /* 5 GHz data rates for WL1273 */
5095 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5096 { .bitrate = 60,
5097 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5098 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5099 { .bitrate = 90,
5100 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5101 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5102 { .bitrate = 120,
5103 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5104 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5105 { .bitrate = 180,
5106 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5107 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5108 { .bitrate = 240,
5109 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5110 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5111 { .bitrate = 360,
5112 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5113 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5114 { .bitrate = 480,
5115 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5116 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5117 { .bitrate = 540,
5118 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5119 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5120 };
5121
5122 /* 5 GHz band channels for WL1273 */
5123 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5124 { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
5125 { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
5126 { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
5127 { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
5128 { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
5129 { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
5130 { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
5131 { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
5132 { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
5133 { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
5134 { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
5135 { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
5136 { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
5137 { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
5138 { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
5139 { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
5140 { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
5141 { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
5142 { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
5143 { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
5144 { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
5145 { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
5146 { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
5147 { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
5148 { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
5149 { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
5150 { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
5151 { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
5152 { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
5153 { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
5154 { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
5155 { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
5156 { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
5157 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
5158 };
5159
5160 static struct ieee80211_supported_band wl1271_band_5ghz = {
5161 .channels = wl1271_channels_5ghz,
5162 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5163 .bitrates = wl1271_rates_5ghz,
5164 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5165 };
5166
5167 static const struct ieee80211_ops wl1271_ops = {
5168 .start = wl1271_op_start,
5169 .stop = wlcore_op_stop,
5170 .add_interface = wl1271_op_add_interface,
5171 .remove_interface = wl1271_op_remove_interface,
5172 .change_interface = wl12xx_op_change_interface,
5173 #ifdef CONFIG_PM
5174 .suspend = wl1271_op_suspend,
5175 .resume = wl1271_op_resume,
5176 #endif
5177 .config = wl1271_op_config,
5178 .prepare_multicast = wl1271_op_prepare_multicast,
5179 .configure_filter = wl1271_op_configure_filter,
5180 .tx = wl1271_op_tx,
5181 .set_key = wlcore_op_set_key,
5182 .hw_scan = wl1271_op_hw_scan,
5183 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5184 .sched_scan_start = wl1271_op_sched_scan_start,
5185 .sched_scan_stop = wl1271_op_sched_scan_stop,
5186 .bss_info_changed = wl1271_op_bss_info_changed,
5187 .set_frag_threshold = wl1271_op_set_frag_threshold,
5188 .set_rts_threshold = wl1271_op_set_rts_threshold,
5189 .conf_tx = wl1271_op_conf_tx,
5190 .get_tsf = wl1271_op_get_tsf,
5191 .get_survey = wl1271_op_get_survey,
5192 .sta_state = wl12xx_op_sta_state,
5193 .ampdu_action = wl1271_op_ampdu_action,
5194 .tx_frames_pending = wl1271_tx_frames_pending,
5195 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5196 .channel_switch = wl12xx_op_channel_switch,
5197 .flush = wlcore_op_flush,
5198 .remain_on_channel = wlcore_op_remain_on_channel,
5199 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5200 .add_chanctx = wlcore_op_add_chanctx,
5201 .remove_chanctx = wlcore_op_remove_chanctx,
5202 .change_chanctx = wlcore_op_change_chanctx,
5203 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5204 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5205 .sta_rc_update = wlcore_op_sta_rc_update,
5206 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5207 };
5208
5209
5210 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5211 {
5212 u8 idx;
5213
5214 BUG_ON(band >= 2);
5215
5216 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5217 wl1271_error("Illegal RX rate from HW: %d", rate);
5218 return 0;
5219 }
5220
5221 idx = wl->band_rate_to_idx[band][rate];
5222 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5223 wl1271_error("Unsupported RX rate from HW: %d", rate);
5224 return 0;
5225 }
5226
5227 return idx;
5228 }
5229
5230 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5231 struct device_attribute *attr,
5232 char *buf)
5233 {
5234 struct wl1271 *wl = dev_get_drvdata(dev);
5235 ssize_t len;
5236
5237 len = PAGE_SIZE;
5238
5239 mutex_lock(&wl->mutex);
5240 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5241 wl->sg_enabled);
5242 mutex_unlock(&wl->mutex);
5243
5244 return len;
5245
5246 }
5247
5248 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5249 struct device_attribute *attr,
5250 const char *buf, size_t count)
5251 {
5252 struct wl1271 *wl = dev_get_drvdata(dev);
5253 unsigned long res;
5254 int ret;
5255
5256 ret = kstrtoul(buf, 10, &res);
5257 if (ret < 0) {
5258 wl1271_warning("incorrect value written to bt_coex_mode");
5259 return count;
5260 }
5261
5262 mutex_lock(&wl->mutex);
5263
5264 res = !!res;
5265
5266 if (res == wl->sg_enabled)
5267 goto out;
5268
5269 wl->sg_enabled = res;
5270
5271 if (unlikely(wl->state != WLCORE_STATE_ON))
5272 goto out;
5273
5274 ret = wl1271_ps_elp_wakeup(wl);
5275 if (ret < 0)
5276 goto out;
5277
5278 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5279 wl1271_ps_elp_sleep(wl);
5280
5281 out:
5282 mutex_unlock(&wl->mutex);
5283 return count;
5284 }
5285
5286 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5287 wl1271_sysfs_show_bt_coex_state,
5288 wl1271_sysfs_store_bt_coex_state);
5289
5290 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5291 struct device_attribute *attr,
5292 char *buf)
5293 {
5294 struct wl1271 *wl = dev_get_drvdata(dev);
5295 ssize_t len;
5296
5297 len = PAGE_SIZE;
5298
5299 mutex_lock(&wl->mutex);
5300 if (wl->hw_pg_ver >= 0)
5301 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5302 else
5303 len = snprintf(buf, len, "n/a\n");
5304 mutex_unlock(&wl->mutex);
5305
5306 return len;
5307 }
5308
5309 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5310 wl1271_sysfs_show_hw_pg_ver, NULL);
5311
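/*
 * sysfs read handler for the FW log: block (interruptibly) until log data is
 * available, copy out as much as requested and shift the remaining data to
 * the start of the buffer.
 */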
5312 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5313 struct bin_attribute *bin_attr,
5314 char *buffer, loff_t pos, size_t count)
5315 {
5316 struct device *dev = container_of(kobj, struct device, kobj);
5317 struct wl1271 *wl = dev_get_drvdata(dev);
5318 ssize_t len;
5319 int ret;
5320
5321 ret = mutex_lock_interruptible(&wl->mutex);
5322 if (ret < 0)
5323 return -ERESTARTSYS;
5324
5325 /* Let only one thread read the log at a time, blocking others */
5326 while (wl->fwlog_size == 0) {
5327 DEFINE_WAIT(wait);
5328
5329 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5330 &wait,
5331 TASK_INTERRUPTIBLE);
5332
5333 if (wl->fwlog_size != 0) {
5334 finish_wait(&wl->fwlog_waitq, &wait);
5335 break;
5336 }
5337
5338 mutex_unlock(&wl->mutex);
5339
5340 schedule();
5341 finish_wait(&wl->fwlog_waitq, &wait);
5342
5343 if (signal_pending(current))
5344 return -ERESTARTSYS;
5345
5346 ret = mutex_lock_interruptible(&wl->mutex);
5347 if (ret < 0)
5348 return -ERESTARTSYS;
5349 }
5350
5351 /* Check if the fwlog is still valid */
5352 if (wl->fwlog_size < 0) {
5353 mutex_unlock(&wl->mutex);
5354 return 0;
5355 }
5356
5357 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5358 len = min(count, (size_t)wl->fwlog_size);
5359 wl->fwlog_size -= len;
5360 memcpy(buffer, wl->fwlog, len);
5361
5362 /* Make room for new messages */
5363 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5364
5365 mutex_unlock(&wl->mutex);
5366
5367 return len;
5368 }
5369
5370 static struct bin_attribute fwlog_attr = {
5371 .attr = {.name = "fwlog", .mode = S_IRUSR},
5372 .read = wl1271_sysfs_read_fwlog,
5373 };
5374
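/*
 * Derive the wiphy address list from the base OUI/NIC parts: addresses use
 * consecutive NIC values, and if the device provides fewer than
 * WLCORE_NUM_MAC_ADDRESSES the last slot reuses the first address with the
 * locally-administered bit set.
 */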
5375 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5376 {
5377 int i;
5378
5379 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5380 oui, nic);
5381
5382 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5383 wl1271_warning("NIC part of the MAC address wraps around!");
5384
5385 for (i = 0; i < wl->num_mac_addr; i++) {
5386 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5387 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5388 wl->addresses[i].addr[2] = (u8) oui;
5389 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5390 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5391 wl->addresses[i].addr[5] = (u8) nic;
5392 nic++;
5393 }
5394
5395 /* we may be one address short at the most */
5396 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5397
5398 /*
5399 * turn on the LAA bit in the first address and use it as
5400 * the last address.
5401 */
5402 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5403 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5404 memcpy(&wl->addresses[idx], &wl->addresses[0],
5405 sizeof(wl->addresses[0]));
5406 /* LAA bit */
5407 wl->addresses[idx].addr[2] |= BIT(1);
5408 }
5409
5410 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5411 wl->hw->wiphy->addresses = wl->addresses;
5412 }
5413
5414 static int wl12xx_get_hw_info(struct wl1271 *wl)
5415 {
5416 int ret;
5417
5418 ret = wl12xx_set_power_on(wl);
5419 if (ret < 0)
5420 return ret;
5421
5422 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5423 if (ret < 0)
5424 goto out;
5425
5426 wl->fuse_oui_addr = 0;
5427 wl->fuse_nic_addr = 0;
5428
5429 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5430 if (ret < 0)
5431 goto out;
5432
5433 if (wl->ops->get_mac)
5434 ret = wl->ops->get_mac(wl);
5435
5436 out:
5437 wl1271_power_off(wl);
5438 return ret;
5439 }
5440
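/*
 * Register with mac80211 once: take the MAC address from the NVS if present,
 * otherwise derive it from the fused BD_ADDR, then register the hw and set
 * up debugfs.
 */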
5441 static int wl1271_register_hw(struct wl1271 *wl)
5442 {
5443 int ret;
5444 u32 oui_addr = 0, nic_addr = 0;
5445
5446 if (wl->mac80211_registered)
5447 return 0;
5448
5449 if (wl->nvs_len >= 12) {
5450 /* NOTE: The wl->nvs->nvs element must be first; in
5451 * order to simplify the casting, we assume it is at
5452 * the beginning of the wl->nvs structure.
5453 */
5454 u8 *nvs_ptr = (u8 *)wl->nvs;
5455
5456 oui_addr =
5457 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5458 nic_addr =
5459 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5460 }
5461
5462 /* if the MAC address is zeroed in the NVS, derive it from the fuse */
5463 if (oui_addr == 0 && nic_addr == 0) {
5464 oui_addr = wl->fuse_oui_addr;
5465 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5466 nic_addr = wl->fuse_nic_addr + 1;
5467 }
5468
5469 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5470
5471 ret = ieee80211_register_hw(wl->hw);
5472 if (ret < 0) {
5473 wl1271_error("unable to register mac80211 hw: %d", ret);
5474 goto out;
5475 }
5476
5477 wl->mac80211_registered = true;
5478
5479 wl1271_debugfs_init(wl);
5480
5481 wl1271_notice("loaded");
5482
5483 out:
5484 return ret;
5485 }
5486
5487 static void wl1271_unregister_hw(struct wl1271 *wl)
5488 {
5489 if (wl->plt)
5490 wl1271_plt_stop(wl);
5491
5492 ieee80211_unregister_hw(wl->hw);
5493 wl->mac80211_registered = false;
5494
5495 }
5496
5497 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5498 {
5499 .max = 3,
5500 .types = BIT(NL80211_IFTYPE_STATION),
5501 },
5502 {
5503 .max = 1,
5504 .types = BIT(NL80211_IFTYPE_AP) |
5505 BIT(NL80211_IFTYPE_P2P_GO) |
5506 BIT(NL80211_IFTYPE_P2P_CLIENT),
5507 },
5508 };
5509
5510 static struct ieee80211_iface_combination
5511 wlcore_iface_combinations[] = {
5512 {
5513 .max_interfaces = 3,
5514 .limits = wlcore_iface_limits,
5515 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5516 },
5517 };
5518
5519 static int wl1271_init_ieee80211(struct wl1271 *wl)
5520 {
5521 static const u32 cipher_suites[] = {
5522 WLAN_CIPHER_SUITE_WEP40,
5523 WLAN_CIPHER_SUITE_WEP104,
5524 WLAN_CIPHER_SUITE_TKIP,
5525 WLAN_CIPHER_SUITE_CCMP,
5526 WL1271_CIPHER_SUITE_GEM,
5527 };
5528
5529 /* The tx descriptor buffer */
5530 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5531
5532 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5533 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5534
5535 /* channel change time, in microseconds */
5536 /* FIXME: find a proper value */
5537 wl->hw->channel_change_time = 10000;
5538 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5539
5540 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5541 IEEE80211_HW_SUPPORTS_PS |
5542 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5543 IEEE80211_HW_SUPPORTS_UAPSD |
5544 IEEE80211_HW_HAS_RATE_CONTROL |
5545 IEEE80211_HW_CONNECTION_MONITOR |
5546 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5547 IEEE80211_HW_SPECTRUM_MGMT |
5548 IEEE80211_HW_AP_LINK_PS |
5549 IEEE80211_HW_AMPDU_AGGREGATION |
5550 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5551 IEEE80211_HW_SCAN_WHILE_IDLE;
5552
5553 wl->hw->wiphy->cipher_suites = cipher_suites;
5554 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5555
5556 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5557 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5558 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5559 wl->hw->wiphy->max_scan_ssids = 1;
5560 wl->hw->wiphy->max_sched_scan_ssids = 16;
5561 wl->hw->wiphy->max_match_sets = 16;
5562 /*
5563 * The maximum length of the IEs in a scan probe request template
5564 * is the maximum possible template size, without the
5565 * IEEE80211 header of the template
5566 */
5567 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5568 sizeof(struct ieee80211_header);
5569
5570 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5571 sizeof(struct ieee80211_header);
5572
5573 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5574
5575 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5576 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5577
5578 /* make sure all our channels fit in the scanned_ch bitmask */
5579 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5580 ARRAY_SIZE(wl1271_channels_5ghz) >
5581 WL1271_MAX_CHANNELS);
5582 /*
5583 * We keep local copies of the band structs because we need to
5584 * modify them on a per-device basis.
5585 */
5586 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5587 sizeof(wl1271_band_2ghz));
5588 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5589 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5590 sizeof(*wl->ht_cap));
5591 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5592 sizeof(wl1271_band_5ghz));
5593 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5594 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5595 sizeof(*wl->ht_cap));
5596
5597 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5598 &wl->bands[IEEE80211_BAND_2GHZ];
5599 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5600 &wl->bands[IEEE80211_BAND_5GHZ];
5601
5602 wl->hw->queues = 4;
5603 wl->hw->max_rates = 1;
5604
5605 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5606
5607 /* the FW answers probe-requests in AP-mode */
5608 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5609 wl->hw->wiphy->probe_resp_offload =
5610 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5611 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5612 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5613
5614 /* allowed interface combinations */
5615 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5616 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5617 wl->hw->wiphy->n_iface_combinations =
5618 ARRAY_SIZE(wlcore_iface_combinations);
5619
5620 SET_IEEE80211_DEV(wl->hw, wl->dev);
5621
5622 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5623 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5624
5625 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5626
5627 return 0;
5628 }
5629
5630 #define WL1271_DEFAULT_CHANNEL 0
5631
5632 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5633 u32 mbox_size)
5634 {
5635 struct ieee80211_hw *hw;
5636 struct wl1271 *wl;
5637 int i, j, ret;
5638 unsigned int order;
5639
5640 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5641
5642 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5643 if (!hw) {
5644 wl1271_error("could not alloc ieee80211_hw");
5645 ret = -ENOMEM;
5646 goto err_hw_alloc;
5647 }
5648
5649 wl = hw->priv;
5650 memset(wl, 0, sizeof(*wl));
5651
5652 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5653 if (!wl->priv) {
5654 wl1271_error("could not alloc wl priv");
5655 ret = -ENOMEM;
5656 goto err_priv_alloc;
5657 }
5658
5659 INIT_LIST_HEAD(&wl->wlvif_list);
5660
5661 wl->hw = hw;
5662
5663 for (i = 0; i < NUM_TX_QUEUES; i++)
5664 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5665 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5666
5667 skb_queue_head_init(&wl->deferred_rx_queue);
5668 skb_queue_head_init(&wl->deferred_tx_queue);
5669
5670 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5671 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5672 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5673 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5674 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5675 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5676 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5677
5678 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5679 if (!wl->freezable_wq) {
5680 ret = -ENOMEM;
5681 goto err_hw;
5682 }
5683
5684 wl->channel = WL1271_DEFAULT_CHANNEL;
5685 wl->rx_counter = 0;
5686 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5687 wl->band = IEEE80211_BAND_2GHZ;
5688 wl->channel_type = NL80211_CHAN_NO_HT;
5689 wl->flags = 0;
5690 wl->sg_enabled = true;
5691 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5692 wl->recovery_count = 0;
5693 wl->hw_pg_ver = -1;
5694 wl->ap_ps_map = 0;
5695 wl->ap_fw_ps_map = 0;
5696 wl->quirks = 0;
5697 wl->platform_quirks = 0;
5698 wl->system_hlid = WL12XX_SYSTEM_HLID;
5699 wl->active_sta_count = 0;
5700 wl->fwlog_size = 0;
5701 init_waitqueue_head(&wl->fwlog_waitq);
5702
5703 /* The system link is always allocated */
5704 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5705
5706 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5707 for (i = 0; i < wl->num_tx_desc; i++)
5708 wl->tx_frames[i] = NULL;
5709
5710 spin_lock_init(&wl->wl_lock);
5711
5712 wl->state = WLCORE_STATE_OFF;
5713 wl->fw_type = WL12XX_FW_TYPE_NONE;
5714 mutex_init(&wl->mutex);
5715 mutex_init(&wl->flush_mutex);
5716 init_completion(&wl->nvs_loading_complete);
5717
5718 order = get_order(aggr_buf_size);
5719 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5720 if (!wl->aggr_buf) {
5721 ret = -ENOMEM;
5722 goto err_wq;
5723 }
5724 wl->aggr_buf_size = aggr_buf_size;
5725
5726 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5727 if (!wl->dummy_packet) {
5728 ret = -ENOMEM;
5729 goto err_aggr;
5730 }
5731
5732 /* Allocate one page for the FW log */
5733 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5734 if (!wl->fwlog) {
5735 ret = -ENOMEM;
5736 goto err_dummy_packet;
5737 }
5738
5739 wl->mbox_size = mbox_size;
5740 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5741 if (!wl->mbox) {
5742 ret = -ENOMEM;
5743 goto err_fwlog;
5744 }
5745
5746 return hw;
5747
5748 err_fwlog:
5749 free_page((unsigned long)wl->fwlog);
5750
5751 err_dummy_packet:
5752 dev_kfree_skb(wl->dummy_packet);
5753
5754 err_aggr:
5755 free_pages((unsigned long)wl->aggr_buf, order);
5756
5757 err_wq:
5758 destroy_workqueue(wl->freezable_wq);
5759
5760 err_hw:
5761 wl1271_debugfs_exit(wl);
5762 kfree(wl->priv);
5763
5764 err_priv_alloc:
5765 ieee80211_free_hw(hw);
5766
5767 err_hw_alloc:
5768
5769 return ERR_PTR(ret);
5770 }
5771 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5772
5773 int wlcore_free_hw(struct wl1271 *wl)
5774 {
5775 /* Unblock any fwlog readers */
5776 mutex_lock(&wl->mutex);
5777 wl->fwlog_size = -1;
5778 wake_up_interruptible_all(&wl->fwlog_waitq);
5779 mutex_unlock(&wl->mutex);
5780
5781 device_remove_bin_file(wl->dev, &fwlog_attr);
5782
5783 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5784
5785 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5786 kfree(wl->mbox);
5787 free_page((unsigned long)wl->fwlog);
5788 dev_kfree_skb(wl->dummy_packet);
5789 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5790
5791 wl1271_debugfs_exit(wl);
5792
5793 vfree(wl->fw);
5794 wl->fw = NULL;
5795 wl->fw_type = WL12XX_FW_TYPE_NONE;
5796 kfree(wl->nvs);
5797 wl->nvs = NULL;
5798
5799 kfree(wl->fw_status_1);
5800 kfree(wl->tx_res_if);
5801 destroy_workqueue(wl->freezable_wq);
5802
5803 kfree(wl->priv);
5804 ieee80211_free_hw(wl->hw);
5805
5806 return 0;
5807 }
5808 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5809
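/*
 * Hard IRQ handler: complete a pending ELP wakeup and normally defer the
 * real work to the threaded handler; while suspended it only marks the work
 * as pending and raises a PM wakeup event.
 */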
5810 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5811 {
5812 struct wl1271 *wl = cookie;
5813 unsigned long flags;
5814
5815 wl1271_debug(DEBUG_IRQ, "IRQ");
5816
5817 /* complete the ELP completion */
5818 spin_lock_irqsave(&wl->wl_lock, flags);
5819 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5820 if (wl->elp_compl) {
5821 complete(wl->elp_compl);
5822 wl->elp_compl = NULL;
5823 }
5824
5825 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5826 /* don't enqueue work right now; mark it as pending */
5827 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5828 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5829 disable_irq_nosync(wl->irq);
5830 pm_wakeup_event(wl->dev, 0);
5831 spin_unlock_irqrestore(&wl->wl_lock, flags);
5832 return IRQ_HANDLED;
5833 }
5834 spin_unlock_irqrestore(&wl->wl_lock, flags);
5835
5836 return IRQ_WAKE_THREAD;
5837 }
5838
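/*
 * Completion callback for the asynchronous NVS firmware request: copy the
 * NVS (if any), run the chip-specific setup, request the IRQ, read the HW
 * info and finally register with mac80211 and create the sysfs/fwlog files.
 */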
5839 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5840 {
5841 struct wl1271 *wl = context;
5842 struct platform_device *pdev = wl->pdev;
5843 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5844 unsigned long irqflags;
5845 int ret;
5846
5847 if (fw) {
5848 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5849 if (!wl->nvs) {
5850 wl1271_error("Could not allocate nvs data");
5851 goto out;
5852 }
5853 wl->nvs_len = fw->size;
5854 } else {
5855 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5856 WL12XX_NVS_NAME);
5857 wl->nvs = NULL;
5858 wl->nvs_len = 0;
5859 }
5860
5861 ret = wl->ops->setup(wl);
5862 if (ret < 0)
5863 goto out_free_nvs;
5864
5865 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5866
5867 /* adjust some runtime configuration parameters */
5868 wlcore_adjust_conf(wl);
5869
5870 wl->irq = platform_get_irq(pdev, 0);
5871 wl->platform_quirks = pdata->platform_quirks;
5872 wl->set_power = pdata->set_power;
5873 wl->if_ops = pdata->ops;
5874
5875 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5876 irqflags = IRQF_TRIGGER_RISING;
5877 else
5878 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5879
5880 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
5881 irqflags,
5882 pdev->name, wl);
5883 if (ret < 0) {
5884 wl1271_error("request_irq() failed: %d", ret);
5885 goto out_free_nvs;
5886 }
5887
5888 #ifdef CONFIG_PM
5889 ret = enable_irq_wake(wl->irq);
5890 if (!ret) {
5891 wl->irq_wake_enabled = true;
5892 device_init_wakeup(wl->dev, 1);
5893 if (pdata->pwr_in_suspend) {
5894 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5895 wl->hw->wiphy->wowlan.n_patterns =
5896 WL1271_MAX_RX_FILTERS;
5897 wl->hw->wiphy->wowlan.pattern_min_len = 1;
5898 wl->hw->wiphy->wowlan.pattern_max_len =
5899 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
5900 }
5901 }
5902 #endif
5903 disable_irq(wl->irq);
5904
5905 ret = wl12xx_get_hw_info(wl);
5906 if (ret < 0) {
5907 wl1271_error("couldn't get hw info");
5908 goto out_irq;
5909 }
5910
5911 ret = wl->ops->identify_chip(wl);
5912 if (ret < 0)
5913 goto out_irq;
5914
5915 ret = wl1271_init_ieee80211(wl);
5916 if (ret)
5917 goto out_irq;
5918
5919 ret = wl1271_register_hw(wl);
5920 if (ret)
5921 goto out_irq;
5922
5923 /* Create sysfs file to control bt coex state */
5924 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5925 if (ret < 0) {
5926 wl1271_error("failed to create sysfs file bt_coex_state");
5927 goto out_unreg;
5928 }
5929
5930 /* Create sysfs file to get HW PG version */
5931 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5932 if (ret < 0) {
5933 wl1271_error("failed to create sysfs file hw_pg_ver");
5934 goto out_bt_coex_state;
5935 }
5936
5937 /* Create sysfs file for the FW log */
5938 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5939 if (ret < 0) {
5940 wl1271_error("failed to create sysfs file fwlog");
5941 goto out_hw_pg_ver;
5942 }
5943
5944 wl->initialized = true;
5945 goto out;
5946
5947 out_hw_pg_ver:
5948 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5949
5950 out_bt_coex_state:
5951 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5952
5953 out_unreg:
5954 wl1271_unregister_hw(wl);
5955
5956 out_irq:
5957 free_irq(wl->irq, wl);
5958
5959 out_free_nvs:
5960 kfree(wl->nvs);
5961
5962 out:
5963 release_firmware(fw);
5964 complete_all(&wl->nvs_loading_complete);
5965 }
5966
5967 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5968 {
5969 int ret;
5970
5971 if (!wl->ops || !wl->ptable)
5972 return -EINVAL;
5973
5974 wl->dev = &pdev->dev;
5975 wl->pdev = pdev;
5976 platform_set_drvdata(pdev, wl);
5977
5978 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
5979 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
5980 wl, wlcore_nvs_cb);
5981 if (ret < 0) {
5982 wl1271_error("request_firmware_nowait failed: %d", ret);
5983 complete_all(&wl->nvs_loading_complete);
5984 }
5985
5986 return ret;
5987 }
5988 EXPORT_SYMBOL_GPL(wlcore_probe);
5989
5990 int __devexit wlcore_remove(struct platform_device *pdev)
5991 {
5992 struct wl1271 *wl = platform_get_drvdata(pdev);
5993
5994 wait_for_completion(&wl->nvs_loading_complete);
5995 if (!wl->initialized)
5996 return 0;
5997
5998 if (wl->irq_wake_enabled) {
5999 device_init_wakeup(wl->dev, 0);
6000 disable_irq_wake(wl->irq);
6001 }
6002 wl1271_unregister_hw(wl);
6003 free_irq(wl->irq, wl);
6004 wlcore_free_hw(wl);
6005
6006 return 0;
6007 }
6008 EXPORT_SYMBOL_GPL(wlcore_remove);
6009
6010 u32 wl12xx_debug_level = DEBUG_NONE;
6011 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6012 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6013 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6014
6015 module_param_named(fwlog, fwlog_param, charp, 0);
6016 MODULE_PARM_DESC(fwlog,
6017 "FW logger options: continuous, ondemand, dbgpins or disable");
6018
6019 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6020 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6021
6022 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6023 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6024
6025 MODULE_LICENSE("GPL");
6026 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6027 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6028 MODULE_FIRMWARE(WL12XX_NVS_NAME);