1 /*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29
30 #include "wlcore.h"
31 #include "debug.h"
32 #include "wl12xx_80211.h"
33 #include "io.h"
34 #include "tx.h"
35 #include "ps.h"
36 #include "init.h"
37 #include "debugfs.h"
38 #include "testmode.h"
39 #include "vendor_cmd.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43
44 #define WL1271_BOOT_RETRIES 3
45
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
50
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56
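/*
 * Tell the FW that the (already associated) STA link is authorized.
 * The peer-state command is sent at most once per association, guarded
 * by the WLVIF_FLAG_STA_STATE_SENT flag.
 */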
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 int ret;
60
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 return -EINVAL;
63
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 return 0;
66
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 return 0;
69
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 if (ret < 0)
72 return ret;
73
74 wl1271_info("Association completed.");
75 return 0;
76 }
77
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
80 {
81 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
82 struct wl1271 *wl = hw->priv;
83
84 /* copy the current dfs region */
85 if (request)
86 wl->dfs_region = request->dfs_region;
87
88 wlcore_regdomain_config(wl);
89 }
90
91 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
92 bool enable)
93 {
94 int ret = 0;
95
96 /* we should hold wl->mutex */
97 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
98 if (ret < 0)
99 goto out;
100
101 if (enable)
102 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
103 else
104 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 out:
106 return ret;
107 }
108
109 /*
110 * this function is called when the rx_streaming interval
111 * has been changed or rx_streaming should be disabled
112 */
113 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
114 {
115 int ret = 0;
116 int period = wl->conf.rx_streaming.interval;
117
118 /* don't reconfigure if rx_streaming is disabled */
119 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
120 goto out;
121
122 /* reconfigure/disable according to new streaming_period */
123 if (period &&
124 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
125 (wl->conf.rx_streaming.always ||
126 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
127 ret = wl1271_set_rx_streaming(wl, wlvif, true);
128 else {
129 ret = wl1271_set_rx_streaming(wl, wlvif, false);
130 /* don't cancel_work_sync since we might deadlock */
131 del_timer_sync(&wlvif->rx_streaming_timer);
132 }
133 out:
134 return ret;
135 }
136
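/*
 * Work items that enable/disable RX streaming. The conditions are
 * re-checked under wl->mutex before touching the chip; the enable path
 * also arms rx_streaming_timer so streaming is turned off again after
 * conf.rx_streaming.duration ms of inactivity.
 */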
137 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
138 {
139 int ret;
140 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
141 rx_streaming_enable_work);
142 struct wl1271 *wl = wlvif->wl;
143
144 mutex_lock(&wl->mutex);
145
146 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
147 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
148 (!wl->conf.rx_streaming.always &&
149 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 goto out;
151
152 if (!wl->conf.rx_streaming.interval)
153 goto out;
154
155 ret = wl1271_ps_elp_wakeup(wl);
156 if (ret < 0)
157 goto out;
158
159 ret = wl1271_set_rx_streaming(wl, wlvif, true);
160 if (ret < 0)
161 goto out_sleep;
162
163 /* stop it after some time of inactivity */
164 mod_timer(&wlvif->rx_streaming_timer,
165 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
166
167 out_sleep:
168 wl1271_ps_elp_sleep(wl);
169 out:
170 mutex_unlock(&wl->mutex);
171 }
172
173 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
174 {
175 int ret;
176 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
177 rx_streaming_disable_work);
178 struct wl1271 *wl = wlvif->wl;
179
180 mutex_lock(&wl->mutex);
181
182 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
183 goto out;
184
185 ret = wl1271_ps_elp_wakeup(wl);
186 if (ret < 0)
187 goto out;
188
189 ret = wl1271_set_rx_streaming(wl, wlvif, false);
190 if (ret)
191 goto out_sleep;
192
193 out_sleep:
194 wl1271_ps_elp_sleep(wl);
195 out:
196 mutex_unlock(&wl->mutex);
197 }
198
199 static void wl1271_rx_streaming_timer(struct timer_list *t)
200 {
201 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
202 struct wl1271 *wl = wlvif->wl;
203 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
204 }
205
206 /* wl->mutex must be taken */
207 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
208 {
209 /* if the watchdog is not armed, don't do anything */
210 if (wl->tx_allocated_blocks == 0)
211 return;
212
213 cancel_delayed_work(&wl->tx_watchdog_work);
214 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
215 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
216 }
217
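/*
 * Apply a deferred rate-control update: for mesh interfaces reprogram
 * the HT capabilities through ACX, otherwise let the chip-specific
 * sta_rc_update op handle it.
 */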
218 static void wlcore_rc_update_work(struct work_struct *work)
219 {
220 int ret;
221 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
222 rc_update_work);
223 struct wl1271 *wl = wlvif->wl;
224 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
225
226 mutex_lock(&wl->mutex);
227
228 if (unlikely(wl->state != WLCORE_STATE_ON))
229 goto out;
230
231 ret = wl1271_ps_elp_wakeup(wl);
232 if (ret < 0)
233 goto out;
234
235 if (ieee80211_vif_is_mesh(vif)) {
236 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
237 true, wlvif->sta.hlid);
238 if (ret < 0)
239 goto out_sleep;
240 } else {
241 wlcore_hw_sta_rc_update(wl, wlvif);
242 }
243
244 out_sleep:
245 wl1271_ps_elp_sleep(wl);
246 out:
247 mutex_unlock(&wl->mutex);
248 }
249
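/*
 * Periodic check that the FW is still releasing Tx blocks. Legitimate
 * stalls (ROC, scan, an AP buffering frames for PS stations) just
 * re-arm the watchdog; otherwise Tx is considered stuck and recovery
 * work is queued.
 */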
250 static void wl12xx_tx_watchdog_work(struct work_struct *work)
251 {
252 struct delayed_work *dwork;
253 struct wl1271 *wl;
254
255 dwork = to_delayed_work(work);
256 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
257
258 mutex_lock(&wl->mutex);
259
260 if (unlikely(wl->state != WLCORE_STATE_ON))
261 goto out;
262
263 /* Tx went out in the meantime - everything is ok */
264 if (unlikely(wl->tx_allocated_blocks == 0))
265 goto out;
266
267 /*
268 * if a ROC is in progress, we might not have any Tx for a long
269 * time (e.g. pending Tx on the non-ROC channels)
270 */
271 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
272 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
273 wl->conf.tx.tx_watchdog_timeout);
274 wl12xx_rearm_tx_watchdog_locked(wl);
275 goto out;
276 }
277
278 /*
279 * if a scan is in progress, we might not have any Tx for a long
280 * time
281 */
282 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
283 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
284 wl->conf.tx.tx_watchdog_timeout);
285 wl12xx_rearm_tx_watchdog_locked(wl);
286 goto out;
287 }
288
289 /*
290 * AP might cache a frame for a long time for a sleeping station,
291 * so rearm the timer if there's an AP interface with stations. If
292 * Tx is genuinely stuck, we will hopefully discover it when all
293 * stations are removed due to inactivity.
294 */
295 if (wl->active_sta_count) {
296 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
297 " %d stations",
298 wl->conf.tx.tx_watchdog_timeout,
299 wl->active_sta_count);
300 wl12xx_rearm_tx_watchdog_locked(wl);
301 goto out;
302 }
303
304 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
305 wl->conf.tx.tx_watchdog_timeout);
306 wl12xx_queue_recovery_work(wl);
307
308 out:
309 mutex_unlock(&wl->mutex);
310 }
311
312 static void wlcore_adjust_conf(struct wl1271 *wl)
313 {
314
315 if (fwlog_param) {
316 if (!strcmp(fwlog_param, "continuous")) {
317 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
319 } else if (!strcmp(fwlog_param, "dbgpins")) {
320 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
321 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
322 } else if (!strcmp(fwlog_param, "disable")) {
323 wl->conf.fwlog.mem_blocks = 0;
324 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
325 } else {
326 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
327 }
328 }
329
330 if (bug_on_recovery != -1)
331 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
332
333 if (no_recovery != -1)
334 wl->conf.recovery.no_recovery = (u8) no_recovery;
335 }
336
337 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
338 struct wl12xx_vif *wlvif,
339 u8 hlid, u8 tx_pkts)
340 {
341 bool fw_ps;
342
343 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
344
345 /*
346 * Wake up from high-level PS if the STA is asleep with too few
347 * packets in FW or if the STA is awake.
348 */
349 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
350 wl12xx_ps_link_end(wl, wlvif, hlid);
351
352 /*
353 * Start high-level PS if the STA is asleep with enough blocks in FW.
354 * Make an exception if this is the only connected link. In this
355 * case FW-memory congestion is less of a problem.
356 * Note that a single connected STA means 2*ap_count + 1 active links,
357 * since we must account for the global and broadcast AP links
358 * for each AP. The "fw_ps" check assures us the other link is a STA
359 * connected to the AP. Otherwise the FW would not set the PSM bit.
360 */
361 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
362 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
363 wl12xx_ps_link_start(wl, wlvif, hlid, true);
364 }
365
366 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
367 struct wl12xx_vif *wlvif,
368 struct wl_fw_status *status)
369 {
370 unsigned long cur_fw_ps_map;
371 u8 hlid;
372
373 cur_fw_ps_map = status->link_ps_bitmap;
374 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
375 wl1271_debug(DEBUG_PSM,
376 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
377 wl->ap_fw_ps_map, cur_fw_ps_map,
378 wl->ap_fw_ps_map ^ cur_fw_ps_map);
379
380 wl->ap_fw_ps_map = cur_fw_ps_map;
381 }
382
383 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
384 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
385 wl->links[hlid].allocated_pkts);
386 }
387
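/*
 * Read and convert the raw FW status, then derive per-queue and
 * per-link freed-packet deltas (8-bit wrap-safe), update the available
 * Tx block count, re-arm or cancel the Tx watchdog and refresh the AP
 * link PS state.
 */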
388 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
389 {
390 struct wl12xx_vif *wlvif;
391 struct timespec ts;
392 u32 old_tx_blk_count = wl->tx_blocks_available;
393 int avail, freed_blocks;
394 int i;
395 int ret;
396 struct wl1271_link *lnk;
397
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
399 wl->raw_fw_status,
400 wl->fw_status_len, false);
401 if (ret < 0)
402 return ret;
403
404 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
405
406 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
407 "drv_rx_counter = %d, tx_results_counter = %d)",
408 status->intr,
409 status->fw_rx_counter,
410 status->drv_rx_counter,
411 status->tx_results_counter);
412
413 for (i = 0; i < NUM_TX_QUEUES; i++) {
414 /* prevent wrap-around in freed-packets counter */
415 wl->tx_allocated_pkts[i] -=
416 (status->counters.tx_released_pkts[i] -
417 wl->tx_pkts_freed[i]) & 0xff;
418
419 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
420 }
421
422
423 for_each_set_bit(i, wl->links_map, wl->num_links) {
424 u8 diff;
425 lnk = &wl->links[i];
426
427 /* prevent wrap-around in freed-packets counter */
428 diff = (status->counters.tx_lnk_free_pkts[i] -
429 lnk->prev_freed_pkts) & 0xff;
430
431 if (diff == 0)
432 continue;
433
434 lnk->allocated_pkts -= diff;
435 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
436
437 /* accumulate the prev_freed_pkts counter */
438 lnk->total_freed_pkts += diff;
439 }
440
441 /* prevent wrap-around in total blocks counter */
442 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
443 freed_blocks = status->total_released_blks -
444 wl->tx_blocks_freed;
445 else
446 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
447 status->total_released_blks;
448
449 wl->tx_blocks_freed = status->total_released_blks;
450
451 wl->tx_allocated_blocks -= freed_blocks;
452
453 /*
454 * If the FW freed some blocks:
455 * If we still have allocated blocks - re-arm the timer, Tx is
456 * not stuck. Otherwise, cancel the timer (no Tx currently).
457 */
458 if (freed_blocks) {
459 if (wl->tx_allocated_blocks)
460 wl12xx_rearm_tx_watchdog_locked(wl);
461 else
462 cancel_delayed_work(&wl->tx_watchdog_work);
463 }
464
465 avail = status->tx_total - wl->tx_allocated_blocks;
466
467 /*
468 * The FW might change the total number of TX memblocks before
469 * we get a notification about blocks being released. Thus, the
470 * available blocks calculation might yield a temporary result
471 * which is lower than the actual available blocks. Keeping in
472 * mind that only blocks that were allocated can be moved from
473 * TX to RX, tx_blocks_available should never decrease here.
474 */
475 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
476 avail);
477
478 /* if more blocks are available now, tx work can be scheduled */
479 if (wl->tx_blocks_available > old_tx_blk_count)
480 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
481
482 /* for AP update num of allocated TX blocks per link and ps status */
483 wl12xx_for_each_wlvif_ap(wl, wlvif) {
484 wl12xx_irq_update_links_status(wl, wlvif, status);
485 }
486
487 /* update the host-chipset time offset */
488 getnstimeofday(&ts);
489 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
490 (s64)(status->fw_localtime);
491
492 wl->fw_fast_lnk_map = status->link_fast_bitmap;
493
494 return 0;
495 }
496
497 static void wl1271_flush_deferred_work(struct wl1271 *wl)
498 {
499 struct sk_buff *skb;
500
501 /* Pass all received frames to the network stack */
502 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
503 ieee80211_rx_ni(wl->hw, skb);
504
505 /* Return sent skbs to the network stack */
506 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
507 ieee80211_tx_status_ni(wl->hw, skb);
508 }
509
510 static void wl1271_netstack_work(struct work_struct *work)
511 {
512 struct wl1271 *wl =
513 container_of(work, struct wl1271, netstack_work);
514
515 do {
516 wl1271_flush_deferred_work(wl);
517 } while (skb_queue_len(&wl->deferred_rx_queue));
518 }
519
520 #define WL1271_IRQ_MAX_LOOPS 256
521
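/*
 * Threaded IRQ body, called with wl->mutex held. Loops up to
 * WL1271_IRQ_MAX_LOOPS times (only once for edge-triggered interrupts),
 * handling watchdog, data and event interrupts reported in the FW status.
 */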
522 static int wlcore_irq_locked(struct wl1271 *wl)
523 {
524 int ret = 0;
525 u32 intr;
526 int loopcount = WL1271_IRQ_MAX_LOOPS;
527 bool done = false;
528 unsigned int defer_count;
529 unsigned long flags;
530
531 /*
532 * If an edge-triggered interrupt must be used, we cannot iterate
533 * more than once without introducing race conditions with the hardirq.
534 */
535 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
536 loopcount = 1;
537
538 wl1271_debug(DEBUG_IRQ, "IRQ work");
539
540 if (unlikely(wl->state != WLCORE_STATE_ON))
541 goto out;
542
543 ret = wl1271_ps_elp_wakeup(wl);
544 if (ret < 0)
545 goto out;
546
547 while (!done && loopcount--) {
548 /*
549 * In order to avoid a race with the hardirq, clear the flag
550 * before acknowledging the chip. Since the mutex is held,
551 * wl1271_ps_elp_wakeup cannot be called concurrently.
552 */
553 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
554 smp_mb__after_atomic();
555
556 ret = wlcore_fw_status(wl, wl->fw_status);
557 if (ret < 0)
558 goto out;
559
560 wlcore_hw_tx_immediate_compl(wl);
561
562 intr = wl->fw_status->intr;
563 intr &= WLCORE_ALL_INTR_MASK;
564 if (!intr) {
565 done = true;
566 continue;
567 }
568
569 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
570 wl1271_error("HW watchdog interrupt received! starting recovery.");
571 wl->watchdog_recovery = true;
572 ret = -EIO;
573
574 /* restarting the chip. ignore any other interrupt. */
575 goto out;
576 }
577
578 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
579 wl1271_error("SW watchdog interrupt received! "
580 "starting recovery.");
581 wl->watchdog_recovery = true;
582 ret = -EIO;
583
584 /* restarting the chip. ignore any other interrupt. */
585 goto out;
586 }
587
588 if (likely(intr & WL1271_ACX_INTR_DATA)) {
589 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
590
591 ret = wlcore_rx(wl, wl->fw_status);
592 if (ret < 0)
593 goto out;
594
595 /* Check if any tx blocks were freed */
596 spin_lock_irqsave(&wl->wl_lock, flags);
597 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
598 wl1271_tx_total_queue_count(wl) > 0) {
599 spin_unlock_irqrestore(&wl->wl_lock, flags);
600 /*
601 * In order to avoid starvation of the TX path,
602 * call the work function directly.
603 */
604 ret = wlcore_tx_work_locked(wl);
605 if (ret < 0)
606 goto out;
607 } else {
608 spin_unlock_irqrestore(&wl->wl_lock, flags);
609 }
610
611 /* check for tx results */
612 ret = wlcore_hw_tx_delayed_compl(wl);
613 if (ret < 0)
614 goto out;
615
616 /* Make sure the deferred queues don't get too long */
617 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
618 skb_queue_len(&wl->deferred_rx_queue);
619 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
620 wl1271_flush_deferred_work(wl);
621 }
622
623 if (intr & WL1271_ACX_INTR_EVENT_A) {
624 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
625 ret = wl1271_event_handle(wl, 0);
626 if (ret < 0)
627 goto out;
628 }
629
630 if (intr & WL1271_ACX_INTR_EVENT_B) {
631 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
632 ret = wl1271_event_handle(wl, 1);
633 if (ret < 0)
634 goto out;
635 }
636
637 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
638 wl1271_debug(DEBUG_IRQ,
639 "WL1271_ACX_INTR_INIT_COMPLETE");
640
641 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
642 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
643 }
644
645 wl1271_ps_elp_sleep(wl);
646
647 out:
648 return ret;
649 }
650
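/*
 * Interrupt handler: completes a pending ELP wakeup, defers processing
 * while the device is suspended, and otherwise handles the interrupt
 * via wlcore_irq_locked(), queueing recovery work if that fails.
 */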
651 static irqreturn_t wlcore_irq(int irq, void *cookie)
652 {
653 int ret;
654 unsigned long flags;
655 struct wl1271 *wl = cookie;
656
657 /* complete the ELP completion */
658 spin_lock_irqsave(&wl->wl_lock, flags);
659 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
660 if (wl->elp_compl) {
661 complete(wl->elp_compl);
662 wl->elp_compl = NULL;
663 }
664
665 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
666 /* don't enqueue work right now; mark it as pending */
667 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
668 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
669 disable_irq_nosync(wl->irq);
670 pm_wakeup_event(wl->dev, 0);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
672 return IRQ_HANDLED;
673 }
674 spin_unlock_irqrestore(&wl->wl_lock, flags);
675
676 /* TX might be handled here, avoid redundant work */
677 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
678 cancel_work_sync(&wl->tx_work);
679
680 mutex_lock(&wl->mutex);
681
682 ret = wlcore_irq_locked(wl);
683 if (ret)
684 wl12xx_queue_recovery_work(wl);
685
686 spin_lock_irqsave(&wl->wl_lock, flags);
687 /* In case TX was not handled here, queue TX work */
688 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
690 wl1271_tx_total_queue_count(wl) > 0)
691 ieee80211_queue_work(wl->hw, &wl->tx_work);
692 spin_unlock_irqrestore(&wl->wl_lock, flags);
693
694 mutex_unlock(&wl->mutex);
695
696 return IRQ_HANDLED;
697 }
698
699 struct vif_counter_data {
700 u8 counter;
701
702 struct ieee80211_vif *cur_vif;
703 bool cur_vif_running;
704 };
705
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 struct ieee80211_vif *vif)
708 {
709 struct vif_counter_data *counter = data;
710
711 counter->counter++;
712 if (counter->cur_vif == vif)
713 counter->cur_vif_running = true;
714 }
715
716 /* caller must not hold wl->mutex, as it might deadlock */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 struct ieee80211_vif *cur_vif,
719 struct vif_counter_data *data)
720 {
721 memset(data, 0, sizeof(*data));
722 data->cur_vif = cur_vif;
723
724 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 wl12xx_vif_count_iter, data);
726 }
727
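/*
 * Pick the PLT, multi-role or single-role firmware image based on the
 * cached vif count, and load it into wl->fw (no-op if that firmware
 * type is already loaded).
 */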
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 {
730 const struct firmware *fw;
731 const char *fw_name;
732 enum wl12xx_fw_type fw_type;
733 int ret;
734
735 if (plt) {
736 fw_type = WL12XX_FW_TYPE_PLT;
737 fw_name = wl->plt_fw_name;
738 } else {
739 /*
740 * we can't call wl12xx_get_vif_count() here because
741 * wl->mutex is taken, so use the cached last_vif_count value
742 */
743 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 fw_type = WL12XX_FW_TYPE_MULTI;
745 fw_name = wl->mr_fw_name;
746 } else {
747 fw_type = WL12XX_FW_TYPE_NORMAL;
748 fw_name = wl->sr_fw_name;
749 }
750 }
751
752 if (wl->fw_type == fw_type)
753 return 0;
754
755 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756
757 ret = request_firmware(&fw, fw_name, wl->dev);
758
759 if (ret < 0) {
760 wl1271_error("could not get firmware %s: %d", fw_name, ret);
761 return ret;
762 }
763
764 if (fw->size % 4) {
765 wl1271_error("firmware size is not multiple of 32 bits: %zu",
766 fw->size);
767 ret = -EILSEQ;
768 goto out;
769 }
770
771 vfree(wl->fw);
772 wl->fw_type = WL12XX_FW_TYPE_NONE;
773 wl->fw_len = fw->size;
774 wl->fw = vmalloc(wl->fw_len);
775
776 if (!wl->fw) {
777 wl1271_error("could not allocate memory for the firmware");
778 ret = -ENOMEM;
779 goto out;
780 }
781
782 memcpy(wl->fw, fw->data, wl->fw_len);
783 ret = 0;
784 wl->fw_type = fw_type;
785 out:
786 release_firmware(fw);
787
788 return ret;
789 }
790
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 {
793 /* Avoid a recursive recovery */
794 if (wl->state == WLCORE_STATE_ON) {
795 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
796 &wl->flags));
797
798 wl->state = WLCORE_STATE_RESTARTING;
799 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 wl1271_ps_elp_wakeup(wl);
801 wlcore_disable_interrupts_nosync(wl);
802 ieee80211_queue_work(wl->hw, &wl->recovery_work);
803 }
804 }
805
806 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
807 {
808 size_t len;
809
810 /* Make sure we have enough room */
811 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
812
813 /* Fill the FW log file, consumed by the sysfs fwlog entry */
814 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
815 wl->fwlog_size += len;
816
817 return len;
818 }
819
820 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
821 {
822 u32 end_of_log = 0;
823
824 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
825 return;
826
827 wl1271_info("Reading FW panic log");
828
829 /*
830 * Make sure the chip is awake and the logger isn't active.
831 * Do not send a stop fwlog command if the fw is hung or if
832 * dbgpins are used (due to some fw bug).
833 */
834 if (wl1271_ps_elp_wakeup(wl))
835 return;
836 if (!wl->watchdog_recovery &&
837 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
838 wl12xx_cmd_stop_fwlog(wl);
839
840 /* Traverse the memory blocks linked list */
841 do {
842 end_of_log = wlcore_event_fw_logger(wl);
843 if (end_of_log == 0) {
844 msleep(100);
845 end_of_log = wlcore_event_fw_logger(wl);
846 }
847 } while (end_of_log != 0);
848 }
849
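/*
 * Save the link's total freed-packets counter in the station's private
 * data so Tx sequence numbers keep increasing across recovery; during
 * recovery extra padding is added (larger for GEM encryption).
 */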
850 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
851 u8 hlid, struct ieee80211_sta *sta)
852 {
853 struct wl1271_station *wl_sta;
854 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
855
856 wl_sta = (void *)sta->drv_priv;
857 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
858
859 /*
860 * increment the initial seq number on recovery to account for
861 * transmitted packets that we haven't yet seen in the FW status
862 */
863 if (wlvif->encryption_type == KEY_GEM)
864 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
865
866 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
867 wl_sta->total_freed_pkts += sqn_recovery_padding;
868 }
869
870 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
871 struct wl12xx_vif *wlvif,
872 u8 hlid, const u8 *addr)
873 {
874 struct ieee80211_sta *sta;
875 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
876
877 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
878 is_zero_ether_addr(addr)))
879 return;
880
881 rcu_read_lock();
882 sta = ieee80211_find_sta(vif, addr);
883 if (sta)
884 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
885 rcu_read_unlock();
886 }
887
888 static void wlcore_print_recovery(struct wl1271 *wl)
889 {
890 u32 pc = 0;
891 u32 hint_sts = 0;
892 int ret;
893
894 wl1271_info("Hardware recovery in progress. FW ver: %s",
895 wl->chip.fw_ver_str);
896
897 /* change partitions momentarily so we can read the FW pc */
898 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
899 if (ret < 0)
900 return;
901
902 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
903 if (ret < 0)
904 return;
905
906 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
907 if (ret < 0)
908 return;
909
910 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
911 pc, hint_sts, ++wl->recovery_count);
912
913 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
914 }
915
916
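/*
 * Full FW recovery: optionally dump the FW panic log and PC, remove all
 * interfaces (preserving freed-packet counters of associated stations),
 * stop the device and ask mac80211 to restart the hardware.
 */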
917 static void wl1271_recovery_work(struct work_struct *work)
918 {
919 struct wl1271 *wl =
920 container_of(work, struct wl1271, recovery_work);
921 struct wl12xx_vif *wlvif;
922 struct ieee80211_vif *vif;
923
924 mutex_lock(&wl->mutex);
925
926 if (wl->state == WLCORE_STATE_OFF || wl->plt)
927 goto out_unlock;
928
929 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
930 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
931 wl12xx_read_fwlog_panic(wl);
932 wlcore_print_recovery(wl);
933 }
934
935 BUG_ON(wl->conf.recovery.bug_on_recovery &&
936 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
937
938 if (wl->conf.recovery.no_recovery) {
939 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
940 goto out_unlock;
941 }
942
943 /* Prevent spurious TX during FW restart */
944 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
945
946 /* reboot the chipset */
947 while (!list_empty(&wl->wlvif_list)) {
948 wlvif = list_first_entry(&wl->wlvif_list,
949 struct wl12xx_vif, list);
950 vif = wl12xx_wlvif_to_vif(wlvif);
951
952 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
953 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
954 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
955 vif->bss_conf.bssid);
956 }
957
958 __wl1271_op_remove_interface(wl, vif, false);
959 }
960
961 wlcore_op_stop_locked(wl);
962
963 ieee80211_restart_hw(wl->hw);
964
965 /*
966 * It's safe to enable TX now - the queues are stopped after a request
967 * to restart the HW.
968 */
969 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970
971 out_unlock:
972 wl->watchdog_recovery = false;
973 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
974 mutex_unlock(&wl->mutex);
975 }
976
977 static int wlcore_fw_wakeup(struct wl1271 *wl)
978 {
979 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
980 }
981
982 static int wl1271_setup(struct wl1271 *wl)
983 {
984 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
985 if (!wl->raw_fw_status)
986 goto err;
987
988 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
989 if (!wl->fw_status)
990 goto err;
991
992 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
993 if (!wl->tx_res_if)
994 goto err;
995
996 return 0;
997 err:
998 kfree(wl->fw_status);
999 kfree(wl->raw_fw_status);
1000 return -ENOMEM;
1001 }
1002
1003 static int wl12xx_set_power_on(struct wl1271 *wl)
1004 {
1005 int ret;
1006
1007 msleep(WL1271_PRE_POWER_ON_SLEEP);
1008 ret = wl1271_power_on(wl);
1009 if (ret < 0)
1010 goto out;
1011 msleep(WL1271_POWER_ON_SLEEP);
1012 wl1271_io_reset(wl);
1013 wl1271_io_init(wl);
1014
1015 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1016 if (ret < 0)
1017 goto fail;
1018
1019 /* ELP module wake up */
1020 ret = wlcore_fw_wakeup(wl);
1021 if (ret < 0)
1022 goto fail;
1023
1024 out:
1025 return ret;
1026
1027 fail:
1028 wl1271_power_off(wl);
1029 return ret;
1030 }
1031
1032 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1033 {
1034 int ret = 0;
1035
1036 ret = wl12xx_set_power_on(wl);
1037 if (ret < 0)
1038 goto out;
1039
1040 /*
1041 * For wl127x based devices we could use the default block
1042 * size (512 bytes), but due to a bug in the sdio driver, we
1043 * need to set it explicitly after the chip is powered on. To
1044 * simplify the code and since the performance impact is
1045 * negligible, we use the same block size for all different
1046 * chip types.
1047 *
1048 * Check if the bus supports blocksize alignment and, if it
1049 * doesn't, make sure we don't have the quirk.
1050 */
1051 if (!wl1271_set_block_size(wl))
1052 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1053
1054 /* TODO: make sure the lower driver has set things up correctly */
1055
1056 ret = wl1271_setup(wl);
1057 if (ret < 0)
1058 goto out;
1059
1060 ret = wl12xx_fetch_firmware(wl, plt);
1061 if (ret < 0)
1062 goto out;
1063
1064 out:
1065 return ret;
1066 }
1067
1068 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1069 {
1070 int retries = WL1271_BOOT_RETRIES;
1071 struct wiphy *wiphy = wl->hw->wiphy;
1072
1073 static const char* const PLT_MODE[] = {
1074 "PLT_OFF",
1075 "PLT_ON",
1076 "PLT_FEM_DETECT",
1077 "PLT_CHIP_AWAKE"
1078 };
1079
1080 int ret;
1081
1082 mutex_lock(&wl->mutex);
1083
1084 wl1271_notice("power up");
1085
1086 if (wl->state != WLCORE_STATE_OFF) {
1087 wl1271_error("cannot go into PLT state because not "
1088 "in off state: %d", wl->state);
1089 ret = -EBUSY;
1090 goto out;
1091 }
1092
1093 /* Indicate to lower levels that we are now in PLT mode */
1094 wl->plt = true;
1095 wl->plt_mode = plt_mode;
1096
1097 while (retries) {
1098 retries--;
1099 ret = wl12xx_chip_wakeup(wl, true);
1100 if (ret < 0)
1101 goto power_off;
1102
1103 if (plt_mode != PLT_CHIP_AWAKE) {
1104 ret = wl->ops->plt_init(wl);
1105 if (ret < 0)
1106 goto power_off;
1107 }
1108
1109 wl->state = WLCORE_STATE_ON;
1110 wl1271_notice("firmware booted in PLT mode %s (%s)",
1111 PLT_MODE[plt_mode],
1112 wl->chip.fw_ver_str);
1113
1114 /* update hw/fw version info in wiphy struct */
1115 wiphy->hw_version = wl->chip.id;
1116 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1117 sizeof(wiphy->fw_version));
1118
1119 goto out;
1120
1121 power_off:
1122 wl1271_power_off(wl);
1123 }
1124
1125 wl->plt = false;
1126 wl->plt_mode = PLT_OFF;
1127
1128 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1129 WL1271_BOOT_RETRIES);
1130 out:
1131 mutex_unlock(&wl->mutex);
1132
1133 return ret;
1134 }
1135
1136 int wl1271_plt_stop(struct wl1271 *wl)
1137 {
1138 int ret = 0;
1139
1140 wl1271_notice("power down");
1141
1142 /*
1143 * Interrupts must be disabled before setting the state to OFF.
1144 * Otherwise, the interrupt handler might be called and exit without
1145 * reading the interrupt status.
1146 */
1147 wlcore_disable_interrupts(wl);
1148 mutex_lock(&wl->mutex);
1149 if (!wl->plt) {
1150 mutex_unlock(&wl->mutex);
1151
1152 /*
1153 * This will not necessarily enable interrupts as interrupts
1154 * may have been disabled when op_stop was called. It will,
1155 * however, balance the above call to disable_interrupts().
1156 */
1157 wlcore_enable_interrupts(wl);
1158
1159 wl1271_error("cannot power down because not in PLT "
1160 "state: %d", wl->state);
1161 ret = -EBUSY;
1162 goto out;
1163 }
1164
1165 mutex_unlock(&wl->mutex);
1166
1167 wl1271_flush_deferred_work(wl);
1168 cancel_work_sync(&wl->netstack_work);
1169 cancel_work_sync(&wl->recovery_work);
1170 cancel_delayed_work_sync(&wl->elp_work);
1171 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1172
1173 mutex_lock(&wl->mutex);
1174 wl1271_power_off(wl);
1175 wl->flags = 0;
1176 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1177 wl->state = WLCORE_STATE_OFF;
1178 wl->plt = false;
1179 wl->plt_mode = PLT_OFF;
1180 wl->rx_counter = 0;
1181 mutex_unlock(&wl->mutex);
1182
1183 out:
1184 return ret;
1185 }
1186
1187 static void wl1271_op_tx(struct ieee80211_hw *hw,
1188 struct ieee80211_tx_control *control,
1189 struct sk_buff *skb)
1190 {
1191 struct wl1271 *wl = hw->priv;
1192 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1193 struct ieee80211_vif *vif = info->control.vif;
1194 struct wl12xx_vif *wlvif = NULL;
1195 unsigned long flags;
1196 int q, mapping;
1197 u8 hlid;
1198
1199 if (!vif) {
1200 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1201 ieee80211_free_txskb(hw, skb);
1202 return;
1203 }
1204
1205 wlvif = wl12xx_vif_to_data(vif);
1206 mapping = skb_get_queue_mapping(skb);
1207 q = wl1271_tx_get_queue(mapping);
1208
1209 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1210
1211 spin_lock_irqsave(&wl->wl_lock, flags);
1212
1213 /*
1214 * drop the packet if the link is invalid or the queue is stopped
1215 * for any reason but watermark. Watermark is a "soft"-stop so we
1216 * allow these packets through.
1217 */
1218 if (hlid == WL12XX_INVALID_LINK_ID ||
1219 (!test_bit(hlid, wlvif->links_map)) ||
1220 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1221 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1222 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1223 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1224 ieee80211_free_txskb(hw, skb);
1225 goto out;
1226 }
1227
1228 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1229 hlid, q, skb->len);
1230 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1231
1232 wl->tx_queue_count[q]++;
1233 wlvif->tx_queue_count[q]++;
1234
1235 /*
1236 * The workqueue is slow to process the tx_queue and we need to stop
1237 * the queue here, otherwise the queue will get too long.
1238 */
1239 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1240 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1241 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1242 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1243 wlcore_stop_queue_locked(wl, wlvif, q,
1244 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1245 }
1246
1247 /*
1248 * The chip specific setup must run before the first TX packet -
1249 * before that, the tx_work will not be initialized!
1250 */
1251
1252 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1253 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1254 ieee80211_queue_work(wl->hw, &wl->tx_work);
1255
1256 out:
1257 spin_unlock_irqrestore(&wl->wl_lock, flags);
1258 }
1259
1260 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1261 {
1262 unsigned long flags;
1263 int q;
1264
1265 /* no need to queue a new dummy packet if one is already pending */
1266 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1267 return 0;
1268
1269 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1270
1271 spin_lock_irqsave(&wl->wl_lock, flags);
1272 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1273 wl->tx_queue_count[q]++;
1274 spin_unlock_irqrestore(&wl->wl_lock, flags);
1275
1276 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1277 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1278 return wlcore_tx_work_locked(wl);
1279
1280 /*
1281 * If the FW TX is busy, TX work will be scheduled by the threaded
1282 * interrupt handler function
1283 */
1284 return 0;
1285 }
1286
1287 /*
1288 * The size of the dummy packet should be at least 1400 bytes. However, in
1289 * order to minimize the number of bus transactions, aligning it to 512-byte
1290 * boundaries could be beneficial, performance-wise.
1291 */
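/* ALIGN(1400, 512) rounds up to the next 512-byte boundary, i.e. 1536 bytes. */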
1292 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1293
1294 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1295 {
1296 struct sk_buff *skb;
1297 struct ieee80211_hdr_3addr *hdr;
1298 unsigned int dummy_packet_size;
1299
1300 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1301 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1302
1303 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1304 if (!skb) {
1305 wl1271_warning("Failed to allocate a dummy packet skb");
1306 return NULL;
1307 }
1308
1309 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1310
1311 hdr = skb_put_zero(skb, sizeof(*hdr));
1312 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1313 IEEE80211_STYPE_NULLFUNC |
1314 IEEE80211_FCTL_TODS);
1315
1316 skb_put_zero(skb, dummy_packet_size);
1317
1318 /* Dummy packets require the TID to be management */
1319 skb->priority = WL1271_TID_MGMT;
1320
1321 /* Initialize all fields that might be used */
1322 skb_set_queue_mapping(skb, 0);
1323 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1324
1325 return skb;
1326 }
1327
1328
1329 #ifdef CONFIG_PM
1330 static int
1331 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1332 {
1333 int num_fields = 0, in_field = 0, fields_size = 0;
1334 int i, pattern_len = 0;
1335
1336 if (!p->mask) {
1337 wl1271_warning("No mask in WoWLAN pattern");
1338 return -EINVAL;
1339 }
1340
1341 /*
1342 * The pattern is broken up into segments of bytes at different offsets
1343 * that need to be checked by the FW filter. Each segment is called
1344 * a field in the FW API. We verify that the total number of fields
1345 * required for this pattern won't exceed the FW limit (8) and that
1346 * the total fields buffer size won't exceed the FW limit either.
1347 * Note that if there's a pattern which crosses Ethernet/IP header
1348 * boundary a new field is required.
1349 */
1350 for (i = 0; i < p->pattern_len; i++) {
1351 if (test_bit(i, (unsigned long *)p->mask)) {
1352 if (!in_field) {
1353 in_field = 1;
1354 pattern_len = 1;
1355 } else {
1356 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1357 num_fields++;
1358 fields_size += pattern_len +
1359 RX_FILTER_FIELD_OVERHEAD;
1360 pattern_len = 1;
1361 } else
1362 pattern_len++;
1363 }
1364 } else {
1365 if (in_field) {
1366 in_field = 0;
1367 fields_size += pattern_len +
1368 RX_FILTER_FIELD_OVERHEAD;
1369 num_fields++;
1370 }
1371 }
1372 }
1373
1374 if (in_field) {
1375 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1376 num_fields++;
1377 }
1378
1379 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1380 wl1271_warning("RX Filter too complex. Too many segments");
1381 return -EINVAL;
1382 }
1383
1384 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1385 wl1271_warning("RX filter pattern is too big");
1386 return -E2BIG;
1387 }
1388
1389 return 0;
1390 }
1391
1392 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1393 {
1394 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1395 }
1396
1397 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1398 {
1399 int i;
1400
1401 if (filter == NULL)
1402 return;
1403
1404 for (i = 0; i < filter->num_fields; i++)
1405 kfree(filter->fields[i].pattern);
1406
1407 kfree(filter);
1408 }
1409
1410 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1411 u16 offset, u8 flags,
1412 const u8 *pattern, u8 len)
1413 {
1414 struct wl12xx_rx_filter_field *field;
1415
1416 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1417 wl1271_warning("Max fields per RX filter. can't alloc another");
1418 return -EINVAL;
1419 }
1420
1421 field = &filter->fields[filter->num_fields];
1422
1423 field->pattern = kzalloc(len, GFP_KERNEL);
1424 if (!field->pattern) {
1425 wl1271_warning("Failed to allocate RX filter pattern");
1426 return -ENOMEM;
1427 }
1428
1429 filter->num_fields++;
1430
1431 field->offset = cpu_to_le16(offset);
1432 field->flags = flags;
1433 field->len = len;
1434 memcpy(field->pattern, pattern, len);
1435
1436 return 0;
1437 }
1438
1439 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1440 {
1441 int i, fields_size = 0;
1442
1443 for (i = 0; i < filter->num_fields; i++)
1444 fields_size += filter->fields[i].len +
1445 sizeof(struct wl12xx_rx_filter_field) -
1446 sizeof(u8 *);
1447
1448 return fields_size;
1449 }
1450
1451 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1452 u8 *buf)
1453 {
1454 int i;
1455 struct wl12xx_rx_filter_field *field;
1456
1457 for (i = 0; i < filter->num_fields; i++) {
1458 field = (struct wl12xx_rx_filter_field *)buf;
1459
1460 field->offset = filter->fields[i].offset;
1461 field->flags = filter->fields[i].flags;
1462 field->len = filter->fields[i].len;
1463
1464 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1465 buf += sizeof(struct wl12xx_rx_filter_field) -
1466 sizeof(u8 *) + field->len;
1467 }
1468 }
1469
1470 /*
1471 * Allocates an RX filter and returns it through f;
1472 * it must be freed using wl1271_rx_filter_free()
1473 */
1474 static int
1475 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1476 struct wl12xx_rx_filter **f)
1477 {
1478 int i, j, ret = 0;
1479 struct wl12xx_rx_filter *filter;
1480 u16 offset;
1481 u8 flags, len;
1482
1483 filter = wl1271_rx_filter_alloc();
1484 if (!filter) {
1485 wl1271_warning("Failed to alloc rx filter");
1486 ret = -ENOMEM;
1487 goto err;
1488 }
1489
1490 i = 0;
1491 while (i < p->pattern_len) {
1492 if (!test_bit(i, (unsigned long *)p->mask)) {
1493 i++;
1494 continue;
1495 }
1496
1497 for (j = i; j < p->pattern_len; j++) {
1498 if (!test_bit(j, (unsigned long *)p->mask))
1499 break;
1500
1501 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1502 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1503 break;
1504 }
1505
1506 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1507 offset = i;
1508 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1509 } else {
1510 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1511 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1512 }
1513
1514 len = j - i;
1515
1516 ret = wl1271_rx_filter_alloc_field(filter,
1517 offset,
1518 flags,
1519 &p->pattern[i], len);
1520 if (ret)
1521 goto err;
1522
1523 i = j;
1524 }
1525
1526 filter->action = FILTER_SIGNAL;
1527
1528 *f = filter;
1529 return 0;
1530
1531 err:
1532 wl1271_rx_filter_free(filter);
1533 *f = NULL;
1534
1535 return ret;
1536 }
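/*
 * Program the FW RX filters for WoWLAN: with no usable patterns, clear
 * all filters and keep the default FILTER_SIGNAL action; otherwise
 * validate and install one filter per pattern and switch the default
 * action to FILTER_DROP.
 */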
1537
1538 static int wl1271_configure_wowlan(struct wl1271 *wl,
1539 struct cfg80211_wowlan *wow)
1540 {
1541 int i, ret;
1542
1543 if (!wow || wow->any || !wow->n_patterns) {
1544 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1545 FILTER_SIGNAL);
1546 if (ret)
1547 goto out;
1548
1549 ret = wl1271_rx_filter_clear_all(wl);
1550 if (ret)
1551 goto out;
1552
1553 return 0;
1554 }
1555
1556 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1557 return -EINVAL;
1558
1559 /* Validate all incoming patterns before clearing current FW state */
1560 for (i = 0; i < wow->n_patterns; i++) {
1561 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1562 if (ret) {
1563 wl1271_warning("Bad wowlan pattern %d", i);
1564 return ret;
1565 }
1566 }
1567
1568 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1569 if (ret)
1570 goto out;
1571
1572 ret = wl1271_rx_filter_clear_all(wl);
1573 if (ret)
1574 goto out;
1575
1576 /* Translate WoWLAN patterns into filters */
1577 for (i = 0; i < wow->n_patterns; i++) {
1578 struct cfg80211_pkt_pattern *p;
1579 struct wl12xx_rx_filter *filter = NULL;
1580
1581 p = &wow->patterns[i];
1582
1583 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1584 if (ret) {
1585 wl1271_warning("Failed to create an RX filter from "
1586 "wowlan pattern %d", i);
1587 goto out;
1588 }
1589
1590 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1591
1592 wl1271_rx_filter_free(filter);
1593 if (ret)
1594 goto out;
1595 }
1596
1597 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1598
1599 out:
1600 return ret;
1601 }
1602
1603 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1604 struct wl12xx_vif *wlvif,
1605 struct cfg80211_wowlan *wow)
1606 {
1607 int ret = 0;
1608
1609 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1610 goto out;
1611
1612 ret = wl1271_configure_wowlan(wl, wow);
1613 if (ret < 0)
1614 goto out;
1615
1616 if ((wl->conf.conn.suspend_wake_up_event ==
1617 wl->conf.conn.wake_up_event) &&
1618 (wl->conf.conn.suspend_listen_interval ==
1619 wl->conf.conn.listen_interval))
1620 goto out;
1621
1622 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1623 wl->conf.conn.suspend_wake_up_event,
1624 wl->conf.conn.suspend_listen_interval);
1625
1626 if (ret < 0)
1627 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1628 out:
1629 return ret;
1630
1631 }
1632
1633 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1634 struct wl12xx_vif *wlvif,
1635 struct cfg80211_wowlan *wow)
1636 {
1637 int ret = 0;
1638
1639 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1640 goto out;
1641
1642 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1643 if (ret < 0)
1644 goto out;
1645
1646 ret = wl1271_configure_wowlan(wl, wow);
1647 if (ret < 0)
1648 goto out;
1649
1650 out:
1651 return ret;
1652
1653 }
1654
1655 static int wl1271_configure_suspend(struct wl1271 *wl,
1656 struct wl12xx_vif *wlvif,
1657 struct cfg80211_wowlan *wow)
1658 {
1659 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1660 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1661 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1662 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1663 return 0;
1664 }
1665
1666 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1667 {
1668 int ret = 0;
1669 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1670 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1671
1672 if ((!is_ap) && (!is_sta))
1673 return;
1674
1675 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1676 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1677 return;
1678
1679 wl1271_configure_wowlan(wl, NULL);
1680
1681 if (is_sta) {
1682 if ((wl->conf.conn.suspend_wake_up_event ==
1683 wl->conf.conn.wake_up_event) &&
1684 (wl->conf.conn.suspend_listen_interval ==
1685 wl->conf.conn.listen_interval))
1686 return;
1687
1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 wl->conf.conn.wake_up_event,
1690 wl->conf.conn.listen_interval);
1691
1692 if (ret < 0)
1693 wl1271_error("resume: wake up conditions failed: %d",
1694 ret);
1695
1696 } else if (is_ap) {
1697 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1698 }
1699 }
1700
1701 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1702 struct cfg80211_wowlan *wow)
1703 {
1704 struct wl1271 *wl = hw->priv;
1705 struct wl12xx_vif *wlvif;
1706 int ret;
1707
1708 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1709 WARN_ON(!wow);
1710
1711 /* we want to perform the recovery before suspending */
1712 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1713 wl1271_warning("postponing suspend to perform recovery");
1714 return -EBUSY;
1715 }
1716
1717 wl1271_tx_flush(wl);
1718
1719 mutex_lock(&wl->mutex);
1720
1721 ret = wl1271_ps_elp_wakeup(wl);
1722 if (ret < 0) {
1723 mutex_unlock(&wl->mutex);
1724 return ret;
1725 }
1726
1727 wl->wow_enabled = true;
1728 wl12xx_for_each_wlvif(wl, wlvif) {
1729 if (wlcore_is_p2p_mgmt(wlvif))
1730 continue;
1731
1732 ret = wl1271_configure_suspend(wl, wlvif, wow);
1733 if (ret < 0) {
1734 mutex_unlock(&wl->mutex);
1735 wl1271_warning("couldn't prepare device to suspend");
1736 return ret;
1737 }
1738 }
1739
1740 /* disable fast link flow control notifications from FW */
1741 ret = wlcore_hw_interrupt_notify(wl, false);
1742 if (ret < 0)
1743 goto out_sleep;
1744
1745 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1746 ret = wlcore_hw_rx_ba_filter(wl,
1747 !!wl->conf.conn.suspend_rx_ba_activity);
1748 if (ret < 0)
1749 goto out_sleep;
1750
1751 out_sleep:
1752 wl1271_ps_elp_sleep(wl);
1753 mutex_unlock(&wl->mutex);
1754
1755 if (ret < 0) {
1756 wl1271_warning("couldn't prepare device to suspend");
1757 return ret;
1758 }
1759
1760 /* flush any remaining work */
1761 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1762
1763 /*
1764 * disable and re-enable interrupts in order to flush
1765 * the threaded_irq
1766 */
1767 wlcore_disable_interrupts(wl);
1768
1769 /*
1770 * set suspended flag to avoid triggering a new threaded_irq
1771 * work. no need for spinlock as interrupts are disabled.
1772 */
1773 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1774
1775 wlcore_enable_interrupts(wl);
1776 flush_work(&wl->tx_work);
1777 flush_delayed_work(&wl->elp_work);
1778
1779 /*
1780 * Cancel the watchdog even if the above tx_flush failed. We will detect
1781 * it on resume anyway.
1782 */
1783 cancel_delayed_work(&wl->tx_watchdog_work);
1784
1785 return 0;
1786 }
1787
1788 static int wl1271_op_resume(struct ieee80211_hw *hw)
1789 {
1790 struct wl1271 *wl = hw->priv;
1791 struct wl12xx_vif *wlvif;
1792 unsigned long flags;
1793 bool run_irq_work = false, pending_recovery;
1794 int ret;
1795
1796 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1797 wl->wow_enabled);
1798 WARN_ON(!wl->wow_enabled);
1799
1800 /*
1801 * re-enable irq_work enqueuing, and call irq_work directly if
1802 * there is pending work.
1803 */
1804 spin_lock_irqsave(&wl->wl_lock, flags);
1805 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1806 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1807 run_irq_work = true;
1808 spin_unlock_irqrestore(&wl->wl_lock, flags);
1809
1810 mutex_lock(&wl->mutex);
1811
1812 /* test the recovery flag before calling any SDIO functions */
1813 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1814 &wl->flags);
1815
1816 if (run_irq_work) {
1817 wl1271_debug(DEBUG_MAC80211,
1818 "run postponed irq_work directly");
1819
1820 /* don't talk to the HW if recovery is pending */
1821 if (!pending_recovery) {
1822 ret = wlcore_irq_locked(wl);
1823 if (ret)
1824 wl12xx_queue_recovery_work(wl);
1825 }
1826
1827 wlcore_enable_interrupts(wl);
1828 }
1829
1830 if (pending_recovery) {
1831 wl1271_warning("queuing forgotten recovery on resume");
1832 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1833 goto out_sleep;
1834 }
1835
1836 ret = wl1271_ps_elp_wakeup(wl);
1837 if (ret < 0)
1838 goto out;
1839
1840 wl12xx_for_each_wlvif(wl, wlvif) {
1841 if (wlcore_is_p2p_mgmt(wlvif))
1842 continue;
1843
1844 wl1271_configure_resume(wl, wlvif);
1845 }
1846
1847 ret = wlcore_hw_interrupt_notify(wl, true);
1848 if (ret < 0)
1849 goto out_sleep;
1850
1851 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1852 ret = wlcore_hw_rx_ba_filter(wl, false);
1853 if (ret < 0)
1854 goto out_sleep;
1855
1856 out_sleep:
1857 wl1271_ps_elp_sleep(wl);
1858
1859 out:
1860 wl->wow_enabled = false;
1861
1862 /*
1863 * Set a flag to re-init the watchdog on the first Tx after resume.
1864 * That way we avoid possible conditions where Tx-complete interrupts
1865 * fail to arrive and we perform a spurious recovery.
1866 */
1867 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1868 mutex_unlock(&wl->mutex);
1869
1870 return 0;
1871 }
1872 #endif
1873
1874 static int wl1271_op_start(struct ieee80211_hw *hw)
1875 {
1876 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1877
1878 /*
1879 * We have to delay the booting of the hardware because
1880 * we need to know the local MAC address before downloading and
1881 * initializing the firmware. The MAC address cannot be changed
1882 * after boot, and without the proper MAC address, the firmware
1883 * will not function properly.
1884 *
1885 * The MAC address is first known when the corresponding interface
1886 * is added. That is where we will initialize the hardware.
1887 */
1888
1889 return 0;
1890 }
1891
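/*
 * Power the chip off and reset all driver state. Called with wl->mutex
 * held; the mutex is temporarily dropped while interrupts and pending
 * work are flushed.
 */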
1892 static void wlcore_op_stop_locked(struct wl1271 *wl)
1893 {
1894 int i;
1895
1896 if (wl->state == WLCORE_STATE_OFF) {
1897 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1898 &wl->flags))
1899 wlcore_enable_interrupts(wl);
1900
1901 return;
1902 }
1903
1904 /*
1905 * this must be before the cancel_work calls below, so that the work
1906 * functions don't perform further work.
1907 */
1908 wl->state = WLCORE_STATE_OFF;
1909
1910 /*
1911 * Use the nosync variant to disable interrupts, so the mutex can be
1912 * held while doing so without deadlocking.
1913 */
1914 wlcore_disable_interrupts_nosync(wl);
1915
1916 mutex_unlock(&wl->mutex);
1917
1918 wlcore_synchronize_interrupts(wl);
1919 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1920 cancel_work_sync(&wl->recovery_work);
1921 wl1271_flush_deferred_work(wl);
1922 cancel_delayed_work_sync(&wl->scan_complete_work);
1923 cancel_work_sync(&wl->netstack_work);
1924 cancel_work_sync(&wl->tx_work);
1925 cancel_delayed_work_sync(&wl->elp_work);
1926 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1927
1928 /* let's notify MAC80211 about the remaining pending TX frames */
1929 mutex_lock(&wl->mutex);
1930 wl12xx_tx_reset(wl);
1931
1932 wl1271_power_off(wl);
1933 /*
1934 * In case a recovery was scheduled, interrupts were disabled to avoid
1935 * an interrupt storm. Now that the power is down, it is safe to
1936 * re-enable interrupts to balance the disable depth
1937 */
1938 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1939 wlcore_enable_interrupts(wl);
1940
1941 wl->band = NL80211_BAND_2GHZ;
1942
1943 wl->rx_counter = 0;
1944 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1945 wl->channel_type = NL80211_CHAN_NO_HT;
1946 wl->tx_blocks_available = 0;
1947 wl->tx_allocated_blocks = 0;
1948 wl->tx_results_count = 0;
1949 wl->tx_packets_count = 0;
1950 wl->time_offset = 0;
1951 wl->ap_fw_ps_map = 0;
1952 wl->ap_ps_map = 0;
1953 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1954 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1955 memset(wl->links_map, 0, sizeof(wl->links_map));
1956 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1957 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1958 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1959 wl->active_sta_count = 0;
1960 wl->active_link_count = 0;
1961
1962 /* The system link is always allocated */
1963 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1964 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1965 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1966
1967 /*
1968 * this is performed after the cancel_work calls and the associated
1969 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1970 * get executed before all these vars have been reset.
1971 */
1972 wl->flags = 0;
1973
1974 wl->tx_blocks_freed = 0;
1975
1976 for (i = 0; i < NUM_TX_QUEUES; i++) {
1977 wl->tx_pkts_freed[i] = 0;
1978 wl->tx_allocated_pkts[i] = 0;
1979 }
1980
1981 wl1271_debugfs_reset(wl);
1982
1983 kfree(wl->raw_fw_status);
1984 wl->raw_fw_status = NULL;
1985 kfree(wl->fw_status);
1986 wl->fw_status = NULL;
1987 kfree(wl->tx_res_if);
1988 wl->tx_res_if = NULL;
1989 kfree(wl->target_mem_map);
1990 wl->target_mem_map = NULL;
1991
1992 /*
1993 * FW channels must be re-calibrated after recovery;
1994 * save the current Reg-Domain channel configuration and clear it.
1995 */
1996 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1997 sizeof(wl->reg_ch_conf_pending));
1998 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1999 }
2000
2001 static void wlcore_op_stop(struct ieee80211_hw *hw)
2002 {
2003 struct wl1271 *wl = hw->priv;
2004
2005 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2006
2007 mutex_lock(&wl->mutex);
2008
2009 wlcore_op_stop_locked(wl);
2010
2011 mutex_unlock(&wl->mutex);
2012 }
2013
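/*
 * Channel-switch timeout: if the switch is still marked in progress,
 * report failure to mac80211 and stop the switch in the FW.
 */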
2014 static void wlcore_channel_switch_work(struct work_struct *work)
2015 {
2016 struct delayed_work *dwork;
2017 struct wl1271 *wl;
2018 struct ieee80211_vif *vif;
2019 struct wl12xx_vif *wlvif;
2020 int ret;
2021
2022 dwork = to_delayed_work(work);
2023 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2024 wl = wlvif->wl;
2025
2026 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2027
2028 mutex_lock(&wl->mutex);
2029
2030 if (unlikely(wl->state != WLCORE_STATE_ON))
2031 goto out;
2032
2033 /* check the channel switch is still ongoing */
2034 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2035 goto out;
2036
2037 vif = wl12xx_wlvif_to_vif(wlvif);
2038 ieee80211_chswitch_done(vif, false);
2039
2040 ret = wl1271_ps_elp_wakeup(wl);
2041 if (ret < 0)
2042 goto out;
2043
2044 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2045
2046 wl1271_ps_elp_sleep(wl);
2047 out:
2048 mutex_unlock(&wl->mutex);
2049 }
2050
2051 static void wlcore_connection_loss_work(struct work_struct *work)
2052 {
2053 struct delayed_work *dwork;
2054 struct wl1271 *wl;
2055 struct ieee80211_vif *vif;
2056 struct wl12xx_vif *wlvif;
2057
2058 dwork = to_delayed_work(work);
2059 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2060 wl = wlvif->wl;
2061
2062 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2063
2064 mutex_lock(&wl->mutex);
2065
2066 if (unlikely(wl->state != WLCORE_STATE_ON))
2067 goto out;
2068
2069 /* Call mac80211 connection loss */
2070 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2071 goto out;
2072
2073 vif = wl12xx_wlvif_to_vif(wlvif);
2074 ieee80211_connection_loss(vif);
2075 out:
2076 mutex_unlock(&wl->mutex);
2077 }
2078
2079 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2080 {
2081 struct delayed_work *dwork;
2082 struct wl1271 *wl;
2083 struct wl12xx_vif *wlvif;
2084 unsigned long time_spare;
2085 int ret;
2086
2087 dwork = to_delayed_work(work);
2088 wlvif = container_of(dwork, struct wl12xx_vif,
2089 pending_auth_complete_work);
2090 wl = wlvif->wl;
2091
2092 mutex_lock(&wl->mutex);
2093
2094 if (unlikely(wl->state != WLCORE_STATE_ON))
2095 goto out;
2096
2097 /*
2098 * Make sure a second really passed since the last auth reply. Maybe
2099 * a second auth reply arrived while we were stuck on the mutex.
2100 * Check for a little less than the timeout to protect from scheduler
2101 * irregularities.
2102 */
2103 time_spare = jiffies +
2104 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2105 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2106 goto out;
2107
2108 ret = wl1271_ps_elp_wakeup(wl);
2109 if (ret < 0)
2110 goto out;
2111
2112 /* cancel the ROC if active */
2113 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2114
2115 wl1271_ps_elp_sleep(wl);
2116 out:
2117 mutex_unlock(&wl->mutex);
2118 }
2119
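/* allocate the lowest free rate-policy index, or return -EBUSY if all are in use */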
2120 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2121 {
2122 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2123 WL12XX_MAX_RATE_POLICIES);
2124 if (policy >= WL12XX_MAX_RATE_POLICIES)
2125 return -EBUSY;
2126
2127 __set_bit(policy, wl->rate_policies_map);
2128 *idx = policy;
2129 return 0;
2130 }
2131
2132 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2133 {
2134 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2135 return;
2136
2137 __clear_bit(*idx, wl->rate_policies_map);
2138 *idx = WL12XX_MAX_RATE_POLICIES;
2139 }
2140
2141 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2142 {
2143 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2144 WLCORE_MAX_KLV_TEMPLATES);
2145 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2146 return -EBUSY;
2147
2148 __set_bit(policy, wl->klv_templates_map);
2149 *idx = policy;
2150 return 0;
2151 }
2152
2153 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2154 {
2155 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2156 return;
2157
2158 __clear_bit(*idx, wl->klv_templates_map);
2159 *idx = WLCORE_MAX_KLV_TEMPLATES;
2160 }
2161
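/* map the vif's bss_type (and p2p/mesh flavour) to the firmware role type */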
2162 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2163 {
2164 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2165
2166 switch (wlvif->bss_type) {
2167 case BSS_TYPE_AP_BSS:
2168 if (wlvif->p2p)
2169 return WL1271_ROLE_P2P_GO;
2170 else if (ieee80211_vif_is_mesh(vif))
2171 return WL1271_ROLE_MESH_POINT;
2172 else
2173 return WL1271_ROLE_AP;
2174
2175 case BSS_TYPE_STA_BSS:
2176 if (wlvif->p2p)
2177 return WL1271_ROLE_P2P_CL;
2178 else
2179 return WL1271_ROLE_STA;
2180
2181 case BSS_TYPE_IBSS:
2182 return WL1271_ROLE_IBSS;
2183
2184 default:
2185 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2186 }
2187 return WL12XX_INVALID_ROLE_TYPE;
2188 }
2189
2190 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2191 {
2192 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2193 int i;
2194
2195 /* clear everything but the persistent data */
2196 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2197
2198 switch (ieee80211_vif_type_p2p(vif)) {
2199 case NL80211_IFTYPE_P2P_CLIENT:
2200 wlvif->p2p = 1;
2201 /* fall-through */
2202 case NL80211_IFTYPE_STATION:
2203 case NL80211_IFTYPE_P2P_DEVICE:
2204 wlvif->bss_type = BSS_TYPE_STA_BSS;
2205 break;
2206 case NL80211_IFTYPE_ADHOC:
2207 wlvif->bss_type = BSS_TYPE_IBSS;
2208 break;
2209 case NL80211_IFTYPE_P2P_GO:
2210 wlvif->p2p = 1;
2211 /* fall-through */
2212 case NL80211_IFTYPE_AP:
2213 case NL80211_IFTYPE_MESH_POINT:
2214 wlvif->bss_type = BSS_TYPE_AP_BSS;
2215 break;
2216 default:
2217 wlvif->bss_type = MAX_BSS_TYPE;
2218 return -EOPNOTSUPP;
2219 }
2220
2221 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2222 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2223 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2224
2225 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2226 wlvif->bss_type == BSS_TYPE_IBSS) {
2227 /* init sta/ibss data */
2228 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2229 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2230 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2231 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2232 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2233 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2234 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2235 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2236 } else {
2237 /* init ap data */
2238 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2239 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2240 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2241 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2242 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2243 wl12xx_allocate_rate_policy(wl,
2244 &wlvif->ap.ucast_rate_idx[i]);
2245 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2246 /*
2247 * TODO: check if basic_rate shouldn't be
2248 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2249 * instead (the same thing for STA above).
2250 */
2251 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2252 /* TODO: this seems to be used only for STA, check it */
2253 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2254 }
2255
2256 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2257 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2258 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2259
2260 /*
2261 * mac80211 configures some values globally, while we treat them
2262 * per-interface. thus, on init, we have to copy them from wl
2263 */
2264 wlvif->band = wl->band;
2265 wlvif->channel = wl->channel;
2266 wlvif->power_level = wl->power_level;
2267 wlvif->channel_type = wl->channel_type;
2268
2269 INIT_WORK(&wlvif->rx_streaming_enable_work,
2270 wl1271_rx_streaming_enable_work);
2271 INIT_WORK(&wlvif->rx_streaming_disable_work,
2272 wl1271_rx_streaming_disable_work);
2273 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2274 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2275 wlcore_channel_switch_work);
2276 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2277 wlcore_connection_loss_work);
2278 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2279 wlcore_pending_auth_complete_work);
2280 INIT_LIST_HEAD(&wlvif->list);
2281
2282 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2283 return 0;
2284 }
2285
2286 static int wl12xx_init_fw(struct wl1271 *wl)
2287 {
2288 int retries = WL1271_BOOT_RETRIES;
2289 bool booted = false;
2290 struct wiphy *wiphy = wl->hw->wiphy;
2291 int ret;
2292
2293 while (retries) {
2294 retries--;
2295 ret = wl12xx_chip_wakeup(wl, false);
2296 if (ret < 0)
2297 goto power_off;
2298
2299 ret = wl->ops->boot(wl);
2300 if (ret < 0)
2301 goto power_off;
2302
2303 ret = wl1271_hw_init(wl);
2304 if (ret < 0)
2305 goto irq_disable;
2306
2307 booted = true;
2308 break;
2309
2310 irq_disable:
2311 mutex_unlock(&wl->mutex);
2312 /* Unlocking the mutex in the middle of handling is
2313 inherently unsafe. In this case we deem it safe to do,
2314 because we need to let any possibly pending IRQ out of
2315 the system (and while we are WLCORE_STATE_OFF the IRQ
2316 work function will not do anything.) Also, any other
2317 possible concurrent operations will fail due to the
2318 current state, hence the wl1271 struct should be safe. */
2319 wlcore_disable_interrupts(wl);
2320 wl1271_flush_deferred_work(wl);
2321 cancel_work_sync(&wl->netstack_work);
2322 mutex_lock(&wl->mutex);
2323 power_off:
2324 wl1271_power_off(wl);
2325 }
2326
2327 if (!booted) {
2328 wl1271_error("firmware boot failed despite %d retries",
2329 WL1271_BOOT_RETRIES);
2330 goto out;
2331 }
2332
2333 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2334
2335 /* update hw/fw version info in wiphy struct */
2336 wiphy->hw_version = wl->chip.id;
2337 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2338 sizeof(wiphy->fw_version));
2339
2340 /*
2341 * Now we know if 11a is supported (info from the NVS), so disable
2342 * 11a channels if not supported
2343 */
2344 if (!wl->enable_11a)
2345 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2346
2347 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2348 wl->enable_11a ? "" : "not ");
2349
2350 wl->state = WLCORE_STATE_ON;
2351 out:
2352 return ret;
2353 }
2354
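/* the device role is considered started once it has a valid hlid */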
2355 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2356 {
2357 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2358 }
2359
2360 /*
2361 * Check whether a fw switch (i.e. moving from one loaded
2362 * fw to another) is needed. This function is also responsible
2363 * for updating wl->last_vif_count, so it must be called before
2364 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2365 * will be used).
2366 */
2367 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2368 struct vif_counter_data vif_counter_data,
2369 bool add)
2370 {
2371 enum wl12xx_fw_type current_fw = wl->fw_type;
2372 u8 vif_count = vif_counter_data.counter;
2373
2374 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2375 return false;
2376
2377 /* increase the vif count if this is a new vif */
2378 if (add && !vif_counter_data.cur_vif_running)
2379 vif_count++;
2380
2381 wl->last_vif_count = vif_count;
2382
2383 /* no need for fw change if the device is OFF */
2384 if (wl->state == WLCORE_STATE_OFF)
2385 return false;
2386
2387 /* no need for fw change if a single fw is used */
2388 if (!wl->mr_fw_name)
2389 return false;
2390
2391 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2392 return true;
2393 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2394 return true;
2395
2396 return false;
2397 }
2398
2399 /*
2400 * Enter "forced psm". Make sure the sta is in psm against the ap,
2401 * to make the fw switch a bit more disconnection-persistent.
2402 */
2403 static void wl12xx_force_active_psm(struct wl1271 *wl)
2404 {
2405 struct wl12xx_vif *wlvif;
2406
2407 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2408 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2409 }
2410 }
2411
2412 struct wlcore_hw_queue_iter_data {
2413 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2414 /* current vif */
2415 struct ieee80211_vif *vif;
2416 /* is the current vif among those iterated */
2417 bool cur_running;
2418 };
2419
2420 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2421 struct ieee80211_vif *vif)
2422 {
2423 struct wlcore_hw_queue_iter_data *iter_data = data;
2424
2425 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2426 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2427 return;
2428
2429 if (iter_data->cur_running || vif == iter_data->vif) {
2430 iter_data->cur_running = true;
2431 return;
2432 }
2433
2434 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2435 }
2436
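/*
 * Reserve a block of NUM_TX_QUEUES mac80211 hw queues for this vif,
 * reusing the pre-allocated base when the vif is already known to mac80211.
 */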
2437 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2438 struct wl12xx_vif *wlvif)
2439 {
2440 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2441 struct wlcore_hw_queue_iter_data iter_data = {};
2442 int i, q_base;
2443
2444 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2445 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2446 return 0;
2447 }
2448
2449 iter_data.vif = vif;
2450
2451 /* mark all bits taken by active interfaces */
2452 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2453 IEEE80211_IFACE_ITER_RESUME_ALL,
2454 wlcore_hw_queue_iter, &iter_data);
2455
2456 /* the current vif is already running in mac80211 (resume/recovery) */
2457 if (iter_data.cur_running) {
2458 wlvif->hw_queue_base = vif->hw_queue[0];
2459 wl1271_debug(DEBUG_MAC80211,
2460 "using pre-allocated hw queue base %d",
2461 wlvif->hw_queue_base);
2462
2463 /* the interface type might have changed */
2464 goto adjust_cab_queue;
2465 }
2466
2467 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2468 WLCORE_NUM_MAC_ADDRESSES);
2469 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2470 return -EBUSY;
2471
2472 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2473 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2474 wlvif->hw_queue_base);
2475
2476 for (i = 0; i < NUM_TX_QUEUES; i++) {
2477 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2478 /* register hw queues in mac80211 */
2479 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2480 }
2481
2482 adjust_cab_queue:
2483 /* the last places are reserved for cab queues per interface */
2484 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2485 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2486 wlvif->hw_queue_base / NUM_TX_QUEUES;
2487 else
2488 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2489
2490 return 0;
2491 }
2492
2493 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2494 struct ieee80211_vif *vif)
2495 {
2496 struct wl1271 *wl = hw->priv;
2497 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2498 struct vif_counter_data vif_count;
2499 int ret = 0;
2500 u8 role_type;
2501
2502 if (wl->plt) {
2503 wl1271_error("Adding Interface not allowed while in PLT mode");
2504 return -EBUSY;
2505 }
2506
2507 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2508 IEEE80211_VIF_SUPPORTS_UAPSD |
2509 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2510
2511 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2512 ieee80211_vif_type_p2p(vif), vif->addr);
2513
2514 wl12xx_get_vif_count(hw, vif, &vif_count);
2515
2516 mutex_lock(&wl->mutex);
2517 ret = wl1271_ps_elp_wakeup(wl);
2518 if (ret < 0)
2519 goto out_unlock;
2520
2521 /*
2522 * in some rare corner-case HW recovery scenarios it's possible to
2523 * get here before __wl1271_op_remove_interface is complete, so
2524 * opt out if that is the case.
2525 */
2526 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2527 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2528 ret = -EBUSY;
2529 goto out;
2530 }
2531
2532
2533 ret = wl12xx_init_vif_data(wl, vif);
2534 if (ret < 0)
2535 goto out;
2536
2537 wlvif->wl = wl;
2538 role_type = wl12xx_get_role_type(wl, wlvif);
2539 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2540 ret = -EINVAL;
2541 goto out;
2542 }
2543
2544 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2545 if (ret < 0)
2546 goto out;
2547
2548 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2549 wl12xx_force_active_psm(wl);
2550 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2551 mutex_unlock(&wl->mutex);
2552 wl1271_recovery_work(&wl->recovery_work);
2553 return 0;
2554 }
2555
2556 /*
2557 * TODO: once the nvs issue is solved, move this block
2558 * to start(), and make sure the driver is ON here.
2559 */
2560 if (wl->state == WLCORE_STATE_OFF) {
2561 /*
2562 * we still need this in order to configure the fw
2563 * while uploading the nvs
2564 */
2565 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2566
2567 ret = wl12xx_init_fw(wl);
2568 if (ret < 0)
2569 goto out;
2570 }
2571
2572 if (!wlcore_is_p2p_mgmt(wlvif)) {
2573 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2574 role_type, &wlvif->role_id);
2575 if (ret < 0)
2576 goto out;
2577
2578 ret = wl1271_init_vif_specific(wl, vif);
2579 if (ret < 0)
2580 goto out;
2581
2582 } else {
2583 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2584 &wlvif->dev_role_id);
2585 if (ret < 0)
2586 goto out;
2587
2588 /* needed mainly for configuring rate policies */
2589 ret = wl1271_sta_hw_init(wl, wlvif);
2590 if (ret < 0)
2591 goto out;
2592 }
2593
2594 list_add(&wlvif->list, &wl->wlvif_list);
2595 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2596
2597 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2598 wl->ap_count++;
2599 else
2600 wl->sta_count++;
2601 out:
2602 wl1271_ps_elp_sleep(wl);
2603 out_unlock:
2604 mutex_unlock(&wl->mutex);
2605
2606 return ret;
2607 }
2608
2609 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2610 struct ieee80211_vif *vif,
2611 bool reset_tx_queues)
2612 {
2613 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2614 int i, ret;
2615 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2616
2617 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2618
2619 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2620 return;
2621
2622 /* because of hardware recovery, we may get here twice */
2623 if (wl->state == WLCORE_STATE_OFF)
2624 return;
2625
2626 wl1271_info("down");
2627
2628 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2629 wl->scan_wlvif == wlvif) {
2630 struct cfg80211_scan_info info = {
2631 .aborted = true,
2632 };
2633
2634 /*
2635 * Rearm the tx watchdog just before idling scan. This
2636 * prevents just-finished scans from triggering the watchdog
2637 */
2638 wl12xx_rearm_tx_watchdog_locked(wl);
2639
2640 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2641 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2642 wl->scan_wlvif = NULL;
2643 wl->scan.req = NULL;
2644 ieee80211_scan_completed(wl->hw, &info);
2645 }
2646
2647 if (wl->sched_vif == wlvif)
2648 wl->sched_vif = NULL;
2649
2650 if (wl->roc_vif == vif) {
2651 wl->roc_vif = NULL;
2652 ieee80211_remain_on_channel_expired(wl->hw);
2653 }
2654
2655 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2656 /* disable active roles */
2657 ret = wl1271_ps_elp_wakeup(wl);
2658 if (ret < 0)
2659 goto deinit;
2660
2661 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2662 wlvif->bss_type == BSS_TYPE_IBSS) {
2663 if (wl12xx_dev_role_started(wlvif))
2664 wl12xx_stop_dev(wl, wlvif);
2665 }
2666
2667 if (!wlcore_is_p2p_mgmt(wlvif)) {
2668 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2669 if (ret < 0)
2670 goto deinit;
2671 } else {
2672 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2673 if (ret < 0)
2674 goto deinit;
2675 }
2676
2677 wl1271_ps_elp_sleep(wl);
2678 }
2679 deinit:
2680 wl12xx_tx_reset_wlvif(wl, wlvif);
2681
2682 /* clear all hlids (except system_hlid) */
2683 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2684
2685 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2686 wlvif->bss_type == BSS_TYPE_IBSS) {
2687 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2688 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2689 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2690 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2691 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2692 } else {
2693 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2694 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2695 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2696 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2697 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2698 wl12xx_free_rate_policy(wl,
2699 &wlvif->ap.ucast_rate_idx[i]);
2700 wl1271_free_ap_keys(wl, wlvif);
2701 }
2702
2703 dev_kfree_skb(wlvif->probereq);
2704 wlvif->probereq = NULL;
2705 if (wl->last_wlvif == wlvif)
2706 wl->last_wlvif = NULL;
2707 list_del(&wlvif->list);
2708 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2709 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2710 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2711
2712 if (is_ap)
2713 wl->ap_count--;
2714 else
2715 wl->sta_count--;
2716
2717 /*
2718 * Last AP removed but stations remain: configure sleep auth according to STA.
2719 * Don't do this on unintended recovery.
2720 */
2721 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2722 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2723 goto unlock;
2724
2725 if (wl->ap_count == 0 && is_ap) {
2726 /* mask ap events */
2727 wl->event_mask &= ~wl->ap_event_mask;
2728 wl1271_event_unmask(wl);
2729 }
2730
2731 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2732 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2733 /* Configure for power according to debugfs */
2734 if (sta_auth != WL1271_PSM_ILLEGAL)
2735 wl1271_acx_sleep_auth(wl, sta_auth);
2736 /* Configure for ELP power saving */
2737 else
2738 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2739 }
2740
2741 unlock:
2742 mutex_unlock(&wl->mutex);
2743
2744 del_timer_sync(&wlvif->rx_streaming_timer);
2745 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2746 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2747 cancel_work_sync(&wlvif->rc_update_work);
2748 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2749 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2750 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2751
2752 mutex_lock(&wl->mutex);
2753 }
2754
2755 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2756 struct ieee80211_vif *vif)
2757 {
2758 struct wl1271 *wl = hw->priv;
2759 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2760 struct wl12xx_vif *iter;
2761 struct vif_counter_data vif_count;
2762
2763 wl12xx_get_vif_count(hw, vif, &vif_count);
2764 mutex_lock(&wl->mutex);
2765
2766 if (wl->state == WLCORE_STATE_OFF ||
2767 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2768 goto out;
2769
2770 /*
2771 * wl->vif can be null here if someone shuts down the interface
2772 * just when hardware recovery has been started.
2773 */
2774 wl12xx_for_each_wlvif(wl, iter) {
2775 if (iter != wlvif)
2776 continue;
2777
2778 __wl1271_op_remove_interface(wl, vif, true);
2779 break;
2780 }
2781 WARN_ON(iter != wlvif);
2782 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2783 wl12xx_force_active_psm(wl);
2784 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2785 wl12xx_queue_recovery_work(wl);
2786 }
2787 out:
2788 mutex_unlock(&wl->mutex);
2789 }
2790
2791 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2792 struct ieee80211_vif *vif,
2793 enum nl80211_iftype new_type, bool p2p)
2794 {
2795 struct wl1271 *wl = hw->priv;
2796 int ret;
2797
2798 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2799 wl1271_op_remove_interface(hw, vif);
2800
2801 vif->type = new_type;
2802 vif->p2p = p2p;
2803 ret = wl1271_op_add_interface(hw, vif);
2804
2805 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2806 return ret;
2807 }
2808
2809 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2810 {
2811 int ret;
2812 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2813
2814 /*
2815 * One of the side effects of the JOIN command is that it clears
2816 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2817 * to a WPA/WPA2 access point will therefore kill the data-path.
2818 * Currently the only valid scenario for JOIN during association
2819 * is on roaming, in which case we will also be given new keys.
2820 * Keep the below message for now, unless it starts bothering
2821 * users who really like to roam a lot :)
2822 */
2823 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2824 wl1271_info("JOIN while associated.");
2825
2826 /* clear encryption type */
2827 wlvif->encryption_type = KEY_NONE;
2828
2829 if (is_ibss)
2830 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2831 else {
2832 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2833 /*
2834 * TODO: this is an ugly workaround for a wl12xx fw
2835 * bug - we are not able to tx/rx after the first
2836 * start_sta, so make dummy start+stop calls,
2837 * and then call start_sta again.
2838 * this should be fixed in the fw.
2839 */
2840 wl12xx_cmd_role_start_sta(wl, wlvif);
2841 wl12xx_cmd_role_stop_sta(wl, wlvif);
2842 }
2843
2844 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2845 }
2846
2847 return ret;
2848 }
2849
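/* copy the SSID IE found at the given IE offset into the vif's ssid/ssid_len */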
2850 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2851 int offset)
2852 {
2853 u8 ssid_len;
2854 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2855 skb->len - offset);
2856
2857 if (!ptr) {
2858 wl1271_error("No SSID in IEs!");
2859 return -ENOENT;
2860 }
2861
2862 ssid_len = ptr[1];
2863 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2864 wl1271_error("SSID is too long!");
2865 return -EINVAL;
2866 }
2867
2868 wlvif->ssid_len = ssid_len;
2869 memcpy(wlvif->ssid, ptr+2, ssid_len);
2870 return 0;
2871 }
2872
2873 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2874 {
2875 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2876 struct sk_buff *skb;
2877 int ieoffset;
2878
2879 /* we currently only support setting the ssid from the ap probe req */
2880 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2881 return -EINVAL;
2882
2883 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2884 if (!skb)
2885 return -EINVAL;
2886
2887 ieoffset = offsetof(struct ieee80211_mgmt,
2888 u.probe_req.variable);
2889 wl1271_ssid_set(wlvif, skb, ieoffset);
2890 dev_kfree_skb(skb);
2891
2892 return 0;
2893 }
2894
2895 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2896 struct ieee80211_bss_conf *bss_conf,
2897 u32 sta_rate_set)
2898 {
2899 int ieoffset;
2900 int ret;
2901
2902 wlvif->aid = bss_conf->aid;
2903 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2904 wlvif->beacon_int = bss_conf->beacon_int;
2905 wlvif->wmm_enabled = bss_conf->qos;
2906
2907 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2908
2909 /*
2910 * with wl1271, we don't need to update the
2911 * beacon_int and dtim_period, because the firmware
2912 * updates them by itself when the first beacon is
2913 * received after a join.
2914 */
2915 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2916 if (ret < 0)
2917 return ret;
2918
2919 /*
2920 * Get a template for hardware connection maintenance
2921 */
2922 dev_kfree_skb(wlvif->probereq);
2923 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2924 wlvif,
2925 NULL);
2926 ieoffset = offsetof(struct ieee80211_mgmt,
2927 u.probe_req.variable);
2928 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2929
2930 /* enable the connection monitoring feature */
2931 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2932 if (ret < 0)
2933 return ret;
2934
2935 /*
2936 * The join command disables the keep-alive mode, shuts down its process,
2937 * and also clears the template config, so we need to reset it all after
2938 * the join. The acx_aid starts the keep-alive process, and the order
2939 * of the commands below is relevant.
2940 */
2941 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2942 if (ret < 0)
2943 return ret;
2944
2945 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2946 if (ret < 0)
2947 return ret;
2948
2949 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2950 if (ret < 0)
2951 return ret;
2952
2953 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2954 wlvif->sta.klv_template_id,
2955 ACX_KEEP_ALIVE_TPL_VALID);
2956 if (ret < 0)
2957 return ret;
2958
2959 /*
2960 * The default fw psm configuration is AUTO, while mac80211 default
2961 * setting is off (ACTIVE), so sync the fw with the correct value.
2962 */
2963 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2964 if (ret < 0)
2965 return ret;
2966
2967 if (sta_rate_set) {
2968 wlvif->rate_set =
2969 wl1271_tx_enabled_rates_get(wl,
2970 sta_rate_set,
2971 wlvif->band);
2972 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2973 if (ret < 0)
2974 return ret;
2975 }
2976
2977 return ret;
2978 }
2979
2980 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2981 {
2982 int ret;
2983 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2984
2985 /* make sure we are associated (sta) */
2986 if (sta &&
2987 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2988 return false;
2989
2990 /* make sure we are joined (ibss) */
2991 if (!sta &&
2992 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2993 return false;
2994
2995 if (sta) {
2996 /* use defaults when not associated */
2997 wlvif->aid = 0;
2998
2999 /* free probe-request template */
3000 dev_kfree_skb(wlvif->probereq);
3001 wlvif->probereq = NULL;
3002
3003 /* disable connection monitor features */
3004 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3005 if (ret < 0)
3006 return ret;
3007
3008 /* Disable the keep-alive feature */
3009 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3010 if (ret < 0)
3011 return ret;
3012
3013 /* disable beacon filtering */
3014 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3015 if (ret < 0)
3016 return ret;
3017 }
3018
3019 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3020 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3021
3022 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3023 ieee80211_chswitch_done(vif, false);
3024 cancel_delayed_work(&wlvif->channel_switch_work);
3025 }
3026
3027 /* invalidate keep-alive template */
3028 wl1271_acx_keep_alive_config(wl, wlvif,
3029 wlvif->sta.klv_template_id,
3030 ACX_KEEP_ALIVE_TPL_INVALID);
3031
3032 return 0;
3033 }
3034
3035 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3036 {
3037 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3038 wlvif->rate_set = wlvif->basic_rate_set;
3039 }
3040
3041 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3042 bool idle)
3043 {
3044 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3045
3046 if (idle == cur_idle)
3047 return;
3048
3049 if (idle) {
3050 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3051 } else {
3052 /* The current firmware only supports sched_scan in idle */
3053 if (wl->sched_vif == wlvif)
3054 wl->ops->sched_scan_stop(wl, wlvif);
3055
3056 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3057 }
3058 }
3059
3060 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3061 struct ieee80211_conf *conf, u32 changed)
3062 {
3063 int ret;
3064
3065 if (wlcore_is_p2p_mgmt(wlvif))
3066 return 0;
3067
3068 if (conf->power_level != wlvif->power_level) {
3069 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3070 if (ret < 0)
3071 return ret;
3072
3073 wlvif->power_level = conf->power_level;
3074 }
3075
3076 return 0;
3077 }
3078
3079 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3080 {
3081 struct wl1271 *wl = hw->priv;
3082 struct wl12xx_vif *wlvif;
3083 struct ieee80211_conf *conf = &hw->conf;
3084 int ret = 0;
3085
3086 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3087 " changed 0x%x",
3088 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3089 conf->power_level,
3090 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3091 changed);
3092
3093 mutex_lock(&wl->mutex);
3094
3095 if (changed & IEEE80211_CONF_CHANGE_POWER)
3096 wl->power_level = conf->power_level;
3097
3098 if (unlikely(wl->state != WLCORE_STATE_ON))
3099 goto out;
3100
3101 ret = wl1271_ps_elp_wakeup(wl);
3102 if (ret < 0)
3103 goto out;
3104
3105 /* configure each interface */
3106 wl12xx_for_each_wlvif(wl, wlvif) {
3107 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3108 if (ret < 0)
3109 goto out_sleep;
3110 }
3111
3112 out_sleep:
3113 wl1271_ps_elp_sleep(wl);
3114
3115 out:
3116 mutex_unlock(&wl->mutex);
3117
3118 return ret;
3119 }
3120
3121 struct wl1271_filter_params {
3122 bool enabled;
3123 int mc_list_length;
3124 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3125 };
3126
3127 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3128 struct netdev_hw_addr_list *mc_list)
3129 {
3130 struct wl1271_filter_params *fp;
3131 struct netdev_hw_addr *ha;
3132
3133 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3134 if (!fp) {
3135 wl1271_error("Out of memory setting filters.");
3136 return 0;
3137 }
3138
3139 /* update multicast filtering parameters */
3140 fp->mc_list_length = 0;
3141 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3142 fp->enabled = false;
3143 } else {
3144 fp->enabled = true;
3145 netdev_hw_addr_list_for_each(ha, mc_list) {
3146 memcpy(fp->mc_list[fp->mc_list_length],
3147 ha->addr, ETH_ALEN);
3148 fp->mc_list_length++;
3149 }
3150 }
3151
3152 return (u64)(unsigned long)fp;
3153 }
3154
3155 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3156 FIF_FCSFAIL | \
3157 FIF_BCN_PRBRESP_PROMISC | \
3158 FIF_CONTROL | \
3159 FIF_OTHER_BSS)
3160
3161 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3162 unsigned int changed,
3163 unsigned int *total, u64 multicast)
3164 {
3165 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3166 struct wl1271 *wl = hw->priv;
3167 struct wl12xx_vif *wlvif;
3168
3169 int ret;
3170
3171 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3172 " total %x", changed, *total);
3173
3174 mutex_lock(&wl->mutex);
3175
3176 *total &= WL1271_SUPPORTED_FILTERS;
3177 changed &= WL1271_SUPPORTED_FILTERS;
3178
3179 if (unlikely(wl->state != WLCORE_STATE_ON))
3180 goto out;
3181
3182 ret = wl1271_ps_elp_wakeup(wl);
3183 if (ret < 0)
3184 goto out;
3185
3186 wl12xx_for_each_wlvif(wl, wlvif) {
3187 if (wlcore_is_p2p_mgmt(wlvif))
3188 continue;
3189
3190 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3191 if (*total & FIF_ALLMULTI)
3192 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3193 false,
3194 NULL, 0);
3195 else if (fp)
3196 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3197 fp->enabled,
3198 fp->mc_list,
3199 fp->mc_list_length);
3200 if (ret < 0)
3201 goto out_sleep;
3202 }
3203
3204 /*
3205 * If the interface is in AP mode and was created with allmulticast,
3206 * disable the firmware filters so that all multicast packets are passed.
3207 * This is mandatory for mDNS-based discovery protocols.
3208 */
3209 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3210 if (*total & FIF_ALLMULTI) {
3211 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3212 false,
3213 NULL, 0);
3214 if (ret < 0)
3215 goto out_sleep;
3216 }
3217 }
3218 }
3219
3220 /*
3221 * the fw doesn't provide an api to configure the filters. instead,
3222 * the filters configuration is based on the active roles / ROC
3223 * state.
3224 */
3225
3226 out_sleep:
3227 wl1271_ps_elp_sleep(wl);
3228
3229 out:
3230 mutex_unlock(&wl->mutex);
3231 kfree(fp);
3232 }
3233
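/*
 * Remember a key configured before the AP role is started; the recorded
 * keys are programmed into the firmware later by wl1271_ap_init_hwenc().
 */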
3234 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3235 u8 id, u8 key_type, u8 key_size,
3236 const u8 *key, u8 hlid, u32 tx_seq_32,
3237 u16 tx_seq_16)
3238 {
3239 struct wl1271_ap_key *ap_key;
3240 int i;
3241
3242 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3243
3244 if (key_size > MAX_KEY_SIZE)
3245 return -EINVAL;
3246
3247 /*
3248 * Find next free entry in ap_keys. Also check we are not replacing
3249 * an existing key.
3250 */
3251 for (i = 0; i < MAX_NUM_KEYS; i++) {
3252 if (wlvif->ap.recorded_keys[i] == NULL)
3253 break;
3254
3255 if (wlvif->ap.recorded_keys[i]->id == id) {
3256 wl1271_warning("trying to record key replacement");
3257 return -EINVAL;
3258 }
3259 }
3260
3261 if (i == MAX_NUM_KEYS)
3262 return -EBUSY;
3263
3264 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3265 if (!ap_key)
3266 return -ENOMEM;
3267
3268 ap_key->id = id;
3269 ap_key->key_type = key_type;
3270 ap_key->key_size = key_size;
3271 memcpy(ap_key->key, key, key_size);
3272 ap_key->hlid = hlid;
3273 ap_key->tx_seq_32 = tx_seq_32;
3274 ap_key->tx_seq_16 = tx_seq_16;
3275
3276 wlvif->ap.recorded_keys[i] = ap_key;
3277 return 0;
3278 }
3279
3280 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3281 {
3282 int i;
3283
3284 for (i = 0; i < MAX_NUM_KEYS; i++) {
3285 kfree(wlvif->ap.recorded_keys[i]);
3286 wlvif->ap.recorded_keys[i] = NULL;
3287 }
3288 }
3289
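/* program the keys recorded before AP start into the firmware, then free them */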
3290 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3291 {
3292 int i, ret = 0;
3293 struct wl1271_ap_key *key;
3294 bool wep_key_added = false;
3295
3296 for (i = 0; i < MAX_NUM_KEYS; i++) {
3297 u8 hlid;
3298 if (wlvif->ap.recorded_keys[i] == NULL)
3299 break;
3300
3301 key = wlvif->ap.recorded_keys[i];
3302 hlid = key->hlid;
3303 if (hlid == WL12XX_INVALID_LINK_ID)
3304 hlid = wlvif->ap.bcast_hlid;
3305
3306 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3307 key->id, key->key_type,
3308 key->key_size, key->key,
3309 hlid, key->tx_seq_32,
3310 key->tx_seq_16);
3311 if (ret < 0)
3312 goto out;
3313
3314 if (key->key_type == KEY_WEP)
3315 wep_key_added = true;
3316 }
3317
3318 if (wep_key_added) {
3319 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3320 wlvif->ap.bcast_hlid);
3321 if (ret < 0)
3322 goto out;
3323 }
3324
3325 out:
3326 wl1271_free_ap_keys(wl, wlvif);
3327 return ret;
3328 }
3329
3330 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3331 u16 action, u8 id, u8 key_type,
3332 u8 key_size, const u8 *key, u32 tx_seq_32,
3333 u16 tx_seq_16, struct ieee80211_sta *sta)
3334 {
3335 int ret;
3336 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3337
3338 if (is_ap) {
3339 struct wl1271_station *wl_sta;
3340 u8 hlid;
3341
3342 if (sta) {
3343 wl_sta = (struct wl1271_station *)sta->drv_priv;
3344 hlid = wl_sta->hlid;
3345 } else {
3346 hlid = wlvif->ap.bcast_hlid;
3347 }
3348
3349 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3350 /*
3351 * We do not support removing keys after AP shutdown.
3352 * Pretend we do to make mac80211 happy.
3353 */
3354 if (action != KEY_ADD_OR_REPLACE)
3355 return 0;
3356
3357 ret = wl1271_record_ap_key(wl, wlvif, id,
3358 key_type, key_size,
3359 key, hlid, tx_seq_32,
3360 tx_seq_16);
3361 } else {
3362 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3363 id, key_type, key_size,
3364 key, hlid, tx_seq_32,
3365 tx_seq_16);
3366 }
3367
3368 if (ret < 0)
3369 return ret;
3370 } else {
3371 const u8 *addr;
3372 static const u8 bcast_addr[ETH_ALEN] = {
3373 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3374 };
3375
3376 addr = sta ? sta->addr : bcast_addr;
3377
3378 if (is_zero_ether_addr(addr)) {
3379 /* We don't support TX-only encryption */
3380 return -EOPNOTSUPP;
3381 }
3382
3383 /* The wl1271 does not allow removing unicast keys - they
3384 will be cleared automatically on the next CMD_JOIN. Ignore the
3385 request silently, as we don't want mac80211 to emit
3386 an error message. */
3387 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3388 return 0;
3389
3390 /* don't remove key if hlid was already deleted */
3391 if (action == KEY_REMOVE &&
3392 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3393 return 0;
3394
3395 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3396 id, key_type, key_size,
3397 key, addr, tx_seq_32,
3398 tx_seq_16);
3399 if (ret < 0)
3400 return ret;
3401
3402 }
3403
3404 return 0;
3405 }
3406
3407 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3408 struct ieee80211_vif *vif,
3409 struct ieee80211_sta *sta,
3410 struct ieee80211_key_conf *key_conf)
3411 {
3412 struct wl1271 *wl = hw->priv;
3413 int ret;
3414 bool might_change_spare =
3415 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3416 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3417
3418 if (might_change_spare) {
3419 /*
3420 * stop the queues and flush to ensure the next packets are
3421 * in sync with FW spare block accounting
3422 */
3423 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3424 wl1271_tx_flush(wl);
3425 }
3426
3427 mutex_lock(&wl->mutex);
3428
3429 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3430 ret = -EAGAIN;
3431 goto out_wake_queues;
3432 }
3433
3434 ret = wl1271_ps_elp_wakeup(wl);
3435 if (ret < 0)
3436 goto out_wake_queues;
3437
3438 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3439
3440 wl1271_ps_elp_sleep(wl);
3441
3442 out_wake_queues:
3443 if (might_change_spare)
3444 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3445
3446 mutex_unlock(&wl->mutex);
3447
3448 return ret;
3449 }
3450
3451 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3452 struct ieee80211_vif *vif,
3453 struct ieee80211_sta *sta,
3454 struct ieee80211_key_conf *key_conf)
3455 {
3456 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3457 int ret;
3458 u32 tx_seq_32 = 0;
3459 u16 tx_seq_16 = 0;
3460 u8 key_type;
3461 u8 hlid;
3462
3463 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3464
3465 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3466 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3467 key_conf->cipher, key_conf->keyidx,
3468 key_conf->keylen, key_conf->flags);
3469 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3470
3471 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3472 if (sta) {
3473 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3474 hlid = wl_sta->hlid;
3475 } else {
3476 hlid = wlvif->ap.bcast_hlid;
3477 }
3478 else
3479 hlid = wlvif->sta.hlid;
3480
3481 if (hlid != WL12XX_INVALID_LINK_ID) {
3482 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3483 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3484 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3485 }
3486
3487 switch (key_conf->cipher) {
3488 case WLAN_CIPHER_SUITE_WEP40:
3489 case WLAN_CIPHER_SUITE_WEP104:
3490 key_type = KEY_WEP;
3491
3492 key_conf->hw_key_idx = key_conf->keyidx;
3493 break;
3494 case WLAN_CIPHER_SUITE_TKIP:
3495 key_type = KEY_TKIP;
3496 key_conf->hw_key_idx = key_conf->keyidx;
3497 break;
3498 case WLAN_CIPHER_SUITE_CCMP:
3499 key_type = KEY_AES;
3500 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3501 break;
3502 case WL1271_CIPHER_SUITE_GEM:
3503 key_type = KEY_GEM;
3504 break;
3505 default:
3506 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3507
3508 return -EOPNOTSUPP;
3509 }
3510
3511 switch (cmd) {
3512 case SET_KEY:
3513 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3514 key_conf->keyidx, key_type,
3515 key_conf->keylen, key_conf->key,
3516 tx_seq_32, tx_seq_16, sta);
3517 if (ret < 0) {
3518 wl1271_error("Could not add or replace key");
3519 return ret;
3520 }
3521
3522 /*
3523 * reconfigure the ARP response if the unicast (or common)
3524 * encryption key type was changed
3525 */
3526 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3527 (sta || key_type == KEY_WEP) &&
3528 wlvif->encryption_type != key_type) {
3529 wlvif->encryption_type = key_type;
3530 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3531 if (ret < 0) {
3532 wl1271_warning("build arp rsp failed: %d", ret);
3533 return ret;
3534 }
3535 }
3536 break;
3537
3538 case DISABLE_KEY:
3539 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3540 key_conf->keyidx, key_type,
3541 key_conf->keylen, key_conf->key,
3542 0, 0, sta);
3543 if (ret < 0) {
3544 wl1271_error("Could not remove key");
3545 return ret;
3546 }
3547 break;
3548
3549 default:
3550 wl1271_error("Unsupported key cmd 0x%x", cmd);
3551 return -EOPNOTSUPP;
3552 }
3553
3554 return ret;
3555 }
3556 EXPORT_SYMBOL_GPL(wlcore_set_key);
3557
3558 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3559 struct ieee80211_vif *vif,
3560 int key_idx)
3561 {
3562 struct wl1271 *wl = hw->priv;
3563 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3564 int ret;
3565
3566 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3567 key_idx);
3568
3569 /* we don't handle unsetting of default key */
3570 if (key_idx == -1)
3571 return;
3572
3573 mutex_lock(&wl->mutex);
3574
3575 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3576 ret = -EAGAIN;
3577 goto out_unlock;
3578 }
3579
3580 ret = wl1271_ps_elp_wakeup(wl);
3581 if (ret < 0)
3582 goto out_unlock;
3583
3584 wlvif->default_key = key_idx;
3585
3586 /* the default WEP key needs to be configured at least once */
3587 if (wlvif->encryption_type == KEY_WEP) {
3588 ret = wl12xx_cmd_set_default_wep_key(wl,
3589 key_idx,
3590 wlvif->sta.hlid);
3591 if (ret < 0)
3592 goto out_sleep;
3593 }
3594
3595 out_sleep:
3596 wl1271_ps_elp_sleep(wl);
3597
3598 out_unlock:
3599 mutex_unlock(&wl->mutex);
3600 }
3601
3602 void wlcore_regdomain_config(struct wl1271 *wl)
3603 {
3604 int ret;
3605
3606 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3607 return;
3608
3609 mutex_lock(&wl->mutex);
3610
3611 if (unlikely(wl->state != WLCORE_STATE_ON))
3612 goto out;
3613
3614 ret = wl1271_ps_elp_wakeup(wl);
3615 if (ret < 0)
3616 goto out;
3617
3618 ret = wlcore_cmd_regdomain_config_locked(wl);
3619 if (ret < 0) {
3620 wl12xx_queue_recovery_work(wl);
3621 goto out;
3622 }
3623
3624 wl1271_ps_elp_sleep(wl);
3625 out:
3626 mutex_unlock(&wl->mutex);
3627 }
3628
3629 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3630 struct ieee80211_vif *vif,
3631 struct ieee80211_scan_request *hw_req)
3632 {
3633 struct cfg80211_scan_request *req = &hw_req->req;
3634 struct wl1271 *wl = hw->priv;
3635 int ret;
3636 u8 *ssid = NULL;
3637 size_t len = 0;
3638
3639 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3640
3641 if (req->n_ssids) {
3642 ssid = req->ssids[0].ssid;
3643 len = req->ssids[0].ssid_len;
3644 }
3645
3646 mutex_lock(&wl->mutex);
3647
3648 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3649 /*
3650 * We cannot return -EBUSY here because cfg80211 will expect
3651 * a call to ieee80211_scan_completed if we do - in this case
3652 * there won't be any call.
3653 */
3654 ret = -EAGAIN;
3655 goto out;
3656 }
3657
3658 ret = wl1271_ps_elp_wakeup(wl);
3659 if (ret < 0)
3660 goto out;
3661
3662 /* fail if there is any role in ROC */
3663 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3664 /* don't allow scanning right now */
3665 ret = -EBUSY;
3666 goto out_sleep;
3667 }
3668
3669 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3670 out_sleep:
3671 wl1271_ps_elp_sleep(wl);
3672 out:
3673 mutex_unlock(&wl->mutex);
3674
3675 return ret;
3676 }
3677
3678 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3679 struct ieee80211_vif *vif)
3680 {
3681 struct wl1271 *wl = hw->priv;
3682 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3683 struct cfg80211_scan_info info = {
3684 .aborted = true,
3685 };
3686 int ret;
3687
3688 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3689
3690 mutex_lock(&wl->mutex);
3691
3692 if (unlikely(wl->state != WLCORE_STATE_ON))
3693 goto out;
3694
3695 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3696 goto out;
3697
3698 ret = wl1271_ps_elp_wakeup(wl);
3699 if (ret < 0)
3700 goto out;
3701
3702 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3703 ret = wl->ops->scan_stop(wl, wlvif);
3704 if (ret < 0)
3705 goto out_sleep;
3706 }
3707
3708 /*
3709 * Rearm the tx watchdog just before idling scan. This
3710 * prevents just-finished scans from triggering the watchdog
3711 */
3712 wl12xx_rearm_tx_watchdog_locked(wl);
3713
3714 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3715 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3716 wl->scan_wlvif = NULL;
3717 wl->scan.req = NULL;
3718 ieee80211_scan_completed(wl->hw, &info);
3719
3720 out_sleep:
3721 wl1271_ps_elp_sleep(wl);
3722 out:
3723 mutex_unlock(&wl->mutex);
3724
3725 cancel_delayed_work_sync(&wl->scan_complete_work);
3726 }
3727
3728 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3729 struct ieee80211_vif *vif,
3730 struct cfg80211_sched_scan_request *req,
3731 struct ieee80211_scan_ies *ies)
3732 {
3733 struct wl1271 *wl = hw->priv;
3734 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3735 int ret;
3736
3737 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3738
3739 mutex_lock(&wl->mutex);
3740
3741 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3742 ret = -EAGAIN;
3743 goto out;
3744 }
3745
3746 ret = wl1271_ps_elp_wakeup(wl);
3747 if (ret < 0)
3748 goto out;
3749
3750 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3751 if (ret < 0)
3752 goto out_sleep;
3753
3754 wl->sched_vif = wlvif;
3755
3756 out_sleep:
3757 wl1271_ps_elp_sleep(wl);
3758 out:
3759 mutex_unlock(&wl->mutex);
3760 return ret;
3761 }
3762
3763 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3764 struct ieee80211_vif *vif)
3765 {
3766 struct wl1271 *wl = hw->priv;
3767 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3768 int ret;
3769
3770 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3771
3772 mutex_lock(&wl->mutex);
3773
3774 if (unlikely(wl->state != WLCORE_STATE_ON))
3775 goto out;
3776
3777 ret = wl1271_ps_elp_wakeup(wl);
3778 if (ret < 0)
3779 goto out;
3780
3781 wl->ops->sched_scan_stop(wl, wlvif);
3782
3783 wl1271_ps_elp_sleep(wl);
3784 out:
3785 mutex_unlock(&wl->mutex);
3786
3787 return 0;
3788 }
3789
3790 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3791 {
3792 struct wl1271 *wl = hw->priv;
3793 int ret = 0;
3794
3795 mutex_lock(&wl->mutex);
3796
3797 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3798 ret = -EAGAIN;
3799 goto out;
3800 }
3801
3802 ret = wl1271_ps_elp_wakeup(wl);
3803 if (ret < 0)
3804 goto out;
3805
3806 ret = wl1271_acx_frag_threshold(wl, value);
3807 if (ret < 0)
3808 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3809
3810 wl1271_ps_elp_sleep(wl);
3811
3812 out:
3813 mutex_unlock(&wl->mutex);
3814
3815 return ret;
3816 }
3817
3818 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3819 {
3820 struct wl1271 *wl = hw->priv;
3821 struct wl12xx_vif *wlvif;
3822 int ret = 0;
3823
3824 mutex_lock(&wl->mutex);
3825
3826 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3827 ret = -EAGAIN;
3828 goto out;
3829 }
3830
3831 ret = wl1271_ps_elp_wakeup(wl);
3832 if (ret < 0)
3833 goto out;
3834
3835 wl12xx_for_each_wlvif(wl, wlvif) {
3836 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3837 if (ret < 0)
3838 wl1271_warning("set rts threshold failed: %d", ret);
3839 }
3840 wl1271_ps_elp_sleep(wl);
3841
3842 out:
3843 mutex_unlock(&wl->mutex);
3844
3845 return ret;
3846 }
3847
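/* strip the first IE matching eid from the frame, starting at ieoffset */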
3848 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3849 {
3850 int len;
3851 const u8 *next, *end = skb->data + skb->len;
3852 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3853 skb->len - ieoffset);
3854 if (!ie)
3855 return;
3856 len = ie[1] + 2;
3857 next = ie + len;
3858 memmove(ie, next, end - next);
3859 skb_trim(skb, skb->len - len);
3860 }
3861
3862 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3863 unsigned int oui, u8 oui_type,
3864 int ieoffset)
3865 {
3866 int len;
3867 const u8 *next, *end = skb->data + skb->len;
3868 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3869 skb->data + ieoffset,
3870 skb->len - ieoffset);
3871 if (!ie)
3872 return;
3873 len = ie[1] + 2;
3874 next = ie + len;
3875 memmove(ie, next, end - next);
3876 skb_trim(skb, skb->len - len);
3877 }
3878
3879 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3880 struct ieee80211_vif *vif)
3881 {
3882 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3883 struct sk_buff *skb;
3884 int ret;
3885
3886 skb = ieee80211_proberesp_get(wl->hw, vif);
3887 if (!skb)
3888 return -EOPNOTSUPP;
3889
3890 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3891 CMD_TEMPL_AP_PROBE_RESPONSE,
3892 skb->data,
3893 skb->len, 0,
3894 rates);
3895 dev_kfree_skb(skb);
3896
3897 if (ret < 0)
3898 goto out;
3899
3900 wl1271_debug(DEBUG_AP, "probe response updated");
3901 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3902
3903 out:
3904 return ret;
3905 }
3906
3907 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3908 struct ieee80211_vif *vif,
3909 u8 *probe_rsp_data,
3910 size_t probe_rsp_len,
3911 u32 rates)
3912 {
3913 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3914 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3915 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3916 int ssid_ie_offset, ie_offset, templ_len;
3917 const u8 *ptr;
3918
3919 /* no need to change probe response if the SSID is set correctly */
3920 if (wlvif->ssid_len > 0)
3921 return wl1271_cmd_template_set(wl, wlvif->role_id,
3922 CMD_TEMPL_AP_PROBE_RESPONSE,
3923 probe_rsp_data,
3924 probe_rsp_len, 0,
3925 rates);
3926
3927 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3928 wl1271_error("probe_rsp template too big");
3929 return -EINVAL;
3930 }
3931
3932 /* start searching from IE offset */
3933 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3934
3935 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3936 probe_rsp_len - ie_offset);
3937 if (!ptr) {
3938 wl1271_error("No SSID in beacon!");
3939 return -EINVAL;
3940 }
3941
3942 ssid_ie_offset = ptr - probe_rsp_data;
3943 ptr += (ptr[1] + 2);
3944
3945 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3946
3947 /* insert SSID from bss_conf */
3948 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3949 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3950 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3951 bss_conf->ssid, bss_conf->ssid_len);
3952 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3953
3954 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3955 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3956 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3957
3958 return wl1271_cmd_template_set(wl, wlvif->role_id,
3959 CMD_TEMPL_AP_PROBE_RESPONSE,
3960 probe_rsp_templ,
3961 templ_len, 0,
3962 rates);
3963 }
3964
3965 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3966 struct ieee80211_vif *vif,
3967 struct ieee80211_bss_conf *bss_conf,
3968 u32 changed)
3969 {
3970 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3971 int ret = 0;
3972
3973 if (changed & BSS_CHANGED_ERP_SLOT) {
3974 if (bss_conf->use_short_slot)
3975 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3976 else
3977 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3978 if (ret < 0) {
3979 wl1271_warning("Set slot time failed %d", ret);
3980 goto out;
3981 }
3982 }
3983
3984 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3985 if (bss_conf->use_short_preamble)
3986 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3987 else
3988 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3989 }
3990
3991 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3992 if (bss_conf->use_cts_prot)
3993 ret = wl1271_acx_cts_protect(wl, wlvif,
3994 CTSPROTECT_ENABLE);
3995 else
3996 ret = wl1271_acx_cts_protect(wl, wlvif,
3997 CTSPROTECT_DISABLE);
3998 if (ret < 0) {
3999 wl1271_warning("Set ctsprotect failed %d", ret);
4000 goto out;
4001 }
4002 }
4003
4004 out:
4005 return ret;
4006 }
4007
4008 static int wlcore_set_beacon_template(struct wl1271 *wl,
4009 struct ieee80211_vif *vif,
4010 bool is_ap)
4011 {
4012 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4013 struct ieee80211_hdr *hdr;
4014 u32 min_rate;
4015 int ret;
4016 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4017 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4018 u16 tmpl_id;
4019
4020 if (!beacon) {
4021 ret = -EINVAL;
4022 goto out;
4023 }
4024
4025 wl1271_debug(DEBUG_MASTER, "beacon updated");
4026
4027 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4028 if (ret < 0) {
4029 dev_kfree_skb(beacon);
4030 goto out;
4031 }
4032 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4033 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4034 CMD_TEMPL_BEACON;
4035 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4036 beacon->data,
4037 beacon->len, 0,
4038 min_rate);
4039 if (ret < 0) {
4040 dev_kfree_skb(beacon);
4041 goto out;
4042 }
4043
4044 wlvif->wmm_enabled =
4045 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4046 WLAN_OUI_TYPE_MICROSOFT_WMM,
4047 beacon->data + ieoffset,
4048 beacon->len - ieoffset);
4049
4050 /*
4051 * In case a probe-resp template was already set explicitly
4052 * by userspace, don't derive one from the beacon data.
4053 */
4054 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4055 goto end_bcn;
4056
4057 /* remove TIM ie from probe response */
4058 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4059
4060 /*
4061 * remove p2p ie from probe response.
4062 * the fw responds to probe requests that don't include
4063 * the p2p ie. probe requests with a p2p ie will be passed up,
4064 * and will be answered by the supplicant (the spec
4065 * forbids including the p2p ie when responding to probe
4066 * requests that didn't include it).
4067 */
4068 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4069 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4070
4071 hdr = (struct ieee80211_hdr *) beacon->data;
4072 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4073 IEEE80211_STYPE_PROBE_RESP);
4074 if (is_ap)
4075 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4076 beacon->data,
4077 beacon->len,
4078 min_rate);
4079 else
4080 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4081 CMD_TEMPL_PROBE_RESPONSE,
4082 beacon->data,
4083 beacon->len, 0,
4084 min_rate);
4085 end_bcn:
4086 dev_kfree_skb(beacon);
4087 if (ret < 0)
4088 goto out;
4089
4090 out:
4091 return ret;
4092 }
4093
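/*
 * Handle beacon-related changes: beacon interval, an explicit AP probe
 * response from user space, and beacon content updates (which may also
 * restart the DFS master state if beaconing had been disabled).
 */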
4094 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4095 struct ieee80211_vif *vif,
4096 struct ieee80211_bss_conf *bss_conf,
4097 u32 changed)
4098 {
4099 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4100 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4101 int ret = 0;
4102
4103 if (changed & BSS_CHANGED_BEACON_INT) {
4104 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4105 bss_conf->beacon_int);
4106
4107 wlvif->beacon_int = bss_conf->beacon_int;
4108 }
4109
4110 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4111 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4112
4113 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4114 }
4115
4116 if (changed & BSS_CHANGED_BEACON) {
4117 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4118 if (ret < 0)
4119 goto out;
4120
4121 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4122 &wlvif->flags)) {
4123 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4124 if (ret < 0)
4125 goto out;
4126 }
4127 }
4128 out:
4129 if (ret != 0)
4130 wl1271_error("beacon info change failed: %d", ret);
4131 return ret;
4132 }
4133
4134 /* AP mode changes */
4135 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4136 struct ieee80211_vif *vif,
4137 struct ieee80211_bss_conf *bss_conf,
4138 u32 changed)
4139 {
4140 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4141 int ret = 0;
4142
4143 if (changed & BSS_CHANGED_BASIC_RATES) {
4144 u32 rates = bss_conf->basic_rates;
4145
4146 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4147 wlvif->band);
4148 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4149 wlvif->basic_rate_set);
4150
4151 ret = wl1271_init_ap_rates(wl, wlvif);
4152 if (ret < 0) {
4153 wl1271_error("AP rate policy change failed %d", ret);
4154 goto out;
4155 }
4156
4157 ret = wl1271_ap_init_templates(wl, vif);
4158 if (ret < 0)
4159 goto out;
4160
4161 /* No need to set probe resp template for mesh */
4162 if (!ieee80211_vif_is_mesh(vif)) {
4163 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4164 wlvif->basic_rate,
4165 vif);
4166 if (ret < 0)
4167 goto out;
4168 }
4169
4170 ret = wlcore_set_beacon_template(wl, vif, true);
4171 if (ret < 0)
4172 goto out;
4173 }
4174
4175 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4176 if (ret < 0)
4177 goto out;
4178
4179 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4180 if (bss_conf->enable_beacon) {
4181 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4182 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4183 if (ret < 0)
4184 goto out;
4185
4186 ret = wl1271_ap_init_hwenc(wl, wlvif);
4187 if (ret < 0)
4188 goto out;
4189
4190 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4191 wl1271_debug(DEBUG_AP, "started AP");
4192 }
4193 } else {
4194 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4195 /*
4196 * The AP might be in ROC in case we have just
4197 * sent an auth reply. Handle it.
4198 */
4199 if (test_bit(wlvif->role_id, wl->roc_map))
4200 wl12xx_croc(wl, wlvif->role_id);
4201
4202 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4203 if (ret < 0)
4204 goto out;
4205
4206 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4207 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4208 &wlvif->flags);
4209 wl1271_debug(DEBUG_AP, "stopped AP");
4210 }
4211 }
4212 }
4213
4214 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4215 if (ret < 0)
4216 goto out;
4217
4218 /* Handle HT information change */
4219 if ((changed & BSS_CHANGED_HT) &&
4220 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4221 ret = wl1271_acx_set_ht_information(wl, wlvif,
4222 bss_conf->ht_operation_mode);
4223 if (ret < 0) {
4224 wl1271_warning("Set ht information failed %d", ret);
4225 goto out;
4226 }
4227 }
4228
4229 out:
4230 return;
4231 }
4232
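/*
 * Apply a newly learned BSSID on a STA vif: recompute the basic and
 * peer rate sets, stop any scheduled scan on this vif, update the rate
 * policies and (QoS) null-data templates, and mark the vif as in use.
 */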
4233 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4234 struct ieee80211_bss_conf *bss_conf,
4235 u32 sta_rate_set)
4236 {
4237 u32 rates;
4238 int ret;
4239
4240 wl1271_debug(DEBUG_MAC80211,
4241 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4242 bss_conf->bssid, bss_conf->aid,
4243 bss_conf->beacon_int,
4244 bss_conf->basic_rates, sta_rate_set);
4245
4246 wlvif->beacon_int = bss_conf->beacon_int;
4247 rates = bss_conf->basic_rates;
4248 wlvif->basic_rate_set =
4249 wl1271_tx_enabled_rates_get(wl, rates,
4250 wlvif->band);
4251 wlvif->basic_rate =
4252 wl1271_tx_min_rate_get(wl,
4253 wlvif->basic_rate_set);
4254
4255 if (sta_rate_set)
4256 wlvif->rate_set =
4257 wl1271_tx_enabled_rates_get(wl,
4258 sta_rate_set,
4259 wlvif->band);
4260
4261 /* we only support sched_scan while not connected */
4262 if (wl->sched_vif == wlvif)
4263 wl->ops->sched_scan_stop(wl, wlvif);
4264
4265 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4266 if (ret < 0)
4267 return ret;
4268
4269 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4270 if (ret < 0)
4271 return ret;
4272
4273 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4274 if (ret < 0)
4275 return ret;
4276
4277 wlcore_set_ssid(wl, wlvif);
4278
4279 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4280
4281 return 0;
4282 }
4283
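/*
 * Undo wlcore_set_bssid(): revert to the minimum rates for the current
 * band and stop the STA role if it was started.
 */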
4284 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4285 {
4286 int ret;
4287
4288 /* revert to the minimum rates for the current band */
4289 wl1271_set_band_rate(wl, wlvif);
4290 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4291
4292 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4293 if (ret < 0)
4294 return ret;
4295
4296 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4297 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4298 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4299 if (ret < 0)
4300 return ret;
4301 }
4302
4303 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4304 return 0;
4305 }
4306 /* STA/IBSS mode changes */
4307 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4308 struct ieee80211_vif *vif,
4309 struct ieee80211_bss_conf *bss_conf,
4310 u32 changed)
4311 {
4312 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4313 bool do_join = false;
4314 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4315 bool ibss_joined = false;
4316 u32 sta_rate_set = 0;
4317 int ret;
4318 struct ieee80211_sta *sta;
4319 bool sta_exists = false;
4320 struct ieee80211_sta_ht_cap sta_ht_cap;
4321
4322 if (is_ibss) {
4323 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4324 changed);
4325 if (ret < 0)
4326 goto out;
4327 }
4328
4329 if (changed & BSS_CHANGED_IBSS) {
4330 if (bss_conf->ibss_joined) {
4331 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4332 ibss_joined = true;
4333 } else {
4334 wlcore_unset_assoc(wl, wlvif);
4335 wl12xx_cmd_role_stop_sta(wl, wlvif);
4336 }
4337 }
4338
4339 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4340 do_join = true;
4341
4342 /* Need to update the SSID (for filtering etc) */
4343 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4344 do_join = true;
4345
4346 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4347 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4348 bss_conf->enable_beacon ? "enabled" : "disabled");
4349
4350 do_join = true;
4351 }
4352
4353 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4354 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4355
4356 if (changed & BSS_CHANGED_CQM) {
4357 bool enable = false;
4358 if (bss_conf->cqm_rssi_thold)
4359 enable = true;
4360 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4361 bss_conf->cqm_rssi_thold,
4362 bss_conf->cqm_rssi_hyst);
4363 if (ret < 0)
4364 goto out;
4365 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4366 }
4367
4368 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4369 BSS_CHANGED_ASSOC)) {
4370 rcu_read_lock();
4371 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4372 if (sta) {
4373 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4374
4375 /* save the supp_rates of the ap */
4376 sta_rate_set = sta->supp_rates[wlvif->band];
4377 if (sta->ht_cap.ht_supported)
4378 sta_rate_set |=
4379 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4380 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4381 sta_ht_cap = sta->ht_cap;
4382 sta_exists = true;
4383 }
4384
4385 rcu_read_unlock();
4386 }
4387
4388 if (changed & BSS_CHANGED_BSSID) {
4389 if (!is_zero_ether_addr(bss_conf->bssid)) {
4390 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4391 sta_rate_set);
4392 if (ret < 0)
4393 goto out;
4394
4395 /* Need to update the BSSID (for filtering etc) */
4396 do_join = true;
4397 } else {
4398 ret = wlcore_clear_bssid(wl, wlvif);
4399 if (ret < 0)
4400 goto out;
4401 }
4402 }
4403
4404 if (changed & BSS_CHANGED_IBSS) {
4405 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4406 bss_conf->ibss_joined);
4407
4408 if (bss_conf->ibss_joined) {
4409 u32 rates = bss_conf->basic_rates;
4410 wlvif->basic_rate_set =
4411 wl1271_tx_enabled_rates_get(wl, rates,
4412 wlvif->band);
4413 wlvif->basic_rate =
4414 wl1271_tx_min_rate_get(wl,
4415 wlvif->basic_rate_set);
4416
4417 /* by default, use 11b + OFDM rates */
4418 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4419 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4420 if (ret < 0)
4421 goto out;
4422 }
4423 }
4424
4425 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4426 /* enable beacon filtering */
4427 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4428 if (ret < 0)
4429 goto out;
4430 }
4431
4432 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4433 if (ret < 0)
4434 goto out;
4435
4436 if (do_join) {
4437 ret = wlcore_join(wl, wlvif);
4438 if (ret < 0) {
4439 wl1271_warning("cmd join failed %d", ret);
4440 goto out;
4441 }
4442 }
4443
4444 if (changed & BSS_CHANGED_ASSOC) {
4445 if (bss_conf->assoc) {
4446 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4447 sta_rate_set);
4448 if (ret < 0)
4449 goto out;
4450
4451 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4452 wl12xx_set_authorized(wl, wlvif);
4453 } else {
4454 wlcore_unset_assoc(wl, wlvif);
4455 }
4456 }
4457
4458 if (changed & BSS_CHANGED_PS) {
4459 if ((bss_conf->ps) &&
4460 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4461 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4462 int ps_mode;
4463 char *ps_mode_str;
4464
4465 if (wl->conf.conn.forced_ps) {
4466 ps_mode = STATION_POWER_SAVE_MODE;
4467 ps_mode_str = "forced";
4468 } else {
4469 ps_mode = STATION_AUTO_PS_MODE;
4470 ps_mode_str = "auto";
4471 }
4472
4473 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4474
4475 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4476 if (ret < 0)
4477 wl1271_warning("enter %s ps failed %d",
4478 ps_mode_str, ret);
4479 } else if (!bss_conf->ps &&
4480 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4481 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4482
4483 ret = wl1271_ps_set_mode(wl, wlvif,
4484 STATION_ACTIVE_MODE);
4485 if (ret < 0)
4486 wl1271_warning("exit auto ps failed %d", ret);
4487 }
4488 }
4489
4490 /* Handle new association with HT. Do this after join. */
4491 if (sta_exists) {
4492 bool enabled =
4493 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4494
4495 ret = wlcore_hw_set_peer_cap(wl,
4496 &sta_ht_cap,
4497 enabled,
4498 wlvif->rate_set,
4499 wlvif->sta.hlid);
4500 if (ret < 0) {
4501 wl1271_warning("Set ht cap failed %d", ret);
4502 goto out;
4503
4504 }
4505
4506 if (enabled) {
4507 ret = wl1271_acx_set_ht_information(wl, wlvif,
4508 bss_conf->ht_operation_mode);
4509 if (ret < 0) {
4510 wl1271_warning("Set ht information failed %d",
4511 ret);
4512 goto out;
4513 }
4514 }
4515 }
4516
4517 /* Handle arp filtering. Done after join. */
4518 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4519 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4520 __be32 addr = bss_conf->arp_addr_list[0];
4521 wlvif->sta.qos = bss_conf->qos;
4522 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4523
4524 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4525 wlvif->ip_addr = addr;
4526 /*
4527 * The template should have been configured only upon
4528 * association. However, it seems that the correct IP
4529 * isn't being set (when sending), so we have to
4530 * reconfigure the template upon every IP change.
4531 */
4532 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4533 if (ret < 0) {
4534 wl1271_warning("build arp rsp failed: %d", ret);
4535 goto out;
4536 }
4537
4538 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4539 (ACX_ARP_FILTER_ARP_FILTERING |
4540 ACX_ARP_FILTER_AUTO_ARP),
4541 addr);
4542 } else {
4543 wlvif->ip_addr = 0;
4544 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4545 }
4546
4547 if (ret < 0)
4548 goto out;
4549 }
4550
4551 out:
4552 return;
4553 }
4554
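/*
 * mac80211 bss_info_changed handler: flush TX before an AP stops
 * beaconing, wake the chip, apply TX power changes and dispatch to the
 * AP or STA/IBSS specific handler.
 */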
4555 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4556 struct ieee80211_vif *vif,
4557 struct ieee80211_bss_conf *bss_conf,
4558 u32 changed)
4559 {
4560 struct wl1271 *wl = hw->priv;
4561 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4562 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4563 int ret;
4564
4565 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4566 wlvif->role_id, (int)changed);
4567
4568 /*
4569 * make sure to cancel pending disconnections if our association
4570 * state changed
4571 */
4572 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4573 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4574
4575 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4576 !bss_conf->enable_beacon)
4577 wl1271_tx_flush(wl);
4578
4579 mutex_lock(&wl->mutex);
4580
4581 if (unlikely(wl->state != WLCORE_STATE_ON))
4582 goto out;
4583
4584 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4585 goto out;
4586
4587 ret = wl1271_ps_elp_wakeup(wl);
4588 if (ret < 0)
4589 goto out;
4590
4591 if ((changed & BSS_CHANGED_TXPOWER) &&
4592 bss_conf->txpower != wlvif->power_level) {
4593
4594 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4595 if (ret < 0)
4596 goto out;
4597
4598 wlvif->power_level = bss_conf->txpower;
4599 }
4600
4601 if (is_ap)
4602 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4603 else
4604 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4605
4606 wl1271_ps_elp_sleep(wl);
4607
4608 out:
4609 mutex_unlock(&wl->mutex);
4610 }
4611
4612 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4613 struct ieee80211_chanctx_conf *ctx)
4614 {
4615 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4616 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4617 cfg80211_get_chandef_type(&ctx->def));
4618 return 0;
4619 }
4620
4621 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4622 struct ieee80211_chanctx_conf *ctx)
4623 {
4624 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4625 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4626 cfg80211_get_chandef_type(&ctx->def));
4627 }
4628
4629 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4630 struct ieee80211_chanctx_conf *ctx,
4631 u32 changed)
4632 {
4633 struct wl1271 *wl = hw->priv;
4634 struct wl12xx_vif *wlvif;
4635 int ret;
4636 int channel = ieee80211_frequency_to_channel(
4637 ctx->def.chan->center_freq);
4638
4639 wl1271_debug(DEBUG_MAC80211,
4640 "mac80211 change chanctx %d (type %d) changed 0x%x",
4641 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4642
4643 mutex_lock(&wl->mutex);
4644
4645 ret = wl1271_ps_elp_wakeup(wl);
4646 if (ret < 0)
4647 goto out;
4648
4649 wl12xx_for_each_wlvif(wl, wlvif) {
4650 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4651
4652 rcu_read_lock();
4653 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4654 rcu_read_unlock();
4655 continue;
4656 }
4657 rcu_read_unlock();
4658
4659 /* start radar if needed */
4660 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4661 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4662 ctx->radar_enabled && !wlvif->radar_enabled &&
4663 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4664 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4665 wlcore_hw_set_cac(wl, wlvif, true);
4666 wlvif->radar_enabled = true;
4667 }
4668 }
4669
4670 wl1271_ps_elp_sleep(wl);
4671 out:
4672 mutex_unlock(&wl->mutex);
4673 }
4674
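/*
 * Bind a vif to its channel context: record band/channel/channel type,
 * update the default rates for the new band and start CAC (radar
 * detection) when the context has radar enabled on a usable DFS channel.
 */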
4675 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4676 struct ieee80211_vif *vif,
4677 struct ieee80211_chanctx_conf *ctx)
4678 {
4679 struct wl1271 *wl = hw->priv;
4680 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4681 int channel = ieee80211_frequency_to_channel(
4682 ctx->def.chan->center_freq);
4683 int ret = -EINVAL;
4684
4685 wl1271_debug(DEBUG_MAC80211,
4686 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4687 wlvif->role_id, channel,
4688 cfg80211_get_chandef_type(&ctx->def),
4689 ctx->radar_enabled, ctx->def.chan->dfs_state);
4690
4691 mutex_lock(&wl->mutex);
4692
4693 if (unlikely(wl->state != WLCORE_STATE_ON))
4694 goto out;
4695
4696 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4697 goto out;
4698
4699 ret = wl1271_ps_elp_wakeup(wl);
4700 if (ret < 0)
4701 goto out;
4702
4703 wlvif->band = ctx->def.chan->band;
4704 wlvif->channel = channel;
4705 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4706
4707 /* update default rates according to the band */
4708 wl1271_set_band_rate(wl, wlvif);
4709
4710 if (ctx->radar_enabled &&
4711 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4712 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4713 wlcore_hw_set_cac(wl, wlvif, true);
4714 wlvif->radar_enabled = true;
4715 }
4716
4717 wl1271_ps_elp_sleep(wl);
4718 out:
4719 mutex_unlock(&wl->mutex);
4720
4721 return 0;
4722 }
4723
4724 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4725 struct ieee80211_vif *vif,
4726 struct ieee80211_chanctx_conf *ctx)
4727 {
4728 struct wl1271 *wl = hw->priv;
4729 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4730 int ret;
4731
4732 wl1271_debug(DEBUG_MAC80211,
4733 "mac80211 unassign chanctx (role %d) %d (type %d)",
4734 wlvif->role_id,
4735 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4736 cfg80211_get_chandef_type(&ctx->def));
4737
4738 wl1271_tx_flush(wl);
4739
4740 mutex_lock(&wl->mutex);
4741
4742 if (unlikely(wl->state != WLCORE_STATE_ON))
4743 goto out;
4744
4745 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4746 goto out;
4747
4748 ret = wl1271_ps_elp_wakeup(wl);
4749 if (ret < 0)
4750 goto out;
4751
4752 if (wlvif->radar_enabled) {
4753 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4754 wlcore_hw_set_cac(wl, wlvif, false);
4755 wlvif->radar_enabled = false;
4756 }
4757
4758 wl1271_ps_elp_sleep(wl);
4759 out:
4760 mutex_unlock(&wl->mutex);
4761 }
4762
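/*
 * Move an AP vif to a new channel context (expects beaconing to be
 * disabled): stop any running CAC, update the channel bookkeeping and
 * restart radar detection if the new context requires it.
 */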
4763 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4764 struct wl12xx_vif *wlvif,
4765 struct ieee80211_chanctx_conf *new_ctx)
4766 {
4767 int channel = ieee80211_frequency_to_channel(
4768 new_ctx->def.chan->center_freq);
4769
4770 wl1271_debug(DEBUG_MAC80211,
4771 "switch vif (role %d) %d -> %d chan_type: %d",
4772 wlvif->role_id, wlvif->channel, channel,
4773 cfg80211_get_chandef_type(&new_ctx->def));
4774
4775 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4776 return 0;
4777
4778 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4779
4780 if (wlvif->radar_enabled) {
4781 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4782 wlcore_hw_set_cac(wl, wlvif, false);
4783 wlvif->radar_enabled = false;
4784 }
4785
4786 wlvif->band = new_ctx->def.chan->band;
4787 wlvif->channel = channel;
4788 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4789
4790 /* start radar if needed */
4791 if (new_ctx->radar_enabled) {
4792 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4793 wlcore_hw_set_cac(wl, wlvif, true);
4794 wlvif->radar_enabled = true;
4795 }
4796
4797 return 0;
4798 }
4799
4800 static int
4801 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4802 struct ieee80211_vif_chanctx_switch *vifs,
4803 int n_vifs,
4804 enum ieee80211_chanctx_switch_mode mode)
4805 {
4806 struct wl1271 *wl = hw->priv;
4807 int i, ret;
4808
4809 wl1271_debug(DEBUG_MAC80211,
4810 "mac80211 switch chanctx n_vifs %d mode %d",
4811 n_vifs, mode);
4812
4813 mutex_lock(&wl->mutex);
4814
4815 ret = wl1271_ps_elp_wakeup(wl);
4816 if (ret < 0)
4817 goto out;
4818
4819 for (i = 0; i < n_vifs; i++) {
4820 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4821
4822 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4823 if (ret)
4824 goto out_sleep;
4825 }
4826 out_sleep:
4827 wl1271_ps_elp_sleep(wl);
4828 out:
4829 mutex_unlock(&wl->mutex);
4830
4831 return 0;
4832 }
4833
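/*
 * Configure the EDCA (AC) and TID parameters of one TX queue, selecting
 * U-APSD or legacy power-save delivery according to the queue params.
 */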
4834 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4835 struct ieee80211_vif *vif, u16 queue,
4836 const struct ieee80211_tx_queue_params *params)
4837 {
4838 struct wl1271 *wl = hw->priv;
4839 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4840 u8 ps_scheme;
4841 int ret = 0;
4842
4843 if (wlcore_is_p2p_mgmt(wlvif))
4844 return 0;
4845
4846 mutex_lock(&wl->mutex);
4847
4848 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4849
4850 if (params->uapsd)
4851 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4852 else
4853 ps_scheme = CONF_PS_SCHEME_LEGACY;
4854
4855 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4856 goto out;
4857
4858 ret = wl1271_ps_elp_wakeup(wl);
4859 if (ret < 0)
4860 goto out;
4861
4862 /*
4863 * mac80211 configures the txop in units of 32us; the firmware
4864 * expects microseconds, hence the << 5 conversion below.
4865 */
4866 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4867 params->cw_min, params->cw_max,
4868 params->aifs, params->txop << 5);
4869 if (ret < 0)
4870 goto out_sleep;
4871
4872 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4873 CONF_CHANNEL_TYPE_EDCF,
4874 wl1271_tx_get_queue(queue),
4875 ps_scheme, CONF_ACK_POLICY_LEGACY,
4876 0, 0);
4877
4878 out_sleep:
4879 wl1271_ps_elp_sleep(wl);
4880
4881 out:
4882 mutex_unlock(&wl->mutex);
4883
4884 return ret;
4885 }
4886
4887 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4888 struct ieee80211_vif *vif)
4889 {
4890
4891 struct wl1271 *wl = hw->priv;
4892 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4893 u64 mactime = ULLONG_MAX;
4894 int ret;
4895
4896 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4897
4898 mutex_lock(&wl->mutex);
4899
4900 if (unlikely(wl->state != WLCORE_STATE_ON))
4901 goto out;
4902
4903 ret = wl1271_ps_elp_wakeup(wl);
4904 if (ret < 0)
4905 goto out;
4906
4907 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4908 if (ret < 0)
4909 goto out_sleep;
4910
4911 out_sleep:
4912 wl1271_ps_elp_sleep(wl);
4913
4914 out:
4915 mutex_unlock(&wl->mutex);
4916 return mactime;
4917 }
4918
4919 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4920 struct survey_info *survey)
4921 {
4922 struct ieee80211_conf *conf = &hw->conf;
4923
4924 if (idx != 0)
4925 return -ENOENT;
4926
4927 survey->channel = conf->chandef.chan;
4928 survey->filled = 0;
4929 return 0;
4930 }
4931
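/*
 * Allocate an HLID (host link ID) for a new AP-mode station and restore
 * the station's saved security sequence counter in case this is a
 * recovery/resume.
 */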
4932 static int wl1271_allocate_sta(struct wl1271 *wl,
4933 struct wl12xx_vif *wlvif,
4934 struct ieee80211_sta *sta)
4935 {
4936 struct wl1271_station *wl_sta;
4937 int ret;
4938
4939
4940 if (wl->active_sta_count >= wl->max_ap_stations) {
4941 wl1271_warning("could not allocate HLID - too many stations");
4942 return -EBUSY;
4943 }
4944
4945 wl_sta = (struct wl1271_station *)sta->drv_priv;
4946 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4947 if (ret < 0) {
4948 wl1271_warning("could not allocate HLID - too many links");
4949 return -EBUSY;
4950 }
4951
4952 /* use the previous security seq, if this is a recovery/resume */
4953 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4954
4955 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4956 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4957 wl->active_sta_count++;
4958 return 0;
4959 }
4960
4961 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4962 {
4963 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4964 return;
4965
4966 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4967 __clear_bit(hlid, &wl->ap_ps_map);
4968 __clear_bit(hlid, &wl->ap_fw_ps_map);
4969
4970 /*
4971 * save the last used PN in the private part of ieee80211_sta,
4972 * in case of recovery/suspend
4973 */
4974 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4975
4976 wl12xx_free_link(wl, wlvif, &hlid);
4977 wl->active_sta_count--;
4978
4979 /*
4980 * rearm the tx watchdog when the last STA is freed - give the FW a
4981 * chance to return STA-buffered packets before complaining.
4982 */
4983 if (wl->active_sta_count == 0)
4984 wl12xx_rearm_tx_watchdog_locked(wl);
4985 }
4986
4987 static int wl12xx_sta_add(struct wl1271 *wl,
4988 struct wl12xx_vif *wlvif,
4989 struct ieee80211_sta *sta)
4990 {
4991 struct wl1271_station *wl_sta;
4992 int ret = 0;
4993 u8 hlid;
4994
4995 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4996
4997 ret = wl1271_allocate_sta(wl, wlvif, sta);
4998 if (ret < 0)
4999 return ret;
5000
5001 wl_sta = (struct wl1271_station *)sta->drv_priv;
5002 hlid = wl_sta->hlid;
5003
5004 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5005 if (ret < 0)
5006 wl1271_free_sta(wl, wlvif, hlid);
5007
5008 return ret;
5009 }
5010
5011 static int wl12xx_sta_remove(struct wl1271 *wl,
5012 struct wl12xx_vif *wlvif,
5013 struct ieee80211_sta *sta)
5014 {
5015 struct wl1271_station *wl_sta;
5016 int ret = 0, id;
5017
5018 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5019
5020 wl_sta = (struct wl1271_station *)sta->drv_priv;
5021 id = wl_sta->hlid;
5022 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5023 return -EINVAL;
5024
5025 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5026 if (ret < 0)
5027 return ret;
5028
5029 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5030 return ret;
5031 }
5032
5033 static void wlcore_roc_if_possible(struct wl1271 *wl,
5034 struct wl12xx_vif *wlvif)
5035 {
5036 if (find_first_bit(wl->roc_map,
5037 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5038 return;
5039
5040 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5041 return;
5042
5043 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5044 }
5045
5046 /*
5047 * when wl_sta is NULL, we treat this call as if coming from a
5048 * pending auth reply.
5049 * wl->mutex must be taken and the FW must be awake when the call
5050 * takes place.
5051 */
5052 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5053 struct wl1271_station *wl_sta, bool in_conn)
5054 {
5055 if (in_conn) {
5056 if (WARN_ON(wl_sta && wl_sta->in_connection))
5057 return;
5058
5059 if (!wlvif->ap_pending_auth_reply &&
5060 !wlvif->inconn_count)
5061 wlcore_roc_if_possible(wl, wlvif);
5062
5063 if (wl_sta) {
5064 wl_sta->in_connection = true;
5065 wlvif->inconn_count++;
5066 } else {
5067 wlvif->ap_pending_auth_reply = true;
5068 }
5069 } else {
5070 if (wl_sta && !wl_sta->in_connection)
5071 return;
5072
5073 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5074 return;
5075
5076 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5077 return;
5078
5079 if (wl_sta) {
5080 wl_sta->in_connection = false;
5081 wlvif->inconn_count--;
5082 } else {
5083 wlvif->ap_pending_auth_reply = false;
5084 }
5085
5086 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5087 test_bit(wlvif->role_id, wl->roc_map))
5088 wl12xx_croc(wl, wlvif->role_id);
5089 }
5090 }
5091
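/*
 * Translate mac80211 station state transitions into FW peer commands:
 * add/remove/authorize peers in AP mode, track authorization and saved
 * sequence numbers in STA mode, and manage ROC around connection setup.
 */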
5092 static int wl12xx_update_sta_state(struct wl1271 *wl,
5093 struct wl12xx_vif *wlvif,
5094 struct ieee80211_sta *sta,
5095 enum ieee80211_sta_state old_state,
5096 enum ieee80211_sta_state new_state)
5097 {
5098 struct wl1271_station *wl_sta;
5099 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5100 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5101 int ret;
5102
5103 wl_sta = (struct wl1271_station *)sta->drv_priv;
5104
5105 /* Add station (AP mode) */
5106 if (is_ap &&
5107 old_state == IEEE80211_STA_NOTEXIST &&
5108 new_state == IEEE80211_STA_NONE) {
5109 ret = wl12xx_sta_add(wl, wlvif, sta);
5110 if (ret)
5111 return ret;
5112
5113 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5114 }
5115
5116 /* Remove station (AP mode) */
5117 if (is_ap &&
5118 old_state == IEEE80211_STA_NONE &&
5119 new_state == IEEE80211_STA_NOTEXIST) {
5120 /* must not fail */
5121 wl12xx_sta_remove(wl, wlvif, sta);
5122
5123 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5124 }
5125
5126 /* Authorize station (AP mode) */
5127 if (is_ap &&
5128 new_state == IEEE80211_STA_AUTHORIZED) {
5129 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5130 if (ret < 0)
5131 return ret;
5132
5133 /* reconfigure rates */
5134 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5135 if (ret < 0)
5136 return ret;
5137
5138 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5139 wl_sta->hlid);
5140 if (ret)
5141 return ret;
5142
5143 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5144 }
5145
5146 /* Authorize station */
5147 if (is_sta &&
5148 new_state == IEEE80211_STA_AUTHORIZED) {
5149 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5150 ret = wl12xx_set_authorized(wl, wlvif);
5151 if (ret)
5152 return ret;
5153 }
5154
5155 if (is_sta &&
5156 old_state == IEEE80211_STA_AUTHORIZED &&
5157 new_state == IEEE80211_STA_ASSOC) {
5158 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5159 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5160 }
5161
5162 /* save seq number on disassoc (suspend) */
5163 if (is_sta &&
5164 old_state == IEEE80211_STA_ASSOC &&
5165 new_state == IEEE80211_STA_AUTH) {
5166 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5167 wlvif->total_freed_pkts = 0;
5168 }
5169
5170 /* restore seq number on assoc (resume) */
5171 if (is_sta &&
5172 old_state == IEEE80211_STA_AUTH &&
5173 new_state == IEEE80211_STA_ASSOC) {
5174 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5175 }
5176
5177 /* clear ROCs on failure or authorization */
5178 if (is_sta &&
5179 (new_state == IEEE80211_STA_AUTHORIZED ||
5180 new_state == IEEE80211_STA_NOTEXIST)) {
5181 if (test_bit(wlvif->role_id, wl->roc_map))
5182 wl12xx_croc(wl, wlvif->role_id);
5183 }
5184
5185 if (is_sta &&
5186 old_state == IEEE80211_STA_NOTEXIST &&
5187 new_state == IEEE80211_STA_NONE) {
5188 if (find_first_bit(wl->roc_map,
5189 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5190 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5191 wl12xx_roc(wl, wlvif, wlvif->role_id,
5192 wlvif->band, wlvif->channel);
5193 }
5194 }
5195 return 0;
5196 }
5197
5198 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5199 struct ieee80211_vif *vif,
5200 struct ieee80211_sta *sta,
5201 enum ieee80211_sta_state old_state,
5202 enum ieee80211_sta_state new_state)
5203 {
5204 struct wl1271 *wl = hw->priv;
5205 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5206 int ret;
5207
5208 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5209 sta->aid, old_state, new_state);
5210
5211 mutex_lock(&wl->mutex);
5212
5213 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5214 ret = -EBUSY;
5215 goto out;
5216 }
5217
5218 ret = wl1271_ps_elp_wakeup(wl);
5219 if (ret < 0)
5220 goto out;
5221
5222 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5223
5224 wl1271_ps_elp_sleep(wl);
5225 out:
5226 mutex_unlock(&wl->mutex);
5227 if (new_state < old_state)
5228 return 0;
5229 return ret;
5230 }
5231
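/*
 * mac80211 ampdu_action handler: only RX BA sessions are set up and
 * torn down from the host; TX aggregation is handled by the FW, so all
 * TX AMPDU actions are rejected here.
 */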
5232 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5233 struct ieee80211_vif *vif,
5234 struct ieee80211_ampdu_params *params)
5235 {
5236 struct wl1271 *wl = hw->priv;
5237 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5238 int ret;
5239 u8 hlid, *ba_bitmap;
5240 struct ieee80211_sta *sta = params->sta;
5241 enum ieee80211_ampdu_mlme_action action = params->action;
5242 u16 tid = params->tid;
5243 u16 *ssn = &params->ssn;
5244
5245 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5246 tid);
5247
5248 /* sanity check - the fields in FW are only 8 bits wide */
5249 if (WARN_ON(tid > 0xFF))
5250 return -ENOTSUPP;
5251
5252 mutex_lock(&wl->mutex);
5253
5254 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5255 ret = -EAGAIN;
5256 goto out;
5257 }
5258
5259 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5260 hlid = wlvif->sta.hlid;
5261 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5262 struct wl1271_station *wl_sta;
5263
5264 wl_sta = (struct wl1271_station *)sta->drv_priv;
5265 hlid = wl_sta->hlid;
5266 } else {
5267 ret = -EINVAL;
5268 goto out;
5269 }
5270
5271 ba_bitmap = &wl->links[hlid].ba_bitmap;
5272
5273 ret = wl1271_ps_elp_wakeup(wl);
5274 if (ret < 0)
5275 goto out;
5276
5277 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5278 tid, action);
5279
5280 switch (action) {
5281 case IEEE80211_AMPDU_RX_START:
5282 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5283 ret = -ENOTSUPP;
5284 break;
5285 }
5286
5287 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5288 ret = -EBUSY;
5289 wl1271_error("exceeded max RX BA sessions");
5290 break;
5291 }
5292
5293 if (*ba_bitmap & BIT(tid)) {
5294 ret = -EINVAL;
5295 wl1271_error("cannot enable RX BA session on active "
5296 "tid: %d", tid);
5297 break;
5298 }
5299
5300 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5301 hlid,
5302 params->buf_size);
5303
5304 if (!ret) {
5305 *ba_bitmap |= BIT(tid);
5306 wl->ba_rx_session_count++;
5307 }
5308 break;
5309
5310 case IEEE80211_AMPDU_RX_STOP:
5311 if (!(*ba_bitmap & BIT(tid))) {
5312 /*
5313 * this happens on reconfig - so only output a debug
5314 * message for now, and don't fail the function.
5315 */
5316 wl1271_debug(DEBUG_MAC80211,
5317 "no active RX BA session on tid: %d",
5318 tid);
5319 ret = 0;
5320 break;
5321 }
5322
5323 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5324 hlid, 0);
5325 if (!ret) {
5326 *ba_bitmap &= ~BIT(tid);
5327 wl->ba_rx_session_count--;
5328 }
5329 break;
5330
5331 /*
5332 * The BA initiator session is managed by the FW independently.
5333 * Fall through here on purpose for all TX AMPDU commands.
5334 */
5335 case IEEE80211_AMPDU_TX_START:
5336 case IEEE80211_AMPDU_TX_STOP_CONT:
5337 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5338 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5339 case IEEE80211_AMPDU_TX_OPERATIONAL:
5340 ret = -EINVAL;
5341 break;
5342
5343 default:
5344 wl1271_error("Incorrect ampdu action id=%x\n", action);
5345 ret = -EINVAL;
5346 }
5347
5348 wl1271_ps_elp_sleep(wl);
5349
5350 out:
5351 mutex_unlock(&wl->mutex);
5352
5353 return ret;
5354 }
5355
5356 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5357 struct ieee80211_vif *vif,
5358 const struct cfg80211_bitrate_mask *mask)
5359 {
5360 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5361 struct wl1271 *wl = hw->priv;
5362 int i, ret = 0;
5363
5364 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5365 mask->control[NL80211_BAND_2GHZ].legacy,
5366 mask->control[NL80211_BAND_5GHZ].legacy);
5367
5368 mutex_lock(&wl->mutex);
5369
5370 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5371 wlvif->bitrate_masks[i] =
5372 wl1271_tx_enabled_rates_get(wl,
5373 mask->control[i].legacy,
5374 i);
5375
5376 if (unlikely(wl->state != WLCORE_STATE_ON))
5377 goto out;
5378
5379 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5380 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5381
5382 ret = wl1271_ps_elp_wakeup(wl);
5383 if (ret < 0)
5384 goto out;
5385
5386 wl1271_set_band_rate(wl, wlvif);
5387 wlvif->basic_rate =
5388 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5389 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5390
5391 wl1271_ps_elp_sleep(wl);
5392 }
5393 out:
5394 mutex_unlock(&wl->mutex);
5395
5396 return ret;
5397 }
5398
5399 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5400 struct ieee80211_vif *vif,
5401 struct ieee80211_channel_switch *ch_switch)
5402 {
5403 struct wl1271 *wl = hw->priv;
5404 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5405 int ret;
5406
5407 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5408
5409 wl1271_tx_flush(wl);
5410
5411 mutex_lock(&wl->mutex);
5412
5413 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5414 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5415 ieee80211_chswitch_done(vif, false);
5416 goto out;
5417 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5418 goto out;
5419 }
5420
5421 ret = wl1271_ps_elp_wakeup(wl);
5422 if (ret < 0)
5423 goto out;
5424
5425 /* TODO: change mac80211 to pass vif as param */
5426
5427 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5428 unsigned long delay_usec;
5429
5430 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5431 if (ret)
5432 goto out_sleep;
5433
5434 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5435
5436 /* indicate failure 5 seconds after channel switch time */
5437 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5438 ch_switch->count;
5439 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5440 usecs_to_jiffies(delay_usec) +
5441 msecs_to_jiffies(5000));
5442 }
5443
5444 out_sleep:
5445 wl1271_ps_elp_sleep(wl);
5446
5447 out:
5448 mutex_unlock(&wl->mutex);
5449 }
5450
5451 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5452 struct wl12xx_vif *wlvif,
5453 u8 eid)
5454 {
5455 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5456 struct sk_buff *beacon =
5457 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5458
5459 if (!beacon)
5460 return NULL;
5461
5462 return cfg80211_find_ie(eid,
5463 beacon->data + ieoffset,
5464 beacon->len - ieoffset);
5465 }
5466
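/*
 * Read the CSA countdown from the Channel Switch Announcement IE of the
 * current beacon.
 */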
5467 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5468 u8 *csa_count)
5469 {
5470 const u8 *ie;
5471 const struct ieee80211_channel_sw_ie *ie_csa;
5472
5473 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5474 if (!ie)
5475 return -EINVAL;
5476
5477 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5478 *csa_count = ie_csa->count;
5479
5480 return 0;
5481 }
5482
5483 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5484 struct ieee80211_vif *vif,
5485 struct cfg80211_chan_def *chandef)
5486 {
5487 struct wl1271 *wl = hw->priv;
5488 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5489 struct ieee80211_channel_switch ch_switch = {
5490 .block_tx = true,
5491 .chandef = *chandef,
5492 };
5493 int ret;
5494
5495 wl1271_debug(DEBUG_MAC80211,
5496 "mac80211 channel switch beacon (role %d)",
5497 wlvif->role_id);
5498
5499 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5500 if (ret < 0) {
5501 wl1271_error("error getting beacon (for CSA counter)");
5502 return;
5503 }
5504
5505 mutex_lock(&wl->mutex);
5506
5507 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5508 ret = -EBUSY;
5509 goto out;
5510 }
5511
5512 ret = wl1271_ps_elp_wakeup(wl);
5513 if (ret < 0)
5514 goto out;
5515
5516 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5517 if (ret)
5518 goto out_sleep;
5519
5520 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5521
5522 out_sleep:
5523 wl1271_ps_elp_sleep(wl);
5524 out:
5525 mutex_unlock(&wl->mutex);
5526 }
5527
5528 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5529 u32 queues, bool drop)
5530 {
5531 struct wl1271 *wl = hw->priv;
5532
5533 wl1271_tx_flush(wl);
5534 }
5535
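/*
 * Start a remain-on-channel request: refuse if a ROC is already active,
 * start the device role on the requested channel and schedule the
 * completion work after the requested duration.
 */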
5536 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5537 struct ieee80211_vif *vif,
5538 struct ieee80211_channel *chan,
5539 int duration,
5540 enum ieee80211_roc_type type)
5541 {
5542 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5543 struct wl1271 *wl = hw->priv;
5544 int channel, active_roc, ret = 0;
5545
5546 channel = ieee80211_frequency_to_channel(chan->center_freq);
5547
5548 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5549 channel, wlvif->role_id);
5550
5551 mutex_lock(&wl->mutex);
5552
5553 if (unlikely(wl->state != WLCORE_STATE_ON))
5554 goto out;
5555
5556 /* return EBUSY if we can't ROC right now */
5557 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5558 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5559 wl1271_warning("active roc on role %d", active_roc);
5560 ret = -EBUSY;
5561 goto out;
5562 }
5563
5564 ret = wl1271_ps_elp_wakeup(wl);
5565 if (ret < 0)
5566 goto out;
5567
5568 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5569 if (ret < 0)
5570 goto out_sleep;
5571
5572 wl->roc_vif = vif;
5573 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5574 msecs_to_jiffies(duration));
5575 out_sleep:
5576 wl1271_ps_elp_sleep(wl);
5577 out:
5578 mutex_unlock(&wl->mutex);
5579 return ret;
5580 }
5581
5582 static int __wlcore_roc_completed(struct wl1271 *wl)
5583 {
5584 struct wl12xx_vif *wlvif;
5585 int ret;
5586
5587 /* already completed */
5588 if (unlikely(!wl->roc_vif))
5589 return 0;
5590
5591 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5592
5593 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5594 return -EBUSY;
5595
5596 ret = wl12xx_stop_dev(wl, wlvif);
5597 if (ret < 0)
5598 return ret;
5599
5600 wl->roc_vif = NULL;
5601
5602 return 0;
5603 }
5604
5605 static int wlcore_roc_completed(struct wl1271 *wl)
5606 {
5607 int ret;
5608
5609 wl1271_debug(DEBUG_MAC80211, "roc complete");
5610
5611 mutex_lock(&wl->mutex);
5612
5613 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5614 ret = -EBUSY;
5615 goto out;
5616 }
5617
5618 ret = wl1271_ps_elp_wakeup(wl);
5619 if (ret < 0)
5620 goto out;
5621
5622 ret = __wlcore_roc_completed(wl);
5623
5624 wl1271_ps_elp_sleep(wl);
5625 out:
5626 mutex_unlock(&wl->mutex);
5627
5628 return ret;
5629 }
5630
5631 static void wlcore_roc_complete_work(struct work_struct *work)
5632 {
5633 struct delayed_work *dwork;
5634 struct wl1271 *wl;
5635 int ret;
5636
5637 dwork = to_delayed_work(work);
5638 wl = container_of(dwork, struct wl1271, roc_complete_work);
5639
5640 ret = wlcore_roc_completed(wl);
5641 if (!ret)
5642 ieee80211_remain_on_channel_expired(wl->hw);
5643 }
5644
5645 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5646 {
5647 struct wl1271 *wl = hw->priv;
5648
5649 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5650
5651 /* TODO: per-vif */
5652 wl1271_tx_flush(wl);
5653
5654 /*
5655 * we can't just flush_work here, because it might deadlock
5656 * (as we might get called from the same workqueue)
5657 */
5658 cancel_delayed_work_sync(&wl->roc_complete_work);
5659 wlcore_roc_completed(wl);
5660
5661 return 0;
5662 }
5663
5664 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5665 struct ieee80211_vif *vif,
5666 struct ieee80211_sta *sta,
5667 u32 changed)
5668 {
5669 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5670
5671 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5672
5673 if (!(changed & IEEE80211_RC_BW_CHANGED))
5674 return;
5675
5676 /* this callback is atomic, so schedule a new work */
5677 wlvif->rc_update_bw = sta->bandwidth;
5678 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5679 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5680 }
5681
5682 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5683 struct ieee80211_vif *vif,
5684 struct ieee80211_sta *sta,
5685 struct station_info *sinfo)
5686 {
5687 struct wl1271 *wl = hw->priv;
5688 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5689 s8 rssi_dbm;
5690 int ret;
5691
5692 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5693
5694 mutex_lock(&wl->mutex);
5695
5696 if (unlikely(wl->state != WLCORE_STATE_ON))
5697 goto out;
5698
5699 ret = wl1271_ps_elp_wakeup(wl);
5700 if (ret < 0)
5701 goto out_sleep;
5702
5703 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5704 if (ret < 0)
5705 goto out_sleep;
5706
5707 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5708 sinfo->signal = rssi_dbm;
5709
5710 out_sleep:
5711 wl1271_ps_elp_sleep(wl);
5712
5713 out:
5714 mutex_unlock(&wl->mutex);
5715 }
5716
5717 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5718 struct ieee80211_sta *sta)
5719 {
5720 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5721 struct wl1271 *wl = hw->priv;
5722 u8 hlid = wl_sta->hlid;
5723
5724 /* return in units of Kbps */
5725 return (wl->links[hlid].fw_rate_mbps * 1000);
5726 }
5727
5728 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5729 {
5730 struct wl1271 *wl = hw->priv;
5731 bool ret = false;
5732
5733 mutex_lock(&wl->mutex);
5734
5735 if (unlikely(wl->state != WLCORE_STATE_ON))
5736 goto out;
5737
5738 /* packets are considered pending if in the TX queue or the FW */
5739 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5740 out:
5741 mutex_unlock(&wl->mutex);
5742
5743 return ret;
5744 }
5745
5746 /* can't be const, mac80211 writes to this */
5747 static struct ieee80211_rate wl1271_rates[] = {
5748 { .bitrate = 10,
5749 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5750 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5751 { .bitrate = 20,
5752 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5753 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5754 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5755 { .bitrate = 55,
5756 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5757 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5758 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5759 { .bitrate = 110,
5760 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5761 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5762 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5763 { .bitrate = 60,
5764 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5765 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5766 { .bitrate = 90,
5767 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5768 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5769 { .bitrate = 120,
5770 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5771 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5772 { .bitrate = 180,
5773 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5774 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5775 { .bitrate = 240,
5776 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5777 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5778 { .bitrate = 360,
5779 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5780 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5781 { .bitrate = 480,
5782 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5783 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5784 { .bitrate = 540,
5785 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5786 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5787 };
5788
5789 /* can't be const, mac80211 writes to this */
5790 static struct ieee80211_channel wl1271_channels[] = {
5791 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5792 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5793 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5794 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5795 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5796 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5797 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5798 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5799 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5800 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5801 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5802 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5803 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5804 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5805 };
5806
5807 /* can't be const, mac80211 writes to this */
5808 static struct ieee80211_supported_band wl1271_band_2ghz = {
5809 .channels = wl1271_channels,
5810 .n_channels = ARRAY_SIZE(wl1271_channels),
5811 .bitrates = wl1271_rates,
5812 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5813 };
5814
5815 /* 5 GHz data rates for WL1273 */
5816 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5817 { .bitrate = 60,
5818 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5819 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5820 { .bitrate = 90,
5821 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5822 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5823 { .bitrate = 120,
5824 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5825 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5826 { .bitrate = 180,
5827 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5828 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5829 { .bitrate = 240,
5830 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5831 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5832 { .bitrate = 360,
5833 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5834 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5835 { .bitrate = 480,
5836 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5837 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5838 { .bitrate = 540,
5839 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5840 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5841 };
5842
5843 /* 5 GHz band channels for WL1273 */
5844 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5845 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5846 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5847 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5848 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5849 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5850 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5851 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5852 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5853 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5854 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5855 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5856 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5857 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5858 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5859 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5860 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5861 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5862 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5863 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5864 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5865 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5866 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5867 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5868 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5869 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5870 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5871 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5872 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5873 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5874 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5875 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5876 };
5877
5878 static struct ieee80211_supported_band wl1271_band_5ghz = {
5879 .channels = wl1271_channels_5ghz,
5880 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5881 .bitrates = wl1271_rates_5ghz,
5882 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5883 };
5884
5885 static const struct ieee80211_ops wl1271_ops = {
5886 .start = wl1271_op_start,
5887 .stop = wlcore_op_stop,
5888 .add_interface = wl1271_op_add_interface,
5889 .remove_interface = wl1271_op_remove_interface,
5890 .change_interface = wl12xx_op_change_interface,
5891 #ifdef CONFIG_PM
5892 .suspend = wl1271_op_suspend,
5893 .resume = wl1271_op_resume,
5894 #endif
5895 .config = wl1271_op_config,
5896 .prepare_multicast = wl1271_op_prepare_multicast,
5897 .configure_filter = wl1271_op_configure_filter,
5898 .tx = wl1271_op_tx,
5899 .set_key = wlcore_op_set_key,
5900 .hw_scan = wl1271_op_hw_scan,
5901 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5902 .sched_scan_start = wl1271_op_sched_scan_start,
5903 .sched_scan_stop = wl1271_op_sched_scan_stop,
5904 .bss_info_changed = wl1271_op_bss_info_changed,
5905 .set_frag_threshold = wl1271_op_set_frag_threshold,
5906 .set_rts_threshold = wl1271_op_set_rts_threshold,
5907 .conf_tx = wl1271_op_conf_tx,
5908 .get_tsf = wl1271_op_get_tsf,
5909 .get_survey = wl1271_op_get_survey,
5910 .sta_state = wl12xx_op_sta_state,
5911 .ampdu_action = wl1271_op_ampdu_action,
5912 .tx_frames_pending = wl1271_tx_frames_pending,
5913 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5914 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5915 .channel_switch = wl12xx_op_channel_switch,
5916 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5917 .flush = wlcore_op_flush,
5918 .remain_on_channel = wlcore_op_remain_on_channel,
5919 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5920 .add_chanctx = wlcore_op_add_chanctx,
5921 .remove_chanctx = wlcore_op_remove_chanctx,
5922 .change_chanctx = wlcore_op_change_chanctx,
5923 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5924 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5925 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5926 .sta_rc_update = wlcore_op_sta_rc_update,
5927 .sta_statistics = wlcore_op_sta_statistics,
5928 .get_expected_throughput = wlcore_op_get_expected_throughput,
5929 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5930 };
5931
5932
5933 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5934 {
5935 u8 idx;
5936
5937 BUG_ON(band >= 2);
5938
5939 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5940 wl1271_error("Illegal RX rate from HW: %d", rate);
5941 return 0;
5942 }
5943
5944 idx = wl->band_rate_to_idx[band][rate];
5945 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5946 wl1271_error("Unsupported RX rate from HW: %d", rate);
5947 return 0;
5948 }
5949
5950 return idx;
5951 }
5952
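/*
 * Derive the per-role MAC addresses from the OUI/NIC base address by
 * incrementing the NIC part. If the hardware provides fewer addresses
 * than WLCORE_NUM_MAC_ADDRESSES, the first address is reused as the
 * last one with the locally administered bit set.
 */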
5953 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5954 {
5955 int i;
5956
5957 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5958 oui, nic);
5959
5960 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5961 wl1271_warning("NIC part of the MAC address wraps around!");
5962
5963 for (i = 0; i < wl->num_mac_addr; i++) {
5964 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5965 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5966 wl->addresses[i].addr[2] = (u8) oui;
5967 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5968 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5969 wl->addresses[i].addr[5] = (u8) nic;
5970 nic++;
5971 }
5972
5973 /* we may be at most one address short */
5974 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5975
5976 /*
5977 * turn on the LAA bit in the first address and use it as
5978 * the last address.
5979 */
5980 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5981 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5982 memcpy(&wl->addresses[idx], &wl->addresses[0],
5983 sizeof(wl->addresses[0]));
5984 /* LAA bit */
5985 wl->addresses[idx].addr[0] |= BIT(1);
5986 }
5987
5988 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5989 wl->hw->wiphy->addresses = wl->addresses;
5990 }
5991
5992 static int wl12xx_get_hw_info(struct wl1271 *wl)
5993 {
5994 int ret;
5995
5996 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5997 if (ret < 0)
5998 goto out;
5999
6000 wl->fuse_oui_addr = 0;
6001 wl->fuse_nic_addr = 0;
6002
6003 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6004 if (ret < 0)
6005 goto out;
6006
6007 if (wl->ops->get_mac)
6008 ret = wl->ops->get_mac(wl);
6009
6010 out:
6011 return ret;
6012 }
6013
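/*
 * Register the device with mac80211. The base MAC address is taken from
 * the NVS when present, falling back to the fuse (and, as a last
 * resort, to the TI OUI with a random NIC part) when the NVS address is
 * zeroed or carries the de:ad:be:ef placeholder.
 */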
6014 static int wl1271_register_hw(struct wl1271 *wl)
6015 {
6016 int ret;
6017 u32 oui_addr = 0, nic_addr = 0;
6018 struct platform_device *pdev = wl->pdev;
6019 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6020
6021 if (wl->mac80211_registered)
6022 return 0;
6023
6024 if (wl->nvs_len >= 12) {
6025 /* NOTE: The wl->nvs->nvs element must be first; to simplify
6026 * the casting, we assume it is at the beginning of the
6027 * wl->nvs structure.
6028 */
6029 u8 *nvs_ptr = (u8 *)wl->nvs;
6030
6031 oui_addr =
6032 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6033 nic_addr =
6034 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6035 }
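	/*
	 * Note the byte order above: the OUI comes from NVS bytes 11, 10
	 * and 6 and the NIC part from bytes 5, 4 and 3, so the burned-in
	 * MAC is not stored as six consecutive bytes in the NVS image.
	 */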
6036
6037 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6038 if (oui_addr == 0 && nic_addr == 0) {
6039 oui_addr = wl->fuse_oui_addr;
6040 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6041 nic_addr = wl->fuse_nic_addr + 1;
6042 }
6043
6044 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6045 		wl1271_warning("Detected unconfigured mac address in nvs, deriving from fuse instead.\n");
6046 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6047 wl1271_warning("This default nvs file can be removed from the file system\n");
6048 } else {
6049 wl1271_warning("Your device performance is not optimized.\n");
6050 wl1271_warning("Please use the calibrator tool to configure your device.\n");
6051 }
6052
6053 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6054 			wl1271_warning("Fuse mac address is zero, using a random mac\n");
6055 /* Use TI oui and a random nic */
6056 oui_addr = WLCORE_TI_OUI_ADDRESS;
6057 nic_addr = get_random_int();
6058 } else {
6059 oui_addr = wl->fuse_oui_addr;
6060 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6061 nic_addr = wl->fuse_nic_addr + 1;
6062 }
6063 }
6064
6065 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6066
6067 ret = ieee80211_register_hw(wl->hw);
6068 if (ret < 0) {
6069 wl1271_error("unable to register mac80211 hw: %d", ret);
6070 goto out;
6071 }
6072
6073 wl->mac80211_registered = true;
6074
6075 wl1271_debugfs_init(wl);
6076
6077 wl1271_notice("loaded");
6078
6079 out:
6080 return ret;
6081 }
6082
6083 static void wl1271_unregister_hw(struct wl1271 *wl)
6084 {
6085 if (wl->plt)
6086 wl1271_plt_stop(wl);
6087
6088 ieee80211_unregister_hw(wl->hw);
6089 wl->mac80211_registered = false;
6090
6091 }
6092
6093 static int wl1271_init_ieee80211(struct wl1271 *wl)
6094 {
6095 int i;
6096 static const u32 cipher_suites[] = {
6097 WLAN_CIPHER_SUITE_WEP40,
6098 WLAN_CIPHER_SUITE_WEP104,
6099 WLAN_CIPHER_SUITE_TKIP,
6100 WLAN_CIPHER_SUITE_CCMP,
6101 WL1271_CIPHER_SUITE_GEM,
6102 };
6103
6104 /* The tx descriptor buffer */
6105 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6106
6107 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6108 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6109
6110 /* unit us */
6111 /* FIXME: find a proper value */
6112 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6113
6114 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6115 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6116 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6117 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6118 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6119 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6120 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6121 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6122 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6123 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6124 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6125 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6126 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6127 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6128
6129 wl->hw->wiphy->cipher_suites = cipher_suites;
6130 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6131
6132 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6133 BIT(NL80211_IFTYPE_AP) |
6134 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6135 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6136 #ifdef CONFIG_MAC80211_MESH
6137 BIT(NL80211_IFTYPE_MESH_POINT) |
6138 #endif
6139 BIT(NL80211_IFTYPE_P2P_GO);
6140
6141 wl->hw->wiphy->max_scan_ssids = 1;
6142 wl->hw->wiphy->max_sched_scan_ssids = 16;
6143 wl->hw->wiphy->max_match_sets = 16;
6144 /*
6145 	 * The maximum length of the IEs in a scanning probe request
6146 	 * template is the maximum possible template length, minus the
6147 	 * IEEE 802.11 header of the template.
6148 */
6149 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6150 sizeof(struct ieee80211_header);
6151
6152 wl->hw->wiphy->max_sched_scan_reqs = 1;
6153 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6154 sizeof(struct ieee80211_header);
6155
6156 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6157
6158 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6159 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6160 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6161
6162 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6163
6164 /* make sure all our channels fit in the scanned_ch bitmask */
6165 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6166 ARRAY_SIZE(wl1271_channels_5ghz) >
6167 WL1271_MAX_CHANNELS);
6168 /*
6169 * clear channel flags from the previous usage
6170 * and restore max_power & max_antenna_gain values.
6171 */
6172 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6173 wl1271_band_2ghz.channels[i].flags = 0;
6174 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6175 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6176 }
6177
6178 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6179 wl1271_band_5ghz.channels[i].flags = 0;
6180 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6181 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6182 }
6183
6184 /*
6185 * We keep local copies of the band structs because we need to
6186 * modify them on a per-device basis.
6187 */
6188 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6189 sizeof(wl1271_band_2ghz));
6190 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6191 &wl->ht_cap[NL80211_BAND_2GHZ],
6192 sizeof(*wl->ht_cap));
6193 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6194 sizeof(wl1271_band_5ghz));
6195 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6196 &wl->ht_cap[NL80211_BAND_5GHZ],
6197 sizeof(*wl->ht_cap));
6198
6199 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6200 &wl->bands[NL80211_BAND_2GHZ];
6201 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6202 &wl->bands[NL80211_BAND_5GHZ];
6203
6204 /*
6205 * allow 4 queues per mac address we support +
6206 * 1 cab queue per mac + one global offchannel Tx queue
6207 */
6208 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6209
6210 /* the last queue is the offchannel queue */
6211 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
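	/*
	 * Worked example, assuming NUM_TX_QUEUES == 4 and
	 * WLCORE_NUM_MAC_ADDRESSES == 3 (matching the "4 queues per mac"
	 * comment above): (4 + 1) * 3 + 1 = 16 hw queues, with queue 15
	 * acting as the global offchannel queue.
	 */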
6212 wl->hw->max_rates = 1;
6213
6214 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6215
6216 /* the FW answers probe-requests in AP-mode */
6217 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6218 wl->hw->wiphy->probe_resp_offload =
6219 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6220 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6221 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6222
6223 /* allowed interface combinations */
6224 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6225 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6226
6227 /* register vendor commands */
6228 wlcore_set_vendor_commands(wl->hw->wiphy);
6229
6230 SET_IEEE80211_DEV(wl->hw, wl->dev);
6231
6232 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6233 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6234
6235 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6236
6237 return 0;
6238 }
6239
6240 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6241 u32 mbox_size)
6242 {
6243 struct ieee80211_hw *hw;
6244 struct wl1271 *wl;
6245 int i, j, ret;
6246 unsigned int order;
6247
6248 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6249 if (!hw) {
6250 wl1271_error("could not alloc ieee80211_hw");
6251 ret = -ENOMEM;
6252 goto err_hw_alloc;
6253 }
6254
6255 wl = hw->priv;
6256 memset(wl, 0, sizeof(*wl));
6257
6258 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6259 if (!wl->priv) {
6260 wl1271_error("could not alloc wl priv");
6261 ret = -ENOMEM;
6262 goto err_priv_alloc;
6263 }
6264
6265 INIT_LIST_HEAD(&wl->wlvif_list);
6266
6267 wl->hw = hw;
6268
6269 /*
6270 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6271 	 * We don't allocate any additional resources here, so that's fine.
6272 */
6273 for (i = 0; i < NUM_TX_QUEUES; i++)
6274 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6275 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6276
6277 skb_queue_head_init(&wl->deferred_rx_queue);
6278 skb_queue_head_init(&wl->deferred_tx_queue);
6279
6280 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6281 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6282 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6283 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6284 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6285 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6286 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6287
6288 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6289 if (!wl->freezable_wq) {
6290 ret = -ENOMEM;
6291 goto err_hw;
6292 }
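	/*
	 * A freezable workqueue is frozen during system suspend, so work
	 * queued on it cannot run while the system is suspending or
	 * suspended and the chip may be powered down.
	 */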
6293
6294 wl->channel = 0;
6295 wl->rx_counter = 0;
6296 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6297 wl->band = NL80211_BAND_2GHZ;
6298 wl->channel_type = NL80211_CHAN_NO_HT;
6299 wl->flags = 0;
6300 wl->sg_enabled = true;
6301 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6302 wl->recovery_count = 0;
6303 wl->hw_pg_ver = -1;
6304 wl->ap_ps_map = 0;
6305 wl->ap_fw_ps_map = 0;
6306 wl->quirks = 0;
6307 wl->system_hlid = WL12XX_SYSTEM_HLID;
6308 wl->active_sta_count = 0;
6309 wl->active_link_count = 0;
6310 wl->fwlog_size = 0;
6311
6312 /* The system link is always allocated */
6313 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6314
6315 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6316 for (i = 0; i < wl->num_tx_desc; i++)
6317 wl->tx_frames[i] = NULL;
6318
6319 spin_lock_init(&wl->wl_lock);
6320
6321 wl->state = WLCORE_STATE_OFF;
6322 wl->fw_type = WL12XX_FW_TYPE_NONE;
6323 mutex_init(&wl->mutex);
6324 mutex_init(&wl->flush_mutex);
6325 init_completion(&wl->nvs_loading_complete);
6326
6327 order = get_order(aggr_buf_size);
6328 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6329 if (!wl->aggr_buf) {
6330 ret = -ENOMEM;
6331 goto err_wq;
6332 }
6333 wl->aggr_buf_size = aggr_buf_size;
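	/*
	 * __get_free_pages() returns 2^order contiguous pages, so the
	 * aggregation buffer is rounded up to a power-of-two number of
	 * pages; the error path below frees it with the same order and
	 * wlcore_free_hw() recomputes it from aggr_buf_size.
	 */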
6334
6335 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6336 if (!wl->dummy_packet) {
6337 ret = -ENOMEM;
6338 goto err_aggr;
6339 }
6340
6341 /* Allocate one page for the FW log */
6342 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6343 if (!wl->fwlog) {
6344 ret = -ENOMEM;
6345 goto err_dummy_packet;
6346 }
6347
6348 wl->mbox_size = mbox_size;
6349 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6350 if (!wl->mbox) {
6351 ret = -ENOMEM;
6352 goto err_fwlog;
6353 }
6354
6355 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6356 if (!wl->buffer_32) {
6357 ret = -ENOMEM;
6358 goto err_mbox;
6359 }
6360
6361 return hw;
6362
6363 err_mbox:
6364 kfree(wl->mbox);
6365
6366 err_fwlog:
6367 free_page((unsigned long)wl->fwlog);
6368
6369 err_dummy_packet:
6370 dev_kfree_skb(wl->dummy_packet);
6371
6372 err_aggr:
6373 free_pages((unsigned long)wl->aggr_buf, order);
6374
6375 err_wq:
6376 destroy_workqueue(wl->freezable_wq);
6377
6378 err_hw:
6379 wl1271_debugfs_exit(wl);
6380 kfree(wl->priv);
6381
6382 err_priv_alloc:
6383 ieee80211_free_hw(hw);
6384
6385 err_hw_alloc:
6386
6387 return ERR_PTR(ret);
6388 }
6389 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6390
6391 int wlcore_free_hw(struct wl1271 *wl)
6392 {
6393 /* Unblock any fwlog readers */
6394 mutex_lock(&wl->mutex);
6395 wl->fwlog_size = -1;
6396 mutex_unlock(&wl->mutex);
6397
6398 wlcore_sysfs_free(wl);
6399
6400 kfree(wl->buffer_32);
6401 kfree(wl->mbox);
6402 free_page((unsigned long)wl->fwlog);
6403 dev_kfree_skb(wl->dummy_packet);
6404 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6405
6406 wl1271_debugfs_exit(wl);
6407
6408 vfree(wl->fw);
6409 wl->fw = NULL;
6410 wl->fw_type = WL12XX_FW_TYPE_NONE;
6411 kfree(wl->nvs);
6412 wl->nvs = NULL;
6413
6414 kfree(wl->raw_fw_status);
6415 kfree(wl->fw_status);
6416 kfree(wl->tx_res_if);
6417 destroy_workqueue(wl->freezable_wq);
6418
6419 kfree(wl->priv);
6420 ieee80211_free_hw(wl->hw);
6421
6422 return 0;
6423 }
6424 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6425
6426 #ifdef CONFIG_PM
6427 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6428 .flags = WIPHY_WOWLAN_ANY,
6429 .n_patterns = WL1271_MAX_RX_FILTERS,
6430 .pattern_min_len = 1,
6431 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6432 };
6433 #endif
6434
6435 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6436 {
6437 return IRQ_WAKE_THREAD;
6438 }
6439
6440 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6441 {
6442 struct wl1271 *wl = context;
6443 struct platform_device *pdev = wl->pdev;
6444 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6445 struct resource *res;
6446
6447 int ret;
6448 irq_handler_t hardirq_fn = NULL;
6449
6450 if (fw) {
6451 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6452 if (!wl->nvs) {
6453 wl1271_error("Could not allocate nvs data");
6454 goto out;
6455 }
6456 wl->nvs_len = fw->size;
6457 } else if (pdev_data->family->nvs_name) {
6458 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6459 pdev_data->family->nvs_name);
6460 wl->nvs = NULL;
6461 wl->nvs_len = 0;
6462 } else {
6463 wl->nvs = NULL;
6464 wl->nvs_len = 0;
6465 }
6466
6467 ret = wl->ops->setup(wl);
6468 if (ret < 0)
6469 goto out_free_nvs;
6470
6471 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6472
6473 /* adjust some runtime configuration parameters */
6474 wlcore_adjust_conf(wl);
6475
6476 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6477 if (!res) {
6478 wl1271_error("Could not get IRQ resource");
6479 goto out_free_nvs;
6480 }
6481
6482 wl->irq = res->start;
6483 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6484 wl->if_ops = pdev_data->if_ops;
6485
6486 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6487 hardirq_fn = wlcore_hardirq;
6488 else
6489 wl->irq_flags |= IRQF_ONESHOT;
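	/*
	 * Edge-triggered lines get the trivial wlcore_hardirq() primary
	 * handler, which only returns IRQ_WAKE_THREAD, so the edge is
	 * consumed immediately and the real work runs in the threaded
	 * handler. Level-triggered lines have no primary handler and
	 * therefore need IRQF_ONESHOT to keep the line masked until the
	 * threaded handler completes.
	 */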
6490
6491 ret = wl12xx_set_power_on(wl);
6492 if (ret < 0)
6493 goto out_free_nvs;
6494
6495 ret = wl12xx_get_hw_info(wl);
6496 if (ret < 0) {
6497 wl1271_error("couldn't get hw info");
6498 wl1271_power_off(wl);
6499 goto out_free_nvs;
6500 }
6501
6502 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6503 wl->irq_flags, pdev->name, wl);
6504 if (ret < 0) {
6505 wl1271_error("interrupt configuration failed");
6506 wl1271_power_off(wl);
6507 goto out_free_nvs;
6508 }
6509
6510 #ifdef CONFIG_PM
6511 ret = enable_irq_wake(wl->irq);
6512 if (!ret) {
6513 wl->irq_wake_enabled = true;
6514 device_init_wakeup(wl->dev, 1);
6515 if (pdev_data->pwr_in_suspend)
6516 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6517 }
6518 #endif
6519 disable_irq(wl->irq);
6520 wl1271_power_off(wl);
6521
6522 ret = wl->ops->identify_chip(wl);
6523 if (ret < 0)
6524 goto out_irq;
6525
6526 ret = wl1271_init_ieee80211(wl);
6527 if (ret)
6528 goto out_irq;
6529
6530 ret = wl1271_register_hw(wl);
6531 if (ret)
6532 goto out_irq;
6533
6534 ret = wlcore_sysfs_init(wl);
6535 if (ret)
6536 goto out_unreg;
6537
6538 wl->initialized = true;
6539 goto out;
6540
6541 out_unreg:
6542 wl1271_unregister_hw(wl);
6543
6544 out_irq:
6545 free_irq(wl->irq, wl);
6546
6547 out_free_nvs:
6548 kfree(wl->nvs);
6549
6550 out:
6551 release_firmware(fw);
6552 complete_all(&wl->nvs_loading_complete);
6553 }
6554
6555 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6556 {
6557 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6558 const char *nvs_name;
6559 int ret = 0;
6560
6561 if (!wl->ops || !wl->ptable || !pdev_data)
6562 return -EINVAL;
6563
6564 wl->dev = &pdev->dev;
6565 wl->pdev = pdev;
6566 platform_set_drvdata(pdev, wl);
6567
6568 if (pdev_data->family && pdev_data->family->nvs_name) {
6569 nvs_name = pdev_data->family->nvs_name;
6570 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6571 nvs_name, &pdev->dev, GFP_KERNEL,
6572 wl, wlcore_nvs_cb);
6573 if (ret < 0) {
6574 wl1271_error("request_firmware_nowait failed for %s: %d",
6575 nvs_name, ret);
6576 complete_all(&wl->nvs_loading_complete);
6577 }
6578 } else {
6579 wlcore_nvs_cb(NULL, wl);
6580 }
6581
6582 return ret;
6583 }
6584 EXPORT_SYMBOL_GPL(wlcore_probe);
6585
6586 int wlcore_remove(struct platform_device *pdev)
6587 {
6588 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6589 struct wl1271 *wl = platform_get_drvdata(pdev);
6590
6591 if (pdev_data->family && pdev_data->family->nvs_name)
6592 wait_for_completion(&wl->nvs_loading_complete);
6593 if (!wl->initialized)
6594 return 0;
6595
6596 if (wl->irq_wake_enabled) {
6597 device_init_wakeup(wl->dev, 0);
6598 disable_irq_wake(wl->irq);
6599 }
6600 wl1271_unregister_hw(wl);
6601 free_irq(wl->irq, wl);
6602 wlcore_free_hw(wl);
6603
6604 return 0;
6605 }
6606 EXPORT_SYMBOL_GPL(wlcore_remove);
6607
6608 u32 wl12xx_debug_level = DEBUG_NONE;
6609 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6610 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6611 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6612
6613 module_param_named(fwlog, fwlog_param, charp, 0);
6614 MODULE_PARM_DESC(fwlog,
6615 "FW logger options: continuous, dbgpins or disable");
6616
6617 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6618 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6619
6620 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6621 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6622
6623 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6624 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
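/*
 * Usage sketch (illustrative, not part of the original source): these
 * parameters belong to the wlcore module and can be set at load time,
 * e.g.
 *
 *	modprobe wlcore fwlog=continuous no_recovery=1
 *
 * or via an options line in modprobe.d; wlcore is normally loaded
 * automatically as a dependency of the wl12xx/wl18xx glue modules.
 */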
6625
6626 MODULE_LICENSE("GPL");
6627 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6628 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");