1 /*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29
30 #include "wlcore.h"
31 #include "debug.h"
32 #include "wl12xx_80211.h"
33 #include "io.h"
34 #include "tx.h"
35 #include "ps.h"
36 #include "init.h"
37 #include "debugfs.h"
38 #include "testmode.h"
39 #include "vendor_cmd.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43
44 #define WL1271_BOOT_RETRIES 3
45
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
50
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 int ret;
60
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 return -EINVAL;
63
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 return 0;
66
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 return 0;
69
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 if (ret < 0)
72 return ret;
73
74 wl1271_info("Association completed.");
75 return 0;
76 }
77
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
80 {
81 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
82 struct wl1271 *wl = hw->priv;
83
84 /* copy the current dfs region */
85 if (request)
86 wl->dfs_region = request->dfs_region;
87
88 wlcore_regdomain_config(wl);
89 }
90
91 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
92 bool enable)
93 {
94 int ret = 0;
95
96 /* we should hold wl->mutex */
97 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
98 if (ret < 0)
99 goto out;
100
101 if (enable)
102 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
103 else
104 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 out:
106 return ret;
107 }
108
109 /*
110  * this function is called when the rx_streaming interval
111  * has been changed or rx_streaming should be disabled
112 */
113 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
114 {
115 int ret = 0;
116 int period = wl->conf.rx_streaming.interval;
117
118 /* don't reconfigure if rx_streaming is disabled */
119 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
120 goto out;
121
122 /* reconfigure/disable according to new streaming_period */
123 if (period &&
124 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
125 (wl->conf.rx_streaming.always ||
126 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
127 ret = wl1271_set_rx_streaming(wl, wlvif, true);
128 else {
129 ret = wl1271_set_rx_streaming(wl, wlvif, false);
130 /* don't cancel_work_sync since we might deadlock */
131 del_timer_sync(&wlvif->rx_streaming_timer);
132 }
133 out:
134 return ret;
135 }
136
137 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
138 {
139 int ret;
140 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
141 rx_streaming_enable_work);
142 struct wl1271 *wl = wlvif->wl;
143
144 mutex_lock(&wl->mutex);
145
146 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
147 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
148 (!wl->conf.rx_streaming.always &&
149 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 goto out;
151
152 if (!wl->conf.rx_streaming.interval)
153 goto out;
154
155 ret = wl1271_ps_elp_wakeup(wl);
156 if (ret < 0)
157 goto out;
158
159 ret = wl1271_set_rx_streaming(wl, wlvif, true);
160 if (ret < 0)
161 goto out_sleep;
162
163 /* stop it after some time of inactivity */
164 mod_timer(&wlvif->rx_streaming_timer,
165 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
166
167 out_sleep:
168 wl1271_ps_elp_sleep(wl);
169 out:
170 mutex_unlock(&wl->mutex);
171 }
172
173 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
174 {
175 int ret;
176 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
177 rx_streaming_disable_work);
178 struct wl1271 *wl = wlvif->wl;
179
180 mutex_lock(&wl->mutex);
181
182 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
183 goto out;
184
185 ret = wl1271_ps_elp_wakeup(wl);
186 if (ret < 0)
187 goto out;
188
189 ret = wl1271_set_rx_streaming(wl, wlvif, false);
190 if (ret)
191 goto out_sleep;
192
193 out_sleep:
194 wl1271_ps_elp_sleep(wl);
195 out:
196 mutex_unlock(&wl->mutex);
197 }
198
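/*
 * Runs in timer (atomic) context, so defer the actual disabling to the
 * workqueue, where wl->mutex can be taken and bus I/O is allowed.
 */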
199 static void wl1271_rx_streaming_timer(unsigned long data)
200 {
201 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
202 struct wl1271 *wl = wlvif->wl;
203 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
204 }
205
206 /* wl->mutex must be taken */
207 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
208 {
209 /* if the watchdog is not armed, don't do anything */
210 if (wl->tx_allocated_blocks == 0)
211 return;
212
213 cancel_delayed_work(&wl->tx_watchdog_work);
214 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
215 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
216 }
217
218 static void wlcore_rc_update_work(struct work_struct *work)
219 {
220 int ret;
221 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
222 rc_update_work);
223 struct wl1271 *wl = wlvif->wl;
224 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
225
226 mutex_lock(&wl->mutex);
227
228 if (unlikely(wl->state != WLCORE_STATE_ON))
229 goto out;
230
231 ret = wl1271_ps_elp_wakeup(wl);
232 if (ret < 0)
233 goto out;
234
235 if (ieee80211_vif_is_mesh(vif)) {
236 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
237 true, wlvif->sta.hlid);
238 if (ret < 0)
239 goto out_sleep;
240 } else {
241 wlcore_hw_sta_rc_update(wl, wlvif);
242 }
243
244 out_sleep:
245 wl1271_ps_elp_sleep(wl);
246 out:
247 mutex_unlock(&wl->mutex);
248 }
249
250 static void wl12xx_tx_watchdog_work(struct work_struct *work)
251 {
252 struct delayed_work *dwork;
253 struct wl1271 *wl;
254
255 dwork = to_delayed_work(work);
256 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
257
258 mutex_lock(&wl->mutex);
259
260 if (unlikely(wl->state != WLCORE_STATE_ON))
261 goto out;
262
263 /* Tx went out in the meantime - everything is ok */
264 if (unlikely(wl->tx_allocated_blocks == 0))
265 goto out;
266
267 /*
268 * if a ROC is in progress, we might not have any Tx for a long
269 * time (e.g. pending Tx on the non-ROC channels)
270 */
271 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
272 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
273 wl->conf.tx.tx_watchdog_timeout);
274 wl12xx_rearm_tx_watchdog_locked(wl);
275 goto out;
276 }
277
278 /*
279 * if a scan is in progress, we might not have any Tx for a long
280 * time
281 */
282 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
283 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
284 wl->conf.tx.tx_watchdog_timeout);
285 wl12xx_rearm_tx_watchdog_locked(wl);
286 goto out;
287 }
288
289 /*
290 * AP might cache a frame for a long time for a sleeping station,
291 * so rearm the timer if there's an AP interface with stations. If
292  * Tx is genuinely stuck we will hopefully discover it when all
293 * stations are removed due to inactivity.
294 */
295 if (wl->active_sta_count) {
296 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
297 " %d stations",
298 wl->conf.tx.tx_watchdog_timeout,
299 wl->active_sta_count);
300 wl12xx_rearm_tx_watchdog_locked(wl);
301 goto out;
302 }
303
304 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
305 wl->conf.tx.tx_watchdog_timeout);
306 wl12xx_queue_recovery_work(wl);
307
308 out:
309 mutex_unlock(&wl->mutex);
310 }
311
312 static void wlcore_adjust_conf(struct wl1271 *wl)
313 {
314
315 if (fwlog_param) {
316 if (!strcmp(fwlog_param, "continuous")) {
317 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
319 } else if (!strcmp(fwlog_param, "dbgpins")) {
320 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
321 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
322 } else if (!strcmp(fwlog_param, "disable")) {
323 wl->conf.fwlog.mem_blocks = 0;
324 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
325 } else {
326 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
327 }
328 }
329
330 if (bug_on_recovery != -1)
331 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
332
333 if (no_recovery != -1)
334 wl->conf.recovery.no_recovery = (u8) no_recovery;
335 }
336
337 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
338 struct wl12xx_vif *wlvif,
339 u8 hlid, u8 tx_pkts)
340 {
341 bool fw_ps;
342
343 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
344
345 /*
346  * Wake up from high-level PS if the STA is asleep with too few
347 * packets in FW or if the STA is awake.
348 */
349 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
350 wl12xx_ps_link_end(wl, wlvif, hlid);
351
352 /*
353 * Start high-level PS if the STA is asleep with enough blocks in FW.
354 * Make an exception if this is the only connected link. In this
355 * case FW-memory congestion is less of a problem.
356 * Note that a single connected STA means 2*ap_count + 1 active links,
357 * since we must account for the global and broadcast AP links
358 * for each AP. The "fw_ps" check assures us the other link is a STA
359 * connected to the AP. Otherwise the FW would not set the PSM bit.
360 */
361 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
362 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
363 wl12xx_ps_link_start(wl, wlvif, hlid, true);
364 }
365
366 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
367 struct wl12xx_vif *wlvif,
368 struct wl_fw_status *status)
369 {
370 unsigned long cur_fw_ps_map;
371 u8 hlid;
372
373 cur_fw_ps_map = status->link_ps_bitmap;
374 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
375 wl1271_debug(DEBUG_PSM,
376 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
377 wl->ap_fw_ps_map, cur_fw_ps_map,
378 wl->ap_fw_ps_map ^ cur_fw_ps_map);
379
380 wl->ap_fw_ps_map = cur_fw_ps_map;
381 }
382
383 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
384 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
385 wl->links[hlid].allocated_pkts);
386 }
387
388 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
389 {
390 struct wl12xx_vif *wlvif;
391 struct timespec ts;
392 u32 old_tx_blk_count = wl->tx_blocks_available;
393 int avail, freed_blocks;
394 int i;
395 int ret;
396 struct wl1271_link *lnk;
397
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
399 wl->raw_fw_status,
400 wl->fw_status_len, false);
401 if (ret < 0)
402 return ret;
403
404 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
405
406 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
407 "drv_rx_counter = %d, tx_results_counter = %d)",
408 status->intr,
409 status->fw_rx_counter,
410 status->drv_rx_counter,
411 status->tx_results_counter);
412
413 for (i = 0; i < NUM_TX_QUEUES; i++) {
414 /* prevent wrap-around in freed-packets counter */
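		/* (the released-packets counters are 8-bit rolling counters, hence the "& 0xff" delta) */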
415 wl->tx_allocated_pkts[i] -=
416 (status->counters.tx_released_pkts[i] -
417 wl->tx_pkts_freed[i]) & 0xff;
418
419 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
420 }
421
422
423 for_each_set_bit(i, wl->links_map, wl->num_links) {
424 u8 diff;
425 lnk = &wl->links[i];
426
427 /* prevent wrap-around in freed-packets counter */
428 diff = (status->counters.tx_lnk_free_pkts[i] -
429 lnk->prev_freed_pkts) & 0xff;
430
431 if (diff == 0)
432 continue;
433
434 lnk->allocated_pkts -= diff;
435 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
436
437 /* accumulate the prev_freed_pkts counter */
438 lnk->total_freed_pkts += diff;
439 }
440
441 /* prevent wrap-around in total blocks counter */
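	/*
	 * (total_released_blks is a 32-bit rolling counter, so when it has
	 * wrapped below tx_blocks_freed, add back 2^32 = 0x100000000)
	 */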
442 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
443 freed_blocks = status->total_released_blks -
444 wl->tx_blocks_freed;
445 else
446 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
447 status->total_released_blks;
448
449 wl->tx_blocks_freed = status->total_released_blks;
450
451 wl->tx_allocated_blocks -= freed_blocks;
452
453 /*
454 * If the FW freed some blocks:
455 * If we still have allocated blocks - re-arm the timer, Tx is
456 * not stuck. Otherwise, cancel the timer (no Tx currently).
457 */
458 if (freed_blocks) {
459 if (wl->tx_allocated_blocks)
460 wl12xx_rearm_tx_watchdog_locked(wl);
461 else
462 cancel_delayed_work(&wl->tx_watchdog_work);
463 }
464
465 avail = status->tx_total - wl->tx_allocated_blocks;
466
467 /*
468 * The FW might change the total number of TX memblocks before
469 * we get a notification about blocks being released. Thus, the
470 * available blocks calculation might yield a temporary result
471 * which is lower than the actual available blocks. Keeping in
472 * mind that only blocks that were allocated can be moved from
473 * TX to RX, tx_blocks_available should never decrease here.
474 */
475 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
476 avail);
477
478 /* if more blocks are available now, tx work can be scheduled */
479 if (wl->tx_blocks_available > old_tx_blk_count)
480 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
481
482 /* for AP update num of allocated TX blocks per link and ps status */
483 wl12xx_for_each_wlvif_ap(wl, wlvif) {
484 wl12xx_irq_update_links_status(wl, wlvif, status);
485 }
486
487 /* update the host-chipset time offset */
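	/*
	 * (">> 10" is a cheap nanoseconds-to-microseconds conversion, i.e.
	 * divide by 1024, roughly matching the firmware timestamp resolution)
	 */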
488 getnstimeofday(&ts);
489 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
490 (s64)(status->fw_localtime);
491
492 wl->fw_fast_lnk_map = status->link_fast_bitmap;
493
494 return 0;
495 }
496
497 static void wl1271_flush_deferred_work(struct wl1271 *wl)
498 {
499 struct sk_buff *skb;
500
501 /* Pass all received frames to the network stack */
502 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
503 ieee80211_rx_ni(wl->hw, skb);
504
505 /* Return sent skbs to the network stack */
506 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
507 ieee80211_tx_status_ni(wl->hw, skb);
508 }
509
510 static void wl1271_netstack_work(struct work_struct *work)
511 {
512 struct wl1271 *wl =
513 container_of(work, struct wl1271, netstack_work);
514
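	/* re-check: the irq path may defer more rx frames while we flush */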
515 do {
516 wl1271_flush_deferred_work(wl);
517 } while (skb_queue_len(&wl->deferred_rx_queue));
518 }
519
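/* upper bound on fw-status/rx/tx iterations per threaded-irq invocation */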
520 #define WL1271_IRQ_MAX_LOOPS 256
521
522 static int wlcore_irq_locked(struct wl1271 *wl)
523 {
524 int ret = 0;
525 u32 intr;
526 int loopcount = WL1271_IRQ_MAX_LOOPS;
527 bool done = false;
528 unsigned int defer_count;
529 unsigned long flags;
530
531 /*
532  * If an edge-triggered interrupt must be used, we cannot iterate
533 * more than once without introducing race conditions with the hardirq.
534 */
535 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
536 loopcount = 1;
537
538 wl1271_debug(DEBUG_IRQ, "IRQ work");
539
540 if (unlikely(wl->state != WLCORE_STATE_ON))
541 goto out;
542
543 ret = wl1271_ps_elp_wakeup(wl);
544 if (ret < 0)
545 goto out;
546
547 while (!done && loopcount--) {
548 /*
549 * In order to avoid a race with the hardirq, clear the flag
550 * before acknowledging the chip. Since the mutex is held,
551 * wl1271_ps_elp_wakeup cannot be called concurrently.
552 */
553 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
554 smp_mb__after_atomic();
555
556 ret = wlcore_fw_status(wl, wl->fw_status);
557 if (ret < 0)
558 goto out;
559
560 wlcore_hw_tx_immediate_compl(wl);
561
562 intr = wl->fw_status->intr;
563 intr &= WLCORE_ALL_INTR_MASK;
564 if (!intr) {
565 done = true;
566 continue;
567 }
568
569 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
570 wl1271_error("HW watchdog interrupt received! starting recovery.");
571 wl->watchdog_recovery = true;
572 ret = -EIO;
573
574 /* restarting the chip. ignore any other interrupt. */
575 goto out;
576 }
577
578 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
579 wl1271_error("SW watchdog interrupt received! "
580 "starting recovery.");
581 wl->watchdog_recovery = true;
582 ret = -EIO;
583
584 /* restarting the chip. ignore any other interrupt. */
585 goto out;
586 }
587
588 if (likely(intr & WL1271_ACX_INTR_DATA)) {
589 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
590
591 ret = wlcore_rx(wl, wl->fw_status);
592 if (ret < 0)
593 goto out;
594
595 /* Check if any tx blocks were freed */
596 spin_lock_irqsave(&wl->wl_lock, flags);
597 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
598 wl1271_tx_total_queue_count(wl) > 0) {
599 spin_unlock_irqrestore(&wl->wl_lock, flags);
600 /*
601 * In order to avoid starvation of the TX path,
602 * call the work function directly.
603 */
604 ret = wlcore_tx_work_locked(wl);
605 if (ret < 0)
606 goto out;
607 } else {
608 spin_unlock_irqrestore(&wl->wl_lock, flags);
609 }
610
611 /* check for tx results */
612 ret = wlcore_hw_tx_delayed_compl(wl);
613 if (ret < 0)
614 goto out;
615
616 /* Make sure the deferred queues don't get too long */
617 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
618 skb_queue_len(&wl->deferred_rx_queue);
619 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
620 wl1271_flush_deferred_work(wl);
621 }
622
623 if (intr & WL1271_ACX_INTR_EVENT_A) {
624 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
625 ret = wl1271_event_handle(wl, 0);
626 if (ret < 0)
627 goto out;
628 }
629
630 if (intr & WL1271_ACX_INTR_EVENT_B) {
631 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
632 ret = wl1271_event_handle(wl, 1);
633 if (ret < 0)
634 goto out;
635 }
636
637 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
638 wl1271_debug(DEBUG_IRQ,
639 "WL1271_ACX_INTR_INIT_COMPLETE");
640
641 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
642 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
643 }
644
645 wl1271_ps_elp_sleep(wl);
646
647 out:
648 return ret;
649 }
650
651 static irqreturn_t wlcore_irq(int irq, void *cookie)
652 {
653 int ret;
654 unsigned long flags;
655 struct wl1271 *wl = cookie;
656
657 /* complete the ELP completion */
658 spin_lock_irqsave(&wl->wl_lock, flags);
659 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
660 if (wl->elp_compl) {
661 complete(wl->elp_compl);
662 wl->elp_compl = NULL;
663 }
664
665 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
666 /* don't enqueue a work right now. mark it as pending */
667 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
668 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
669 disable_irq_nosync(wl->irq);
670 pm_wakeup_event(wl->dev, 0);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
672 return IRQ_HANDLED;
673 }
674 spin_unlock_irqrestore(&wl->wl_lock, flags);
675
676 /* TX might be handled here, avoid redundant work */
677 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
678 cancel_work_sync(&wl->tx_work);
679
680 mutex_lock(&wl->mutex);
681
682 ret = wlcore_irq_locked(wl);
683 if (ret)
684 wl12xx_queue_recovery_work(wl);
685
686 spin_lock_irqsave(&wl->wl_lock, flags);
687 /* In case TX was not handled here, queue TX work */
688 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
690 wl1271_tx_total_queue_count(wl) > 0)
691 ieee80211_queue_work(wl->hw, &wl->tx_work);
692 spin_unlock_irqrestore(&wl->wl_lock, flags);
693
694 mutex_unlock(&wl->mutex);
695
696 return IRQ_HANDLED;
697 }
698
699 struct vif_counter_data {
700 u8 counter;
701
702 struct ieee80211_vif *cur_vif;
703 bool cur_vif_running;
704 };
705
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 struct ieee80211_vif *vif)
708 {
709 struct vif_counter_data *counter = data;
710
711 counter->counter++;
712 if (counter->cur_vif == vif)
713 counter->cur_vif_running = true;
714 }
715
716 /* caller must not hold wl->mutex, as it might deadlock */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 struct ieee80211_vif *cur_vif,
719 struct vif_counter_data *data)
720 {
721 memset(data, 0, sizeof(*data));
722 data->cur_vif = cur_vif;
723
724 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 wl12xx_vif_count_iter, data);
726 }
727
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 {
730 const struct firmware *fw;
731 const char *fw_name;
732 enum wl12xx_fw_type fw_type;
733 int ret;
734
735 if (plt) {
736 fw_type = WL12XX_FW_TYPE_PLT;
737 fw_name = wl->plt_fw_name;
738 } else {
739 /*
740 * we can't call wl12xx_get_vif_count() here because
741 * wl->mutex is taken, so use the cached last_vif_count value
742 */
743 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 fw_type = WL12XX_FW_TYPE_MULTI;
745 fw_name = wl->mr_fw_name;
746 } else {
747 fw_type = WL12XX_FW_TYPE_NORMAL;
748 fw_name = wl->sr_fw_name;
749 }
750 }
751
752 if (wl->fw_type == fw_type)
753 return 0;
754
755 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756
757 ret = request_firmware(&fw, fw_name, wl->dev);
758
759 if (ret < 0) {
760 wl1271_error("could not get firmware %s: %d", fw_name, ret);
761 return ret;
762 }
763
764 if (fw->size % 4) {
765 wl1271_error("firmware size is not multiple of 32 bits: %zu",
766 fw->size);
767 ret = -EILSEQ;
768 goto out;
769 }
770
771 vfree(wl->fw);
772 wl->fw_type = WL12XX_FW_TYPE_NONE;
773 wl->fw_len = fw->size;
774 wl->fw = vmalloc(wl->fw_len);
775
776 if (!wl->fw) {
777 wl1271_error("could not allocate memory for the firmware");
778 ret = -ENOMEM;
779 goto out;
780 }
781
782 memcpy(wl->fw, fw->data, wl->fw_len);
783 ret = 0;
784 wl->fw_type = fw_type;
785 out:
786 release_firmware(fw);
787
788 return ret;
789 }
790
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 {
793 /* Avoid a recursive recovery */
794 if (wl->state == WLCORE_STATE_ON) {
795 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
796 &wl->flags));
797
798 wl->state = WLCORE_STATE_RESTARTING;
799 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 wl1271_ps_elp_wakeup(wl);
801 wlcore_disable_interrupts_nosync(wl);
802 ieee80211_queue_work(wl->hw, &wl->recovery_work);
803 }
804 }
805
806 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
807 {
808 size_t len;
809
810 /* Make sure we have enough room */
811 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
812
813 /* Fill the FW log file, consumed by the sysfs fwlog entry */
814 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
815 wl->fwlog_size += len;
816
817 return len;
818 }
819
820 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
821 {
822 u32 end_of_log = 0;
823
824 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
825 return;
826
827 wl1271_info("Reading FW panic log");
828
829 /*
830 * Make sure the chip is awake and the logger isn't active.
831          * Do not send a stop fwlog command if the fw is hung or if
832 * dbgpins are used (due to some fw bug).
833 */
834 if (wl1271_ps_elp_wakeup(wl))
835 return;
836 if (!wl->watchdog_recovery &&
837 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
838 wl12xx_cmd_stop_fwlog(wl);
839
840 /* Traverse the memory blocks linked list */
841 do {
842 end_of_log = wlcore_event_fw_logger(wl);
843 if (end_of_log == 0) {
844 msleep(100);
845 end_of_log = wlcore_event_fw_logger(wl);
846 }
847 } while (end_of_log != 0);
848 }
849
850 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
851 u8 hlid, struct ieee80211_sta *sta)
852 {
853 struct wl1271_station *wl_sta;
854 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
855
856 wl_sta = (void *)sta->drv_priv;
857 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
858
859 /*
860 * increment the initial seq number on recovery to account for
861 * transmitted packets that we haven't yet got in the FW status
862 */
863 if (wlvif->encryption_type == KEY_GEM)
864 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
865
866 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
867 wl_sta->total_freed_pkts += sqn_recovery_padding;
868 }
869
870 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
871 struct wl12xx_vif *wlvif,
872 u8 hlid, const u8 *addr)
873 {
874 struct ieee80211_sta *sta;
875 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
876
877 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
878 is_zero_ether_addr(addr)))
879 return;
880
881 rcu_read_lock();
882 sta = ieee80211_find_sta(vif, addr);
883 if (sta)
884 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
885 rcu_read_unlock();
886 }
887
888 static void wlcore_print_recovery(struct wl1271 *wl)
889 {
890 u32 pc = 0;
891 u32 hint_sts = 0;
892 int ret;
893
894 wl1271_info("Hardware recovery in progress. FW ver: %s",
895 wl->chip.fw_ver_str);
896
897 /* change partitions momentarily so we can read the FW pc */
898 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
899 if (ret < 0)
900 return;
901
902 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
903 if (ret < 0)
904 return;
905
906 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
907 if (ret < 0)
908 return;
909
910 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
911 pc, hint_sts, ++wl->recovery_count);
912
913 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
914 }
915
916
917 static void wl1271_recovery_work(struct work_struct *work)
918 {
919 struct wl1271 *wl =
920 container_of(work, struct wl1271, recovery_work);
921 struct wl12xx_vif *wlvif;
922 struct ieee80211_vif *vif;
923
924 mutex_lock(&wl->mutex);
925
926 if (wl->state == WLCORE_STATE_OFF || wl->plt)
927 goto out_unlock;
928
929 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
930 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
931 wl12xx_read_fwlog_panic(wl);
932 wlcore_print_recovery(wl);
933 }
934
935 BUG_ON(wl->conf.recovery.bug_on_recovery &&
936 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
937
938 if (wl->conf.recovery.no_recovery) {
939 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
940 goto out_unlock;
941 }
942
943 /* Prevent spurious TX during FW restart */
944 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
945
946 /* reboot the chipset */
947 while (!list_empty(&wl->wlvif_list)) {
948 wlvif = list_first_entry(&wl->wlvif_list,
949 struct wl12xx_vif, list);
950 vif = wl12xx_wlvif_to_vif(wlvif);
951
952 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
953 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
954 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
955 vif->bss_conf.bssid);
956 }
957
958 __wl1271_op_remove_interface(wl, vif, false);
959 }
960
961 wlcore_op_stop_locked(wl);
962
963 ieee80211_restart_hw(wl->hw);
964
965 /*
966          * It's safe to enable TX now - the queues are stopped after a request
967 * to restart the HW.
968 */
969 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970
971 out_unlock:
972 wl->watchdog_recovery = false;
973 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
974 mutex_unlock(&wl->mutex);
975 }
976
977 static int wlcore_fw_wakeup(struct wl1271 *wl)
978 {
979 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
980 }
981
982 static int wl1271_setup(struct wl1271 *wl)
983 {
984 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
985 if (!wl->raw_fw_status)
986 goto err;
987
988 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
989 if (!wl->fw_status)
990 goto err;
991
992 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
993 if (!wl->tx_res_if)
994 goto err;
995
996 return 0;
997 err:
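	/* tx_res_if is allocated last, so it is always NULL here and needs no freeing */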
998 kfree(wl->fw_status);
999 kfree(wl->raw_fw_status);
1000 return -ENOMEM;
1001 }
1002
1003 static int wl12xx_set_power_on(struct wl1271 *wl)
1004 {
1005 int ret;
1006
1007 msleep(WL1271_PRE_POWER_ON_SLEEP);
1008 ret = wl1271_power_on(wl);
1009 if (ret < 0)
1010 goto out;
1011 msleep(WL1271_POWER_ON_SLEEP);
1012 wl1271_io_reset(wl);
1013 wl1271_io_init(wl);
1014
1015 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1016 if (ret < 0)
1017 goto fail;
1018
1019 /* ELP module wake up */
1020 ret = wlcore_fw_wakeup(wl);
1021 if (ret < 0)
1022 goto fail;
1023
1024 out:
1025 return ret;
1026
1027 fail:
1028 wl1271_power_off(wl);
1029 return ret;
1030 }
1031
1032 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1033 {
1034 int ret = 0;
1035
1036 ret = wl12xx_set_power_on(wl);
1037 if (ret < 0)
1038 goto out;
1039
1040 /*
1041 * For wl127x based devices we could use the default block
1042 * size (512 bytes), but due to a bug in the sdio driver, we
1043 * need to set it explicitly after the chip is powered on. To
1044 * simplify the code and since the performance impact is
1045 * negligible, we use the same block size for all different
1046 * chip types.
1047 *
1048 * Check if the bus supports blocksize alignment and, if it
1049 * doesn't, make sure we don't have the quirk.
1050 */
1051 if (!wl1271_set_block_size(wl))
1052 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1053
1054 /* TODO: make sure the lower driver has set things up correctly */
1055
1056 ret = wl1271_setup(wl);
1057 if (ret < 0)
1058 goto out;
1059
1060 ret = wl12xx_fetch_firmware(wl, plt);
1061 if (ret < 0)
1062 goto out;
1063
1064 out:
1065 return ret;
1066 }
1067
1068 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1069 {
1070 int retries = WL1271_BOOT_RETRIES;
1071 struct wiphy *wiphy = wl->hw->wiphy;
1072
1073 static const char* const PLT_MODE[] = {
1074 "PLT_OFF",
1075 "PLT_ON",
1076 "PLT_FEM_DETECT",
1077 "PLT_CHIP_AWAKE"
1078 };
1079
1080 int ret;
1081
1082 mutex_lock(&wl->mutex);
1083
1084 wl1271_notice("power up");
1085
1086 if (wl->state != WLCORE_STATE_OFF) {
1087 wl1271_error("cannot go into PLT state because not "
1088 "in off state: %d", wl->state);
1089 ret = -EBUSY;
1090 goto out;
1091 }
1092
1093 /* Indicate to lower levels that we are now in PLT mode */
1094 wl->plt = true;
1095 wl->plt_mode = plt_mode;
1096
1097 while (retries) {
1098 retries--;
1099 ret = wl12xx_chip_wakeup(wl, true);
1100 if (ret < 0)
1101 goto power_off;
1102
1103 if (plt_mode != PLT_CHIP_AWAKE) {
1104 ret = wl->ops->plt_init(wl);
1105 if (ret < 0)
1106 goto power_off;
1107 }
1108
1109 wl->state = WLCORE_STATE_ON;
1110 wl1271_notice("firmware booted in PLT mode %s (%s)",
1111 PLT_MODE[plt_mode],
1112 wl->chip.fw_ver_str);
1113
1114 /* update hw/fw version info in wiphy struct */
1115 wiphy->hw_version = wl->chip.id;
1116 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1117 sizeof(wiphy->fw_version));
1118
1119 goto out;
1120
1121 power_off:
1122 wl1271_power_off(wl);
1123 }
1124
1125 wl->plt = false;
1126 wl->plt_mode = PLT_OFF;
1127
1128 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1129 WL1271_BOOT_RETRIES);
1130 out:
1131 mutex_unlock(&wl->mutex);
1132
1133 return ret;
1134 }
1135
1136 int wl1271_plt_stop(struct wl1271 *wl)
1137 {
1138 int ret = 0;
1139
1140 wl1271_notice("power down");
1141
1142 /*
1143 * Interrupts must be disabled before setting the state to OFF.
1144 * Otherwise, the interrupt handler might be called and exit without
1145 * reading the interrupt status.
1146 */
1147 wlcore_disable_interrupts(wl);
1148 mutex_lock(&wl->mutex);
1149 if (!wl->plt) {
1150 mutex_unlock(&wl->mutex);
1151
1152 /*
1153 * This will not necessarily enable interrupts as interrupts
1154 * may have been disabled when op_stop was called. It will,
1155 * however, balance the above call to disable_interrupts().
1156 */
1157 wlcore_enable_interrupts(wl);
1158
1159 wl1271_error("cannot power down because not in PLT "
1160 "state: %d", wl->state);
1161 ret = -EBUSY;
1162 goto out;
1163 }
1164
1165 mutex_unlock(&wl->mutex);
1166
1167 wl1271_flush_deferred_work(wl);
1168 cancel_work_sync(&wl->netstack_work);
1169 cancel_work_sync(&wl->recovery_work);
1170 cancel_delayed_work_sync(&wl->elp_work);
1171 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1172
1173 mutex_lock(&wl->mutex);
1174 wl1271_power_off(wl);
1175 wl->flags = 0;
1176 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1177 wl->state = WLCORE_STATE_OFF;
1178 wl->plt = false;
1179 wl->plt_mode = PLT_OFF;
1180 wl->rx_counter = 0;
1181 mutex_unlock(&wl->mutex);
1182
1183 out:
1184 return ret;
1185 }
1186
1187 static void wl1271_op_tx(struct ieee80211_hw *hw,
1188 struct ieee80211_tx_control *control,
1189 struct sk_buff *skb)
1190 {
1191 struct wl1271 *wl = hw->priv;
1192 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1193 struct ieee80211_vif *vif = info->control.vif;
1194 struct wl12xx_vif *wlvif = NULL;
1195 unsigned long flags;
1196 int q, mapping;
1197 u8 hlid;
1198
1199 if (!vif) {
1200 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1201 ieee80211_free_txskb(hw, skb);
1202 return;
1203 }
1204
1205 wlvif = wl12xx_vif_to_data(vif);
1206 mapping = skb_get_queue_mapping(skb);
1207 q = wl1271_tx_get_queue(mapping);
1208
1209 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1210
1211 spin_lock_irqsave(&wl->wl_lock, flags);
1212
1213 /*
1214 * drop the packet if the link is invalid or the queue is stopped
1215 * for any reason but watermark. Watermark is a "soft"-stop so we
1216 * allow these packets through.
1217 */
1218 if (hlid == WL12XX_INVALID_LINK_ID ||
1219 (!test_bit(hlid, wlvif->links_map)) ||
1220 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1221 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1222 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1223 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1224 ieee80211_free_txskb(hw, skb);
1225 goto out;
1226 }
1227
1228 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1229 hlid, q, skb->len);
1230 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1231
1232 wl->tx_queue_count[q]++;
1233 wlvif->tx_queue_count[q]++;
1234
1235 /*
1236          * The workqueue is slow to process the tx_queue and we need to stop
1237 * the queue here, otherwise the queue will get too long.
1238 */
1239 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1240 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1241 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1242 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1243 wlcore_stop_queue_locked(wl, wlvif, q,
1244 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1245 }
1246
1247 /*
1248 * The chip specific setup must run before the first TX packet -
1249 * before that, the tx_work will not be initialized!
1250 */
1251
1252 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1253 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1254 ieee80211_queue_work(wl->hw, &wl->tx_work);
1255
1256 out:
1257 spin_unlock_irqrestore(&wl->wl_lock, flags);
1258 }
1259
1260 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1261 {
1262 unsigned long flags;
1263 int q;
1264
1265 /* no need to queue a new dummy packet if one is already pending */
1266 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1267 return 0;
1268
1269 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1270
1271 spin_lock_irqsave(&wl->wl_lock, flags);
1272 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1273 wl->tx_queue_count[q]++;
1274 spin_unlock_irqrestore(&wl->wl_lock, flags);
1275
1276 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1277 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1278 return wlcore_tx_work_locked(wl);
1279
1280 /*
1281 * If the FW TX is busy, TX work will be scheduled by the threaded
1282 * interrupt handler function
1283 */
1284 return 0;
1285 }
1286
1287 /*
1288 * The size of the dummy packet should be at least 1400 bytes. However, in
1289  * order to minimize the number of bus transactions, aligning it to 512-byte
1290  * boundaries could be beneficial, performance-wise.
1291 */
1292 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
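/* (i.e. 1536 bytes with the current numbers) */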
1293
1294 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1295 {
1296 struct sk_buff *skb;
1297 struct ieee80211_hdr_3addr *hdr;
1298 unsigned int dummy_packet_size;
1299
1300 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1301 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1302
1303 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1304 if (!skb) {
1305 wl1271_warning("Failed to allocate a dummy packet skb");
1306 return NULL;
1307 }
1308
1309 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1310
1311 hdr = skb_put_zero(skb, sizeof(*hdr));
1312 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1313 IEEE80211_STYPE_NULLFUNC |
1314 IEEE80211_FCTL_TODS);
1315
1316 skb_put_zero(skb, dummy_packet_size);
1317
1318 /* Dummy packets require the TID to be management */
1319 skb->priority = WL1271_TID_MGMT;
1320
1321 /* Initialize all fields that might be used */
1322 skb_set_queue_mapping(skb, 0);
1323 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1324
1325 return skb;
1326 }
1327
1328
1329 #ifdef CONFIG_PM
1330 static int
1331 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1332 {
1333 int num_fields = 0, in_field = 0, fields_size = 0;
1334 int i, pattern_len = 0;
1335
1336 if (!p->mask) {
1337 wl1271_warning("No mask in WoWLAN pattern");
1338 return -EINVAL;
1339 }
1340
1341 /*
1342 * The pattern is broken up into segments of bytes at different offsets
1343 * that need to be checked by the FW filter. Each segment is called
1344 * a field in the FW API. We verify that the total number of fields
1345          * required for this pattern won't exceed the FW limit (8)
1346          * and that the total fields buffer won't exceed the FW limit.
1347 * Note that if there's a pattern which crosses Ethernet/IP header
1348 * boundary a new field is required.
1349 */
1350 for (i = 0; i < p->pattern_len; i++) {
1351 if (test_bit(i, (unsigned long *)p->mask)) {
1352 if (!in_field) {
1353 in_field = 1;
1354 pattern_len = 1;
1355 } else {
1356 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1357 num_fields++;
1358 fields_size += pattern_len +
1359 RX_FILTER_FIELD_OVERHEAD;
1360 pattern_len = 1;
1361 } else
1362 pattern_len++;
1363 }
1364 } else {
1365 if (in_field) {
1366 in_field = 0;
1367 fields_size += pattern_len +
1368 RX_FILTER_FIELD_OVERHEAD;
1369 num_fields++;
1370 }
1371 }
1372 }
1373
1374 if (in_field) {
1375 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1376 num_fields++;
1377 }
1378
1379 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1380 wl1271_warning("RX Filter too complex. Too many segments");
1381 return -EINVAL;
1382 }
1383
1384 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1385 wl1271_warning("RX filter pattern is too big");
1386 return -E2BIG;
1387 }
1388
1389 return 0;
1390 }
1391
1392 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1393 {
1394 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1395 }
1396
1397 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1398 {
1399 int i;
1400
1401 if (filter == NULL)
1402 return;
1403
1404 for (i = 0; i < filter->num_fields; i++)
1405 kfree(filter->fields[i].pattern);
1406
1407 kfree(filter);
1408 }
1409
1410 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1411 u16 offset, u8 flags,
1412 const u8 *pattern, u8 len)
1413 {
1414 struct wl12xx_rx_filter_field *field;
1415
1416 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1417 wl1271_warning("Max fields per RX filter. can't alloc another");
1418 return -EINVAL;
1419 }
1420
1421 field = &filter->fields[filter->num_fields];
1422
1423 field->pattern = kzalloc(len, GFP_KERNEL);
1424 if (!field->pattern) {
1425 wl1271_warning("Failed to allocate RX filter pattern");
1426 return -ENOMEM;
1427 }
1428
1429 filter->num_fields++;
1430
1431 field->offset = cpu_to_le16(offset);
1432 field->flags = flags;
1433 field->len = len;
1434 memcpy(field->pattern, pattern, len);
1435
1436 return 0;
1437 }
1438
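/*
 * Total size of the fields once flattened for the FW: each field keeps
 * its header, but the 'pattern' pointer is replaced by the pattern bytes
 * themselves - hence the subtraction of sizeof(u8 *).
 */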
1439 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1440 {
1441 int i, fields_size = 0;
1442
1443 for (i = 0; i < filter->num_fields; i++)
1444 fields_size += filter->fields[i].len +
1445 sizeof(struct wl12xx_rx_filter_field) -
1446 sizeof(u8 *);
1447
1448 return fields_size;
1449 }
1450
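/* serialize the fields back to back into buf, with the pattern bytes inline */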
1451 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1452 u8 *buf)
1453 {
1454 int i;
1455 struct wl12xx_rx_filter_field *field;
1456
1457 for (i = 0; i < filter->num_fields; i++) {
1458 field = (struct wl12xx_rx_filter_field *)buf;
1459
1460 field->offset = filter->fields[i].offset;
1461 field->flags = filter->fields[i].flags;
1462 field->len = filter->fields[i].len;
1463
1464 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1465 buf += sizeof(struct wl12xx_rx_filter_field) -
1466 sizeof(u8 *) + field->len;
1467 }
1468 }
1469
1470 /*
1471 * Allocates an RX filter returned through f
1472 * which needs to be freed using rx_filter_free()
1473 */
1474 static int
1475 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1476 struct wl12xx_rx_filter **f)
1477 {
1478 int i, j, ret = 0;
1479 struct wl12xx_rx_filter *filter;
1480 u16 offset;
1481 u8 flags, len;
1482
1483 filter = wl1271_rx_filter_alloc();
1484 if (!filter) {
1485 wl1271_warning("Failed to alloc rx filter");
1486 ret = -ENOMEM;
1487 goto err;
1488 }
1489
1490 i = 0;
1491 while (i < p->pattern_len) {
1492 if (!test_bit(i, (unsigned long *)p->mask)) {
1493 i++;
1494 continue;
1495 }
1496
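		/*
		 * Extend the field over consecutive masked bytes, but split
		 * at the Ethernet/IP header boundary - a single FW field
		 * cannot span it.
		 */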
1497 for (j = i; j < p->pattern_len; j++) {
1498 if (!test_bit(j, (unsigned long *)p->mask))
1499 break;
1500
1501 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1502 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1503 break;
1504 }
1505
1506 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1507 offset = i;
1508 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1509 } else {
1510 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1511 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1512 }
1513
1514 len = j - i;
1515
1516 ret = wl1271_rx_filter_alloc_field(filter,
1517 offset,
1518 flags,
1519 &p->pattern[i], len);
1520 if (ret)
1521 goto err;
1522
1523 i = j;
1524 }
1525
1526 filter->action = FILTER_SIGNAL;
1527
1528 *f = filter;
1529 return 0;
1530
1531 err:
1532 wl1271_rx_filter_free(filter);
1533 *f = NULL;
1534
1535 return ret;
1536 }
1537
1538 static int wl1271_configure_wowlan(struct wl1271 *wl,
1539 struct cfg80211_wowlan *wow)
1540 {
1541 int i, ret;
1542
1543 if (!wow || wow->any || !wow->n_patterns) {
1544 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1545 FILTER_SIGNAL);
1546 if (ret)
1547 goto out;
1548
1549 ret = wl1271_rx_filter_clear_all(wl);
1550 if (ret)
1551 goto out;
1552
1553 return 0;
1554 }
1555
1556 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1557 return -EINVAL;
1558
1559 /* Validate all incoming patterns before clearing current FW state */
1560 for (i = 0; i < wow->n_patterns; i++) {
1561 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1562 if (ret) {
1563 wl1271_warning("Bad wowlan pattern %d", i);
1564 return ret;
1565 }
1566 }
1567
1568 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1569 if (ret)
1570 goto out;
1571
1572 ret = wl1271_rx_filter_clear_all(wl);
1573 if (ret)
1574 goto out;
1575
1576 /* Translate WoWLAN patterns into filters */
1577 for (i = 0; i < wow->n_patterns; i++) {
1578 struct cfg80211_pkt_pattern *p;
1579 struct wl12xx_rx_filter *filter = NULL;
1580
1581 p = &wow->patterns[i];
1582
1583 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1584 if (ret) {
1585 wl1271_warning("Failed to create an RX filter from "
1586 "wowlan pattern %d", i);
1587 goto out;
1588 }
1589
1590 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1591
1592 wl1271_rx_filter_free(filter);
1593 if (ret)
1594 goto out;
1595 }
1596
1597 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1598
1599 out:
1600 return ret;
1601 }
1602
1603 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1604 struct wl12xx_vif *wlvif,
1605 struct cfg80211_wowlan *wow)
1606 {
1607 int ret = 0;
1608
1609 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1610 goto out;
1611
1612 ret = wl1271_configure_wowlan(wl, wow);
1613 if (ret < 0)
1614 goto out;
1615
1616 if ((wl->conf.conn.suspend_wake_up_event ==
1617 wl->conf.conn.wake_up_event) &&
1618 (wl->conf.conn.suspend_listen_interval ==
1619 wl->conf.conn.listen_interval))
1620 goto out;
1621
1622 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1623 wl->conf.conn.suspend_wake_up_event,
1624 wl->conf.conn.suspend_listen_interval);
1625
1626 if (ret < 0)
1627 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1628 out:
1629 return ret;
1630
1631 }
1632
1633 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1634 struct wl12xx_vif *wlvif,
1635 struct cfg80211_wowlan *wow)
1636 {
1637 int ret = 0;
1638
1639 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1640 goto out;
1641
1642 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1643 if (ret < 0)
1644 goto out;
1645
1646 ret = wl1271_configure_wowlan(wl, wow);
1647 if (ret < 0)
1648 goto out;
1649
1650 out:
1651 return ret;
1652
1653 }
1654
1655 static int wl1271_configure_suspend(struct wl1271 *wl,
1656 struct wl12xx_vif *wlvif,
1657 struct cfg80211_wowlan *wow)
1658 {
1659 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1660 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1661 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1662 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1663 return 0;
1664 }
1665
1666 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1667 {
1668 int ret = 0;
1669 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1670 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1671
1672 if ((!is_ap) && (!is_sta))
1673 return;
1674
1675 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1676 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1677 return;
1678
1679 wl1271_configure_wowlan(wl, NULL);
1680
1681 if (is_sta) {
1682 if ((wl->conf.conn.suspend_wake_up_event ==
1683 wl->conf.conn.wake_up_event) &&
1684 (wl->conf.conn.suspend_listen_interval ==
1685 wl->conf.conn.listen_interval))
1686 return;
1687
1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 wl->conf.conn.wake_up_event,
1690 wl->conf.conn.listen_interval);
1691
1692 if (ret < 0)
1693 wl1271_error("resume: wake up conditions failed: %d",
1694 ret);
1695
1696 } else if (is_ap) {
1697 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1698 }
1699 }
1700
1701 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1702 struct cfg80211_wowlan *wow)
1703 {
1704 struct wl1271 *wl = hw->priv;
1705 struct wl12xx_vif *wlvif;
1706 int ret;
1707
1708 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1709 WARN_ON(!wow);
1710
1711 /* we want to perform the recovery before suspending */
1712 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1713 wl1271_warning("postponing suspend to perform recovery");
1714 return -EBUSY;
1715 }
1716
1717 wl1271_tx_flush(wl);
1718
1719 mutex_lock(&wl->mutex);
1720
1721 ret = wl1271_ps_elp_wakeup(wl);
1722 if (ret < 0) {
1723 mutex_unlock(&wl->mutex);
1724 return ret;
1725 }
1726
1727 wl->wow_enabled = true;
1728 wl12xx_for_each_wlvif(wl, wlvif) {
1729 if (wlcore_is_p2p_mgmt(wlvif))
1730 continue;
1731
1732 ret = wl1271_configure_suspend(wl, wlvif, wow);
1733 if (ret < 0) {
1734 mutex_unlock(&wl->mutex);
1735 wl1271_warning("couldn't prepare device to suspend");
1736 return ret;
1737 }
1738 }
1739
1740 /* disable fast link flow control notifications from FW */
1741 ret = wlcore_hw_interrupt_notify(wl, false);
1742 if (ret < 0)
1743 goto out_sleep;
1744
1745 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1746 ret = wlcore_hw_rx_ba_filter(wl,
1747 !!wl->conf.conn.suspend_rx_ba_activity);
1748 if (ret < 0)
1749 goto out_sleep;
1750
1751 out_sleep:
1752 wl1271_ps_elp_sleep(wl);
1753 mutex_unlock(&wl->mutex);
1754
1755 if (ret < 0) {
1756 wl1271_warning("couldn't prepare device to suspend");
1757 return ret;
1758 }
1759
1760 /* flush any remaining work */
1761 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1762
1763 /*
1764 * disable and re-enable interrupts in order to flush
1765 * the threaded_irq
1766 */
1767 wlcore_disable_interrupts(wl);
1768
1769 /*
1770 * set suspended flag to avoid triggering a new threaded_irq
1771 * work. no need for spinlock as interrupts are disabled.
1772 */
1773 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1774
1775 wlcore_enable_interrupts(wl);
1776 flush_work(&wl->tx_work);
1777 flush_delayed_work(&wl->elp_work);
1778
1779 /*
1780 * Cancel the watchdog even if above tx_flush failed. We will detect
1781 * it on resume anyway.
1782 */
1783 cancel_delayed_work(&wl->tx_watchdog_work);
1784
1785 return 0;
1786 }
1787
1788 static int wl1271_op_resume(struct ieee80211_hw *hw)
1789 {
1790 struct wl1271 *wl = hw->priv;
1791 struct wl12xx_vif *wlvif;
1792 unsigned long flags;
1793 bool run_irq_work = false, pending_recovery;
1794 int ret;
1795
1796 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1797 wl->wow_enabled);
1798 WARN_ON(!wl->wow_enabled);
1799
1800 /*
1801 * re-enable irq_work enqueuing, and call irq_work directly if
1802 * there is a pending work.
1803 */
1804 spin_lock_irqsave(&wl->wl_lock, flags);
1805 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1806 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1807 run_irq_work = true;
1808 spin_unlock_irqrestore(&wl->wl_lock, flags);
1809
1810 mutex_lock(&wl->mutex);
1811
1812 /* test the recovery flag before calling any SDIO functions */
1813 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1814 &wl->flags);
1815
1816 if (run_irq_work) {
1817 wl1271_debug(DEBUG_MAC80211,
1818 "run postponed irq_work directly");
1819
1820 /* don't talk to the HW if recovery is pending */
1821 if (!pending_recovery) {
1822 ret = wlcore_irq_locked(wl);
1823 if (ret)
1824 wl12xx_queue_recovery_work(wl);
1825 }
1826
1827 wlcore_enable_interrupts(wl);
1828 }
1829
1830 if (pending_recovery) {
1831 wl1271_warning("queuing forgotten recovery on resume");
1832 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1833 goto out_sleep;
1834 }
1835
1836 ret = wl1271_ps_elp_wakeup(wl);
1837 if (ret < 0)
1838 goto out;
1839
1840 wl12xx_for_each_wlvif(wl, wlvif) {
1841 if (wlcore_is_p2p_mgmt(wlvif))
1842 continue;
1843
1844 wl1271_configure_resume(wl, wlvif);
1845 }
1846
1847 ret = wlcore_hw_interrupt_notify(wl, true);
1848 if (ret < 0)
1849 goto out_sleep;
1850
1851 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1852 ret = wlcore_hw_rx_ba_filter(wl, false);
1853 if (ret < 0)
1854 goto out_sleep;
1855
1856 out_sleep:
1857 wl1271_ps_elp_sleep(wl);
1858
1859 out:
1860 wl->wow_enabled = false;
1861
1862 /*
1863 * Set a flag to re-init the watchdog on the first Tx after resume.
1864 * That way we avoid possible conditions where Tx-complete interrupts
1865 * fail to arrive and we perform a spurious recovery.
1866 */
1867 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1868 mutex_unlock(&wl->mutex);
1869
1870 return 0;
1871 }
1872 #endif
1873
1874 static int wl1271_op_start(struct ieee80211_hw *hw)
1875 {
1876 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1877
1878 /*
1879 * We have to delay the booting of the hardware because
1880 * we need to know the local MAC address before downloading and
1881 * initializing the firmware. The MAC address cannot be changed
1882 * after boot, and without the proper MAC address, the firmware
1883 * will not function properly.
1884 *
1885 * The MAC address is first known when the corresponding interface
1886 * is added. That is where we will initialize the hardware.
1887 */
1888
1889 return 0;
1890 }
1891
1892 static void wlcore_op_stop_locked(struct wl1271 *wl)
1893 {
1894 int i;
1895
1896 if (wl->state == WLCORE_STATE_OFF) {
1897 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1898 &wl->flags))
1899 wlcore_enable_interrupts(wl);
1900
1901 return;
1902 }
1903
1904 /*
1905 * this must be before the cancel_work calls below, so that the work
1906 * functions don't perform further work.
1907 */
1908 wl->state = WLCORE_STATE_OFF;
1909
1910 /*
1911 * Use the nosync variant to disable interrupts, so the mutex could be
1912 * held while doing so without deadlocking.
1913 */
1914 wlcore_disable_interrupts_nosync(wl);
1915
1916 mutex_unlock(&wl->mutex);
1917
1918 wlcore_synchronize_interrupts(wl);
1919 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1920 cancel_work_sync(&wl->recovery_work);
1921 wl1271_flush_deferred_work(wl);
1922 cancel_delayed_work_sync(&wl->scan_complete_work);
1923 cancel_work_sync(&wl->netstack_work);
1924 cancel_work_sync(&wl->tx_work);
1925 cancel_delayed_work_sync(&wl->elp_work);
1926 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1927
1928 /* let's notify MAC80211 about the remaining pending TX frames */
1929 mutex_lock(&wl->mutex);
1930 wl12xx_tx_reset(wl);
1931
1932 wl1271_power_off(wl);
1933 /*
1934 * In case a recovery was scheduled, interrupts were disabled to avoid
1935 * an interrupt storm. Now that the power is down, it is safe to
1936 * re-enable interrupts to balance the disable depth
1937 */
1938 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1939 wlcore_enable_interrupts(wl);
1940
1941 wl->band = NL80211_BAND_2GHZ;
1942
1943 wl->rx_counter = 0;
1944 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1945 wl->channel_type = NL80211_CHAN_NO_HT;
1946 wl->tx_blocks_available = 0;
1947 wl->tx_allocated_blocks = 0;
1948 wl->tx_results_count = 0;
1949 wl->tx_packets_count = 0;
1950 wl->time_offset = 0;
1951 wl->ap_fw_ps_map = 0;
1952 wl->ap_ps_map = 0;
1953 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1954 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1955 memset(wl->links_map, 0, sizeof(wl->links_map));
1956 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1957 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1958 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1959 wl->active_sta_count = 0;
1960 wl->active_link_count = 0;
1961
1962 /* The system link is always allocated */
1963 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1964 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1965 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1966
1967 /*
1968 * this is performed after the cancel_work calls and the associated
1969 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1970 * get executed before all these vars have been reset.
1971 */
1972 wl->flags = 0;
1973
1974 wl->tx_blocks_freed = 0;
1975
1976 for (i = 0; i < NUM_TX_QUEUES; i++) {
1977 wl->tx_pkts_freed[i] = 0;
1978 wl->tx_allocated_pkts[i] = 0;
1979 }
1980
1981 wl1271_debugfs_reset(wl);
1982
1983 kfree(wl->raw_fw_status);
1984 wl->raw_fw_status = NULL;
1985 kfree(wl->fw_status);
1986 wl->fw_status = NULL;
1987 kfree(wl->tx_res_if);
1988 wl->tx_res_if = NULL;
1989 kfree(wl->target_mem_map);
1990 wl->target_mem_map = NULL;
1991
1992 /*
1993          * FW channels must be re-calibrated after recovery;
1994          * save the current Reg-Domain channel configuration and clear it.
1995 */
1996 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1997 sizeof(wl->reg_ch_conf_pending));
1998 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1999 }
2000
2001 static void wlcore_op_stop(struct ieee80211_hw *hw)
2002 {
2003 struct wl1271 *wl = hw->priv;
2004
2005 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2006
2007 mutex_lock(&wl->mutex);
2008
2009 wlcore_op_stop_locked(wl);
2010
2011 mutex_unlock(&wl->mutex);
2012 }
2013
2014 static void wlcore_channel_switch_work(struct work_struct *work)
2015 {
2016 struct delayed_work *dwork;
2017 struct wl1271 *wl;
2018 struct ieee80211_vif *vif;
2019 struct wl12xx_vif *wlvif;
2020 int ret;
2021
2022 dwork = to_delayed_work(work);
2023 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2024 wl = wlvif->wl;
2025
2026 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2027
2028 mutex_lock(&wl->mutex);
2029
2030 if (unlikely(wl->state != WLCORE_STATE_ON))
2031 goto out;
2032
2033 /* check the channel switch is still ongoing */
2034 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2035 goto out;
2036
2037 vif = wl12xx_wlvif_to_vif(wlvif);
2038 ieee80211_chswitch_done(vif, false);
2039
2040 ret = wl1271_ps_elp_wakeup(wl);
2041 if (ret < 0)
2042 goto out;
2043
2044 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2045
2046 wl1271_ps_elp_sleep(wl);
2047 out:
2048 mutex_unlock(&wl->mutex);
2049 }
2050
2051 static void wlcore_connection_loss_work(struct work_struct *work)
2052 {
2053 struct delayed_work *dwork;
2054 struct wl1271 *wl;
2055 struct ieee80211_vif *vif;
2056 struct wl12xx_vif *wlvif;
2057
2058 dwork = to_delayed_work(work);
2059 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2060 wl = wlvif->wl;
2061
2062 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2063
2064 mutex_lock(&wl->mutex);
2065
2066 if (unlikely(wl->state != WLCORE_STATE_ON))
2067 goto out;
2068
2069 /* Call mac80211 connection loss */
2070 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2071 goto out;
2072
2073 vif = wl12xx_wlvif_to_vif(wlvif);
2074 ieee80211_connection_loss(vif);
2075 out:
2076 mutex_unlock(&wl->mutex);
2077 }
2078
2079 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2080 {
2081 struct delayed_work *dwork;
2082 struct wl1271 *wl;
2083 struct wl12xx_vif *wlvif;
2084 unsigned long time_spare;
2085 int ret;
2086
2087 dwork = to_delayed_work(work);
2088 wlvif = container_of(dwork, struct wl12xx_vif,
2089 pending_auth_complete_work);
2090 wl = wlvif->wl;
2091
2092 mutex_lock(&wl->mutex);
2093
2094 if (unlikely(wl->state != WLCORE_STATE_ON))
2095 goto out;
2096
2097 /*
2098 * Make sure a second really passed since the last auth reply. Maybe
2099 * a second auth reply arrived while we were stuck on the mutex.
2100 * Check for a little less than the timeout to protect from scheduler
2101 * irregularities.
2102 */
2103 time_spare = jiffies +
2104 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2105 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2106 goto out;
2107
2108 ret = wl1271_ps_elp_wakeup(wl);
2109 if (ret < 0)
2110 goto out;
2111
2112 /* cancel the ROC if active */
2113 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2114
2115 wl1271_ps_elp_sleep(wl);
2116 out:
2117 mutex_unlock(&wl->mutex);
2118 }
2119
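/*
 * Rate-policy indices are handed out from a small per-device bitmap
 * (wl->rate_policies_map). Callers keep the returned index in the wlvif
 * and release it with wl12xx_free_rate_policy() on teardown, e.g.
 * wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx) in
 * wl12xx_init_vif_data() below.
 */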
2120 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2121 {
2122 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2123 WL12XX_MAX_RATE_POLICIES);
2124 if (policy >= WL12XX_MAX_RATE_POLICIES)
2125 return -EBUSY;
2126
2127 __set_bit(policy, wl->rate_policies_map);
2128 *idx = policy;
2129 return 0;
2130 }
2131
2132 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2133 {
2134 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2135 return;
2136
2137 __clear_bit(*idx, wl->rate_policies_map);
2138 *idx = WL12XX_MAX_RATE_POLICIES;
2139 }
2140
2141 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2142 {
2143 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2144 WLCORE_MAX_KLV_TEMPLATES);
2145 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2146 return -EBUSY;
2147
2148 __set_bit(policy, wl->klv_templates_map);
2149 *idx = policy;
2150 return 0;
2151 }
2152
2153 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2154 {
2155 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2156 return;
2157
2158 __clear_bit(*idx, wl->klv_templates_map);
2159 *idx = WLCORE_MAX_KLV_TEMPLATES;
2160 }
2161
2162 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2163 {
2164 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2165
2166 switch (wlvif->bss_type) {
2167 case BSS_TYPE_AP_BSS:
2168 if (wlvif->p2p)
2169 return WL1271_ROLE_P2P_GO;
2170 else if (ieee80211_vif_is_mesh(vif))
2171 return WL1271_ROLE_MESH_POINT;
2172 else
2173 return WL1271_ROLE_AP;
2174
2175 case BSS_TYPE_STA_BSS:
2176 if (wlvif->p2p)
2177 return WL1271_ROLE_P2P_CL;
2178 else
2179 return WL1271_ROLE_STA;
2180
2181 case BSS_TYPE_IBSS:
2182 return WL1271_ROLE_IBSS;
2183
2184 default:
2185 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2186 }
2187 return WL12XX_INVALID_ROLE_TYPE;
2188 }
2189
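/*
 * Reset the non-persistent part of the vif data and derive the wlcore
 * BSS type, role ids, rate policies and keep-alive template from the
 * mac80211 interface type.
 */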
2190 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2191 {
2192 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2193 int i;
2194
2195 /* clear everything but the persistent data */
2196 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2197
2198 switch (ieee80211_vif_type_p2p(vif)) {
2199 case NL80211_IFTYPE_P2P_CLIENT:
2200 wlvif->p2p = 1;
2201 /* fall-through */
2202 case NL80211_IFTYPE_STATION:
2203 case NL80211_IFTYPE_P2P_DEVICE:
2204 wlvif->bss_type = BSS_TYPE_STA_BSS;
2205 break;
2206 case NL80211_IFTYPE_ADHOC:
2207 wlvif->bss_type = BSS_TYPE_IBSS;
2208 break;
2209 case NL80211_IFTYPE_P2P_GO:
2210 wlvif->p2p = 1;
2211 /* fall-through */
2212 case NL80211_IFTYPE_AP:
2213 case NL80211_IFTYPE_MESH_POINT:
2214 wlvif->bss_type = BSS_TYPE_AP_BSS;
2215 break;
2216 default:
2217 wlvif->bss_type = MAX_BSS_TYPE;
2218 return -EOPNOTSUPP;
2219 }
2220
2221 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2222 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2223 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2224
2225 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2226 wlvif->bss_type == BSS_TYPE_IBSS) {
2227 /* init sta/ibss data */
2228 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2229 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2230 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2231 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2232 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2233 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2234 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2235 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2236 } else {
2237 /* init ap data */
2238 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2239 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2240 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2241 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2242 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2243 wl12xx_allocate_rate_policy(wl,
2244 &wlvif->ap.ucast_rate_idx[i]);
2245 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2246 /*
2247 * TODO: check if basic_rate shouldn't be
2248 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2249 * instead (the same thing for STA above).
2250 */
2251 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2252 /* TODO: this seems to be used only for STA, check it */
2253 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2254 }
2255
2256 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2257 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2258 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2259
2260 /*
2261 * mac80211 configures some values globally, while we treat them
2262 * per-interface. thus, on init, we have to copy them from wl
2263 */
2264 wlvif->band = wl->band;
2265 wlvif->channel = wl->channel;
2266 wlvif->power_level = wl->power_level;
2267 wlvif->channel_type = wl->channel_type;
2268
2269 INIT_WORK(&wlvif->rx_streaming_enable_work,
2270 wl1271_rx_streaming_enable_work);
2271 INIT_WORK(&wlvif->rx_streaming_disable_work,
2272 wl1271_rx_streaming_disable_work);
2273 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2274 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2275 wlcore_channel_switch_work);
2276 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2277 wlcore_connection_loss_work);
2278 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2279 wlcore_pending_auth_complete_work);
2280 INIT_LIST_HEAD(&wlvif->list);
2281
2282 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2283 (unsigned long) wlvif);
2284 return 0;
2285 }
2286
2287 static int wl12xx_init_fw(struct wl1271 *wl)
2288 {
2289 int retries = WL1271_BOOT_RETRIES;
2290 bool booted = false;
2291 struct wiphy *wiphy = wl->hw->wiphy;
2292 int ret;
2293
2294 while (retries) {
2295 retries--;
2296 ret = wl12xx_chip_wakeup(wl, false);
2297 if (ret < 0)
2298 goto power_off;
2299
2300 ret = wl->ops->boot(wl);
2301 if (ret < 0)
2302 goto power_off;
2303
2304 ret = wl1271_hw_init(wl);
2305 if (ret < 0)
2306 goto irq_disable;
2307
2308 booted = true;
2309 break;
2310
2311 irq_disable:
2312 mutex_unlock(&wl->mutex);
2313 /* Unlocking the mutex in the middle of handling is
2314 inherently unsafe. In this case we deem it safe to do,
2315 because we need to let any possibly pending IRQ out of
2316 the system (and while we are WLCORE_STATE_OFF the IRQ
2317 work function will not do anything.) Also, any other
2318 possible concurrent operations will fail due to the
2319 current state, hence the wl1271 struct should be safe. */
2320 wlcore_disable_interrupts(wl);
2321 wl1271_flush_deferred_work(wl);
2322 cancel_work_sync(&wl->netstack_work);
2323 mutex_lock(&wl->mutex);
2324 power_off:
2325 wl1271_power_off(wl);
2326 }
2327
2328 if (!booted) {
2329 wl1271_error("firmware boot failed despite %d retries",
2330 WL1271_BOOT_RETRIES);
2331 goto out;
2332 }
2333
2334 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2335
2336 /* update hw/fw version info in wiphy struct */
2337 wiphy->hw_version = wl->chip.id;
2338 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2339 sizeof(wiphy->fw_version));
2340
2341 /*
2342 * Now we know if 11a is supported (info from the NVS), so disable
2343 * 11a channels if not supported
2344 */
2345 if (!wl->enable_11a)
2346 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2347
2348 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2349 wl->enable_11a ? "" : "not ");
2350
2351 wl->state = WLCORE_STATE_ON;
2352 out:
2353 return ret;
2354 }
2355
2356 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2357 {
2358 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2359 }
2360
2361 /*
2362 * Check whether a fw switch (i.e. moving from one loaded
2363 * fw to another) is needed. This function is also responsible
2364 * for updating wl->last_vif_count, so it must be called before
2365 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2366 * will be used).
2367 */
2368 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2369 struct vif_counter_data vif_counter_data,
2370 bool add)
2371 {
2372 enum wl12xx_fw_type current_fw = wl->fw_type;
2373 u8 vif_count = vif_counter_data.counter;
2374
2375 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2376 return false;
2377
2378 /* increase the vif count if this is a new vif */
2379 if (add && !vif_counter_data.cur_vif_running)
2380 vif_count++;
2381
2382 wl->last_vif_count = vif_count;
2383
2384 /* no need for fw change if the device is OFF */
2385 if (wl->state == WLCORE_STATE_OFF)
2386 return false;
2387
2388 /* no need for fw change if a single fw is used */
2389 if (!wl->mr_fw_name)
2390 return false;
2391
2392 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2393 return true;
2394 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2395 return true;
2396
2397 return false;
2398 }
2399
2400 /*
2401 * Enter "forced psm". Make sure the sta is in psm against the ap,
2402 * to make the fw switch a bit more disconnection-persistent.
2403 */
2404 static void wl12xx_force_active_psm(struct wl1271 *wl)
2405 {
2406 struct wl12xx_vif *wlvif;
2407
2408 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2409 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2410 }
2411 }
2412
2413 struct wlcore_hw_queue_iter_data {
2414 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2415 /* current vif */
2416 struct ieee80211_vif *vif;
2417 /* is the current vif among those iterated */
2418 bool cur_running;
2419 };
2420
2421 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2422 struct ieee80211_vif *vif)
2423 {
2424 struct wlcore_hw_queue_iter_data *iter_data = data;
2425
2426 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2427 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2428 return;
2429
2430 if (iter_data->cur_running || vif == iter_data->vif) {
2431 iter_data->cur_running = true;
2432 return;
2433 }
2434
2435 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2436 }
2437
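/*
 * Each vif gets a block of NUM_TX_QUEUES consecutive mac80211 hw queues.
 * The queues above NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES are reserved
 * for the per-interface cab (content-after-beacon) queues used in AP mode.
 */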
2438 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2439 struct wl12xx_vif *wlvif)
2440 {
2441 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2442 struct wlcore_hw_queue_iter_data iter_data = {};
2443 int i, q_base;
2444
2445 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2446 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2447 return 0;
2448 }
2449
2450 iter_data.vif = vif;
2451
2452 /* mark all bits taken by active interfaces */
2453 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2454 IEEE80211_IFACE_ITER_RESUME_ALL,
2455 wlcore_hw_queue_iter, &iter_data);
2456
2457 /* the current vif is already running in mac80211 (resume/recovery) */
2458 if (iter_data.cur_running) {
2459 wlvif->hw_queue_base = vif->hw_queue[0];
2460 wl1271_debug(DEBUG_MAC80211,
2461 "using pre-allocated hw queue base %d",
2462 wlvif->hw_queue_base);
2463
2464 /* interface might have changed type */
2465 goto adjust_cab_queue;
2466 }
2467
2468 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2469 WLCORE_NUM_MAC_ADDRESSES);
2470 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2471 return -EBUSY;
2472
2473 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2474 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2475 wlvif->hw_queue_base);
2476
2477 for (i = 0; i < NUM_TX_QUEUES; i++) {
2478 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2479 /* register hw queues in mac80211 */
2480 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2481 }
2482
2483 adjust_cab_queue:
2484 /* the last places are reserved for cab queues per interface */
2485 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2486 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2487 wlvif->hw_queue_base / NUM_TX_QUEUES;
2488 else
2489 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2490
2491 return 0;
2492 }
2493
2494 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2495 struct ieee80211_vif *vif)
2496 {
2497 struct wl1271 *wl = hw->priv;
2498 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2499 struct vif_counter_data vif_count;
2500 int ret = 0;
2501 u8 role_type;
2502
2503 if (wl->plt) {
2504 wl1271_error("Adding Interface not allowed while in PLT mode");
2505 return -EBUSY;
2506 }
2507
2508 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2509 IEEE80211_VIF_SUPPORTS_UAPSD |
2510 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2511
2512 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2513 ieee80211_vif_type_p2p(vif), vif->addr);
2514
2515 wl12xx_get_vif_count(hw, vif, &vif_count);
2516
2517 mutex_lock(&wl->mutex);
2518 ret = wl1271_ps_elp_wakeup(wl);
2519 if (ret < 0)
2520 goto out_unlock;
2521
2522 /*
2523 * in some corner-case HW recovery scenarios it's possible to
2524 * get here before __wl1271_op_remove_interface has completed, so
2525 * opt out if that is the case.
2526 */
2527 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2528 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2529 ret = -EBUSY;
2530 goto out;
2531 }
2532
2533
2534 ret = wl12xx_init_vif_data(wl, vif);
2535 if (ret < 0)
2536 goto out;
2537
2538 wlvif->wl = wl;
2539 role_type = wl12xx_get_role_type(wl, wlvif);
2540 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2541 ret = -EINVAL;
2542 goto out;
2543 }
2544
2545 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2546 if (ret < 0)
2547 goto out;
2548
2549 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2550 wl12xx_force_active_psm(wl);
2551 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2552 mutex_unlock(&wl->mutex);
2553 wl1271_recovery_work(&wl->recovery_work);
2554 return 0;
2555 }
2556
2557 /*
2558 * TODO: once the nvs issue is solved, move this block
2559 * to start(), and make sure the driver is ON here.
2560 */
2561 if (wl->state == WLCORE_STATE_OFF) {
2562 /*
2563 * we still need this in order to configure the fw
2564 * while uploading the nvs
2565 */
2566 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2567
2568 ret = wl12xx_init_fw(wl);
2569 if (ret < 0)
2570 goto out;
2571 }
2572
2573 if (!wlcore_is_p2p_mgmt(wlvif)) {
2574 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2575 role_type, &wlvif->role_id);
2576 if (ret < 0)
2577 goto out;
2578
2579 ret = wl1271_init_vif_specific(wl, vif);
2580 if (ret < 0)
2581 goto out;
2582
2583 } else {
2584 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2585 &wlvif->dev_role_id);
2586 if (ret < 0)
2587 goto out;
2588
2589 /* needed mainly for configuring rate policies */
2590 ret = wl1271_sta_hw_init(wl, wlvif);
2591 if (ret < 0)
2592 goto out;
2593 }
2594
2595 list_add(&wlvif->list, &wl->wlvif_list);
2596 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2597
2598 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2599 wl->ap_count++;
2600 else
2601 wl->sta_count++;
2602 out:
2603 wl1271_ps_elp_sleep(wl);
2604 out_unlock:
2605 mutex_unlock(&wl->mutex);
2606
2607 return ret;
2608 }
2609
2610 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2611 struct ieee80211_vif *vif,
2612 bool reset_tx_queues)
2613 {
2614 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2615 int i, ret;
2616 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2617
2618 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2619
2620 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2621 return;
2622
2623 /* because of hardware recovery, we may get here twice */
2624 if (wl->state == WLCORE_STATE_OFF)
2625 return;
2626
2627 wl1271_info("down");
2628
2629 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2630 wl->scan_wlvif == wlvif) {
2631 struct cfg80211_scan_info info = {
2632 .aborted = true,
2633 };
2634
2635 /*
2636 * Rearm the tx watchdog just before idling scan. This
2637 * prevents just-finished scans from triggering the watchdog
2638 */
2639 wl12xx_rearm_tx_watchdog_locked(wl);
2640
2641 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2642 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2643 wl->scan_wlvif = NULL;
2644 wl->scan.req = NULL;
2645 ieee80211_scan_completed(wl->hw, &info);
2646 }
2647
2648 if (wl->sched_vif == wlvif)
2649 wl->sched_vif = NULL;
2650
2651 if (wl->roc_vif == vif) {
2652 wl->roc_vif = NULL;
2653 ieee80211_remain_on_channel_expired(wl->hw);
2654 }
2655
2656 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2657 /* disable active roles */
2658 ret = wl1271_ps_elp_wakeup(wl);
2659 if (ret < 0)
2660 goto deinit;
2661
2662 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2663 wlvif->bss_type == BSS_TYPE_IBSS) {
2664 if (wl12xx_dev_role_started(wlvif))
2665 wl12xx_stop_dev(wl, wlvif);
2666 }
2667
2668 if (!wlcore_is_p2p_mgmt(wlvif)) {
2669 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2670 if (ret < 0)
2671 goto deinit;
2672 } else {
2673 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2674 if (ret < 0)
2675 goto deinit;
2676 }
2677
2678 wl1271_ps_elp_sleep(wl);
2679 }
2680 deinit:
2681 wl12xx_tx_reset_wlvif(wl, wlvif);
2682
2683 /* clear all hlids (except system_hlid) */
2684 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2685
2686 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2687 wlvif->bss_type == BSS_TYPE_IBSS) {
2688 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2689 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2690 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2691 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2692 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2693 } else {
2694 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2695 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2696 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2697 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2698 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2699 wl12xx_free_rate_policy(wl,
2700 &wlvif->ap.ucast_rate_idx[i]);
2701 wl1271_free_ap_keys(wl, wlvif);
2702 }
2703
2704 dev_kfree_skb(wlvif->probereq);
2705 wlvif->probereq = NULL;
2706 if (wl->last_wlvif == wlvif)
2707 wl->last_wlvif = NULL;
2708 list_del(&wlvif->list);
2709 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2710 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2711 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2712
2713 if (is_ap)
2714 wl->ap_count--;
2715 else
2716 wl->sta_count--;
2717
2718 /*
2719 * If this was the last AP and stations remain, configure sleep auth
2720 * according to the STA. Don't do this on unintended recovery.
2721 */
2722 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2723 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2724 goto unlock;
2725
2726 if (wl->ap_count == 0 && is_ap) {
2727 /* mask ap events */
2728 wl->event_mask &= ~wl->ap_event_mask;
2729 wl1271_event_unmask(wl);
2730 }
2731
2732 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2733 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2734 /* Configure for power according to debugfs */
2735 if (sta_auth != WL1271_PSM_ILLEGAL)
2736 wl1271_acx_sleep_auth(wl, sta_auth);
2737 /* Configure for ELP power saving */
2738 else
2739 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2740 }
2741
2742 unlock:
2743 mutex_unlock(&wl->mutex);
2744
2745 del_timer_sync(&wlvif->rx_streaming_timer);
2746 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2747 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2748 cancel_work_sync(&wlvif->rc_update_work);
2749 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2750 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2751 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2752
2753 mutex_lock(&wl->mutex);
2754 }
2755
2756 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2757 struct ieee80211_vif *vif)
2758 {
2759 struct wl1271 *wl = hw->priv;
2760 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2761 struct wl12xx_vif *iter;
2762 struct vif_counter_data vif_count;
2763
2764 wl12xx_get_vif_count(hw, vif, &vif_count);
2765 mutex_lock(&wl->mutex);
2766
2767 if (wl->state == WLCORE_STATE_OFF ||
2768 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2769 goto out;
2770
2771 /*
2772 * wl->vif can be null here if someone shuts down the interface
2773 * just when hardware recovery has been started.
2774 */
2775 wl12xx_for_each_wlvif(wl, iter) {
2776 if (iter != wlvif)
2777 continue;
2778
2779 __wl1271_op_remove_interface(wl, vif, true);
2780 break;
2781 }
2782 WARN_ON(iter != wlvif);
2783 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2784 wl12xx_force_active_psm(wl);
2785 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2786 wl12xx_queue_recovery_work(wl);
2787 }
2788 out:
2789 mutex_unlock(&wl->mutex);
2790 }
2791
2792 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2793 struct ieee80211_vif *vif,
2794 enum nl80211_iftype new_type, bool p2p)
2795 {
2796 struct wl1271 *wl = hw->priv;
2797 int ret;
2798
2799 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2800 wl1271_op_remove_interface(hw, vif);
2801
2802 vif->type = new_type;
2803 vif->p2p = p2p;
2804 ret = wl1271_op_add_interface(hw, vif);
2805
2806 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2807 return ret;
2808 }
2809
2810 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2811 {
2812 int ret;
2813 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2814
2815 /*
2816 * One of the side effects of the JOIN command is that it clears
2817 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2818 * to a WPA/WPA2 access point will therefore kill the data-path.
2819 * Currently the only valid scenario for JOIN during association
2820 * is on roaming, in which case we will also be given new keys.
2821 * Keep the below message for now, unless it starts bothering
2822 * users who really like to roam a lot :)
2823 */
2824 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2825 wl1271_info("JOIN while associated.");
2826
2827 /* clear encryption type */
2828 wlvif->encryption_type = KEY_NONE;
2829
2830 if (is_ibss)
2831 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2832 else {
2833 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2834 /*
2835 * TODO: this is an ugly workaround for a wl12xx fw
2836 * bug - we are not able to tx/rx after the first
2837 * start_sta, so make dummy start+stop calls,
2838 * and then call start_sta again.
2839 * this should be fixed in the fw.
2840 */
2841 wl12xx_cmd_role_start_sta(wl, wlvif);
2842 wl12xx_cmd_role_stop_sta(wl, wlvif);
2843 }
2844
2845 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2846 }
2847
2848 return ret;
2849 }
2850
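/*
 * Extract the SSID IE from a management frame (starting at the given IE
 * offset) and copy it into wlvif->ssid / wlvif->ssid_len.
 */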
2851 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2852 int offset)
2853 {
2854 u8 ssid_len;
2855 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2856 skb->len - offset);
2857
2858 if (!ptr) {
2859 wl1271_error("No SSID in IEs!");
2860 return -ENOENT;
2861 }
2862
2863 ssid_len = ptr[1];
2864 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2865 wl1271_error("SSID is too long!");
2866 return -EINVAL;
2867 }
2868
2869 wlvif->ssid_len = ssid_len;
2870 memcpy(wlvif->ssid, ptr+2, ssid_len);
2871 return 0;
2872 }
2873
2874 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2875 {
2876 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2877 struct sk_buff *skb;
2878 int ieoffset;
2879
2880 /* we currently only support setting the ssid from the ap probe req */
2881 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2882 return -EINVAL;
2883
2884 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2885 if (!skb)
2886 return -EINVAL;
2887
2888 ieoffset = offsetof(struct ieee80211_mgmt,
2889 u.probe_req.variable);
2890 wl1271_ssid_set(wlvif, skb, ieoffset);
2891 dev_kfree_skb(skb);
2892
2893 return 0;
2894 }
2895
2896 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2897 struct ieee80211_bss_conf *bss_conf,
2898 u32 sta_rate_set)
2899 {
2900 int ieoffset;
2901 int ret;
2902
2903 wlvif->aid = bss_conf->aid;
2904 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2905 wlvif->beacon_int = bss_conf->beacon_int;
2906 wlvif->wmm_enabled = bss_conf->qos;
2907
2908 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2909
2910 /*
2911 * with wl1271, we don't need to update the
2912 * beacon_int and dtim_period, because the firmware
2913 * updates them by itself when the first beacon is
2914 * received after a join.
2915 */
2916 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2917 if (ret < 0)
2918 return ret;
2919
2920 /*
2921 * Get a template for hardware connection maintenance
2922 */
2923 dev_kfree_skb(wlvif->probereq);
2924 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2925 wlvif,
2926 NULL);
2927 ieoffset = offsetof(struct ieee80211_mgmt,
2928 u.probe_req.variable);
2929 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2930
2931 /* enable the connection monitoring feature */
2932 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2933 if (ret < 0)
2934 return ret;
2935
2936 /*
2937 * The join command disables the keep-alive mode, shuts down its process,
2938 * and also clears the template config, so we need to reset it all after
2939 * the join. The acx_aid starts the keep-alive process, and the order
2940 * of the commands below is relevant.
2941 */
2942 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2943 if (ret < 0)
2944 return ret;
2945
2946 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2947 if (ret < 0)
2948 return ret;
2949
2950 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2951 if (ret < 0)
2952 return ret;
2953
2954 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2955 wlvif->sta.klv_template_id,
2956 ACX_KEEP_ALIVE_TPL_VALID);
2957 if (ret < 0)
2958 return ret;
2959
2960 /*
2961 * The default fw psm configuration is AUTO, while mac80211 default
2962 * setting is off (ACTIVE), so sync the fw with the correct value.
2963 */
2964 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2965 if (ret < 0)
2966 return ret;
2967
2968 if (sta_rate_set) {
2969 wlvif->rate_set =
2970 wl1271_tx_enabled_rates_get(wl,
2971 sta_rate_set,
2972 wlvif->band);
2973 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2974 if (ret < 0)
2975 return ret;
2976 }
2977
2978 return ret;
2979 }
2980
2981 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2982 {
2983 int ret;
2984 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2985
2986 /* make sure we are associated (sta) */
2987 if (sta &&
2988 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2989 return false;
2990
2991 /* make sure we are joined (ibss) */
2992 if (!sta &&
2993 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2994 return false;
2995
2996 if (sta) {
2997 /* use defaults when not associated */
2998 wlvif->aid = 0;
2999
3000 /* free probe-request template */
3001 dev_kfree_skb(wlvif->probereq);
3002 wlvif->probereq = NULL;
3003
3004 /* disable connection monitor features */
3005 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3006 if (ret < 0)
3007 return ret;
3008
3009 /* Disable the keep-alive feature */
3010 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3011 if (ret < 0)
3012 return ret;
3013
3014 /* disable beacon filtering */
3015 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3016 if (ret < 0)
3017 return ret;
3018 }
3019
3020 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3021 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3022
3023 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3024 ieee80211_chswitch_done(vif, false);
3025 cancel_delayed_work(&wlvif->channel_switch_work);
3026 }
3027
3028 /* invalidate keep-alive template */
3029 wl1271_acx_keep_alive_config(wl, wlvif,
3030 wlvif->sta.klv_template_id,
3031 ACX_KEEP_ALIVE_TPL_INVALID);
3032
3033 return 0;
3034 }
3035
3036 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3037 {
3038 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3039 wlvif->rate_set = wlvif->basic_rate_set;
3040 }
3041
3042 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3043 bool idle)
3044 {
3045 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3046
3047 if (idle == cur_idle)
3048 return;
3049
3050 if (idle) {
3051 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3052 } else {
3053 /* The current firmware only supports sched_scan in idle */
3054 if (wl->sched_vif == wlvif)
3055 wl->ops->sched_scan_stop(wl, wlvif);
3056
3057 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3058 }
3059 }
3060
3061 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3062 struct ieee80211_conf *conf, u32 changed)
3063 {
3064 int ret;
3065
3066 if (wlcore_is_p2p_mgmt(wlvif))
3067 return 0;
3068
3069 if (conf->power_level != wlvif->power_level) {
3070 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3071 if (ret < 0)
3072 return ret;
3073
3074 wlvif->power_level = conf->power_level;
3075 }
3076
3077 return 0;
3078 }
3079
3080 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3081 {
3082 struct wl1271 *wl = hw->priv;
3083 struct wl12xx_vif *wlvif;
3084 struct ieee80211_conf *conf = &hw->conf;
3085 int ret = 0;
3086
3087 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3088 " changed 0x%x",
3089 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3090 conf->power_level,
3091 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3092 changed);
3093
3094 mutex_lock(&wl->mutex);
3095
3096 if (changed & IEEE80211_CONF_CHANGE_POWER)
3097 wl->power_level = conf->power_level;
3098
3099 if (unlikely(wl->state != WLCORE_STATE_ON))
3100 goto out;
3101
3102 ret = wl1271_ps_elp_wakeup(wl);
3103 if (ret < 0)
3104 goto out;
3105
3106 /* configure each interface */
3107 wl12xx_for_each_wlvif(wl, wlvif) {
3108 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3109 if (ret < 0)
3110 goto out_sleep;
3111 }
3112
3113 out_sleep:
3114 wl1271_ps_elp_sleep(wl);
3115
3116 out:
3117 mutex_unlock(&wl->mutex);
3118
3119 return ret;
3120 }
3121
3122 struct wl1271_filter_params {
3123 bool enabled;
3124 int mc_list_length;
3125 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3126 };
3127
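/*
 * Snapshot the multicast address list into a wl1271_filter_params struct;
 * the pointer is returned as the u64 "multicast" cookie that mac80211
 * later hands back to wl1271_op_configure_filter(), which frees it.
 */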
3128 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3129 struct netdev_hw_addr_list *mc_list)
3130 {
3131 struct wl1271_filter_params *fp;
3132 struct netdev_hw_addr *ha;
3133
3134 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3135 if (!fp) {
3136 wl1271_error("Out of memory setting filters.");
3137 return 0;
3138 }
3139
3140 /* update multicast filtering parameters */
3141 fp->mc_list_length = 0;
3142 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3143 fp->enabled = false;
3144 } else {
3145 fp->enabled = true;
3146 netdev_hw_addr_list_for_each(ha, mc_list) {
3147 memcpy(fp->mc_list[fp->mc_list_length],
3148 ha->addr, ETH_ALEN);
3149 fp->mc_list_length++;
3150 }
3151 }
3152
3153 return (u64)(unsigned long)fp;
3154 }
3155
3156 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3157 FIF_FCSFAIL | \
3158 FIF_BCN_PRBRESP_PROMISC | \
3159 FIF_CONTROL | \
3160 FIF_OTHER_BSS)
3161
3162 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3163 unsigned int changed,
3164 unsigned int *total, u64 multicast)
3165 {
3166 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3167 struct wl1271 *wl = hw->priv;
3168 struct wl12xx_vif *wlvif;
3169
3170 int ret;
3171
3172 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3173 " total %x", changed, *total);
3174
3175 mutex_lock(&wl->mutex);
3176
3177 *total &= WL1271_SUPPORTED_FILTERS;
3178 changed &= WL1271_SUPPORTED_FILTERS;
3179
3180 if (unlikely(wl->state != WLCORE_STATE_ON))
3181 goto out;
3182
3183 ret = wl1271_ps_elp_wakeup(wl);
3184 if (ret < 0)
3185 goto out;
3186
3187 wl12xx_for_each_wlvif(wl, wlvif) {
3188 if (wlcore_is_p2p_mgmt(wlvif))
3189 continue;
3190
3191 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3192 if (*total & FIF_ALLMULTI)
3193 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3194 false,
3195 NULL, 0);
3196 else if (fp)
3197 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3198 fp->enabled,
3199 fp->mc_list,
3200 fp->mc_list_length);
3201 if (ret < 0)
3202 goto out_sleep;
3203 }
3204
3205 /*
3206 * If the interface is in AP mode and was created with allmulticast,
3207 * disable the firmware filters so that all multicast packets are
3208 * passed. This is mandatory for mDNS-based discovery protocols.
3209 */
3210 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3211 if (*total & FIF_ALLMULTI) {
3212 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3213 false,
3214 NULL, 0);
3215 if (ret < 0)
3216 goto out_sleep;
3217 }
3218 }
3219 }
3220
3221 /*
3222 * the fw doesn't provide an api to configure the filters. instead,
3223 * the filter configuration is based on the active roles / ROC
3224 * state.
3225 */
3226
3227 out_sleep:
3228 wl1271_ps_elp_sleep(wl);
3229
3230 out:
3231 mutex_unlock(&wl->mutex);
3232 kfree(fp);
3233 }
3234
3235 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3236 u8 id, u8 key_type, u8 key_size,
3237 const u8 *key, u8 hlid, u32 tx_seq_32,
3238 u16 tx_seq_16)
3239 {
3240 struct wl1271_ap_key *ap_key;
3241 int i;
3242
3243 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3244
3245 if (key_size > MAX_KEY_SIZE)
3246 return -EINVAL;
3247
3248 /*
3249 * Find next free entry in ap_keys. Also check we are not replacing
3250 * an existing key.
3251 */
3252 for (i = 0; i < MAX_NUM_KEYS; i++) {
3253 if (wlvif->ap.recorded_keys[i] == NULL)
3254 break;
3255
3256 if (wlvif->ap.recorded_keys[i]->id == id) {
3257 wl1271_warning("trying to record key replacement");
3258 return -EINVAL;
3259 }
3260 }
3261
3262 if (i == MAX_NUM_KEYS)
3263 return -EBUSY;
3264
3265 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3266 if (!ap_key)
3267 return -ENOMEM;
3268
3269 ap_key->id = id;
3270 ap_key->key_type = key_type;
3271 ap_key->key_size = key_size;
3272 memcpy(ap_key->key, key, key_size);
3273 ap_key->hlid = hlid;
3274 ap_key->tx_seq_32 = tx_seq_32;
3275 ap_key->tx_seq_16 = tx_seq_16;
3276
3277 wlvif->ap.recorded_keys[i] = ap_key;
3278 return 0;
3279 }
3280
3281 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3282 {
3283 int i;
3284
3285 for (i = 0; i < MAX_NUM_KEYS; i++) {
3286 kfree(wlvif->ap.recorded_keys[i]);
3287 wlvif->ap.recorded_keys[i] = NULL;
3288 }
3289 }
3290
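/*
 * Program all keys recorded before the AP was started (see
 * wl1271_record_ap_key) into the firmware, and if any of them was a WEP
 * key, also set the default WEP key for the broadcast link. The recorded
 * keys are freed in all cases.
 */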
3291 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3292 {
3293 int i, ret = 0;
3294 struct wl1271_ap_key *key;
3295 bool wep_key_added = false;
3296
3297 for (i = 0; i < MAX_NUM_KEYS; i++) {
3298 u8 hlid;
3299 if (wlvif->ap.recorded_keys[i] == NULL)
3300 break;
3301
3302 key = wlvif->ap.recorded_keys[i];
3303 hlid = key->hlid;
3304 if (hlid == WL12XX_INVALID_LINK_ID)
3305 hlid = wlvif->ap.bcast_hlid;
3306
3307 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3308 key->id, key->key_type,
3309 key->key_size, key->key,
3310 hlid, key->tx_seq_32,
3311 key->tx_seq_16);
3312 if (ret < 0)
3313 goto out;
3314
3315 if (key->key_type == KEY_WEP)
3316 wep_key_added = true;
3317 }
3318
3319 if (wep_key_added) {
3320 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3321 wlvif->ap.bcast_hlid);
3322 if (ret < 0)
3323 goto out;
3324 }
3325
3326 out:
3327 wl1271_free_ap_keys(wl, wlvif);
3328 return ret;
3329 }
3330
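/*
 * Common key programming helper. In AP mode, keys set before the AP has
 * started are only recorded and are pushed to the firmware later by
 * wl1271_ap_init_hwenc(); in STA mode, key-removal requests for unicast
 * keys (or for an already freed hlid) are silently ignored.
 */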
3331 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3332 u16 action, u8 id, u8 key_type,
3333 u8 key_size, const u8 *key, u32 tx_seq_32,
3334 u16 tx_seq_16, struct ieee80211_sta *sta)
3335 {
3336 int ret;
3337 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3338
3339 if (is_ap) {
3340 struct wl1271_station *wl_sta;
3341 u8 hlid;
3342
3343 if (sta) {
3344 wl_sta = (struct wl1271_station *)sta->drv_priv;
3345 hlid = wl_sta->hlid;
3346 } else {
3347 hlid = wlvif->ap.bcast_hlid;
3348 }
3349
3350 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3351 /*
3352 * We do not support removing keys after AP shutdown.
3353 * Pretend we do to make mac80211 happy.
3354 */
3355 if (action != KEY_ADD_OR_REPLACE)
3356 return 0;
3357
3358 ret = wl1271_record_ap_key(wl, wlvif, id,
3359 key_type, key_size,
3360 key, hlid, tx_seq_32,
3361 tx_seq_16);
3362 } else {
3363 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3364 id, key_type, key_size,
3365 key, hlid, tx_seq_32,
3366 tx_seq_16);
3367 }
3368
3369 if (ret < 0)
3370 return ret;
3371 } else {
3372 const u8 *addr;
3373 static const u8 bcast_addr[ETH_ALEN] = {
3374 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3375 };
3376
3377 addr = sta ? sta->addr : bcast_addr;
3378
3379 if (is_zero_ether_addr(addr)) {
3380 /* We don't support TX-only encryption */
3381 return -EOPNOTSUPP;
3382 }
3383
3384 /* The wl1271 does not allow removing unicast keys - they
3385 will be cleared automatically on the next CMD_JOIN. Ignore the
3386 request silently, as we don't want mac80211 to emit
3387 an error message. */
3388 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3389 return 0;
3390
3391 /* don't remove key if hlid was already deleted */
3392 if (action == KEY_REMOVE &&
3393 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3394 return 0;
3395
3396 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3397 id, key_type, key_size,
3398 key, addr, tx_seq_32,
3399 tx_seq_16);
3400 if (ret < 0)
3401 return ret;
3402
3403 }
3404
3405 return 0;
3406 }
3407
3408 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3409 struct ieee80211_vif *vif,
3410 struct ieee80211_sta *sta,
3411 struct ieee80211_key_conf *key_conf)
3412 {
3413 struct wl1271 *wl = hw->priv;
3414 int ret;
3415 bool might_change_spare =
3416 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3417 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3418
3419 if (might_change_spare) {
3420 /*
3421 * stop the queues and flush to ensure the next packets are
3422 * in sync with FW spare block accounting
3423 */
3424 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3425 wl1271_tx_flush(wl);
3426 }
3427
3428 mutex_lock(&wl->mutex);
3429
3430 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3431 ret = -EAGAIN;
3432 goto out_wake_queues;
3433 }
3434
3435 ret = wl1271_ps_elp_wakeup(wl);
3436 if (ret < 0)
3437 goto out_wake_queues;
3438
3439 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3440
3441 wl1271_ps_elp_sleep(wl);
3442
3443 out_wake_queues:
3444 if (might_change_spare)
3445 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3446
3447 mutex_unlock(&wl->mutex);
3448
3449 return ret;
3450 }
3451
3452 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3453 struct ieee80211_vif *vif,
3454 struct ieee80211_sta *sta,
3455 struct ieee80211_key_conf *key_conf)
3456 {
3457 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3458 int ret;
3459 u32 tx_seq_32 = 0;
3460 u16 tx_seq_16 = 0;
3461 u8 key_type;
3462 u8 hlid;
3463
3464 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3465
3466 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3467 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3468 key_conf->cipher, key_conf->keyidx,
3469 key_conf->keylen, key_conf->flags);
3470 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3471
3472 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3473 if (sta) {
3474 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3475 hlid = wl_sta->hlid;
3476 } else {
3477 hlid = wlvif->ap.bcast_hlid;
3478 }
3479 else
3480 hlid = wlvif->sta.hlid;
3481
3482 if (hlid != WL12XX_INVALID_LINK_ID) {
3483 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3484 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3485 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3486 }
3487
3488 switch (key_conf->cipher) {
3489 case WLAN_CIPHER_SUITE_WEP40:
3490 case WLAN_CIPHER_SUITE_WEP104:
3491 key_type = KEY_WEP;
3492
3493 key_conf->hw_key_idx = key_conf->keyidx;
3494 break;
3495 case WLAN_CIPHER_SUITE_TKIP:
3496 key_type = KEY_TKIP;
3497 key_conf->hw_key_idx = key_conf->keyidx;
3498 break;
3499 case WLAN_CIPHER_SUITE_CCMP:
3500 key_type = KEY_AES;
3501 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3502 break;
3503 case WL1271_CIPHER_SUITE_GEM:
3504 key_type = KEY_GEM;
3505 break;
3506 default:
3507 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3508
3509 return -EOPNOTSUPP;
3510 }
3511
3512 switch (cmd) {
3513 case SET_KEY:
3514 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3515 key_conf->keyidx, key_type,
3516 key_conf->keylen, key_conf->key,
3517 tx_seq_32, tx_seq_16, sta);
3518 if (ret < 0) {
3519 wl1271_error("Could not add or replace key");
3520 return ret;
3521 }
3522
3523 /*
3524 * reconfigure the arp response if the unicast (or common)
3525 * encryption key type was changed
3526 */
3527 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3528 (sta || key_type == KEY_WEP) &&
3529 wlvif->encryption_type != key_type) {
3530 wlvif->encryption_type = key_type;
3531 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3532 if (ret < 0) {
3533 wl1271_warning("build arp rsp failed: %d", ret);
3534 return ret;
3535 }
3536 }
3537 break;
3538
3539 case DISABLE_KEY:
3540 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3541 key_conf->keyidx, key_type,
3542 key_conf->keylen, key_conf->key,
3543 0, 0, sta);
3544 if (ret < 0) {
3545 wl1271_error("Could not remove key");
3546 return ret;
3547 }
3548 break;
3549
3550 default:
3551 wl1271_error("Unsupported key cmd 0x%x", cmd);
3552 return -EOPNOTSUPP;
3553 }
3554
3555 return ret;
3556 }
3557 EXPORT_SYMBOL_GPL(wlcore_set_key);
3558
3559 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3560 struct ieee80211_vif *vif,
3561 int key_idx)
3562 {
3563 struct wl1271 *wl = hw->priv;
3564 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3565 int ret;
3566
3567 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3568 key_idx);
3569
3570 /* we don't handle unsetting of default key */
3571 if (key_idx == -1)
3572 return;
3573
3574 mutex_lock(&wl->mutex);
3575
3576 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3577 ret = -EAGAIN;
3578 goto out_unlock;
3579 }
3580
3581 ret = wl1271_ps_elp_wakeup(wl);
3582 if (ret < 0)
3583 goto out_unlock;
3584
3585 wlvif->default_key = key_idx;
3586
3587 /* the default WEP key needs to be configured at least once */
3588 if (wlvif->encryption_type == KEY_WEP) {
3589 ret = wl12xx_cmd_set_default_wep_key(wl,
3590 key_idx,
3591 wlvif->sta.hlid);
3592 if (ret < 0)
3593 goto out_sleep;
3594 }
3595
3596 out_sleep:
3597 wl1271_ps_elp_sleep(wl);
3598
3599 out_unlock:
3600 mutex_unlock(&wl->mutex);
3601 }
3602
3603 void wlcore_regdomain_config(struct wl1271 *wl)
3604 {
3605 int ret;
3606
3607 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3608 return;
3609
3610 mutex_lock(&wl->mutex);
3611
3612 if (unlikely(wl->state != WLCORE_STATE_ON))
3613 goto out;
3614
3615 ret = wl1271_ps_elp_wakeup(wl);
3616 if (ret < 0)
3617 goto out;
3618
3619 ret = wlcore_cmd_regdomain_config_locked(wl);
3620 if (ret < 0) {
3621 wl12xx_queue_recovery_work(wl);
3622 goto out;
3623 }
3624
3625 wl1271_ps_elp_sleep(wl);
3626 out:
3627 mutex_unlock(&wl->mutex);
3628 }
3629
3630 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3631 struct ieee80211_vif *vif,
3632 struct ieee80211_scan_request *hw_req)
3633 {
3634 struct cfg80211_scan_request *req = &hw_req->req;
3635 struct wl1271 *wl = hw->priv;
3636 int ret;
3637 u8 *ssid = NULL;
3638 size_t len = 0;
3639
3640 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3641
3642 if (req->n_ssids) {
3643 ssid = req->ssids[0].ssid;
3644 len = req->ssids[0].ssid_len;
3645 }
3646
3647 mutex_lock(&wl->mutex);
3648
3649 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3650 /*
3651 * We cannot return -EBUSY here because cfg80211 will expect
3652 * a call to ieee80211_scan_completed if we do - in this case
3653 * there won't be any call.
3654 */
3655 ret = -EAGAIN;
3656 goto out;
3657 }
3658
3659 ret = wl1271_ps_elp_wakeup(wl);
3660 if (ret < 0)
3661 goto out;
3662
3663 /* fail if there is any role in ROC */
3664 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3665 /* don't allow scanning right now */
3666 ret = -EBUSY;
3667 goto out_sleep;
3668 }
3669
3670 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3671 out_sleep:
3672 wl1271_ps_elp_sleep(wl);
3673 out:
3674 mutex_unlock(&wl->mutex);
3675
3676 return ret;
3677 }
3678
3679 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3680 struct ieee80211_vif *vif)
3681 {
3682 struct wl1271 *wl = hw->priv;
3683 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3684 struct cfg80211_scan_info info = {
3685 .aborted = true,
3686 };
3687 int ret;
3688
3689 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3690
3691 mutex_lock(&wl->mutex);
3692
3693 if (unlikely(wl->state != WLCORE_STATE_ON))
3694 goto out;
3695
3696 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3697 goto out;
3698
3699 ret = wl1271_ps_elp_wakeup(wl);
3700 if (ret < 0)
3701 goto out;
3702
3703 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3704 ret = wl->ops->scan_stop(wl, wlvif);
3705 if (ret < 0)
3706 goto out_sleep;
3707 }
3708
3709 /*
3710 * Rearm the tx watchdog just before idling scan. This
3711 * prevents just-finished scans from triggering the watchdog
3712 */
3713 wl12xx_rearm_tx_watchdog_locked(wl);
3714
3715 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3716 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3717 wl->scan_wlvif = NULL;
3718 wl->scan.req = NULL;
3719 ieee80211_scan_completed(wl->hw, &info);
3720
3721 out_sleep:
3722 wl1271_ps_elp_sleep(wl);
3723 out:
3724 mutex_unlock(&wl->mutex);
3725
3726 cancel_delayed_work_sync(&wl->scan_complete_work);
3727 }
3728
3729 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3730 struct ieee80211_vif *vif,
3731 struct cfg80211_sched_scan_request *req,
3732 struct ieee80211_scan_ies *ies)
3733 {
3734 struct wl1271 *wl = hw->priv;
3735 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3736 int ret;
3737
3738 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3739
3740 mutex_lock(&wl->mutex);
3741
3742 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3743 ret = -EAGAIN;
3744 goto out;
3745 }
3746
3747 ret = wl1271_ps_elp_wakeup(wl);
3748 if (ret < 0)
3749 goto out;
3750
3751 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3752 if (ret < 0)
3753 goto out_sleep;
3754
3755 wl->sched_vif = wlvif;
3756
3757 out_sleep:
3758 wl1271_ps_elp_sleep(wl);
3759 out:
3760 mutex_unlock(&wl->mutex);
3761 return ret;
3762 }
3763
3764 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3765 struct ieee80211_vif *vif)
3766 {
3767 struct wl1271 *wl = hw->priv;
3768 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3769 int ret;
3770
3771 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3772
3773 mutex_lock(&wl->mutex);
3774
3775 if (unlikely(wl->state != WLCORE_STATE_ON))
3776 goto out;
3777
3778 ret = wl1271_ps_elp_wakeup(wl);
3779 if (ret < 0)
3780 goto out;
3781
3782 wl->ops->sched_scan_stop(wl, wlvif);
3783
3784 wl1271_ps_elp_sleep(wl);
3785 out:
3786 mutex_unlock(&wl->mutex);
3787
3788 return 0;
3789 }
3790
3791 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3792 {
3793 struct wl1271 *wl = hw->priv;
3794 int ret = 0;
3795
3796 mutex_lock(&wl->mutex);
3797
3798 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3799 ret = -EAGAIN;
3800 goto out;
3801 }
3802
3803 ret = wl1271_ps_elp_wakeup(wl);
3804 if (ret < 0)
3805 goto out;
3806
3807 ret = wl1271_acx_frag_threshold(wl, value);
3808 if (ret < 0)
3809 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3810
3811 wl1271_ps_elp_sleep(wl);
3812
3813 out:
3814 mutex_unlock(&wl->mutex);
3815
3816 return ret;
3817 }
3818
3819 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3820 {
3821 struct wl1271 *wl = hw->priv;
3822 struct wl12xx_vif *wlvif;
3823 int ret = 0;
3824
3825 mutex_lock(&wl->mutex);
3826
3827 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3828 ret = -EAGAIN;
3829 goto out;
3830 }
3831
3832 ret = wl1271_ps_elp_wakeup(wl);
3833 if (ret < 0)
3834 goto out;
3835
3836 wl12xx_for_each_wlvif(wl, wlvif) {
3837 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3838 if (ret < 0)
3839 wl1271_warning("set rts threshold failed: %d", ret);
3840 }
3841 wl1271_ps_elp_sleep(wl);
3842
3843 out:
3844 mutex_unlock(&wl->mutex);
3845
3846 return ret;
3847 }
3848
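/*
 * Strip an IE from a template skb in place: find it starting at ieoffset,
 * memmove the remainder of the frame over it and trim the skb length.
 */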
3849 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3850 {
3851 int len;
3852 const u8 *next, *end = skb->data + skb->len;
3853 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3854 skb->len - ieoffset);
3855 if (!ie)
3856 return;
3857 len = ie[1] + 2;
3858 next = ie + len;
3859 memmove(ie, next, end - next);
3860 skb_trim(skb, skb->len - len);
3861 }
3862
3863 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3864 unsigned int oui, u8 oui_type,
3865 int ieoffset)
3866 {
3867 int len;
3868 const u8 *next, *end = skb->data + skb->len;
3869 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3870 skb->data + ieoffset,
3871 skb->len - ieoffset);
3872 if (!ie)
3873 return;
3874 len = ie[1] + 2;
3875 next = ie + len;
3876 memmove(ie, next, end - next);
3877 skb_trim(skb, skb->len - len);
3878 }
3879
3880 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3881 struct ieee80211_vif *vif)
3882 {
3883 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3884 struct sk_buff *skb;
3885 int ret;
3886
3887 skb = ieee80211_proberesp_get(wl->hw, vif);
3888 if (!skb)
3889 return -EOPNOTSUPP;
3890
3891 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3892 CMD_TEMPL_AP_PROBE_RESPONSE,
3893 skb->data,
3894 skb->len, 0,
3895 rates);
3896 dev_kfree_skb(skb);
3897
3898 if (ret < 0)
3899 goto out;
3900
3901 wl1271_debug(DEBUG_AP, "probe response updated");
3902 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3903
3904 out:
3905 return ret;
3906 }
3907
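/*
 * Used by wlcore_set_beacon_template() when deriving the AP probe
 * response from the beacon: if the vif has no SSID configured (e.g. a
 * hidden SSID), splice the SSID from bss_conf into the template's SSID
 * IE before uploading it.
 */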
3908 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3909 struct ieee80211_vif *vif,
3910 u8 *probe_rsp_data,
3911 size_t probe_rsp_len,
3912 u32 rates)
3913 {
3914 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3915 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3916 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3917 int ssid_ie_offset, ie_offset, templ_len;
3918 const u8 *ptr;
3919
3920 /* no need to change probe response if the SSID is set correctly */
3921 if (wlvif->ssid_len > 0)
3922 return wl1271_cmd_template_set(wl, wlvif->role_id,
3923 CMD_TEMPL_AP_PROBE_RESPONSE,
3924 probe_rsp_data,
3925 probe_rsp_len, 0,
3926 rates);
3927
3928 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3929 wl1271_error("probe_rsp template too big");
3930 return -EINVAL;
3931 }
3932
3933 /* start searching from IE offset */
3934 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3935
3936 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3937 probe_rsp_len - ie_offset);
3938 if (!ptr) {
3939 wl1271_error("No SSID in beacon!");
3940 return -EINVAL;
3941 }
3942
3943 ssid_ie_offset = ptr - probe_rsp_data;
3944 ptr += (ptr[1] + 2);
3945
3946 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3947
3948 /* insert SSID from bss_conf */
3949 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3950 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3951 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3952 bss_conf->ssid, bss_conf->ssid_len);
3953 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3954
3955 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3956 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3957 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3958
3959 return wl1271_cmd_template_set(wl, wlvif->role_id,
3960 CMD_TEMPL_AP_PROBE_RESPONSE,
3961 probe_rsp_templ,
3962 templ_len, 0,
3963 rates);
3964 }
3965
3966 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3967 struct ieee80211_vif *vif,
3968 struct ieee80211_bss_conf *bss_conf,
3969 u32 changed)
3970 {
3971 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3972 int ret = 0;
3973
3974 if (changed & BSS_CHANGED_ERP_SLOT) {
3975 if (bss_conf->use_short_slot)
3976 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3977 else
3978 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3979 if (ret < 0) {
3980 wl1271_warning("Set slot time failed %d", ret);
3981 goto out;
3982 }
3983 }
3984
3985 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3986 if (bss_conf->use_short_preamble)
3987 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3988 else
3989 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3990 }
3991
3992 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3993 if (bss_conf->use_cts_prot)
3994 ret = wl1271_acx_cts_protect(wl, wlvif,
3995 CTSPROTECT_ENABLE);
3996 else
3997 ret = wl1271_acx_cts_protect(wl, wlvif,
3998 CTSPROTECT_DISABLE);
3999 if (ret < 0) {
4000 wl1271_warning("Set ctsprotect failed %d", ret);
4001 goto out;
4002 }
4003 }
4004
4005 out:
4006 return ret;
4007 }
4008
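/*
 * Upload the current beacon as the beacon template and, unless userspace
 * has set a probe response explicitly, also derive a probe-response
 * template from it by dropping the TIM and P2P IEs and rewriting the
 * frame control field.
 */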
4009 static int wlcore_set_beacon_template(struct wl1271 *wl,
4010 struct ieee80211_vif *vif,
4011 bool is_ap)
4012 {
4013 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4014 struct ieee80211_hdr *hdr;
4015 u32 min_rate;
4016 int ret;
4017 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4018 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4019 u16 tmpl_id;
4020
4021 if (!beacon) {
4022 ret = -EINVAL;
4023 goto out;
4024 }
4025
4026 wl1271_debug(DEBUG_MASTER, "beacon updated");
4027
4028 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4029 if (ret < 0) {
4030 dev_kfree_skb(beacon);
4031 goto out;
4032 }
4033 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4034 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4035 CMD_TEMPL_BEACON;
4036 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4037 beacon->data,
4038 beacon->len, 0,
4039 min_rate);
4040 if (ret < 0) {
4041 dev_kfree_skb(beacon);
4042 goto out;
4043 }
4044
4045 wlvif->wmm_enabled =
4046 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4047 WLAN_OUI_TYPE_MICROSOFT_WMM,
4048 beacon->data + ieoffset,
4049 beacon->len - ieoffset);
4050
4051 /*
4052 * In case a probe-response template was already set explicitly
4053 * by userspace, don't derive one from the beacon data.
4054 */
4055 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4056 goto end_bcn;
4057
4058 /* remove TIM ie from probe response */
4059 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4060
4061 /*
4062 * remove p2p ie from probe response.
4063 * the fw responds to probe requests that don't include
4064 * the p2p ie. probe requests with a p2p ie will be passed up
4065 * and answered by the supplicant (the spec
4066 * forbids including the p2p ie when responding to probe
4067 * requests that didn't include it).
4068 */
4069 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4070 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4071
4072 hdr = (struct ieee80211_hdr *) beacon->data;
4073 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4074 IEEE80211_STYPE_PROBE_RESP);
4075 if (is_ap)
4076 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4077 beacon->data,
4078 beacon->len,
4079 min_rate);
4080 else
4081 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4082 CMD_TEMPL_PROBE_RESPONSE,
4083 beacon->data,
4084 beacon->len, 0,
4085 min_rate);
4086 end_bcn:
4087 dev_kfree_skb(beacon);
4088 if (ret < 0)
4089 goto out;
4090
4091 out:
4092 return ret;
4093 }
4094
4095 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4096 struct ieee80211_vif *vif,
4097 struct ieee80211_bss_conf *bss_conf,
4098 u32 changed)
4099 {
4100 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4101 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4102 int ret = 0;
4103
4104 if (changed & BSS_CHANGED_BEACON_INT) {
4105 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4106 bss_conf->beacon_int);
4107
4108 wlvif->beacon_int = bss_conf->beacon_int;
4109 }
4110
4111 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4112 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4113
4114 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4115 }
4116
4117 if (changed & BSS_CHANGED_BEACON) {
4118 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4119 if (ret < 0)
4120 goto out;
4121
4122 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4123 &wlvif->flags)) {
4124 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4125 if (ret < 0)
4126 goto out;
4127 }
4128 }
4129 out:
4130 if (ret != 0)
4131 wl1271_error("beacon info change failed: %d", ret);
4132 return ret;
4133 }
4134
4135 /* AP mode changes */
4136 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4137 struct ieee80211_vif *vif,
4138 struct ieee80211_bss_conf *bss_conf,
4139 u32 changed)
4140 {
4141 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4142 int ret = 0;
4143
4144 if (changed & BSS_CHANGED_BASIC_RATES) {
4145 u32 rates = bss_conf->basic_rates;
4146
4147 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4148 wlvif->band);
4149 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4150 wlvif->basic_rate_set);
4151
4152 ret = wl1271_init_ap_rates(wl, wlvif);
4153 if (ret < 0) {
4154 wl1271_error("AP rate policy change failed %d", ret);
4155 goto out;
4156 }
4157
4158 ret = wl1271_ap_init_templates(wl, vif);
4159 if (ret < 0)
4160 goto out;
4161
4162 /* No need to set probe resp template for mesh */
4163 if (!ieee80211_vif_is_mesh(vif)) {
4164 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4165 wlvif->basic_rate,
4166 vif);
4167 if (ret < 0)
4168 goto out;
4169 }
4170
4171 ret = wlcore_set_beacon_template(wl, vif, true);
4172 if (ret < 0)
4173 goto out;
4174 }
4175
4176 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4177 if (ret < 0)
4178 goto out;
4179
4180 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4181 if (bss_conf->enable_beacon) {
4182 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4183 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4184 if (ret < 0)
4185 goto out;
4186
4187 ret = wl1271_ap_init_hwenc(wl, wlvif);
4188 if (ret < 0)
4189 goto out;
4190
4191 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4192 wl1271_debug(DEBUG_AP, "started AP");
4193 }
4194 } else {
4195 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4196 /*
4197 * AP might be in ROC in case we have just
4198 * sent an auth reply; handle it.
4199 */
4200 if (test_bit(wlvif->role_id, wl->roc_map))
4201 wl12xx_croc(wl, wlvif->role_id);
4202
4203 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4204 if (ret < 0)
4205 goto out;
4206
4207 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4208 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4209 &wlvif->flags);
4210 wl1271_debug(DEBUG_AP, "stopped AP");
4211 }
4212 }
4213 }
4214
4215 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4216 if (ret < 0)
4217 goto out;
4218
4219 /* Handle HT information change */
4220 if ((changed & BSS_CHANGED_HT) &&
4221 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4222 ret = wl1271_acx_set_ht_information(wl, wlvif,
4223 bss_conf->ht_operation_mode);
4224 if (ret < 0) {
4225 wl1271_warning("Set ht information failed %d", ret);
4226 goto out;
4227 }
4228 }
4229
4230 out:
4231 return;
4232 }
4233
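/*
 * Descriptive note (summary of the code below): adopt the AP's BSS
 * parameters on a BSSID change - cache the beacon interval and the
 * basic/STA rate sets, stop any pending sched scan, refresh the
 * null-data and QoS null-data templates and mark the role as in use.
 */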
4234 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4235 struct ieee80211_bss_conf *bss_conf,
4236 u32 sta_rate_set)
4237 {
4238 u32 rates;
4239 int ret;
4240
4241 wl1271_debug(DEBUG_MAC80211,
4242 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4243 bss_conf->bssid, bss_conf->aid,
4244 bss_conf->beacon_int,
4245 bss_conf->basic_rates, sta_rate_set);
4246
4247 wlvif->beacon_int = bss_conf->beacon_int;
4248 rates = bss_conf->basic_rates;
4249 wlvif->basic_rate_set =
4250 wl1271_tx_enabled_rates_get(wl, rates,
4251 wlvif->band);
4252 wlvif->basic_rate =
4253 wl1271_tx_min_rate_get(wl,
4254 wlvif->basic_rate_set);
4255
4256 if (sta_rate_set)
4257 wlvif->rate_set =
4258 wl1271_tx_enabled_rates_get(wl,
4259 sta_rate_set,
4260 wlvif->band);
4261
4262 /* we only support sched_scan while not connected */
4263 if (wl->sched_vif == wlvif)
4264 wl->ops->sched_scan_stop(wl, wlvif);
4265
4266 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4267 if (ret < 0)
4268 return ret;
4269
4270 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4271 if (ret < 0)
4272 return ret;
4273
4274 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4275 if (ret < 0)
4276 return ret;
4277
4278 wlcore_set_ssid(wl, wlvif);
4279
4280 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4281
4282 return 0;
4283 }
4284
4285 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4286 {
4287 int ret;
4288
4289 /* revert to minimum rates for the current band */
4290 wl1271_set_band_rate(wl, wlvif);
4291 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4292
4293 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4294 if (ret < 0)
4295 return ret;
4296
4297 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4298 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4299 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4300 if (ret < 0)
4301 return ret;
4302 }
4303
4304 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4305 return 0;
4306 }
4307 /* STA/IBSS mode changes */
4308 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4309 struct ieee80211_vif *vif,
4310 struct ieee80211_bss_conf *bss_conf,
4311 u32 changed)
4312 {
4313 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4314 bool do_join = false;
4315 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4316 bool ibss_joined = false;
4317 u32 sta_rate_set = 0;
4318 int ret;
4319 struct ieee80211_sta *sta;
4320 bool sta_exists = false;
4321 struct ieee80211_sta_ht_cap sta_ht_cap;
4322
4323 if (is_ibss) {
4324 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4325 changed);
4326 if (ret < 0)
4327 goto out;
4328 }
4329
4330 if (changed & BSS_CHANGED_IBSS) {
4331 if (bss_conf->ibss_joined) {
4332 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4333 ibss_joined = true;
4334 } else {
4335 wlcore_unset_assoc(wl, wlvif);
4336 wl12xx_cmd_role_stop_sta(wl, wlvif);
4337 }
4338 }
4339
4340 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4341 do_join = true;
4342
4343 /* Need to update the SSID (for filtering etc) */
4344 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4345 do_join = true;
4346
4347 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4348 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4349 bss_conf->enable_beacon ? "enabled" : "disabled");
4350
4351 do_join = true;
4352 }
4353
4354 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4355 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4356
4357 if (changed & BSS_CHANGED_CQM) {
4358 bool enable = false;
4359 if (bss_conf->cqm_rssi_thold)
4360 enable = true;
4361 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4362 bss_conf->cqm_rssi_thold,
4363 bss_conf->cqm_rssi_hyst);
4364 if (ret < 0)
4365 goto out;
4366 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4367 }
4368
4369 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4370 BSS_CHANGED_ASSOC)) {
4371 rcu_read_lock();
4372 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4373 if (sta) {
4374 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4375
4376 /* save the supp_rates of the ap */
4377 sta_rate_set = sta->supp_rates[wlvif->band];
4378 if (sta->ht_cap.ht_supported)
4379 sta_rate_set |=
4380 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4381 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4382 sta_ht_cap = sta->ht_cap;
4383 sta_exists = true;
4384 }
4385
4386 rcu_read_unlock();
4387 }
4388
4389 if (changed & BSS_CHANGED_BSSID) {
4390 if (!is_zero_ether_addr(bss_conf->bssid)) {
4391 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4392 sta_rate_set);
4393 if (ret < 0)
4394 goto out;
4395
4396 /* Need to update the BSSID (for filtering etc) */
4397 do_join = true;
4398 } else {
4399 ret = wlcore_clear_bssid(wl, wlvif);
4400 if (ret < 0)
4401 goto out;
4402 }
4403 }
4404
4405 if (changed & BSS_CHANGED_IBSS) {
4406 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4407 bss_conf->ibss_joined);
4408
4409 if (bss_conf->ibss_joined) {
4410 u32 rates = bss_conf->basic_rates;
4411 wlvif->basic_rate_set =
4412 wl1271_tx_enabled_rates_get(wl, rates,
4413 wlvif->band);
4414 wlvif->basic_rate =
4415 wl1271_tx_min_rate_get(wl,
4416 wlvif->basic_rate_set);
4417
4418 /* by default, use 11b + OFDM rates */
4419 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4420 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4421 if (ret < 0)
4422 goto out;
4423 }
4424 }
4425
4426 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4427 /* enable beacon filtering */
4428 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4429 if (ret < 0)
4430 goto out;
4431 }
4432
4433 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4434 if (ret < 0)
4435 goto out;
4436
4437 if (do_join) {
4438 ret = wlcore_join(wl, wlvif);
4439 if (ret < 0) {
4440 wl1271_warning("cmd join failed %d", ret);
4441 goto out;
4442 }
4443 }
4444
4445 if (changed & BSS_CHANGED_ASSOC) {
4446 if (bss_conf->assoc) {
4447 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4448 sta_rate_set);
4449 if (ret < 0)
4450 goto out;
4451
4452 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4453 wl12xx_set_authorized(wl, wlvif);
4454 } else {
4455 wlcore_unset_assoc(wl, wlvif);
4456 }
4457 }
4458
4459 if (changed & BSS_CHANGED_PS) {
4460 if ((bss_conf->ps) &&
4461 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4462 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4463 int ps_mode;
4464 char *ps_mode_str;
4465
4466 if (wl->conf.conn.forced_ps) {
4467 ps_mode = STATION_POWER_SAVE_MODE;
4468 ps_mode_str = "forced";
4469 } else {
4470 ps_mode = STATION_AUTO_PS_MODE;
4471 ps_mode_str = "auto";
4472 }
4473
4474 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4475
4476 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4477 if (ret < 0)
4478 wl1271_warning("enter %s ps failed %d",
4479 ps_mode_str, ret);
4480 } else if (!bss_conf->ps &&
4481 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4482 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4483
4484 ret = wl1271_ps_set_mode(wl, wlvif,
4485 STATION_ACTIVE_MODE);
4486 if (ret < 0)
4487 wl1271_warning("exit auto ps failed %d", ret);
4488 }
4489 }
4490
4491 /* Handle new association with HT. Do this after join. */
4492 if (sta_exists) {
4493 bool enabled =
4494 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4495
4496 ret = wlcore_hw_set_peer_cap(wl,
4497 &sta_ht_cap,
4498 enabled,
4499 wlvif->rate_set,
4500 wlvif->sta.hlid);
4501 if (ret < 0) {
4502 wl1271_warning("Set ht cap failed %d", ret);
4503 goto out;
4504
4505 }
4506
4507 if (enabled) {
4508 ret = wl1271_acx_set_ht_information(wl, wlvif,
4509 bss_conf->ht_operation_mode);
4510 if (ret < 0) {
4511 wl1271_warning("Set ht information failed %d",
4512 ret);
4513 goto out;
4514 }
4515 }
4516 }
4517
4518 /* Handle arp filtering. Done after join. */
4519 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4520 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4521 __be32 addr = bss_conf->arp_addr_list[0];
4522 wlvif->sta.qos = bss_conf->qos;
4523 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4524
4525 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4526 wlvif->ip_addr = addr;
4527 /*
4528 * The template should have been configured only upon
4529 * association. However, it seems that the correct IP
4530 * isn't being set (when sending), so we have to
4531 * reconfigure the template on every IP change.
4532 */
4533 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4534 if (ret < 0) {
4535 wl1271_warning("build arp rsp failed: %d", ret);
4536 goto out;
4537 }
4538
4539 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4540 (ACX_ARP_FILTER_ARP_FILTERING |
4541 ACX_ARP_FILTER_AUTO_ARP),
4542 addr);
4543 } else {
4544 wlvif->ip_addr = 0;
4545 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4546 }
4547
4548 if (ret < 0)
4549 goto out;
4550 }
4551
4552 out:
4553 return;
4554 }
4555
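/*
 * Descriptive note (summary of the code below): top-level mac80211
 * bss_info_changed handler - cancel pending connection-loss work on
 * association changes, flush TX before an AP stops beaconing, wake the
 * chip, apply TX-power updates and then dispatch to the AP- or
 * STA-specific handler above.
 */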
4556 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4557 struct ieee80211_vif *vif,
4558 struct ieee80211_bss_conf *bss_conf,
4559 u32 changed)
4560 {
4561 struct wl1271 *wl = hw->priv;
4562 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4563 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4564 int ret;
4565
4566 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4567 wlvif->role_id, (int)changed);
4568
4569 /*
4570 * make sure to cancel pending disconnections if our association
4571 * state changed
4572 */
4573 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4574 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4575
4576 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4577 !bss_conf->enable_beacon)
4578 wl1271_tx_flush(wl);
4579
4580 mutex_lock(&wl->mutex);
4581
4582 if (unlikely(wl->state != WLCORE_STATE_ON))
4583 goto out;
4584
4585 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4586 goto out;
4587
4588 ret = wl1271_ps_elp_wakeup(wl);
4589 if (ret < 0)
4590 goto out;
4591
4592 if ((changed & BSS_CHANGED_TXPOWER) &&
4593 bss_conf->txpower != wlvif->power_level) {
4594
4595 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4596 if (ret < 0)
4597 goto out;
4598
4599 wlvif->power_level = bss_conf->txpower;
4600 }
4601
4602 if (is_ap)
4603 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4604 else
4605 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4606
4607 wl1271_ps_elp_sleep(wl);
4608
4609 out:
4610 mutex_unlock(&wl->mutex);
4611 }
4612
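/*
 * Descriptive note (summary of the code below): channel-context
 * callbacks. Add/remove only log here; change/assign/unassign track
 * band, channel and width per vif and start or stop CAC (radar
 * detection) on DFS channels as needed.
 */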
4613 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4614 struct ieee80211_chanctx_conf *ctx)
4615 {
4616 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4617 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4618 cfg80211_get_chandef_type(&ctx->def));
4619 return 0;
4620 }
4621
4622 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4623 struct ieee80211_chanctx_conf *ctx)
4624 {
4625 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4626 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4627 cfg80211_get_chandef_type(&ctx->def));
4628 }
4629
4630 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4631 struct ieee80211_chanctx_conf *ctx,
4632 u32 changed)
4633 {
4634 struct wl1271 *wl = hw->priv;
4635 struct wl12xx_vif *wlvif;
4636 int ret;
4637 int channel = ieee80211_frequency_to_channel(
4638 ctx->def.chan->center_freq);
4639
4640 wl1271_debug(DEBUG_MAC80211,
4641 "mac80211 change chanctx %d (type %d) changed 0x%x",
4642 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4643
4644 mutex_lock(&wl->mutex);
4645
4646 ret = wl1271_ps_elp_wakeup(wl);
4647 if (ret < 0)
4648 goto out;
4649
4650 wl12xx_for_each_wlvif(wl, wlvif) {
4651 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4652
4653 rcu_read_lock();
4654 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4655 rcu_read_unlock();
4656 continue;
4657 }
4658 rcu_read_unlock();
4659
4660 /* start radar if needed */
4661 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4662 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4663 ctx->radar_enabled && !wlvif->radar_enabled &&
4664 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4665 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4666 wlcore_hw_set_cac(wl, wlvif, true);
4667 wlvif->radar_enabled = true;
4668 }
4669 }
4670
4671 wl1271_ps_elp_sleep(wl);
4672 out:
4673 mutex_unlock(&wl->mutex);
4674 }
4675
4676 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4677 struct ieee80211_vif *vif,
4678 struct ieee80211_chanctx_conf *ctx)
4679 {
4680 struct wl1271 *wl = hw->priv;
4681 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4682 int channel = ieee80211_frequency_to_channel(
4683 ctx->def.chan->center_freq);
4684 int ret = -EINVAL;
4685
4686 wl1271_debug(DEBUG_MAC80211,
4687 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4688 wlvif->role_id, channel,
4689 cfg80211_get_chandef_type(&ctx->def),
4690 ctx->radar_enabled, ctx->def.chan->dfs_state);
4691
4692 mutex_lock(&wl->mutex);
4693
4694 if (unlikely(wl->state != WLCORE_STATE_ON))
4695 goto out;
4696
4697 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4698 goto out;
4699
4700 ret = wl1271_ps_elp_wakeup(wl);
4701 if (ret < 0)
4702 goto out;
4703
4704 wlvif->band = ctx->def.chan->band;
4705 wlvif->channel = channel;
4706 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4707
4708 /* update default rates according to the band */
4709 wl1271_set_band_rate(wl, wlvif);
4710
4711 if (ctx->radar_enabled &&
4712 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4713 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4714 wlcore_hw_set_cac(wl, wlvif, true);
4715 wlvif->radar_enabled = true;
4716 }
4717
4718 wl1271_ps_elp_sleep(wl);
4719 out:
4720 mutex_unlock(&wl->mutex);
4721
4722 return 0;
4723 }
4724
4725 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4726 struct ieee80211_vif *vif,
4727 struct ieee80211_chanctx_conf *ctx)
4728 {
4729 struct wl1271 *wl = hw->priv;
4730 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4731 int ret;
4732
4733 wl1271_debug(DEBUG_MAC80211,
4734 "mac80211 unassign chanctx (role %d) %d (type %d)",
4735 wlvif->role_id,
4736 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4737 cfg80211_get_chandef_type(&ctx->def));
4738
4739 wl1271_tx_flush(wl);
4740
4741 mutex_lock(&wl->mutex);
4742
4743 if (unlikely(wl->state != WLCORE_STATE_ON))
4744 goto out;
4745
4746 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4747 goto out;
4748
4749 ret = wl1271_ps_elp_wakeup(wl);
4750 if (ret < 0)
4751 goto out;
4752
4753 if (wlvif->radar_enabled) {
4754 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4755 wlcore_hw_set_cac(wl, wlvif, false);
4756 wlvif->radar_enabled = false;
4757 }
4758
4759 wl1271_ps_elp_sleep(wl);
4760 out:
4761 mutex_unlock(&wl->mutex);
4762 }
4763
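/*
 * Descriptive note (summary of the code below): switch an AP vif to a
 * new channel context - stop CAC on the old channel, record the new
 * band/channel/width and restart radar detection if the new context
 * requires it. Beaconing is expected to be disabled while the switch
 * is in progress (see the WARN_ON below).
 */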
4764 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4765 struct wl12xx_vif *wlvif,
4766 struct ieee80211_chanctx_conf *new_ctx)
4767 {
4768 int channel = ieee80211_frequency_to_channel(
4769 new_ctx->def.chan->center_freq);
4770
4771 wl1271_debug(DEBUG_MAC80211,
4772 "switch vif (role %d) %d -> %d chan_type: %d",
4773 wlvif->role_id, wlvif->channel, channel,
4774 cfg80211_get_chandef_type(&new_ctx->def));
4775
4776 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4777 return 0;
4778
4779 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4780
4781 if (wlvif->radar_enabled) {
4782 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4783 wlcore_hw_set_cac(wl, wlvif, false);
4784 wlvif->radar_enabled = false;
4785 }
4786
4787 wlvif->band = new_ctx->def.chan->band;
4788 wlvif->channel = channel;
4789 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4790
4791 /* start radar if needed */
4792 if (new_ctx->radar_enabled) {
4793 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4794 wlcore_hw_set_cac(wl, wlvif, true);
4795 wlvif->radar_enabled = true;
4796 }
4797
4798 return 0;
4799 }
4800
4801 static int
4802 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4803 struct ieee80211_vif_chanctx_switch *vifs,
4804 int n_vifs,
4805 enum ieee80211_chanctx_switch_mode mode)
4806 {
4807 struct wl1271 *wl = hw->priv;
4808 int i, ret;
4809
4810 wl1271_debug(DEBUG_MAC80211,
4811 "mac80211 switch chanctx n_vifs %d mode %d",
4812 n_vifs, mode);
4813
4814 mutex_lock(&wl->mutex);
4815
4816 ret = wl1271_ps_elp_wakeup(wl);
4817 if (ret < 0)
4818 goto out;
4819
4820 for (i = 0; i < n_vifs; i++) {
4821 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4822
4823 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4824 if (ret)
4825 goto out_sleep;
4826 }
4827 out_sleep:
4828 wl1271_ps_elp_sleep(wl);
4829 out:
4830 mutex_unlock(&wl->mutex);
4831
4832 return 0;
4833 }
4834
4835 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4836 struct ieee80211_vif *vif, u16 queue,
4837 const struct ieee80211_tx_queue_params *params)
4838 {
4839 struct wl1271 *wl = hw->priv;
4840 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4841 u8 ps_scheme;
4842 int ret = 0;
4843
4844 if (wlcore_is_p2p_mgmt(wlvif))
4845 return 0;
4846
4847 mutex_lock(&wl->mutex);
4848
4849 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4850
4851 if (params->uapsd)
4852 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4853 else
4854 ps_scheme = CONF_PS_SCHEME_LEGACY;
4855
4856 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4857 goto out;
4858
4859 ret = wl1271_ps_elp_wakeup(wl);
4860 if (ret < 0)
4861 goto out;
4862
4863 /*
4864 * the txop is configured by mac80211 in units of 32us,
4865 * but the firmware expects microseconds
4866 */
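/* illustrative conversion: txop_us = txop * 32, hence the "txop << 5" below */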
4867 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4868 params->cw_min, params->cw_max,
4869 params->aifs, params->txop << 5);
4870 if (ret < 0)
4871 goto out_sleep;
4872
4873 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4874 CONF_CHANNEL_TYPE_EDCF,
4875 wl1271_tx_get_queue(queue),
4876 ps_scheme, CONF_ACK_POLICY_LEGACY,
4877 0, 0);
4878
4879 out_sleep:
4880 wl1271_ps_elp_sleep(wl);
4881
4882 out:
4883 mutex_unlock(&wl->mutex);
4884
4885 return ret;
4886 }
4887
4888 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4889 struct ieee80211_vif *vif)
4890 {
4891
4892 struct wl1271 *wl = hw->priv;
4893 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4894 u64 mactime = ULLONG_MAX;
4895 int ret;
4896
4897 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4898
4899 mutex_lock(&wl->mutex);
4900
4901 if (unlikely(wl->state != WLCORE_STATE_ON))
4902 goto out;
4903
4904 ret = wl1271_ps_elp_wakeup(wl);
4905 if (ret < 0)
4906 goto out;
4907
4908 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4909 if (ret < 0)
4910 goto out_sleep;
4911
4912 out_sleep:
4913 wl1271_ps_elp_sleep(wl);
4914
4915 out:
4916 mutex_unlock(&wl->mutex);
4917 return mactime;
4918 }
4919
4920 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4921 struct survey_info *survey)
4922 {
4923 struct ieee80211_conf *conf = &hw->conf;
4924
4925 if (idx != 0)
4926 return -ENOENT;
4927
4928 survey->channel = conf->chandef.chan;
4929 survey->filled = 0;
4930 return 0;
4931 }
4932
4933 static int wl1271_allocate_sta(struct wl1271 *wl,
4934 struct wl12xx_vif *wlvif,
4935 struct ieee80211_sta *sta)
4936 {
4937 struct wl1271_station *wl_sta;
4938 int ret;
4939
4940
4941 if (wl->active_sta_count >= wl->max_ap_stations) {
4942 wl1271_warning("could not allocate HLID - too many stations");
4943 return -EBUSY;
4944 }
4945
4946 wl_sta = (struct wl1271_station *)sta->drv_priv;
4947 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4948 if (ret < 0) {
4949 wl1271_warning("could not allocate HLID - too many links");
4950 return -EBUSY;
4951 }
4952
4953 /* use the previous security seq, if this is a recovery/resume */
4954 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4955
4956 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4957 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4958 wl->active_sta_count++;
4959 return 0;
4960 }
4961
4962 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4963 {
4964 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4965 return;
4966
4967 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4968 __clear_bit(hlid, &wl->ap_ps_map);
4969 __clear_bit(hlid, &wl->ap_fw_ps_map);
4970
4971 /*
4972 * save the last used PN in the private part of ieee80211_sta,
4973 * in case of recovery/suspend
4974 */
4975 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4976
4977 wl12xx_free_link(wl, wlvif, &hlid);
4978 wl->active_sta_count--;
4979
4980 /*
4981 * rearm the tx watchdog when the last STA is freed - give the FW a
4982 * chance to return STA-buffered packets before complaining.
4983 */
4984 if (wl->active_sta_count == 0)
4985 wl12xx_rearm_tx_watchdog_locked(wl);
4986 }
4987
4988 static int wl12xx_sta_add(struct wl1271 *wl,
4989 struct wl12xx_vif *wlvif,
4990 struct ieee80211_sta *sta)
4991 {
4992 struct wl1271_station *wl_sta;
4993 int ret = 0;
4994 u8 hlid;
4995
4996 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4997
4998 ret = wl1271_allocate_sta(wl, wlvif, sta);
4999 if (ret < 0)
5000 return ret;
5001
5002 wl_sta = (struct wl1271_station *)sta->drv_priv;
5003 hlid = wl_sta->hlid;
5004
5005 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5006 if (ret < 0)
5007 wl1271_free_sta(wl, wlvif, hlid);
5008
5009 return ret;
5010 }
5011
5012 static int wl12xx_sta_remove(struct wl1271 *wl,
5013 struct wl12xx_vif *wlvif,
5014 struct ieee80211_sta *sta)
5015 {
5016 struct wl1271_station *wl_sta;
5017 int ret = 0, id;
5018
5019 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5020
5021 wl_sta = (struct wl1271_station *)sta->drv_priv;
5022 id = wl_sta->hlid;
5023 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5024 return -EINVAL;
5025
5026 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5027 if (ret < 0)
5028 return ret;
5029
5030 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5031 return ret;
5032 }
5033
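/*
 * Descriptive note (summary of the code below): start a ROC
 * (remain-on-channel) on the vif's own channel, but only when no other
 * role currently holds a ROC and the role id is valid.
 */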
5034 static void wlcore_roc_if_possible(struct wl1271 *wl,
5035 struct wl12xx_vif *wlvif)
5036 {
5037 if (find_first_bit(wl->roc_map,
5038 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5039 return;
5040
5041 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5042 return;
5043
5044 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5045 }
5046
5047 /*
5048 * when wl_sta is NULL, we treat this call as if coming from a
5049 * pending auth reply.
5050 * wl->mutex must be taken and the FW must be awake when the call
5051 * takes place.
5052 */
5053 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5054 struct wl1271_station *wl_sta, bool in_conn)
5055 {
5056 if (in_conn) {
5057 if (WARN_ON(wl_sta && wl_sta->in_connection))
5058 return;
5059
5060 if (!wlvif->ap_pending_auth_reply &&
5061 !wlvif->inconn_count)
5062 wlcore_roc_if_possible(wl, wlvif);
5063
5064 if (wl_sta) {
5065 wl_sta->in_connection = true;
5066 wlvif->inconn_count++;
5067 } else {
5068 wlvif->ap_pending_auth_reply = true;
5069 }
5070 } else {
5071 if (wl_sta && !wl_sta->in_connection)
5072 return;
5073
5074 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5075 return;
5076
5077 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5078 return;
5079
5080 if (wl_sta) {
5081 wl_sta->in_connection = false;
5082 wlvif->inconn_count--;
5083 } else {
5084 wlvif->ap_pending_auth_reply = false;
5085 }
5086
5087 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5088 test_bit(wlvif->role_id, wl->roc_map))
5089 wl12xx_croc(wl, wlvif->role_id);
5090 }
5091 }
5092
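/*
 * Descriptive note (summary of the code below): translate mac80211
 * station state transitions into firmware commands. In AP mode,
 * NOTEXIST<->NONE adds/removes the peer and AUTHORIZED re-adds it with
 * the negotiated rates; in STA mode, AUTHORIZED sends the peer-state
 * command, while assoc/disassoc transitions save or restore the
 * security sequence counters and clean up any active ROC.
 */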
5093 static int wl12xx_update_sta_state(struct wl1271 *wl,
5094 struct wl12xx_vif *wlvif,
5095 struct ieee80211_sta *sta,
5096 enum ieee80211_sta_state old_state,
5097 enum ieee80211_sta_state new_state)
5098 {
5099 struct wl1271_station *wl_sta;
5100 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5101 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5102 int ret;
5103
5104 wl_sta = (struct wl1271_station *)sta->drv_priv;
5105
5106 /* Add station (AP mode) */
5107 if (is_ap &&
5108 old_state == IEEE80211_STA_NOTEXIST &&
5109 new_state == IEEE80211_STA_NONE) {
5110 ret = wl12xx_sta_add(wl, wlvif, sta);
5111 if (ret)
5112 return ret;
5113
5114 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5115 }
5116
5117 /* Remove station (AP mode) */
5118 if (is_ap &&
5119 old_state == IEEE80211_STA_NONE &&
5120 new_state == IEEE80211_STA_NOTEXIST) {
5121 /* must not fail */
5122 wl12xx_sta_remove(wl, wlvif, sta);
5123
5124 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5125 }
5126
5127 /* Authorize station (AP mode) */
5128 if (is_ap &&
5129 new_state == IEEE80211_STA_AUTHORIZED) {
5130 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5131 if (ret < 0)
5132 return ret;
5133
5134 /* reconfigure rates */
5135 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5136 if (ret < 0)
5137 return ret;
5138
5139 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5140 wl_sta->hlid);
5141 if (ret)
5142 return ret;
5143
5144 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5145 }
5146
5147 /* Authorize station */
5148 if (is_sta &&
5149 new_state == IEEE80211_STA_AUTHORIZED) {
5150 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5151 ret = wl12xx_set_authorized(wl, wlvif);
5152 if (ret)
5153 return ret;
5154 }
5155
5156 if (is_sta &&
5157 old_state == IEEE80211_STA_AUTHORIZED &&
5158 new_state == IEEE80211_STA_ASSOC) {
5159 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5160 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5161 }
5162
5163 /* save seq number on disassoc (suspend) */
5164 if (is_sta &&
5165 old_state == IEEE80211_STA_ASSOC &&
5166 new_state == IEEE80211_STA_AUTH) {
5167 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5168 wlvif->total_freed_pkts = 0;
5169 }
5170
5171 /* restore seq number on assoc (resume) */
5172 if (is_sta &&
5173 old_state == IEEE80211_STA_AUTH &&
5174 new_state == IEEE80211_STA_ASSOC) {
5175 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5176 }
5177
5178 /* clear ROCs on failure or authorization */
5179 if (is_sta &&
5180 (new_state == IEEE80211_STA_AUTHORIZED ||
5181 new_state == IEEE80211_STA_NOTEXIST)) {
5182 if (test_bit(wlvif->role_id, wl->roc_map))
5183 wl12xx_croc(wl, wlvif->role_id);
5184 }
5185
5186 if (is_sta &&
5187 old_state == IEEE80211_STA_NOTEXIST &&
5188 new_state == IEEE80211_STA_NONE) {
5189 if (find_first_bit(wl->roc_map,
5190 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5191 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5192 wl12xx_roc(wl, wlvif, wlvif->role_id,
5193 wlvif->band, wlvif->channel);
5194 }
5195 }
5196 return 0;
5197 }
5198
5199 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5200 struct ieee80211_vif *vif,
5201 struct ieee80211_sta *sta,
5202 enum ieee80211_sta_state old_state,
5203 enum ieee80211_sta_state new_state)
5204 {
5205 struct wl1271 *wl = hw->priv;
5206 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5207 int ret;
5208
5209 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5210 sta->aid, old_state, new_state);
5211
5212 mutex_lock(&wl->mutex);
5213
5214 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5215 ret = -EBUSY;
5216 goto out;
5217 }
5218
5219 ret = wl1271_ps_elp_wakeup(wl);
5220 if (ret < 0)
5221 goto out;
5222
5223 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5224
5225 wl1271_ps_elp_sleep(wl);
5226 out:
5227 mutex_unlock(&wl->mutex);
5228 if (new_state < old_state)
5229 return 0;
5230 return ret;
5231 }
5232
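/*
 * Descriptive note (summary of the code below): RX block-ack sessions
 * are tracked per link in ba_bitmap and capped by
 * ba_rx_session_count_max; TX aggregation setup is rejected here
 * because the firmware manages TX BA sessions on its own.
 */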
5233 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5234 struct ieee80211_vif *vif,
5235 struct ieee80211_ampdu_params *params)
5236 {
5237 struct wl1271 *wl = hw->priv;
5238 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5239 int ret;
5240 u8 hlid, *ba_bitmap;
5241 struct ieee80211_sta *sta = params->sta;
5242 enum ieee80211_ampdu_mlme_action action = params->action;
5243 u16 tid = params->tid;
5244 u16 *ssn = &params->ssn;
5245
5246 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5247 tid);
5248
5249 /* sanity check - the fields in FW are only 8 bits wide */
5250 if (WARN_ON(tid > 0xFF))
5251 return -ENOTSUPP;
5252
5253 mutex_lock(&wl->mutex);
5254
5255 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5256 ret = -EAGAIN;
5257 goto out;
5258 }
5259
5260 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5261 hlid = wlvif->sta.hlid;
5262 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5263 struct wl1271_station *wl_sta;
5264
5265 wl_sta = (struct wl1271_station *)sta->drv_priv;
5266 hlid = wl_sta->hlid;
5267 } else {
5268 ret = -EINVAL;
5269 goto out;
5270 }
5271
5272 ba_bitmap = &wl->links[hlid].ba_bitmap;
5273
5274 ret = wl1271_ps_elp_wakeup(wl);
5275 if (ret < 0)
5276 goto out;
5277
5278 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5279 tid, action);
5280
5281 switch (action) {
5282 case IEEE80211_AMPDU_RX_START:
5283 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5284 ret = -ENOTSUPP;
5285 break;
5286 }
5287
5288 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5289 ret = -EBUSY;
5290 wl1271_error("exceeded max RX BA sessions");
5291 break;
5292 }
5293
5294 if (*ba_bitmap & BIT(tid)) {
5295 ret = -EINVAL;
5296 wl1271_error("cannot enable RX BA session on active "
5297 "tid: %d", tid);
5298 break;
5299 }
5300
5301 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5302 hlid,
5303 params->buf_size);
5304
5305 if (!ret) {
5306 *ba_bitmap |= BIT(tid);
5307 wl->ba_rx_session_count++;
5308 }
5309 break;
5310
5311 case IEEE80211_AMPDU_RX_STOP:
5312 if (!(*ba_bitmap & BIT(tid))) {
5313 /*
5314 * this happens on reconfig - so only output a debug
5315 * message for now, and don't fail the function.
5316 */
5317 wl1271_debug(DEBUG_MAC80211,
5318 "no active RX BA session on tid: %d",
5319 tid);
5320 ret = 0;
5321 break;
5322 }
5323
5324 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5325 hlid, 0);
5326 if (!ret) {
5327 *ba_bitmap &= ~BIT(tid);
5328 wl->ba_rx_session_count--;
5329 }
5330 break;
5331
5332 /*
5333 * The BA initiator session is managed by the FW independently.
5334 * Fall through here on purpose for all TX AMPDU commands.
5335 */
5336 case IEEE80211_AMPDU_TX_START:
5337 case IEEE80211_AMPDU_TX_STOP_CONT:
5338 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5339 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5340 case IEEE80211_AMPDU_TX_OPERATIONAL:
5341 ret = -EINVAL;
5342 break;
5343
5344 default:
5345 wl1271_error("Incorrect ampdu action id=%x\n", action);
5346 ret = -EINVAL;
5347 }
5348
5349 wl1271_ps_elp_sleep(wl);
5350
5351 out:
5352 mutex_unlock(&wl->mutex);
5353
5354 return ret;
5355 }
5356
5357 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5358 struct ieee80211_vif *vif,
5359 const struct cfg80211_bitrate_mask *mask)
5360 {
5361 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5362 struct wl1271 *wl = hw->priv;
5363 int i, ret = 0;
5364
5365 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5366 mask->control[NL80211_BAND_2GHZ].legacy,
5367 mask->control[NL80211_BAND_5GHZ].legacy);
5368
5369 mutex_lock(&wl->mutex);
5370
5371 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5372 wlvif->bitrate_masks[i] =
5373 wl1271_tx_enabled_rates_get(wl,
5374 mask->control[i].legacy,
5375 i);
5376
5377 if (unlikely(wl->state != WLCORE_STATE_ON))
5378 goto out;
5379
5380 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5381 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5382
5383 ret = wl1271_ps_elp_wakeup(wl);
5384 if (ret < 0)
5385 goto out;
5386
5387 wl1271_set_band_rate(wl, wlvif);
5388 wlvif->basic_rate =
5389 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5390 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5391
5392 wl1271_ps_elp_sleep(wl);
5393 }
5394 out:
5395 mutex_unlock(&wl->mutex);
5396
5397 return ret;
5398 }
5399
5400 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5401 struct ieee80211_vif *vif,
5402 struct ieee80211_channel_switch *ch_switch)
5403 {
5404 struct wl1271 *wl = hw->priv;
5405 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5406 int ret;
5407
5408 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5409
5410 wl1271_tx_flush(wl);
5411
5412 mutex_lock(&wl->mutex);
5413
5414 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5415 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5416 ieee80211_chswitch_done(vif, false);
5417 goto out;
5418 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5419 goto out;
5420 }
5421
5422 ret = wl1271_ps_elp_wakeup(wl);
5423 if (ret < 0)
5424 goto out;
5425
5426 /* TODO: change mac80211 to pass vif as param */
5427
5428 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5429 unsigned long delay_usec;
5430
5431 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5432 if (ret)
5433 goto out_sleep;
5434
5435 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5436
5437 /* indicate failure 5 seconds after channel switch time */
5438 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5439 ch_switch->count;
5440 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5441 usecs_to_jiffies(delay_usec) +
5442 msecs_to_jiffies(5000));
5443 }
5444
5445 out_sleep:
5446 wl1271_ps_elp_sleep(wl);
5447
5448 out:
5449 mutex_unlock(&wl->mutex);
5450 }
5451
5452 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5453 struct wl12xx_vif *wlvif,
5454 u8 eid)
5455 {
5456 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5457 struct sk_buff *beacon =
5458 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5459
5460 if (!beacon)
5461 return NULL;
5462
5463 return cfg80211_find_ie(eid,
5464 beacon->data + ieoffset,
5465 beacon->len - ieoffset);
5466 }
5467
5468 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5469 u8 *csa_count)
5470 {
5471 const u8 *ie;
5472 const struct ieee80211_channel_sw_ie *ie_csa;
5473
5474 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5475 if (!ie)
5476 return -EINVAL;
5477
5478 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5479 *csa_count = ie_csa->count;
5480
5481 return 0;
5482 }
5483
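/*
 * Descriptive note (summary of the code below): AP-side CSA - read the
 * current countdown from the Channel Switch Announcement IE in our own
 * beacon and hand the switch request to the chip-specific
 * channel_switch op.
 */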
5484 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5485 struct ieee80211_vif *vif,
5486 struct cfg80211_chan_def *chandef)
5487 {
5488 struct wl1271 *wl = hw->priv;
5489 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5490 struct ieee80211_channel_switch ch_switch = {
5491 .block_tx = true,
5492 .chandef = *chandef,
5493 };
5494 int ret;
5495
5496 wl1271_debug(DEBUG_MAC80211,
5497 "mac80211 channel switch beacon (role %d)",
5498 wlvif->role_id);
5499
5500 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5501 if (ret < 0) {
5502 wl1271_error("error getting beacon (for CSA counter)");
5503 return;
5504 }
5505
5506 mutex_lock(&wl->mutex);
5507
5508 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5509 ret = -EBUSY;
5510 goto out;
5511 }
5512
5513 ret = wl1271_ps_elp_wakeup(wl);
5514 if (ret < 0)
5515 goto out;
5516
5517 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5518 if (ret)
5519 goto out_sleep;
5520
5521 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5522
5523 out_sleep:
5524 wl1271_ps_elp_sleep(wl);
5525 out:
5526 mutex_unlock(&wl->mutex);
5527 }
5528
5529 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5530 u32 queues, bool drop)
5531 {
5532 struct wl1271 *wl = hw->priv;
5533
5534 wl1271_tx_flush(wl);
5535 }
5536
5537 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5538 struct ieee80211_vif *vif,
5539 struct ieee80211_channel *chan,
5540 int duration,
5541 enum ieee80211_roc_type type)
5542 {
5543 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5544 struct wl1271 *wl = hw->priv;
5545 int channel, active_roc, ret = 0;
5546
5547 channel = ieee80211_frequency_to_channel(chan->center_freq);
5548
5549 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5550 channel, wlvif->role_id);
5551
5552 mutex_lock(&wl->mutex);
5553
5554 if (unlikely(wl->state != WLCORE_STATE_ON))
5555 goto out;
5556
5557 /* return EBUSY if we can't ROC right now */
5558 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5559 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5560 wl1271_warning("active roc on role %d", active_roc);
5561 ret = -EBUSY;
5562 goto out;
5563 }
5564
5565 ret = wl1271_ps_elp_wakeup(wl);
5566 if (ret < 0)
5567 goto out;
5568
5569 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5570 if (ret < 0)
5571 goto out_sleep;
5572
5573 wl->roc_vif = vif;
5574 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5575 msecs_to_jiffies(duration));
5576 out_sleep:
5577 wl1271_ps_elp_sleep(wl);
5578 out:
5579 mutex_unlock(&wl->mutex);
5580 return ret;
5581 }
5582
5583 static int __wlcore_roc_completed(struct wl1271 *wl)
5584 {
5585 struct wl12xx_vif *wlvif;
5586 int ret;
5587
5588 /* already completed */
5589 if (unlikely(!wl->roc_vif))
5590 return 0;
5591
5592 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5593
5594 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5595 return -EBUSY;
5596
5597 ret = wl12xx_stop_dev(wl, wlvif);
5598 if (ret < 0)
5599 return ret;
5600
5601 wl->roc_vif = NULL;
5602
5603 return 0;
5604 }
5605
5606 static int wlcore_roc_completed(struct wl1271 *wl)
5607 {
5608 int ret;
5609
5610 wl1271_debug(DEBUG_MAC80211, "roc complete");
5611
5612 mutex_lock(&wl->mutex);
5613
5614 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5615 ret = -EBUSY;
5616 goto out;
5617 }
5618
5619 ret = wl1271_ps_elp_wakeup(wl);
5620 if (ret < 0)
5621 goto out;
5622
5623 ret = __wlcore_roc_completed(wl);
5624
5625 wl1271_ps_elp_sleep(wl);
5626 out:
5627 mutex_unlock(&wl->mutex);
5628
5629 return ret;
5630 }
5631
5632 static void wlcore_roc_complete_work(struct work_struct *work)
5633 {
5634 struct delayed_work *dwork;
5635 struct wl1271 *wl;
5636 int ret;
5637
5638 dwork = to_delayed_work(work);
5639 wl = container_of(dwork, struct wl1271, roc_complete_work);
5640
5641 ret = wlcore_roc_completed(wl);
5642 if (!ret)
5643 ieee80211_remain_on_channel_expired(wl->hw);
5644 }
5645
5646 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5647 {
5648 struct wl1271 *wl = hw->priv;
5649
5650 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5651
5652 /* TODO: per-vif */
5653 wl1271_tx_flush(wl);
5654
5655 /*
5656 * we can't just flush_work here, because it might deadlock
5657 * (as we might get called from the same workqueue)
5658 */
5659 cancel_delayed_work_sync(&wl->roc_complete_work);
5660 wlcore_roc_completed(wl);
5661
5662 return 0;
5663 }
5664
5665 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5666 struct ieee80211_vif *vif,
5667 struct ieee80211_sta *sta,
5668 u32 changed)
5669 {
5670 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5671
5672 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5673
5674 if (!(changed & IEEE80211_RC_BW_CHANGED))
5675 return;
5676
5677 /* this callback is atomic, so schedule a new work */
5678 wlvif->rc_update_bw = sta->bandwidth;
5679 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5680 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5681 }
5682
5683 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5684 struct ieee80211_vif *vif,
5685 struct ieee80211_sta *sta,
5686 struct station_info *sinfo)
5687 {
5688 struct wl1271 *wl = hw->priv;
5689 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5690 s8 rssi_dbm;
5691 int ret;
5692
5693 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5694
5695 mutex_lock(&wl->mutex);
5696
5697 if (unlikely(wl->state != WLCORE_STATE_ON))
5698 goto out;
5699
5700 ret = wl1271_ps_elp_wakeup(wl);
5701 if (ret < 0)
5702 goto out_sleep;
5703
5704 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5705 if (ret < 0)
5706 goto out_sleep;
5707
5708 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5709 sinfo->signal = rssi_dbm;
5710
5711 out_sleep:
5712 wl1271_ps_elp_sleep(wl);
5713
5714 out:
5715 mutex_unlock(&wl->mutex);
5716 }
5717
5718 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5719 struct ieee80211_sta *sta)
5720 {
5721 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5722 struct wl1271 *wl = hw->priv;
5723 u8 hlid = wl_sta->hlid;
5724
5725 /* return in units of Kbps */
5726 return (wl->links[hlid].fw_rate_mbps * 1000);
5727 }
5728
5729 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5730 {
5731 struct wl1271 *wl = hw->priv;
5732 bool ret = false;
5733
5734 mutex_lock(&wl->mutex);
5735
5736 if (unlikely(wl->state != WLCORE_STATE_ON))
5737 goto out;
5738
5739 /* packets are considered pending if in the TX queue or the FW */
5740 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5741 out:
5742 mutex_unlock(&wl->mutex);
5743
5744 return ret;
5745 }
5746
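/* bitrate values are in units of 100 kbps (e.g. .bitrate = 10 is 1 Mbps) */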
5747 /* can't be const, mac80211 writes to this */
5748 static struct ieee80211_rate wl1271_rates[] = {
5749 { .bitrate = 10,
5750 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5751 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5752 { .bitrate = 20,
5753 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5754 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5755 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5756 { .bitrate = 55,
5757 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5758 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5759 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5760 { .bitrate = 110,
5761 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5762 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5763 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5764 { .bitrate = 60,
5765 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5766 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5767 { .bitrate = 90,
5768 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5769 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5770 { .bitrate = 120,
5771 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5772 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5773 { .bitrate = 180,
5774 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5775 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5776 { .bitrate = 240,
5777 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5778 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5779 { .bitrate = 360,
5780 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5781 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5782 { .bitrate = 480,
5783 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5784 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5785 { .bitrate = 540,
5786 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5787 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5788 };
5789
5790 /* can't be const, mac80211 writes to this */
5791 static struct ieee80211_channel wl1271_channels[] = {
5792 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5793 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5794 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5795 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5796 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5797 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5798 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5799 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5800 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5801 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5802 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5803 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5804 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5805 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5806 };
5807
5808 /* can't be const, mac80211 writes to this */
5809 static struct ieee80211_supported_band wl1271_band_2ghz = {
5810 .channels = wl1271_channels,
5811 .n_channels = ARRAY_SIZE(wl1271_channels),
5812 .bitrates = wl1271_rates,
5813 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5814 };
5815
5816 /* 5 GHz data rates for WL1273 */
5817 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5818 { .bitrate = 60,
5819 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5820 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5821 { .bitrate = 90,
5822 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5823 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5824 { .bitrate = 120,
5825 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5826 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5827 { .bitrate = 180,
5828 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5829 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5830 { .bitrate = 240,
5831 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5832 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5833 { .bitrate = 360,
5834 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5835 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5836 { .bitrate = 480,
5837 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5838 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5839 { .bitrate = 540,
5840 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5841 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5842 };
5843
5844 /* 5 GHz band channels for WL1273 */
5845 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5846 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5847 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5848 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5849 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5850 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5851 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5852 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5853 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5854 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5855 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5856 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5857 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5858 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5859 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5860 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5861 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5862 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5863 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5864 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5865 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5866 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5867 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5868 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5869 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5870 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5871 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5872 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5873 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5874 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5875 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5876 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5877 };
5878
5879 static struct ieee80211_supported_band wl1271_band_5ghz = {
5880 .channels = wl1271_channels_5ghz,
5881 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5882 .bitrates = wl1271_rates_5ghz,
5883 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5884 };
5885
5886 static const struct ieee80211_ops wl1271_ops = {
5887 .start = wl1271_op_start,
5888 .stop = wlcore_op_stop,
5889 .add_interface = wl1271_op_add_interface,
5890 .remove_interface = wl1271_op_remove_interface,
5891 .change_interface = wl12xx_op_change_interface,
5892 #ifdef CONFIG_PM
5893 .suspend = wl1271_op_suspend,
5894 .resume = wl1271_op_resume,
5895 #endif
5896 .config = wl1271_op_config,
5897 .prepare_multicast = wl1271_op_prepare_multicast,
5898 .configure_filter = wl1271_op_configure_filter,
5899 .tx = wl1271_op_tx,
5900 .set_key = wlcore_op_set_key,
5901 .hw_scan = wl1271_op_hw_scan,
5902 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5903 .sched_scan_start = wl1271_op_sched_scan_start,
5904 .sched_scan_stop = wl1271_op_sched_scan_stop,
5905 .bss_info_changed = wl1271_op_bss_info_changed,
5906 .set_frag_threshold = wl1271_op_set_frag_threshold,
5907 .set_rts_threshold = wl1271_op_set_rts_threshold,
5908 .conf_tx = wl1271_op_conf_tx,
5909 .get_tsf = wl1271_op_get_tsf,
5910 .get_survey = wl1271_op_get_survey,
5911 .sta_state = wl12xx_op_sta_state,
5912 .ampdu_action = wl1271_op_ampdu_action,
5913 .tx_frames_pending = wl1271_tx_frames_pending,
5914 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5915 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5916 .channel_switch = wl12xx_op_channel_switch,
5917 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5918 .flush = wlcore_op_flush,
5919 .remain_on_channel = wlcore_op_remain_on_channel,
5920 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5921 .add_chanctx = wlcore_op_add_chanctx,
5922 .remove_chanctx = wlcore_op_remove_chanctx,
5923 .change_chanctx = wlcore_op_change_chanctx,
5924 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5925 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5926 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5927 .sta_rc_update = wlcore_op_sta_rc_update,
5928 .sta_statistics = wlcore_op_sta_statistics,
5929 .get_expected_throughput = wlcore_op_get_expected_throughput,
5930 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5931 };
5932
5933
5934 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5935 {
5936 u8 idx;
5937
5938 BUG_ON(band >= 2);
5939
5940 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5941 wl1271_error("Illegal RX rate from HW: %d", rate);
5942 return 0;
5943 }
5944
5945 idx = wl->band_rate_to_idx[band][rate];
5946 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5947 wl1271_error("Unsupported RX rate from HW: %d", rate);
5948 return 0;
5949 }
5950
5951 return idx;
5952 }
5953
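/*
 * Descriptive note (summary of the code below): derive up to
 * WLCORE_NUM_MAC_ADDRESSES consecutive MAC addresses from the 24-bit
 * OUI and NIC parts; if the chip exposes fewer addresses, the last
 * slot reuses the first address with the locally-administered bit set.
 */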
5954 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5955 {
5956 int i;
5957
5958 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5959 oui, nic);
5960
5961 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5962 wl1271_warning("NIC part of the MAC address wraps around!");
5963
5964 for (i = 0; i < wl->num_mac_addr; i++) {
5965 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5966 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5967 wl->addresses[i].addr[2] = (u8) oui;
5968 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5969 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5970 wl->addresses[i].addr[5] = (u8) nic;
5971 nic++;
5972 }
5973
5974 /* we may be at most one address short */
5975 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5976
5977 /*
5978 * turn on the LAA bit in the first address and use it as
5979 * the last address.
5980 */
5981 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5982 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5983 memcpy(&wl->addresses[idx], &wl->addresses[0],
5984 sizeof(wl->addresses[0]));
5985 /* LAA bit */
5986 wl->addresses[idx].addr[0] |= BIT(1);
5987 }
5988
5989 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5990 wl->hw->wiphy->addresses = wl->addresses;
5991 }
5992
5993 static int wl12xx_get_hw_info(struct wl1271 *wl)
5994 {
5995 int ret;
5996
5997 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5998 if (ret < 0)
5999 goto out;
6000
6001 wl->fuse_oui_addr = 0;
6002 wl->fuse_nic_addr = 0;
6003
6004 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6005 if (ret < 0)
6006 goto out;
6007
6008 if (wl->ops->get_mac)
6009 ret = wl->ops->get_mac(wl);
6010
6011 out:
6012 return ret;
6013 }
6014
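/*
 * Descriptive note (summary of the code below): MAC address selection
 * order - NVS bytes first, then the fuse-programmed BD_ADDR (+1 for
 * the first WLAN address), and finally the TI OUI with a random NIC
 * part if both sources are empty or the NVS still holds the
 * 0xdeadbe/0xef0000 placeholder.
 */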
6015 static int wl1271_register_hw(struct wl1271 *wl)
6016 {
6017 int ret;
6018 u32 oui_addr = 0, nic_addr = 0;
6019 struct platform_device *pdev = wl->pdev;
6020 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6021
6022 if (wl->mac80211_registered)
6023 return 0;
6024
6025 if (wl->nvs_len >= 12) {
6026 /* NOTE: The wl->nvs->nvs element must be first; to
6027 * simplify the casting, we assume it is at
6028 * the beginning of the wl->nvs structure.
6029 */
6030 u8 *nvs_ptr = (u8 *)wl->nvs;
6031
6032 oui_addr =
6033 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6034 nic_addr =
6035 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6036 }
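/*
 * Illustration of the extraction above (byte values chosen for the
 * example only): with nvs_ptr[3] = 0x56, nvs_ptr[4] = 0x34,
 * nvs_ptr[5] = 0x12, nvs_ptr[6] = 0x28, nvs_ptr[10] = 0x00 and
 * nvs_ptr[11] = 0x08, the result is oui_addr = 0x080028 and
 * nic_addr = 0x123456, i.e. the base MAC address 08:00:28:12:34:56.
 */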
6037
6038 /* if the MAC address is zeroed in the NVS, derive it from the fuse */
6039 if (oui_addr == 0 && nic_addr == 0) {
6040 oui_addr = wl->fuse_oui_addr;
6041 /* the fuse holds the BD_ADDR; the WLAN addresses are the next two */
6042 nic_addr = wl->fuse_nic_addr + 1;
6043 }
6044
6045 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6046 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
6047 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6048 wl1271_warning("This default nvs file can be removed from the file system\n");
6049 } else {
6050 wl1271_warning("Your device performance is not optimized.\n");
6051 wl1271_warning("Please use the calibrator tool to configure your device.\n");
6052 }
6053
6054 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6055 wl1271_warning("Fuse mac address is zero. using random mac\n");
6056 /* Use TI oui and a random nic */
6057 oui_addr = WLCORE_TI_OUI_ADDRESS;
6058 nic_addr = get_random_int();
6059 } else {
6060 oui_addr = wl->fuse_oui_addr;
6061 /* the fuse holds the BD_ADDR; the WLAN addresses are the next two */
6062 nic_addr = wl->fuse_nic_addr + 1;
6063 }
6064 }
6065
6066 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6067
6068 ret = ieee80211_register_hw(wl->hw);
6069 if (ret < 0) {
6070 wl1271_error("unable to register mac80211 hw: %d", ret);
6071 goto out;
6072 }
6073
6074 wl->mac80211_registered = true;
6075
6076 wl1271_debugfs_init(wl);
6077
6078 wl1271_notice("loaded");
6079
6080 out:
6081 return ret;
6082 }
6083
6084 static void wl1271_unregister_hw(struct wl1271 *wl)
6085 {
6086 if (wl->plt)
6087 wl1271_plt_stop(wl);
6088
6089 ieee80211_unregister_hw(wl->hw);
6090 wl->mac80211_registered = false;
6091
6092 }
6093
6094 static int wl1271_init_ieee80211(struct wl1271 *wl)
6095 {
6096 int i;
6097 static const u32 cipher_suites[] = {
6098 WLAN_CIPHER_SUITE_WEP40,
6099 WLAN_CIPHER_SUITE_WEP104,
6100 WLAN_CIPHER_SUITE_TKIP,
6101 WLAN_CIPHER_SUITE_CCMP,
6102 WL1271_CIPHER_SUITE_GEM,
6103 };
6104
6105 /* The tx descriptor buffer */
6106 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6107
6108 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6109 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6110
6111 /* unit us */
6112 /* FIXME: find a proper value */
6113 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6114
6115 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6116 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6117 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6118 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6119 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6120 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6121 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6122 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6123 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6124 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6125 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6126 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6127 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6128 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6129
6130 wl->hw->wiphy->cipher_suites = cipher_suites;
6131 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6132
6133 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6134 BIT(NL80211_IFTYPE_AP) |
6135 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6136 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6137 #ifdef CONFIG_MAC80211_MESH
6138 BIT(NL80211_IFTYPE_MESH_POINT) |
6139 #endif
6140 BIT(NL80211_IFTYPE_P2P_GO);
6141
6142 wl->hw->wiphy->max_scan_ssids = 1;
6143 wl->hw->wiphy->max_sched_scan_ssids = 16;
6144 wl->hw->wiphy->max_match_sets = 16;
6145 /*
6146 * The maximum length of the IEs in a scanning probe request
6147 * template is the maximum possible template length, minus the
6148 * IEEE 802.11 header of the template.
6149 */
6150 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6151 sizeof(struct ieee80211_header);
6152
6153 wl->hw->wiphy->max_sched_scan_reqs = 1;
6154 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6155 sizeof(struct ieee80211_header);
6156
6157 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6158
6159 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6160 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6161 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6162
6163 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6164
6165 /* make sure all our channels fit in the scanned_ch bitmask */
6166 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6167 ARRAY_SIZE(wl1271_channels_5ghz) >
6168 WL1271_MAX_CHANNELS);
6169 /*
6170 * clear channel flags from the previous usage
6171 * and restore max_power & max_antenna_gain values.
6172 */
6173 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6174 wl1271_band_2ghz.channels[i].flags = 0;
6175 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6176 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6177 }
6178
6179 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6180 wl1271_band_5ghz.channels[i].flags = 0;
6181 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6182 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6183 }
6184
6185 /*
6186 * We keep local copies of the band structs because we need to
6187 * modify them on a per-device basis.
6188 */
6189 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6190 sizeof(wl1271_band_2ghz));
6191 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6192 &wl->ht_cap[NL80211_BAND_2GHZ],
6193 sizeof(*wl->ht_cap));
6194 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6195 sizeof(wl1271_band_5ghz));
6196 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6197 &wl->ht_cap[NL80211_BAND_5GHZ],
6198 sizeof(*wl->ht_cap));
6199
6200 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6201 &wl->bands[NL80211_BAND_2GHZ];
6202 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6203 &wl->bands[NL80211_BAND_5GHZ];
6204
6205 /*
6206 * Allow 4 data queues per supported MAC address, plus one CAB
6207 * queue per MAC, plus one global off-channel Tx queue.
6208 */
6209 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6210
6211 /* the last queue is the offchannel queue */
6212 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6213 wl->hw->max_rates = 1;
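/*
 * Worked example of the queue arithmetic above, assuming
 * NUM_TX_QUEUES == 4 and WLCORE_NUM_MAC_ADDRESSES == 3 as defined in
 * the wlcore headers: (4 + 1) * 3 + 1 = 16 hardware queues, with the
 * last one (index 15) reserved as the off-channel Tx queue.
 */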
6214
6215 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6216
6217 /* the FW answers probe-requests in AP-mode */
6218 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6219 wl->hw->wiphy->probe_resp_offload =
6220 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6221 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6222 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6223
6224 /* allowed interface combinations */
6225 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6226 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6227
6228 /* register vendor commands */
6229 wlcore_set_vendor_commands(wl->hw->wiphy);
6230
6231 SET_IEEE80211_DEV(wl->hw, wl->dev);
6232
6233 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6234 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6235
6236 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6237
6238 return 0;
6239 }
6240
6241 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6242 u32 mbox_size)
6243 {
6244 struct ieee80211_hw *hw;
6245 struct wl1271 *wl;
6246 int i, j, ret;
6247 unsigned int order;
6248
6249 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6250 if (!hw) {
6251 wl1271_error("could not alloc ieee80211_hw");
6252 ret = -ENOMEM;
6253 goto err_hw_alloc;
6254 }
6255
6256 wl = hw->priv;
6257 memset(wl, 0, sizeof(*wl));
6258
6259 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6260 if (!wl->priv) {
6261 wl1271_error("could not alloc wl priv");
6262 ret = -ENOMEM;
6263 goto err_priv_alloc;
6264 }
6265
6266 INIT_LIST_HEAD(&wl->wlvif_list);
6267
6268 wl->hw = hw;
6269
6270 /*
6271 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6272 * We don't allocate any additional resources here, so that's fine.
6273 */
6274 for (i = 0; i < NUM_TX_QUEUES; i++)
6275 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6276 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6277
6278 skb_queue_head_init(&wl->deferred_rx_queue);
6279 skb_queue_head_init(&wl->deferred_tx_queue);
6280
6281 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6282 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6283 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6284 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6285 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6286 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6287 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6288
6289 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6290 if (!wl->freezable_wq) {
6291 ret = -ENOMEM;
6292 goto err_hw;
6293 }
6294
6295 wl->channel = 0;
6296 wl->rx_counter = 0;
6297 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6298 wl->band = NL80211_BAND_2GHZ;
6299 wl->channel_type = NL80211_CHAN_NO_HT;
6300 wl->flags = 0;
6301 wl->sg_enabled = true;
6302 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6303 wl->recovery_count = 0;
6304 wl->hw_pg_ver = -1;
6305 wl->ap_ps_map = 0;
6306 wl->ap_fw_ps_map = 0;
6307 wl->quirks = 0;
6308 wl->system_hlid = WL12XX_SYSTEM_HLID;
6309 wl->active_sta_count = 0;
6310 wl->active_link_count = 0;
6311 wl->fwlog_size = 0;
6312
6313 /* The system link is always allocated */
6314 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6315
6316 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6317 for (i = 0; i < wl->num_tx_desc; i++)
6318 wl->tx_frames[i] = NULL;
6319
6320 spin_lock_init(&wl->wl_lock);
6321
6322 wl->state = WLCORE_STATE_OFF;
6323 wl->fw_type = WL12XX_FW_TYPE_NONE;
6324 mutex_init(&wl->mutex);
6325 mutex_init(&wl->flush_mutex);
6326 init_completion(&wl->nvs_loading_complete);
6327
6328 order = get_order(aggr_buf_size);
6329 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6330 if (!wl->aggr_buf) {
6331 ret = -ENOMEM;
6332 goto err_wq;
6333 }
6334 wl->aggr_buf_size = aggr_buf_size;
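/*
 * Note on the allocation above: __get_free_pages() takes a page order,
 * so get_order() rounds aggr_buf_size up to a power-of-two number of
 * pages.  For example, with 4 KiB pages an aggr_buf_size of 12 KiB
 * gives order 2, i.e. four contiguous pages (16 KiB) are allocated.
 */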
6335
6336 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6337 if (!wl->dummy_packet) {
6338 ret = -ENOMEM;
6339 goto err_aggr;
6340 }
6341
6342 /* Allocate one page for the FW log */
6343 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6344 if (!wl->fwlog) {
6345 ret = -ENOMEM;
6346 goto err_dummy_packet;
6347 }
6348
6349 wl->mbox_size = mbox_size;
6350 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6351 if (!wl->mbox) {
6352 ret = -ENOMEM;
6353 goto err_fwlog;
6354 }
6355
6356 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6357 if (!wl->buffer_32) {
6358 ret = -ENOMEM;
6359 goto err_mbox;
6360 }
6361
6362 return hw;
6363
6364 err_mbox:
6365 kfree(wl->mbox);
6366
6367 err_fwlog:
6368 free_page((unsigned long)wl->fwlog);
6369
6370 err_dummy_packet:
6371 dev_kfree_skb(wl->dummy_packet);
6372
6373 err_aggr:
6374 free_pages((unsigned long)wl->aggr_buf, order);
6375
6376 err_wq:
6377 destroy_workqueue(wl->freezable_wq);
6378
6379 err_hw:
6380 wl1271_debugfs_exit(wl);
6381 kfree(wl->priv);
6382
6383 err_priv_alloc:
6384 ieee80211_free_hw(hw);
6385
6386 err_hw_alloc:
6387
6388 return ERR_PTR(ret);
6389 }
6390 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6391
6392 int wlcore_free_hw(struct wl1271 *wl)
6393 {
6394 /* Unblock any fwlog readers */
6395 mutex_lock(&wl->mutex);
6396 wl->fwlog_size = -1;
6397 mutex_unlock(&wl->mutex);
6398
6399 wlcore_sysfs_free(wl);
6400
6401 kfree(wl->buffer_32);
6402 kfree(wl->mbox);
6403 free_page((unsigned long)wl->fwlog);
6404 dev_kfree_skb(wl->dummy_packet);
6405 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6406
6407 wl1271_debugfs_exit(wl);
6408
6409 vfree(wl->fw);
6410 wl->fw = NULL;
6411 wl->fw_type = WL12XX_FW_TYPE_NONE;
6412 kfree(wl->nvs);
6413 wl->nvs = NULL;
6414
6415 kfree(wl->raw_fw_status);
6416 kfree(wl->fw_status);
6417 kfree(wl->tx_res_if);
6418 destroy_workqueue(wl->freezable_wq);
6419
6420 kfree(wl->priv);
6421 ieee80211_free_hw(wl->hw);
6422
6423 return 0;
6424 }
6425 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6426
6427 #ifdef CONFIG_PM
6428 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6429 .flags = WIPHY_WOWLAN_ANY,
6430 .n_patterns = WL1271_MAX_RX_FILTERS,
6431 .pattern_min_len = 1,
6432 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6433 };
6434 #endif
6435
6436 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6437 {
6438 return IRQ_WAKE_THREAD;
6439 }
6440
6441 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6442 {
6443 struct wl1271 *wl = context;
6444 struct platform_device *pdev = wl->pdev;
6445 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6446 struct resource *res;
6447
6448 int ret;
6449 irq_handler_t hardirq_fn = NULL;
6450
6451 if (fw) {
6452 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6453 if (!wl->nvs) {
6454 wl1271_error("Could not allocate nvs data");
6455 goto out;
6456 }
6457 wl->nvs_len = fw->size;
6458 } else if (pdev_data->family->nvs_name) {
6459 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6460 pdev_data->family->nvs_name);
6461 wl->nvs = NULL;
6462 wl->nvs_len = 0;
6463 } else {
6464 wl->nvs = NULL;
6465 wl->nvs_len = 0;
6466 }
6467
6468 ret = wl->ops->setup(wl);
6469 if (ret < 0)
6470 goto out_free_nvs;
6471
6472 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6473
6474 /* adjust some runtime configuration parameters */
6475 wlcore_adjust_conf(wl);
6476
6477 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6478 if (!res) {
6479 wl1271_error("Could not get IRQ resource");
6480 goto out_free_nvs;
6481 }
6482
6483 wl->irq = res->start;
6484 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6485 wl->if_ops = pdev_data->if_ops;
6486
6487 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6488 hardirq_fn = wlcore_hardirq;
6489 else
6490 wl->irq_flags |= IRQF_ONESHOT;
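/*
 * Note on the trigger handling above: for an edge-triggered interrupt,
 * wlcore_hardirq() is installed as the primary handler and only wakes
 * the threaded handler (IRQ_WAKE_THREAD).  For a level-triggered
 * interrupt the default primary handler is used instead, and
 * IRQF_ONESHOT keeps the line masked until the threaded handler
 * finishes, as request_threaded_irq() requires in that case.
 */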
6491
6492 ret = wl12xx_set_power_on(wl);
6493 if (ret < 0)
6494 goto out_free_nvs;
6495
6496 ret = wl12xx_get_hw_info(wl);
6497 if (ret < 0) {
6498 wl1271_error("couldn't get hw info");
6499 wl1271_power_off(wl);
6500 goto out_free_nvs;
6501 }
6502
6503 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6504 wl->irq_flags, pdev->name, wl);
6505 if (ret < 0) {
6506 wl1271_error("interrupt configuration failed");
6507 wl1271_power_off(wl);
6508 goto out_free_nvs;
6509 }
6510
6511 #ifdef CONFIG_PM
6512 ret = enable_irq_wake(wl->irq);
6513 if (!ret) {
6514 wl->irq_wake_enabled = true;
6515 device_init_wakeup(wl->dev, 1);
6516 if (pdev_data->pwr_in_suspend)
6517 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6518 }
6519 #endif
6520 disable_irq(wl->irq);
6521 wl1271_power_off(wl);
6522
6523 ret = wl->ops->identify_chip(wl);
6524 if (ret < 0)
6525 goto out_irq;
6526
6527 ret = wl1271_init_ieee80211(wl);
6528 if (ret)
6529 goto out_irq;
6530
6531 ret = wl1271_register_hw(wl);
6532 if (ret)
6533 goto out_irq;
6534
6535 ret = wlcore_sysfs_init(wl);
6536 if (ret)
6537 goto out_unreg;
6538
6539 wl->initialized = true;
6540 goto out;
6541
6542 out_unreg:
6543 wl1271_unregister_hw(wl);
6544
6545 out_irq:
6546 free_irq(wl->irq, wl);
6547
6548 out_free_nvs:
6549 kfree(wl->nvs);
6550
6551 out:
6552 release_firmware(fw);
6553 complete_all(&wl->nvs_loading_complete);
6554 }
6555
6556 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6557 {
6558 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6559 const char *nvs_name;
6560 int ret = 0;
6561
6562 if (!wl->ops || !wl->ptable || !pdev_data)
6563 return -EINVAL;
6564
6565 wl->dev = &pdev->dev;
6566 wl->pdev = pdev;
6567 platform_set_drvdata(pdev, wl);
6568
6569 if (pdev_data->family && pdev_data->family->nvs_name) {
6570 nvs_name = pdev_data->family->nvs_name;
6571 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6572 nvs_name, &pdev->dev, GFP_KERNEL,
6573 wl, wlcore_nvs_cb);
6574 if (ret < 0) {
6575 wl1271_error("request_firmware_nowait failed for %s: %d",
6576 nvs_name, ret);
6577 complete_all(&wl->nvs_loading_complete);
6578 }
6579 } else {
6580 wlcore_nvs_cb(NULL, wl);
6581 }
6582
6583 return ret;
6584 }
6585 EXPORT_SYMBOL_GPL(wlcore_probe);
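/*
 * A minimal usage sketch of the exported probe path, for a
 * hypothetical chip driver "wlxxxx" (names and sizes below are
 * illustrative; the real wl12xx/wl18xx drivers add chip-specific
 * setup around these calls).  wl->ops and wl->ptable must be set
 * before calling wlcore_probe(), which rejects a NULL value for
 * either:
 *
 *	static int wlxxxx_probe(struct platform_device *pdev)
 *	{
 *		struct ieee80211_hw *hw;
 *		struct wl1271 *wl;
 *
 *		hw = wlcore_alloc_hw(sizeof(struct wlxxxx_priv),
 *				     WLXXXX_AGGR_BUF_SIZE,
 *				     sizeof(struct wlxxxx_mbox));
 *		if (IS_ERR(hw))
 *			return PTR_ERR(hw);
 *
 *		wl = hw->priv;
 *		wl->ops = &wlxxxx_ops;
 *		wl->ptable = wlxxxx_ptable;
 *
 *		return wlcore_probe(wl, pdev);
 *	}
 */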
6586
6587 int wlcore_remove(struct platform_device *pdev)
6588 {
6589 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6590 struct wl1271 *wl = platform_get_drvdata(pdev);
6591
6592 if (pdev_data->family && pdev_data->family->nvs_name)
6593 wait_for_completion(&wl->nvs_loading_complete);
6594 if (!wl->initialized)
6595 return 0;
6596
6597 if (wl->irq_wake_enabled) {
6598 device_init_wakeup(wl->dev, 0);
6599 disable_irq_wake(wl->irq);
6600 }
6601 wl1271_unregister_hw(wl);
6602 free_irq(wl->irq, wl);
6603 wlcore_free_hw(wl);
6604
6605 return 0;
6606 }
6607 EXPORT_SYMBOL_GPL(wlcore_remove);
6608
6609 u32 wl12xx_debug_level = DEBUG_NONE;
6610 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6611 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6612 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6613
6614 module_param_named(fwlog, fwlog_param, charp, 0);
6615 MODULE_PARM_DESC(fwlog,
6616 "FW logger options: continuous, dbgpins or disable");
6617
6618 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6619 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6620
6621 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6622 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6623
6624 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6625 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
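/*
 * Example (illustrative): the parameters above can be set at module
 * load time, e.g. "modprobe wlcore fwlog=continuous", and debug_level
 * is also writable at runtime via
 * /sys/module/wlcore/parameters/debug_level.
 */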
6626
6627 MODULE_LICENSE("GPL");
6628 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6629 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");