3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
/* Number of attempts to boot the firmware before giving up. */
#define WL1271_BOOT_RETRIES 3

/*
 * Optional module parameters. fwlog_param selects the FW logger mode
 * ("continuous"/"ondemand"/"dbgpins"/"disable"); the two booleans alter
 * the behavior of the recovery path.
 */
static char *fwlog_param;
static bool bug_on_recovery;
static bool no_recovery;
62 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
63 struct ieee80211_vif
*vif
,
64 bool reset_tx_queues
);
65 static void wlcore_op_stop_locked(struct wl1271
*wl
);
66 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
68 static int wl12xx_set_authorized(struct wl1271
*wl
,
69 struct wl12xx_vif
*wlvif
)
73 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
82 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
->sta
.hlid
);
86 wl12xx_croc(wl
, wlvif
->role_id
);
88 wl1271_info("Association completed.");
92 static int wl1271_reg_notify(struct wiphy
*wiphy
,
93 struct regulatory_request
*request
)
95 struct ieee80211_supported_band
*band
;
96 struct ieee80211_channel
*ch
;
99 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
100 for (i
= 0; i
< band
->n_channels
; i
++) {
101 ch
= &band
->channels
[i
];
102 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
105 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
106 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
107 IEEE80211_CHAN_PASSIVE_SCAN
;
114 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
119 /* we should hold wl->mutex */
120 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
133 * this function is being called when the rx_streaming interval
134 * has beed changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
139 int period
= wl
->conf
.rx_streaming
.interval
;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
148 (wl
->conf
.rx_streaming
.always
||
149 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
150 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
152 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif
->rx_streaming_timer
);
160 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
163 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
164 rx_streaming_enable_work
);
165 struct wl1271
*wl
= wlvif
->wl
;
167 mutex_lock(&wl
->mutex
);
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
171 (!wl
->conf
.rx_streaming
.always
&&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
175 if (!wl
->conf
.rx_streaming
.interval
)
178 ret
= wl1271_ps_elp_wakeup(wl
);
182 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif
->rx_streaming_timer
,
188 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
191 wl1271_ps_elp_sleep(wl
);
193 mutex_unlock(&wl
->mutex
);
196 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
199 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
200 rx_streaming_disable_work
);
201 struct wl1271
*wl
= wlvif
->wl
;
203 mutex_lock(&wl
->mutex
);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
208 ret
= wl1271_ps_elp_wakeup(wl
);
212 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
217 wl1271_ps_elp_sleep(wl
);
219 mutex_unlock(&wl
->mutex
);
222 static void wl1271_rx_streaming_timer(unsigned long data
)
224 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
225 struct wl1271
*wl
= wlvif
->wl
;
226 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl
->tx_allocated_blocks
== 0)
236 cancel_delayed_work(&wl
->tx_watchdog_work
);
237 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
238 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
241 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
243 struct delayed_work
*dwork
;
246 dwork
= container_of(work
, struct delayed_work
, work
);
247 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
249 mutex_lock(&wl
->mutex
);
251 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl
->tx_allocated_blocks
== 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
263 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
264 wl
->conf
.tx
.tx_watchdog_timeout
);
265 wl12xx_rearm_tx_watchdog_locked(wl
);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
274 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
275 wl
->conf
.tx
.tx_watchdog_timeout
);
276 wl12xx_rearm_tx_watchdog_locked(wl
);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl
->active_sta_count
) {
287 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
289 wl
->conf
.tx
.tx_watchdog_timeout
,
290 wl
->active_sta_count
);
291 wl12xx_rearm_tx_watchdog_locked(wl
);
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl
->conf
.tx
.tx_watchdog_timeout
);
297 wl12xx_queue_recovery_work(wl
);
300 mutex_unlock(&wl
->mutex
);
303 static void wlcore_adjust_conf(struct wl1271
*wl
)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param
, "continuous")) {
308 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
309 } else if (!strcmp(fwlog_param
, "ondemand")) {
310 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
311 } else if (!strcmp(fwlog_param
, "dbgpins")) {
312 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
313 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
314 } else if (!strcmp(fwlog_param
, "disable")) {
315 wl
->conf
.fwlog
.mem_blocks
= 0;
316 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
323 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
324 struct wl12xx_vif
*wlvif
,
327 bool fw_ps
, single_sta
;
329 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
330 single_sta
= (wl
->active_sta_count
== 1);
333 * Wake up from high level PS if the STA is asleep with too little
334 * packets in FW or if the STA is awake.
336 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
337 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
344 else if (!single_sta
&& fw_ps
&& tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
345 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
348 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
349 struct wl12xx_vif
*wlvif
,
350 struct wl_fw_status_2
*status
)
352 struct wl1271_link
*lnk
;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
359 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
360 wl1271_debug(DEBUG_PSM
,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
363 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
365 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
368 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
) {
369 lnk
= &wl
->links
[hlid
];
370 cnt
= status
->counters
.tx_lnk_free_pkts
[hlid
] -
371 lnk
->prev_freed_pkts
;
373 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[hlid
];
374 lnk
->allocated_pkts
-= cnt
;
376 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
377 lnk
->allocated_pkts
);
381 static int wlcore_fw_status(struct wl1271
*wl
,
382 struct wl_fw_status_1
*status_1
,
383 struct wl_fw_status_2
*status_2
)
385 struct wl12xx_vif
*wlvif
;
387 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
388 int avail
, freed_blocks
;
393 status_len
= WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
394 sizeof(*status_2
) + wl
->fw_status_priv_len
;
396 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status_1
,
401 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
404 status_1
->fw_rx_counter
,
405 status_1
->drv_rx_counter
,
406 status_1
->tx_results_counter
);
408 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl
->tx_allocated_pkts
[i
] -=
411 (status_2
->counters
.tx_released_pkts
[i
] -
412 wl
->tx_pkts_freed
[i
]) & 0xff;
414 wl
->tx_pkts_freed
[i
] = status_2
->counters
.tx_released_pkts
[i
];
417 /* prevent wrap-around in total blocks counter */
418 if (likely(wl
->tx_blocks_freed
<=
419 le32_to_cpu(status_2
->total_released_blks
)))
420 freed_blocks
= le32_to_cpu(status_2
->total_released_blks
) -
423 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
424 le32_to_cpu(status_2
->total_released_blks
);
426 wl
->tx_blocks_freed
= le32_to_cpu(status_2
->total_released_blks
);
428 wl
->tx_allocated_blocks
-= freed_blocks
;
431 * If the FW freed some blocks:
432 * If we still have allocated blocks - re-arm the timer, Tx is
433 * not stuck. Otherwise, cancel the timer (no Tx currently).
436 if (wl
->tx_allocated_blocks
)
437 wl12xx_rearm_tx_watchdog_locked(wl
);
439 cancel_delayed_work(&wl
->tx_watchdog_work
);
442 avail
= le32_to_cpu(status_2
->tx_total
) - wl
->tx_allocated_blocks
;
445 * The FW might change the total number of TX memblocks before
446 * we get a notification about blocks being released. Thus, the
447 * available blocks calculation might yield a temporary result
448 * which is lower than the actual available blocks. Keeping in
449 * mind that only blocks that were allocated can be moved from
450 * TX to RX, tx_blocks_available should never decrease here.
452 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
455 /* if more blocks are available now, tx work can be scheduled */
456 if (wl
->tx_blocks_available
> old_tx_blk_count
)
457 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
459 /* for AP update num of allocated TX blocks per link and ps status */
460 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
461 wl12xx_irq_update_links_status(wl
, wlvif
, status_2
);
464 /* update the host-chipset time offset */
466 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
467 (s64
)le32_to_cpu(status_2
->fw_localtime
);
472 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
476 /* Pass all received frames to the network stack */
477 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
478 ieee80211_rx_ni(wl
->hw
, skb
);
480 /* Return sent skbs to the network stack */
481 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
482 ieee80211_tx_status_ni(wl
->hw
, skb
);
485 static void wl1271_netstack_work(struct work_struct
*work
)
488 container_of(work
, struct wl1271
, netstack_work
);
491 wl1271_flush_deferred_work(wl
);
492 } while (skb_queue_len(&wl
->deferred_rx_queue
));
495 #define WL1271_IRQ_MAX_LOOPS 256
497 static int wlcore_irq_locked(struct wl1271
*wl
)
501 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
503 unsigned int defer_count
;
507 * In case edge triggered interrupt must be used, we cannot iterate
508 * more than once without introducing race conditions with the hardirq.
510 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
513 wl1271_debug(DEBUG_IRQ
, "IRQ work");
515 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
518 ret
= wl1271_ps_elp_wakeup(wl
);
522 while (!done
&& loopcount
--) {
524 * In order to avoid a race with the hardirq, clear the flag
525 * before acknowledging the chip. Since the mutex is held,
526 * wl1271_ps_elp_wakeup cannot be called concurrently.
528 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
529 smp_mb__after_clear_bit();
531 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
535 wlcore_hw_tx_immediate_compl(wl
);
537 intr
= le32_to_cpu(wl
->fw_status_1
->intr
);
538 intr
&= WLCORE_ALL_INTR_MASK
;
544 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
545 wl1271_error("HW watchdog interrupt received! starting recovery.");
546 wl
->watchdog_recovery
= true;
549 /* restarting the chip. ignore any other interrupt. */
553 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
554 wl1271_error("SW watchdog interrupt received! "
555 "starting recovery.");
556 wl
->watchdog_recovery
= true;
559 /* restarting the chip. ignore any other interrupt. */
563 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
564 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
566 ret
= wlcore_rx(wl
, wl
->fw_status_1
);
570 /* Check if any tx blocks were freed */
571 spin_lock_irqsave(&wl
->wl_lock
, flags
);
572 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
573 wl1271_tx_total_queue_count(wl
) > 0) {
574 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
576 * In order to avoid starvation of the TX path,
577 * call the work function directly.
579 ret
= wlcore_tx_work_locked(wl
);
583 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
586 /* check for tx results */
587 ret
= wlcore_hw_tx_delayed_compl(wl
);
591 /* Make sure the deferred queues don't get too long */
592 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
593 skb_queue_len(&wl
->deferred_rx_queue
);
594 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
595 wl1271_flush_deferred_work(wl
);
598 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
599 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
600 ret
= wl1271_event_handle(wl
, 0);
605 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
606 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
607 ret
= wl1271_event_handle(wl
, 1);
612 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
613 wl1271_debug(DEBUG_IRQ
,
614 "WL1271_ACX_INTR_INIT_COMPLETE");
616 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
617 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
620 wl1271_ps_elp_sleep(wl
);
626 static irqreturn_t
wlcore_irq(int irq
, void *cookie
)
630 struct wl1271
*wl
= cookie
;
632 /* TX might be handled here, avoid redundant work */
633 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
634 cancel_work_sync(&wl
->tx_work
);
636 mutex_lock(&wl
->mutex
);
638 ret
= wlcore_irq_locked(wl
);
640 wl12xx_queue_recovery_work(wl
);
642 spin_lock_irqsave(&wl
->wl_lock
, flags
);
643 /* In case TX was not handled here, queue TX work */
644 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
645 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
646 wl1271_tx_total_queue_count(wl
) > 0)
647 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
648 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
650 mutex_unlock(&wl
->mutex
);
655 struct vif_counter_data
{
658 struct ieee80211_vif
*cur_vif
;
659 bool cur_vif_running
;
662 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
663 struct ieee80211_vif
*vif
)
665 struct vif_counter_data
*counter
= data
;
668 if (counter
->cur_vif
== vif
)
669 counter
->cur_vif_running
= true;
672 /* caller must not hold wl->mutex, as it might deadlock */
673 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
674 struct ieee80211_vif
*cur_vif
,
675 struct vif_counter_data
*data
)
677 memset(data
, 0, sizeof(*data
));
678 data
->cur_vif
= cur_vif
;
680 ieee80211_iterate_active_interfaces(hw
,
681 wl12xx_vif_count_iter
, data
);
684 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
686 const struct firmware
*fw
;
688 enum wl12xx_fw_type fw_type
;
692 fw_type
= WL12XX_FW_TYPE_PLT
;
693 fw_name
= wl
->plt_fw_name
;
696 * we can't call wl12xx_get_vif_count() here because
697 * wl->mutex is taken, so use the cached last_vif_count value
699 if (wl
->last_vif_count
> 1 && wl
->mr_fw_name
) {
700 fw_type
= WL12XX_FW_TYPE_MULTI
;
701 fw_name
= wl
->mr_fw_name
;
703 fw_type
= WL12XX_FW_TYPE_NORMAL
;
704 fw_name
= wl
->sr_fw_name
;
708 if (wl
->fw_type
== fw_type
)
711 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
713 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
716 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
721 wl1271_error("firmware size is not multiple of 32 bits: %zu",
728 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
729 wl
->fw_len
= fw
->size
;
730 wl
->fw
= vmalloc(wl
->fw_len
);
733 wl1271_error("could not allocate memory for the firmware");
738 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
740 wl
->fw_type
= fw_type
;
742 release_firmware(fw
);
747 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
749 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
751 /* Avoid a recursive recovery */
752 if (wl
->state
== WLCORE_STATE_ON
) {
753 wl
->state
= WLCORE_STATE_RESTARTING
;
754 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
755 wlcore_disable_interrupts_nosync(wl
);
756 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
760 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
764 /* The FW log is a length-value list, find where the log end */
765 while (len
< maxlen
) {
766 if (memblock
[len
] == 0)
768 if (len
+ memblock
[len
] + 1 > maxlen
)
770 len
+= memblock
[len
] + 1;
773 /* Make sure we have enough room */
774 len
= min(len
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
776 /* Fill the FW log file, consumed by the sysfs fwlog entry */
777 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
778 wl
->fwlog_size
+= len
;
783 #define WLCORE_FW_LOG_END 0x2000000
785 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
793 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
794 (wl
->conf
.fwlog
.mem_blocks
== 0))
797 wl1271_info("Reading FW panic log");
799 block
= kmalloc(WL12XX_HW_BLOCK_SIZE
, GFP_KERNEL
);
804 * Make sure the chip is awake and the logger isn't active.
805 * Do not send a stop fwlog command if the fw is hanged.
807 if (wl1271_ps_elp_wakeup(wl
))
809 if (!wl
->watchdog_recovery
)
810 wl12xx_cmd_stop_fwlog(wl
);
812 /* Read the first memory block address */
813 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
817 addr
= le32_to_cpu(wl
->fw_status_2
->log_start_addr
);
821 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
822 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
823 end_of_log
= WLCORE_FW_LOG_END
;
825 offset
= sizeof(addr
);
829 /* Traverse the memory blocks linked list */
831 memset(block
, 0, WL12XX_HW_BLOCK_SIZE
);
832 ret
= wlcore_read_hwaddr(wl
, addr
, block
, WL12XX_HW_BLOCK_SIZE
,
838 * Memory blocks are linked to one another. The first 4 bytes
839 * of each memory block hold the hardware address of the next
840 * one. The last memory block points to the first one in
841 * on demand mode and is equal to 0x2000000 in continuous mode.
843 addr
= le32_to_cpup((__le32
*)block
);
844 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
845 WL12XX_HW_BLOCK_SIZE
- offset
))
847 } while (addr
&& (addr
!= end_of_log
));
849 wake_up_interruptible(&wl
->fwlog_waitq
);
855 static void wlcore_print_recovery(struct wl1271
*wl
)
861 wl1271_info("Hardware recovery in progress. FW ver: %s",
862 wl
->chip
.fw_ver_str
);
864 /* change partitions momentarily so we can read the FW pc */
865 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
869 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
873 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
877 wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc
, hint_sts
);
879 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
883 static void wl1271_recovery_work(struct work_struct
*work
)
886 container_of(work
, struct wl1271
, recovery_work
);
887 struct wl12xx_vif
*wlvif
;
888 struct ieee80211_vif
*vif
;
890 mutex_lock(&wl
->mutex
);
892 if (wl
->state
== WLCORE_STATE_OFF
|| wl
->plt
)
895 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
)) {
896 wl12xx_read_fwlog_panic(wl
);
897 wlcore_print_recovery(wl
);
900 BUG_ON(bug_on_recovery
&&
901 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
904 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
909 * Advance security sequence number to overcome potential progress
910 * in the firmware during recovery. This doens't hurt if the network is
913 wl12xx_for_each_wlvif(wl
, wlvif
) {
914 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
915 test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
916 wlvif
->tx_security_seq
+=
917 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
920 /* Prevent spurious TX during FW restart */
921 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
923 if (wl
->sched_scanning
) {
924 ieee80211_sched_scan_stopped(wl
->hw
);
925 wl
->sched_scanning
= false;
928 /* reboot the chipset */
929 while (!list_empty(&wl
->wlvif_list
)) {
930 wlvif
= list_first_entry(&wl
->wlvif_list
,
931 struct wl12xx_vif
, list
);
932 vif
= wl12xx_wlvif_to_vif(wlvif
);
933 __wl1271_op_remove_interface(wl
, vif
, false);
936 wlcore_op_stop_locked(wl
);
938 ieee80211_restart_hw(wl
->hw
);
941 * Its safe to enable TX now - the queues are stopped after a request
944 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
947 wl
->watchdog_recovery
= false;
948 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
949 mutex_unlock(&wl
->mutex
);
952 static int wlcore_fw_wakeup(struct wl1271
*wl
)
954 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
957 static int wl1271_setup(struct wl1271
*wl
)
959 wl
->fw_status_1
= kmalloc(WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
960 sizeof(*wl
->fw_status_2
) +
961 wl
->fw_status_priv_len
, GFP_KERNEL
);
962 if (!wl
->fw_status_1
)
965 wl
->fw_status_2
= (struct wl_fw_status_2
*)
966 (((u8
*) wl
->fw_status_1
) +
967 WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
));
969 wl
->tx_res_if
= kmalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
970 if (!wl
->tx_res_if
) {
971 kfree(wl
->fw_status_1
);
978 static int wl12xx_set_power_on(struct wl1271
*wl
)
982 msleep(WL1271_PRE_POWER_ON_SLEEP
);
983 ret
= wl1271_power_on(wl
);
986 msleep(WL1271_POWER_ON_SLEEP
);
990 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
994 /* ELP module wake up */
995 ret
= wlcore_fw_wakeup(wl
);
1003 wl1271_power_off(wl
);
1007 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1011 ret
= wl12xx_set_power_on(wl
);
1016 * For wl127x based devices we could use the default block
1017 * size (512 bytes), but due to a bug in the sdio driver, we
1018 * need to set it explicitly after the chip is powered on. To
1019 * simplify the code and since the performance impact is
1020 * negligible, we use the same block size for all different
1023 * Check if the bus supports blocksize alignment and, if it
1024 * doesn't, make sure we don't have the quirk.
1026 if (!wl1271_set_block_size(wl
))
1027 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1029 /* TODO: make sure the lower driver has set things up correctly */
1031 ret
= wl1271_setup(wl
);
1035 ret
= wl12xx_fetch_firmware(wl
, plt
);
1043 int wl1271_plt_start(struct wl1271
*wl
, const enum plt_mode plt_mode
)
1045 int retries
= WL1271_BOOT_RETRIES
;
1046 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1048 static const char* const PLT_MODE
[] = {
1056 mutex_lock(&wl
->mutex
);
1058 wl1271_notice("power up");
1060 if (wl
->state
!= WLCORE_STATE_OFF
) {
1061 wl1271_error("cannot go into PLT state because not "
1062 "in off state: %d", wl
->state
);
1067 /* Indicate to lower levels that we are now in PLT mode */
1069 wl
->plt_mode
= plt_mode
;
1073 ret
= wl12xx_chip_wakeup(wl
, true);
1077 ret
= wl
->ops
->plt_init(wl
);
1081 wl
->state
= WLCORE_STATE_ON
;
1082 wl1271_notice("firmware booted in PLT mode %s (%s)",
1084 wl
->chip
.fw_ver_str
);
1086 /* update hw/fw version info in wiphy struct */
1087 wiphy
->hw_version
= wl
->chip
.id
;
1088 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1089 sizeof(wiphy
->fw_version
));
1094 wl1271_power_off(wl
);
1098 wl
->plt_mode
= PLT_OFF
;
1100 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1101 WL1271_BOOT_RETRIES
);
1103 mutex_unlock(&wl
->mutex
);
1108 int wl1271_plt_stop(struct wl1271
*wl
)
1112 wl1271_notice("power down");
1115 * Interrupts must be disabled before setting the state to OFF.
1116 * Otherwise, the interrupt handler might be called and exit without
1117 * reading the interrupt status.
1119 wlcore_disable_interrupts(wl
);
1120 mutex_lock(&wl
->mutex
);
1122 mutex_unlock(&wl
->mutex
);
1125 * This will not necessarily enable interrupts as interrupts
1126 * may have been disabled when op_stop was called. It will,
1127 * however, balance the above call to disable_interrupts().
1129 wlcore_enable_interrupts(wl
);
1131 wl1271_error("cannot power down because not in PLT "
1132 "state: %d", wl
->state
);
1137 mutex_unlock(&wl
->mutex
);
1139 wl1271_flush_deferred_work(wl
);
1140 cancel_work_sync(&wl
->netstack_work
);
1141 cancel_work_sync(&wl
->recovery_work
);
1142 cancel_delayed_work_sync(&wl
->elp_work
);
1143 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1144 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1146 mutex_lock(&wl
->mutex
);
1147 wl1271_power_off(wl
);
1149 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1150 wl
->state
= WLCORE_STATE_OFF
;
1152 wl
->plt_mode
= PLT_OFF
;
1154 mutex_unlock(&wl
->mutex
);
1160 static void wl1271_op_tx(struct ieee80211_hw
*hw
,
1161 struct ieee80211_tx_control
*control
,
1162 struct sk_buff
*skb
)
1164 struct wl1271
*wl
= hw
->priv
;
1165 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1166 struct ieee80211_vif
*vif
= info
->control
.vif
;
1167 struct wl12xx_vif
*wlvif
= NULL
;
1168 unsigned long flags
;
1173 wlvif
= wl12xx_vif_to_data(vif
);
1175 mapping
= skb_get_queue_mapping(skb
);
1176 q
= wl1271_tx_get_queue(mapping
);
1178 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
, control
->sta
);
1180 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1183 * drop the packet if the link is invalid or the queue is stopped
1184 * for any reason but watermark. Watermark is a "soft"-stop so we
1185 * allow these packets through.
1187 if (hlid
== WL12XX_INVALID_LINK_ID
||
1188 (wlvif
&& !test_bit(hlid
, wlvif
->links_map
)) ||
1189 (wlcore_is_queue_stopped(wl
, q
) &&
1190 !wlcore_is_queue_stopped_by_reason(wl
, q
,
1191 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1192 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1193 ieee80211_free_txskb(hw
, skb
);
1197 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1199 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1201 wl
->tx_queue_count
[q
]++;
1204 * The workqueue is slow to process the tx_queue and we need stop
1205 * the queue here, otherwise the queue will get too long.
1207 if (wl
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
&&
1208 !wlcore_is_queue_stopped_by_reason(wl
, q
,
1209 WLCORE_QUEUE_STOP_REASON_WATERMARK
)) {
1210 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1211 wlcore_stop_queue_locked(wl
, q
,
1212 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1216 * The chip specific setup must run before the first TX packet -
1217 * before that, the tx_work will not be initialized!
1220 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1221 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1222 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1225 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1228 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1230 unsigned long flags
;
1233 /* no need to queue a new dummy packet if one is already pending */
1234 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1237 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1239 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1240 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1241 wl
->tx_queue_count
[q
]++;
1242 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1244 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1245 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1246 return wlcore_tx_work_locked(wl
);
1249 * If the FW TX is busy, TX work will be scheduled by the threaded
1250 * interrupt handler function
1256 * The size of the dummy packet should be at least 1400 bytes. However, in
1257 * order to minimize the number of bus transactions, aligning it to 512 bytes
1258 * boundaries could be beneficial, performance wise
1260 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1262 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1264 struct sk_buff
*skb
;
1265 struct ieee80211_hdr_3addr
*hdr
;
1266 unsigned int dummy_packet_size
;
1268 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1269 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1271 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1273 wl1271_warning("Failed to allocate a dummy packet skb");
1277 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1279 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1280 memset(hdr
, 0, sizeof(*hdr
));
1281 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1282 IEEE80211_STYPE_NULLFUNC
|
1283 IEEE80211_FCTL_TODS
);
1285 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1287 /* Dummy packets require the TID to be management */
1288 skb
->priority
= WL1271_TID_MGMT
;
1290 /* Initialize all fields that might be used */
1291 skb_set_queue_mapping(skb
, 0);
1292 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1300 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern
*p
)
1302 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1303 int i
, pattern_len
= 0;
1306 wl1271_warning("No mask in WoWLAN pattern");
1311 * The pattern is broken up into segments of bytes at different offsets
1312 * that need to be checked by the FW filter. Each segment is called
1313 * a field in the FW API. We verify that the total number of fields
1314 * required for this pattern won't exceed FW limits (8)
1315 * as well as the total fields buffer won't exceed the FW limit.
1316 * Note that if there's a pattern which crosses Ethernet/IP header
1317 * boundary a new field is required.
1319 for (i
= 0; i
< p
->pattern_len
; i
++) {
1320 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1325 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1327 fields_size
+= pattern_len
+
1328 RX_FILTER_FIELD_OVERHEAD
;
1336 fields_size
+= pattern_len
+
1337 RX_FILTER_FIELD_OVERHEAD
;
1344 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1348 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1349 wl1271_warning("RX Filter too complex. Too many segments");
1353 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1354 wl1271_warning("RX filter pattern is too big");
1361 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1363 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1366 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1373 for (i
= 0; i
< filter
->num_fields
; i
++)
1374 kfree(filter
->fields
[i
].pattern
);
1379 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1380 u16 offset
, u8 flags
,
1381 u8
*pattern
, u8 len
)
1383 struct wl12xx_rx_filter_field
*field
;
1385 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1386 wl1271_warning("Max fields per RX filter. can't alloc another");
1390 field
= &filter
->fields
[filter
->num_fields
];
1392 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1393 if (!field
->pattern
) {
1394 wl1271_warning("Failed to allocate RX filter pattern");
1398 filter
->num_fields
++;
1400 field
->offset
= cpu_to_le16(offset
);
1401 field
->flags
= flags
;
1403 memcpy(field
->pattern
, pattern
, len
);
1408 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1410 int i
, fields_size
= 0;
1412 for (i
= 0; i
< filter
->num_fields
; i
++)
1413 fields_size
+= filter
->fields
[i
].len
+
1414 sizeof(struct wl12xx_rx_filter_field
) -
1420 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1424 struct wl12xx_rx_filter_field
*field
;
1426 for (i
= 0; i
< filter
->num_fields
; i
++) {
1427 field
= (struct wl12xx_rx_filter_field
*)buf
;
1429 field
->offset
= filter
->fields
[i
].offset
;
1430 field
->flags
= filter
->fields
[i
].flags
;
1431 field
->len
= filter
->fields
[i
].len
;
1433 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1434 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1435 sizeof(u8
*) + field
->len
;
1440 * Allocates an RX filter returned through f
1441 * which needs to be freed using rx_filter_free()
1443 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1444 struct cfg80211_wowlan_trig_pkt_pattern
*p
,
1445 struct wl12xx_rx_filter
**f
)
1448 struct wl12xx_rx_filter
*filter
;
1452 filter
= wl1271_rx_filter_alloc();
1454 wl1271_warning("Failed to alloc rx filter");
1460 while (i
< p
->pattern_len
) {
1461 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1466 for (j
= i
; j
< p
->pattern_len
; j
++) {
1467 if (!test_bit(j
, (unsigned long *)p
->mask
))
1470 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1471 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1475 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1477 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1479 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1480 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1485 ret
= wl1271_rx_filter_alloc_field(filter
,
1488 &p
->pattern
[i
], len
);
1495 filter
->action
= FILTER_SIGNAL
;
1501 wl1271_rx_filter_free(filter
);
1507 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1508 struct cfg80211_wowlan
*wow
)
1512 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1513 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1518 ret
= wl1271_rx_filter_clear_all(wl
);
1525 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1528 /* Validate all incoming patterns before clearing current FW state */
1529 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1530 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1532 wl1271_warning("Bad wowlan pattern %d", i
);
1537 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1541 ret
= wl1271_rx_filter_clear_all(wl
);
1545 /* Translate WoWLAN patterns into filters */
1546 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1547 struct cfg80211_wowlan_trig_pkt_pattern
*p
;
1548 struct wl12xx_rx_filter
*filter
= NULL
;
1550 p
= &wow
->patterns
[i
];
1552 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1554 wl1271_warning("Failed to create an RX filter from "
1555 "wowlan pattern %d", i
);
1559 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1561 wl1271_rx_filter_free(filter
);
1566 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1572 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1573 struct wl12xx_vif
*wlvif
,
1574 struct cfg80211_wowlan
*wow
)
1578 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1581 ret
= wl1271_ps_elp_wakeup(wl
);
1585 ret
= wl1271_configure_wowlan(wl
, wow
);
1589 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1590 wl
->conf
.conn
.wake_up_event
) &&
1591 (wl
->conf
.conn
.suspend_listen_interval
==
1592 wl
->conf
.conn
.listen_interval
))
1595 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1596 wl
->conf
.conn
.suspend_wake_up_event
,
1597 wl
->conf
.conn
.suspend_listen_interval
);
1600 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1603 wl1271_ps_elp_sleep(wl
);
1609 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1610 struct wl12xx_vif
*wlvif
)
1614 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1617 ret
= wl1271_ps_elp_wakeup(wl
);
1621 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1623 wl1271_ps_elp_sleep(wl
);
1629 static int wl1271_configure_suspend(struct wl1271
*wl
,
1630 struct wl12xx_vif
*wlvif
,
1631 struct cfg80211_wowlan
*wow
)
1633 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1634 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1635 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1636 return wl1271_configure_suspend_ap(wl
, wlvif
);
1640 static void wl1271_configure_resume(struct wl1271
*wl
,
1641 struct wl12xx_vif
*wlvif
)
1644 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1645 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1647 if ((!is_ap
) && (!is_sta
))
1650 if (is_sta
&& !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1653 ret
= wl1271_ps_elp_wakeup(wl
);
1658 wl1271_configure_wowlan(wl
, NULL
);
1660 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1661 wl
->conf
.conn
.wake_up_event
) &&
1662 (wl
->conf
.conn
.suspend_listen_interval
==
1663 wl
->conf
.conn
.listen_interval
))
1666 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1667 wl
->conf
.conn
.wake_up_event
,
1668 wl
->conf
.conn
.listen_interval
);
1671 wl1271_error("resume: wake up conditions failed: %d",
1675 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1679 wl1271_ps_elp_sleep(wl
);
1682 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1683 struct cfg80211_wowlan
*wow
)
1685 struct wl1271
*wl
= hw
->priv
;
1686 struct wl12xx_vif
*wlvif
;
1689 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1692 /* we want to perform the recovery before suspending */
1693 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1694 wl1271_warning("postponing suspend to perform recovery");
1698 wl1271_tx_flush(wl
);
1700 mutex_lock(&wl
->mutex
);
1701 wl
->wow_enabled
= true;
1702 wl12xx_for_each_wlvif(wl
, wlvif
) {
1703 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1705 mutex_unlock(&wl
->mutex
);
1706 wl1271_warning("couldn't prepare device to suspend");
1710 mutex_unlock(&wl
->mutex
);
1711 /* flush any remaining work */
1712 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1715 * disable and re-enable interrupts in order to flush
1718 wlcore_disable_interrupts(wl
);
1721 * set suspended flag to avoid triggering a new threaded_irq
1722 * work. no need for spinlock as interrupts are disabled.
1724 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1726 wlcore_enable_interrupts(wl
);
1727 flush_work(&wl
->tx_work
);
1728 flush_delayed_work(&wl
->elp_work
);
1733 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1735 struct wl1271
*wl
= hw
->priv
;
1736 struct wl12xx_vif
*wlvif
;
1737 unsigned long flags
;
1738 bool run_irq_work
= false, pending_recovery
;
1741 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1743 WARN_ON(!wl
->wow_enabled
);
1746 * re-enable irq_work enqueuing, and call irq_work directly if
1747 * there is a pending work.
1749 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1750 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1751 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1752 run_irq_work
= true;
1753 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1755 mutex_lock(&wl
->mutex
);
1757 /* test the recovery flag before calling any SDIO functions */
1758 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1762 wl1271_debug(DEBUG_MAC80211
,
1763 "run postponed irq_work directly");
1765 /* don't talk to the HW if recovery is pending */
1766 if (!pending_recovery
) {
1767 ret
= wlcore_irq_locked(wl
);
1769 wl12xx_queue_recovery_work(wl
);
1772 wlcore_enable_interrupts(wl
);
1775 if (pending_recovery
) {
1776 wl1271_warning("queuing forgotten recovery on resume");
1777 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1781 wl12xx_for_each_wlvif(wl
, wlvif
) {
1782 wl1271_configure_resume(wl
, wlvif
);
1786 wl
->wow_enabled
= false;
1787 mutex_unlock(&wl
->mutex
);
1793 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1795 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1798 * We have to delay the booting of the hardware because
1799 * we need to know the local MAC address before downloading and
1800 * initializing the firmware. The MAC address cannot be changed
1801 * after boot, and without the proper MAC address, the firmware
1802 * will not function properly.
1804 * The MAC address is first known when the corresponding interface
1805 * is added. That is where we will initialize the hardware.
1811 static void wlcore_op_stop_locked(struct wl1271
*wl
)
1815 if (wl
->state
== WLCORE_STATE_OFF
) {
1816 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1818 wlcore_enable_interrupts(wl
);
1824 * this must be before the cancel_work calls below, so that the work
1825 * functions don't perform further work.
1827 wl
->state
= WLCORE_STATE_OFF
;
1830 * Use the nosync variant to disable interrupts, so the mutex could be
1831 * held while doing so without deadlocking.
1833 wlcore_disable_interrupts_nosync(wl
);
1835 mutex_unlock(&wl
->mutex
);
1837 wlcore_synchronize_interrupts(wl
);
1838 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1839 cancel_work_sync(&wl
->recovery_work
);
1840 wl1271_flush_deferred_work(wl
);
1841 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1842 cancel_work_sync(&wl
->netstack_work
);
1843 cancel_work_sync(&wl
->tx_work
);
1844 cancel_delayed_work_sync(&wl
->elp_work
);
1845 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1846 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1848 /* let's notify MAC80211 about the remaining pending TX frames */
1849 wl12xx_tx_reset(wl
);
1850 mutex_lock(&wl
->mutex
);
1852 wl1271_power_off(wl
);
1854 * In case a recovery was scheduled, interrupts were disabled to avoid
1855 * an interrupt storm. Now that the power is down, it is safe to
1856 * re-enable interrupts to balance the disable depth
1858 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1859 wlcore_enable_interrupts(wl
);
1861 wl
->band
= IEEE80211_BAND_2GHZ
;
1864 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1865 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1866 wl
->tx_blocks_available
= 0;
1867 wl
->tx_allocated_blocks
= 0;
1868 wl
->tx_results_count
= 0;
1869 wl
->tx_packets_count
= 0;
1870 wl
->time_offset
= 0;
1871 wl
->ap_fw_ps_map
= 0;
1873 wl
->sched_scanning
= false;
1874 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1875 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1876 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1877 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1878 wl
->active_sta_count
= 0;
1880 /* The system link is always allocated */
1881 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1884 * this is performed after the cancel_work calls and the associated
1885 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1886 * get executed before all these vars have been reset.
1890 wl
->tx_blocks_freed
= 0;
1892 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1893 wl
->tx_pkts_freed
[i
] = 0;
1894 wl
->tx_allocated_pkts
[i
] = 0;
1897 wl1271_debugfs_reset(wl
);
1899 kfree(wl
->fw_status_1
);
1900 wl
->fw_status_1
= NULL
;
1901 wl
->fw_status_2
= NULL
;
1902 kfree(wl
->tx_res_if
);
1903 wl
->tx_res_if
= NULL
;
1904 kfree(wl
->target_mem_map
);
1905 wl
->target_mem_map
= NULL
;
1908 static void wlcore_op_stop(struct ieee80211_hw
*hw
)
1910 struct wl1271
*wl
= hw
->priv
;
1912 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1914 mutex_lock(&wl
->mutex
);
1916 wlcore_op_stop_locked(wl
);
1918 mutex_unlock(&wl
->mutex
);
1921 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
1923 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
1924 WL12XX_MAX_RATE_POLICIES
);
1925 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
1928 __set_bit(policy
, wl
->rate_policies_map
);
1933 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
1935 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
1938 __clear_bit(*idx
, wl
->rate_policies_map
);
1939 *idx
= WL12XX_MAX_RATE_POLICIES
;
1942 static int wlcore_allocate_klv_template(struct wl1271
*wl
, u8
*idx
)
1944 u8 policy
= find_first_zero_bit(wl
->klv_templates_map
,
1945 WLCORE_MAX_KLV_TEMPLATES
);
1946 if (policy
>= WLCORE_MAX_KLV_TEMPLATES
)
1949 __set_bit(policy
, wl
->klv_templates_map
);
1954 static void wlcore_free_klv_template(struct wl1271
*wl
, u8
*idx
)
1956 if (WARN_ON(*idx
>= WLCORE_MAX_KLV_TEMPLATES
))
1959 __clear_bit(*idx
, wl
->klv_templates_map
);
1960 *idx
= WLCORE_MAX_KLV_TEMPLATES
;
1963 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1965 switch (wlvif
->bss_type
) {
1966 case BSS_TYPE_AP_BSS
:
1968 return WL1271_ROLE_P2P_GO
;
1970 return WL1271_ROLE_AP
;
1972 case BSS_TYPE_STA_BSS
:
1974 return WL1271_ROLE_P2P_CL
;
1976 return WL1271_ROLE_STA
;
1979 return WL1271_ROLE_IBSS
;
1982 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
1984 return WL12XX_INVALID_ROLE_TYPE
;
1987 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
1989 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
1992 /* clear everything but the persistent data */
1993 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
1995 switch (ieee80211_vif_type_p2p(vif
)) {
1996 case NL80211_IFTYPE_P2P_CLIENT
:
1999 case NL80211_IFTYPE_STATION
:
2000 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
2002 case NL80211_IFTYPE_ADHOC
:
2003 wlvif
->bss_type
= BSS_TYPE_IBSS
;
2005 case NL80211_IFTYPE_P2P_GO
:
2008 case NL80211_IFTYPE_AP
:
2009 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
2012 wlvif
->bss_type
= MAX_BSS_TYPE
;
2016 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2017 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2018 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2020 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2021 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2022 /* init sta/ibss data */
2023 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2024 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2025 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2026 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2027 wlcore_allocate_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2028 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
2029 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
2030 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
2033 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2034 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2035 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2036 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2037 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2038 wl12xx_allocate_rate_policy(wl
,
2039 &wlvif
->ap
.ucast_rate_idx
[i
]);
2040 wlvif
->basic_rate_set
= CONF_TX_AP_ENABLED_RATES
;
2042 * TODO: check if basic_rate shouldn't be
2043 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2044 * instead (the same thing for STA above).
2046 wlvif
->basic_rate
= CONF_TX_AP_ENABLED_RATES
;
2047 /* TODO: this seems to be used only for STA, check it */
2048 wlvif
->rate_set
= CONF_TX_AP_ENABLED_RATES
;
2051 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
2052 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
2053 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
2056 * mac80211 configures some values globally, while we treat them
2057 * per-interface. thus, on init, we have to copy them from wl
2059 wlvif
->band
= wl
->band
;
2060 wlvif
->channel
= wl
->channel
;
2061 wlvif
->power_level
= wl
->power_level
;
2062 wlvif
->channel_type
= wl
->channel_type
;
2064 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
2065 wl1271_rx_streaming_enable_work
);
2066 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
2067 wl1271_rx_streaming_disable_work
);
2068 INIT_LIST_HEAD(&wlvif
->list
);
2070 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
2071 (unsigned long) wlvif
);
2075 static bool wl12xx_init_fw(struct wl1271
*wl
)
2077 int retries
= WL1271_BOOT_RETRIES
;
2078 bool booted
= false;
2079 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2084 ret
= wl12xx_chip_wakeup(wl
, false);
2088 ret
= wl
->ops
->boot(wl
);
2092 ret
= wl1271_hw_init(wl
);
2100 mutex_unlock(&wl
->mutex
);
2101 /* Unlocking the mutex in the middle of handling is
2102 inherently unsafe. In this case we deem it safe to do,
2103 because we need to let any possibly pending IRQ out of
2104 the system (and while we are WLCORE_STATE_OFF the IRQ
2105 work function will not do anything.) Also, any other
2106 possible concurrent operations will fail due to the
2107 current state, hence the wl1271 struct should be safe. */
2108 wlcore_disable_interrupts(wl
);
2109 wl1271_flush_deferred_work(wl
);
2110 cancel_work_sync(&wl
->netstack_work
);
2111 mutex_lock(&wl
->mutex
);
2113 wl1271_power_off(wl
);
2117 wl1271_error("firmware boot failed despite %d retries",
2118 WL1271_BOOT_RETRIES
);
2122 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2124 /* update hw/fw version info in wiphy struct */
2125 wiphy
->hw_version
= wl
->chip
.id
;
2126 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2127 sizeof(wiphy
->fw_version
));
2130 * Now we know if 11a is supported (info from the NVS), so disable
2131 * 11a channels if not supported
2133 if (!wl
->enable_11a
)
2134 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2136 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2137 wl
->enable_11a
? "" : "not ");
2139 wl
->state
= WLCORE_STATE_ON
;
2144 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2146 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2150 * Check whether a fw switch (i.e. moving from one loaded
2151 * fw to another) is needed. This function is also responsible
2152 * for updating wl->last_vif_count, so it must be called before
2153 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2156 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2157 struct vif_counter_data vif_counter_data
,
2160 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2161 u8 vif_count
= vif_counter_data
.counter
;
2163 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2166 /* increase the vif count if this is a new vif */
2167 if (add
&& !vif_counter_data
.cur_vif_running
)
2170 wl
->last_vif_count
= vif_count
;
2172 /* no need for fw change if the device is OFF */
2173 if (wl
->state
== WLCORE_STATE_OFF
)
2176 /* no need for fw change if a single fw is used */
2177 if (!wl
->mr_fw_name
)
2180 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2182 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2189 * Enter "forced psm". Make sure the sta is in psm against the ap,
2190 * to make the fw switch a bit more disconnection-persistent.
2192 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2194 struct wl12xx_vif
*wlvif
;
2196 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2197 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2201 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2202 struct ieee80211_vif
*vif
)
2204 struct wl1271
*wl
= hw
->priv
;
2205 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2206 struct vif_counter_data vif_count
;
2209 bool booted
= false;
2211 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2212 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2214 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2215 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2217 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2219 mutex_lock(&wl
->mutex
);
2220 ret
= wl1271_ps_elp_wakeup(wl
);
2225 * in some very corner case HW recovery scenarios its possible to
2226 * get here before __wl1271_op_remove_interface is complete, so
2227 * opt out if that is the case.
2229 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2230 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2236 ret
= wl12xx_init_vif_data(wl
, vif
);
2241 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2242 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2247 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2248 wl12xx_force_active_psm(wl
);
2249 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2250 mutex_unlock(&wl
->mutex
);
2251 wl1271_recovery_work(&wl
->recovery_work
);
2256 * TODO: after the nvs issue will be solved, move this block
2257 * to start(), and make sure here the driver is ON.
2259 if (wl
->state
== WLCORE_STATE_OFF
) {
2261 * we still need this in order to configure the fw
2262 * while uploading the nvs
2264 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2266 booted
= wl12xx_init_fw(wl
);
2273 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2274 role_type
, &wlvif
->role_id
);
2278 ret
= wl1271_init_vif_specific(wl
, vif
);
2282 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2283 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2285 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2290 wl1271_ps_elp_sleep(wl
);
2292 mutex_unlock(&wl
->mutex
);
2297 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2298 struct ieee80211_vif
*vif
,
2299 bool reset_tx_queues
)
2301 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2303 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2305 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2307 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2310 /* because of hardware recovery, we may get here twice */
2311 if (wl
->state
== WLCORE_STATE_OFF
)
2314 wl1271_info("down");
2316 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2317 wl
->scan_vif
== vif
) {
2319 * Rearm the tx watchdog just before idling scan. This
2320 * prevents just-finished scans from triggering the watchdog
2322 wl12xx_rearm_tx_watchdog_locked(wl
);
2324 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2325 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2326 wl
->scan_vif
= NULL
;
2327 wl
->scan
.req
= NULL
;
2328 ieee80211_scan_completed(wl
->hw
, true);
2331 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2332 /* disable active roles */
2333 ret
= wl1271_ps_elp_wakeup(wl
);
2337 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2338 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2339 if (wl12xx_dev_role_started(wlvif
))
2340 wl12xx_stop_dev(wl
, wlvif
);
2343 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2347 wl1271_ps_elp_sleep(wl
);
2350 /* clear all hlids (except system_hlid) */
2351 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2353 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2354 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2355 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2356 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2357 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2358 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2359 wlcore_free_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2361 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2362 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2363 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2364 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2365 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2366 wl12xx_free_rate_policy(wl
,
2367 &wlvif
->ap
.ucast_rate_idx
[i
]);
2368 wl1271_free_ap_keys(wl
, wlvif
);
2371 dev_kfree_skb(wlvif
->probereq
);
2372 wlvif
->probereq
= NULL
;
2373 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2374 if (wl
->last_wlvif
== wlvif
)
2375 wl
->last_wlvif
= NULL
;
2376 list_del(&wlvif
->list
);
2377 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2378 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2379 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2387 * Last AP, have more stations. Configure sleep auth according to STA.
2388 * Don't do thin on unintended recovery.
2390 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) &&
2391 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
))
2394 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2395 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2396 /* Configure for power according to debugfs */
2397 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2398 wl1271_acx_sleep_auth(wl
, sta_auth
);
2399 /* Configure for power always on */
2400 else if (wl
->quirks
& WLCORE_QUIRK_NO_ELP
)
2401 wl1271_acx_sleep_auth(wl
, WL1271_PSM_CAM
);
2402 /* Configure for ELP power saving */
2404 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2408 mutex_unlock(&wl
->mutex
);
2410 del_timer_sync(&wlvif
->rx_streaming_timer
);
2411 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2412 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2414 mutex_lock(&wl
->mutex
);
2417 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2418 struct ieee80211_vif
*vif
)
2420 struct wl1271
*wl
= hw
->priv
;
2421 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2422 struct wl12xx_vif
*iter
;
2423 struct vif_counter_data vif_count
;
2425 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2426 mutex_lock(&wl
->mutex
);
2428 if (wl
->state
== WLCORE_STATE_OFF
||
2429 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2433 * wl->vif can be null here if someone shuts down the interface
2434 * just when hardware recovery has been started.
2436 wl12xx_for_each_wlvif(wl
, iter
) {
2440 __wl1271_op_remove_interface(wl
, vif
, true);
2443 WARN_ON(iter
!= wlvif
);
2444 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2445 wl12xx_force_active_psm(wl
);
2446 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2447 wl12xx_queue_recovery_work(wl
);
2450 mutex_unlock(&wl
->mutex
);
2453 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2454 struct ieee80211_vif
*vif
,
2455 enum nl80211_iftype new_type
, bool p2p
)
2457 struct wl1271
*wl
= hw
->priv
;
2460 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2461 wl1271_op_remove_interface(hw
, vif
);
2463 vif
->type
= new_type
;
2465 ret
= wl1271_op_add_interface(hw
, vif
);
2467 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2471 static int wl1271_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2475 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2478 * One of the side effects of the JOIN command is that is clears
2479 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2480 * to a WPA/WPA2 access point will therefore kill the data-path.
2481 * Currently the only valid scenario for JOIN during association
2482 * is on roaming, in which case we will also be given new keys.
2483 * Keep the below message for now, unless it starts bothering
2484 * users who really like to roam a lot :)
2486 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2487 wl1271_info("JOIN while associated.");
2489 /* clear encryption type */
2490 wlvif
->encryption_type
= KEY_NONE
;
2493 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2496 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2498 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2502 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2506 * The join command disable the keep-alive mode, shut down its process,
2507 * and also clear the template config, so we need to reset it all after
2508 * the join. The acx_aid starts the keep-alive process, and the order
2509 * of the commands below is relevant.
2511 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2515 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2519 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2523 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2524 wlvif
->sta
.klv_template_id
,
2525 ACX_KEEP_ALIVE_TPL_VALID
);
2533 static int wl1271_unjoin(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2537 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2538 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2540 wl12xx_cmd_stop_channel_switch(wl
);
2541 ieee80211_chswitch_done(vif
, false);
2544 /* invalidate keep-alive template */
2545 wl1271_acx_keep_alive_config(wl
, wlvif
,
2546 wlvif
->sta
.klv_template_id
,
2547 ACX_KEEP_ALIVE_TPL_INVALID
);
2549 /* to stop listening to a channel, we disconnect */
2550 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2554 /* reset TX security counters on a clean disconnect */
2555 wlvif
->tx_security_last_seq_lsb
= 0;
2556 wlvif
->tx_security_seq
= 0;
2562 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2564 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2565 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2568 static int wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2572 bool cur_idle
= !test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2574 if (idle
== cur_idle
)
2578 /* no need to croc if we weren't busy (e.g. during boot) */
2579 if (wl12xx_dev_role_started(wlvif
)) {
2580 ret
= wl12xx_stop_dev(wl
, wlvif
);
2585 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
2586 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2589 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2591 /* The current firmware only supports sched_scan in idle */
2592 if (wl
->sched_scanning
) {
2593 wl1271_scan_sched_scan_stop(wl
, wlvif
);
2594 ieee80211_sched_scan_stopped(wl
->hw
);
2597 ret
= wl12xx_start_dev(wl
, wlvif
);
2600 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2607 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2608 struct ieee80211_conf
*conf
, u32 changed
)
2610 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2613 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2615 /* if the channel changes while joined, join again */
2616 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
&&
2617 ((wlvif
->band
!= conf
->channel
->band
) ||
2618 (wlvif
->channel
!= channel
) ||
2619 (wlvif
->channel_type
!= conf
->channel_type
))) {
2620 /* send all pending packets */
2621 ret
= wlcore_tx_work_locked(wl
);
2625 wlvif
->band
= conf
->channel
->band
;
2626 wlvif
->channel
= channel
;
2627 wlvif
->channel_type
= conf
->channel_type
;
2630 wl1271_set_band_rate(wl
, wlvif
);
2631 ret
= wl1271_init_ap_rates(wl
, wlvif
);
2633 wl1271_error("AP rate policy change failed %d",
2637 * FIXME: the mac80211 should really provide a fixed
2638 * rate to use here. for now, just use the smallest
2639 * possible rate for the band as a fixed rate for
2640 * association frames and other control messages.
2642 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2643 wl1271_set_band_rate(wl
, wlvif
);
2646 wl1271_tx_min_rate_get(wl
,
2647 wlvif
->basic_rate_set
);
2648 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2650 wl1271_warning("rate policy for channel "
2654 * change the ROC channel. do it only if we are
2655 * not idle. otherwise, CROC will be called
2658 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
,
2660 wl12xx_dev_role_started(wlvif
) &&
2661 !(conf
->flags
& IEEE80211_CONF_IDLE
)) {
2662 ret
= wl12xx_stop_dev(wl
, wlvif
);
2666 ret
= wl12xx_start_dev(wl
, wlvif
);
2673 if ((changed
& IEEE80211_CONF_CHANGE_PS
) && !is_ap
) {
2675 if ((conf
->flags
& IEEE80211_CONF_PS
) &&
2676 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
2677 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2682 if (wl
->conf
.conn
.forced_ps
) {
2683 ps_mode
= STATION_POWER_SAVE_MODE
;
2684 ps_mode_str
= "forced";
2686 ps_mode
= STATION_AUTO_PS_MODE
;
2687 ps_mode_str
= "auto";
2690 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
2692 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
2695 wl1271_warning("enter %s ps failed %d",
2698 } else if (!(conf
->flags
& IEEE80211_CONF_PS
) &&
2699 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2701 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
2703 ret
= wl1271_ps_set_mode(wl
, wlvif
,
2704 STATION_ACTIVE_MODE
);
2706 wl1271_warning("exit auto ps failed %d", ret
);
2710 if (conf
->power_level
!= wlvif
->power_level
) {
2711 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2715 wlvif
->power_level
= conf
->power_level
;
2721 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2723 struct wl1271
*wl
= hw
->priv
;
2724 struct wl12xx_vif
*wlvif
;
2725 struct ieee80211_conf
*conf
= &hw
->conf
;
2726 int channel
, ret
= 0;
2728 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2730 wl1271_debug(DEBUG_MAC80211
, "mac80211 config ch %d psm %s power %d %s"
2733 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
2735 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
2739 * mac80211 will go to idle nearly immediately after transmitting some
2740 * frames, such as the deauth. To make sure those frames reach the air,
2741 * wait here until the TX queue is fully flushed.
2743 if ((changed
& IEEE80211_CONF_CHANGE_CHANNEL
) ||
2744 ((changed
& IEEE80211_CONF_CHANGE_IDLE
) &&
2745 (conf
->flags
& IEEE80211_CONF_IDLE
)))
2746 wl1271_tx_flush(wl
);
2748 mutex_lock(&wl
->mutex
);
2750 /* we support configuring the channel and band even while off */
2751 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
) {
2752 wl
->band
= conf
->channel
->band
;
2753 wl
->channel
= channel
;
2754 wl
->channel_type
= conf
->channel_type
;
2757 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
2758 wl
->power_level
= conf
->power_level
;
2760 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2763 ret
= wl1271_ps_elp_wakeup(wl
);
2767 /* configure each interface */
2768 wl12xx_for_each_wlvif(wl
, wlvif
) {
2769 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
2775 wl1271_ps_elp_sleep(wl
);
2778 mutex_unlock(&wl
->mutex
);
2783 struct wl1271_filter_params
{
2786 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
2789 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
2790 struct netdev_hw_addr_list
*mc_list
)
2792 struct wl1271_filter_params
*fp
;
2793 struct netdev_hw_addr
*ha
;
2795 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
2797 wl1271_error("Out of memory setting filters.");
2801 /* update multicast filtering parameters */
2802 fp
->mc_list_length
= 0;
2803 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
2804 fp
->enabled
= false;
2807 netdev_hw_addr_list_for_each(ha
, mc_list
) {
2808 memcpy(fp
->mc_list
[fp
->mc_list_length
],
2809 ha
->addr
, ETH_ALEN
);
2810 fp
->mc_list_length
++;
2814 return (u64
)(unsigned long)fp
;
2817 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2820 FIF_BCN_PRBRESP_PROMISC | \
2824 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
2825 unsigned int changed
,
2826 unsigned int *total
, u64 multicast
)
2828 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
2829 struct wl1271
*wl
= hw
->priv
;
2830 struct wl12xx_vif
*wlvif
;
2834 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
2835 " total %x", changed
, *total
);
2837 mutex_lock(&wl
->mutex
);
2839 *total
&= WL1271_SUPPORTED_FILTERS
;
2840 changed
&= WL1271_SUPPORTED_FILTERS
;
2842 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2845 ret
= wl1271_ps_elp_wakeup(wl
);
2849 wl12xx_for_each_wlvif(wl
, wlvif
) {
2850 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
2851 if (*total
& FIF_ALLMULTI
)
2852 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2856 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2859 fp
->mc_list_length
);
2866 * the fw doesn't provide an api to configure the filters. instead,
2867 * the filters configuration is based on the active roles / ROC
2872 wl1271_ps_elp_sleep(wl
);
2875 mutex_unlock(&wl
->mutex
);
2879 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2880 u8 id
, u8 key_type
, u8 key_size
,
2881 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
2884 struct wl1271_ap_key
*ap_key
;
2887 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
2889 if (key_size
> MAX_KEY_SIZE
)
2893 * Find next free entry in ap_keys. Also check we are not replacing
2896 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2897 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2900 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
2901 wl1271_warning("trying to record key replacement");
2906 if (i
== MAX_NUM_KEYS
)
2909 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
2914 ap_key
->key_type
= key_type
;
2915 ap_key
->key_size
= key_size
;
2916 memcpy(ap_key
->key
, key
, key_size
);
2917 ap_key
->hlid
= hlid
;
2918 ap_key
->tx_seq_32
= tx_seq_32
;
2919 ap_key
->tx_seq_16
= tx_seq_16
;
2921 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
2925 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2929 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2930 kfree(wlvif
->ap
.recorded_keys
[i
]);
2931 wlvif
->ap
.recorded_keys
[i
] = NULL
;
2935 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2938 struct wl1271_ap_key
*key
;
2939 bool wep_key_added
= false;
2941 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2943 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2946 key
= wlvif
->ap
.recorded_keys
[i
];
2948 if (hlid
== WL12XX_INVALID_LINK_ID
)
2949 hlid
= wlvif
->ap
.bcast_hlid
;
2951 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
2952 key
->id
, key
->key_type
,
2953 key
->key_size
, key
->key
,
2954 hlid
, key
->tx_seq_32
,
2959 if (key
->key_type
== KEY_WEP
)
2960 wep_key_added
= true;
2963 if (wep_key_added
) {
2964 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
2965 wlvif
->ap
.bcast_hlid
);
2971 wl1271_free_ap_keys(wl
, wlvif
);
2975 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2976 u16 action
, u8 id
, u8 key_type
,
2977 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
2978 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
2981 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2984 struct wl1271_station
*wl_sta
;
2988 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
2989 hlid
= wl_sta
->hlid
;
2991 hlid
= wlvif
->ap
.bcast_hlid
;
2994 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
2996 * We do not support removing keys after AP shutdown.
2997 * Pretend we do to make mac80211 happy.
2999 if (action
!= KEY_ADD_OR_REPLACE
)
3002 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
3004 key
, hlid
, tx_seq_32
,
3007 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
3008 id
, key_type
, key_size
,
3009 key
, hlid
, tx_seq_32
,
3017 static const u8 bcast_addr
[ETH_ALEN
] = {
3018 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3021 addr
= sta
? sta
->addr
: bcast_addr
;
3023 if (is_zero_ether_addr(addr
)) {
3024 /* We dont support TX only encryption */
3028 /* The wl1271 does not allow to remove unicast keys - they
3029 will be cleared automatically on next CMD_JOIN. Ignore the
3030 request silently, as we dont want the mac80211 to emit
3031 an error message. */
3032 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
3035 /* don't remove key if hlid was already deleted */
3036 if (action
== KEY_REMOVE
&&
3037 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
3040 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
3041 id
, key_type
, key_size
,
3042 key
, addr
, tx_seq_32
,
3047 /* the default WEP key needs to be configured at least once */
3048 if (key_type
== KEY_WEP
) {
3049 ret
= wl12xx_cmd_set_default_wep_key(wl
,
3060 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
3061 struct ieee80211_vif
*vif
,
3062 struct ieee80211_sta
*sta
,
3063 struct ieee80211_key_conf
*key_conf
)
3065 struct wl1271
*wl
= hw
->priv
;
3067 bool might_change_spare
=
3068 key_conf
->cipher
== WL1271_CIPHER_SUITE_GEM
||
3069 key_conf
->cipher
== WLAN_CIPHER_SUITE_TKIP
;
3071 if (might_change_spare
) {
3073 * stop the queues and flush to ensure the next packets are
3074 * in sync with FW spare block accounting
3076 mutex_lock(&wl
->mutex
);
3077 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3078 mutex_unlock(&wl
->mutex
);
3080 wl1271_tx_flush(wl
);
3083 mutex_lock(&wl
->mutex
);
3085 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3087 goto out_wake_queues
;
3090 ret
= wl1271_ps_elp_wakeup(wl
);
3092 goto out_wake_queues
;
3094 ret
= wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
3096 wl1271_ps_elp_sleep(wl
);
3099 if (might_change_spare
)
3100 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3102 mutex_unlock(&wl
->mutex
);
3107 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
3108 struct ieee80211_vif
*vif
,
3109 struct ieee80211_sta
*sta
,
3110 struct ieee80211_key_conf
*key_conf
)
3112 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3118 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
3120 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
3121 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3122 key_conf
->cipher
, key_conf
->keyidx
,
3123 key_conf
->keylen
, key_conf
->flags
);
3124 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
3126 switch (key_conf
->cipher
) {
3127 case WLAN_CIPHER_SUITE_WEP40
:
3128 case WLAN_CIPHER_SUITE_WEP104
:
3131 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3133 case WLAN_CIPHER_SUITE_TKIP
:
3134 key_type
= KEY_TKIP
;
3136 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3137 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3138 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3140 case WLAN_CIPHER_SUITE_CCMP
:
3143 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3144 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3145 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3147 case WL1271_CIPHER_SUITE_GEM
:
3149 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3150 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3153 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
3160 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3161 key_conf
->keyidx
, key_type
,
3162 key_conf
->keylen
, key_conf
->key
,
3163 tx_seq_32
, tx_seq_16
, sta
);
3165 wl1271_error("Could not add or replace key");
3170 * reconfiguring arp response if the unicast (or common)
3171 * encryption key type was changed
3173 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
3174 (sta
|| key_type
== KEY_WEP
) &&
3175 wlvif
->encryption_type
!= key_type
) {
3176 wlvif
->encryption_type
= key_type
;
3177 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3179 wl1271_warning("build arp rsp failed: %d", ret
);
3186 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3187 key_conf
->keyidx
, key_type
,
3188 key_conf
->keylen
, key_conf
->key
,
3191 wl1271_error("Could not remove key");
3197 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3203 EXPORT_SYMBOL_GPL(wlcore_set_key
);
3205 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3206 struct ieee80211_vif
*vif
,
3207 struct cfg80211_scan_request
*req
)
3209 struct wl1271
*wl
= hw
->priv
;
3214 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3217 ssid
= req
->ssids
[0].ssid
;
3218 len
= req
->ssids
[0].ssid_len
;
3221 mutex_lock(&wl
->mutex
);
3223 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3225 * We cannot return -EBUSY here because cfg80211 will expect
3226 * a call to ieee80211_scan_completed if we do - in this case
3227 * there won't be any call.
3233 ret
= wl1271_ps_elp_wakeup(wl
);
3237 /* fail if there is any role in ROC */
3238 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3239 /* don't allow scanning right now */
3244 ret
= wl1271_scan(hw
->priv
, vif
, ssid
, len
, req
);
3246 wl1271_ps_elp_sleep(wl
);
3248 mutex_unlock(&wl
->mutex
);
3253 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3254 struct ieee80211_vif
*vif
)
3256 struct wl1271
*wl
= hw
->priv
;
3259 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3261 mutex_lock(&wl
->mutex
);
3263 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3266 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3269 ret
= wl1271_ps_elp_wakeup(wl
);
3273 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3274 ret
= wl1271_scan_stop(wl
);
3280 * Rearm the tx watchdog just before idling scan. This
3281 * prevents just-finished scans from triggering the watchdog
3283 wl12xx_rearm_tx_watchdog_locked(wl
);
3285 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3286 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3287 wl
->scan_vif
= NULL
;
3288 wl
->scan
.req
= NULL
;
3289 ieee80211_scan_completed(wl
->hw
, true);
3292 wl1271_ps_elp_sleep(wl
);
3294 mutex_unlock(&wl
->mutex
);
3296 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3299 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3300 struct ieee80211_vif
*vif
,
3301 struct cfg80211_sched_scan_request
*req
,
3302 struct ieee80211_sched_scan_ies
*ies
)
3304 struct wl1271
*wl
= hw
->priv
;
3305 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3308 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3310 mutex_lock(&wl
->mutex
);
3312 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3317 ret
= wl1271_ps_elp_wakeup(wl
);
3321 ret
= wl1271_scan_sched_scan_config(wl
, wlvif
, req
, ies
);
3325 ret
= wl1271_scan_sched_scan_start(wl
, wlvif
);
3329 wl
->sched_scanning
= true;
3332 wl1271_ps_elp_sleep(wl
);
3334 mutex_unlock(&wl
->mutex
);
3338 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3339 struct ieee80211_vif
*vif
)
3341 struct wl1271
*wl
= hw
->priv
;
3342 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3345 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3347 mutex_lock(&wl
->mutex
);
3349 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3352 ret
= wl1271_ps_elp_wakeup(wl
);
3356 wl1271_scan_sched_scan_stop(wl
, wlvif
);
3358 wl1271_ps_elp_sleep(wl
);
3360 mutex_unlock(&wl
->mutex
);
3363 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3365 struct wl1271
*wl
= hw
->priv
;
3368 mutex_lock(&wl
->mutex
);
3370 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3375 ret
= wl1271_ps_elp_wakeup(wl
);
3379 ret
= wl1271_acx_frag_threshold(wl
, value
);
3381 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3383 wl1271_ps_elp_sleep(wl
);
3386 mutex_unlock(&wl
->mutex
);
3391 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3393 struct wl1271
*wl
= hw
->priv
;
3394 struct wl12xx_vif
*wlvif
;
3397 mutex_lock(&wl
->mutex
);
3399 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3404 ret
= wl1271_ps_elp_wakeup(wl
);
3408 wl12xx_for_each_wlvif(wl
, wlvif
) {
3409 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3411 wl1271_warning("set rts threshold failed: %d", ret
);
3413 wl1271_ps_elp_sleep(wl
);
3416 mutex_unlock(&wl
->mutex
);
3421 static int wl1271_ssid_set(struct ieee80211_vif
*vif
, struct sk_buff
*skb
,
3424 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3426 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
3430 wl1271_error("No SSID in IEs!");
3435 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
3436 wl1271_error("SSID is too long!");
3440 wlvif
->ssid_len
= ssid_len
;
3441 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
3445 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3448 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3449 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3450 skb
->len
- ieoffset
);
3455 memmove(ie
, next
, end
- next
);
3456 skb_trim(skb
, skb
->len
- len
);
3459 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3460 unsigned int oui
, u8 oui_type
,
3464 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3465 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3466 skb
->data
+ ieoffset
,
3467 skb
->len
- ieoffset
);
3472 memmove(ie
, next
, end
- next
);
3473 skb_trim(skb
, skb
->len
- len
);
3476 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3477 struct ieee80211_vif
*vif
)
3479 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3480 struct sk_buff
*skb
;
3483 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3487 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3488 CMD_TEMPL_AP_PROBE_RESPONSE
,
3497 wl1271_debug(DEBUG_AP
, "probe response updated");
3498 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3504 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3505 struct ieee80211_vif
*vif
,
3507 size_t probe_rsp_len
,
3510 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3511 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3512 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3513 int ssid_ie_offset
, ie_offset
, templ_len
;
3516 /* no need to change probe response if the SSID is set correctly */
3517 if (wlvif
->ssid_len
> 0)
3518 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3519 CMD_TEMPL_AP_PROBE_RESPONSE
,
3524 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3525 wl1271_error("probe_rsp template too big");
3529 /* start searching from IE offset */
3530 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3532 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3533 probe_rsp_len
- ie_offset
);
3535 wl1271_error("No SSID in beacon!");
3539 ssid_ie_offset
= ptr
- probe_rsp_data
;
3540 ptr
+= (ptr
[1] + 2);
3542 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3544 /* insert SSID from bss_conf */
3545 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3546 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3547 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3548 bss_conf
->ssid
, bss_conf
->ssid_len
);
3549 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3551 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3552 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3553 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3555 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3556 CMD_TEMPL_AP_PROBE_RESPONSE
,
3562 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3563 struct ieee80211_vif
*vif
,
3564 struct ieee80211_bss_conf
*bss_conf
,
3567 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3570 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3571 if (bss_conf
->use_short_slot
)
3572 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3574 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3576 wl1271_warning("Set slot time failed %d", ret
);
3581 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3582 if (bss_conf
->use_short_preamble
)
3583 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3585 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3588 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3589 if (bss_conf
->use_cts_prot
)
3590 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3593 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3594 CTSPROTECT_DISABLE
);
3596 wl1271_warning("Set ctsprotect failed %d", ret
);
3605 static int wlcore_set_beacon_template(struct wl1271
*wl
,
3606 struct ieee80211_vif
*vif
,
3609 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3610 struct ieee80211_hdr
*hdr
;
3613 int ieoffset
= offsetof(struct ieee80211_mgmt
,
3615 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3623 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3625 ret
= wl1271_ssid_set(vif
, beacon
, ieoffset
);
3627 dev_kfree_skb(beacon
);
3630 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3631 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3633 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3638 dev_kfree_skb(beacon
);
3643 * In case we already have a probe-resp beacon set explicitly
3644 * by usermode, don't use the beacon data.
3646 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3649 /* remove TIM ie from probe response */
3650 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3653 * remove p2p ie from probe response.
3654 * the fw reponds to probe requests that don't include
3655 * the p2p ie. probe requests with p2p ie will be passed,
3656 * and will be responded by the supplicant (the spec
3657 * forbids including the p2p ie when responding to probe
3658 * requests that didn't include it).
3660 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3661 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3663 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3664 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3665 IEEE80211_STYPE_PROBE_RESP
);
3667 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3672 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3673 CMD_TEMPL_PROBE_RESPONSE
,
3678 dev_kfree_skb(beacon
);
3686 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3687 struct ieee80211_vif
*vif
,
3688 struct ieee80211_bss_conf
*bss_conf
,
3691 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3692 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3695 if ((changed
& BSS_CHANGED_BEACON_INT
)) {
3696 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3697 bss_conf
->beacon_int
);
3699 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3702 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3703 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3705 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
3708 if ((changed
& BSS_CHANGED_BEACON
)) {
3709 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
3716 wl1271_error("beacon info change failed: %d", ret
);
3720 /* AP mode changes */
3721 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
3722 struct ieee80211_vif
*vif
,
3723 struct ieee80211_bss_conf
*bss_conf
,
3726 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3729 if ((changed
& BSS_CHANGED_BASIC_RATES
)) {
3730 u32 rates
= bss_conf
->basic_rates
;
3732 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
3734 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
3735 wlvif
->basic_rate_set
);
3737 ret
= wl1271_init_ap_rates(wl
, wlvif
);
3739 wl1271_error("AP rate policy change failed %d", ret
);
3743 ret
= wl1271_ap_init_templates(wl
, vif
);
3747 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
3751 ret
= wlcore_set_beacon_template(wl
, vif
, true);
3756 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
3760 if ((changed
& BSS_CHANGED_BEACON_ENABLED
)) {
3761 if (bss_conf
->enable_beacon
) {
3762 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3763 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
3767 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
3771 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3772 wl1271_debug(DEBUG_AP
, "started AP");
3775 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3776 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
3780 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3781 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
3783 wl1271_debug(DEBUG_AP
, "stopped AP");
3788 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3792 /* Handle HT information change */
3793 if ((changed
& BSS_CHANGED_HT
) &&
3794 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3795 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3796 bss_conf
->ht_operation_mode
);
3798 wl1271_warning("Set ht information failed %d", ret
);
3807 /* STA/IBSS mode changes */
3808 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
3809 struct ieee80211_vif
*vif
,
3810 struct ieee80211_bss_conf
*bss_conf
,
3813 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3814 bool do_join
= false, set_assoc
= false;
3815 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
3816 bool ibss_joined
= false;
3817 u32 sta_rate_set
= 0;
3819 struct ieee80211_sta
*sta
;
3820 bool sta_exists
= false;
3821 struct ieee80211_sta_ht_cap sta_ht_cap
;
3824 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
3830 if (changed
& BSS_CHANGED_IBSS
) {
3831 if (bss_conf
->ibss_joined
) {
3832 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
3835 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
,
3837 wl1271_unjoin(wl
, wlvif
);
3841 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
3844 /* Need to update the SSID (for filtering etc) */
3845 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
3848 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
3849 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
3850 bss_conf
->enable_beacon
? "enabled" : "disabled");
3855 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
) {
3856 ret
= wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
3858 wl1271_warning("idle mode change failed %d", ret
);
3861 if ((changed
& BSS_CHANGED_CQM
)) {
3862 bool enable
= false;
3863 if (bss_conf
->cqm_rssi_thold
)
3865 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
3866 bss_conf
->cqm_rssi_thold
,
3867 bss_conf
->cqm_rssi_hyst
);
3870 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
3873 if (changed
& BSS_CHANGED_BSSID
)
3874 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
3875 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
3879 ret
= wl1271_build_qos_null_data(wl
, vif
);
3884 if (changed
& (BSS_CHANGED_ASSOC
| BSS_CHANGED_HT
)) {
3886 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
3890 /* save the supp_rates of the ap */
3891 sta_rate_set
= sta
->supp_rates
[wl
->hw
->conf
.channel
->band
];
3892 if (sta
->ht_cap
.ht_supported
)
3894 (sta
->ht_cap
.mcs
.rx_mask
[0] << HW_HT_RATES_OFFSET
) |
3895 (sta
->ht_cap
.mcs
.rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
3896 sta_ht_cap
= sta
->ht_cap
;
3903 if ((changed
& BSS_CHANGED_ASSOC
)) {
3904 if (bss_conf
->assoc
) {
3907 wlvif
->aid
= bss_conf
->aid
;
3908 wlvif
->channel_type
= bss_conf
->channel_type
;
3909 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3914 * use basic rates from AP, and determine lowest rate
3915 * to use with control frames.
3917 rates
= bss_conf
->basic_rates
;
3918 wlvif
->basic_rate_set
=
3919 wl1271_tx_enabled_rates_get(wl
, rates
,
3922 wl1271_tx_min_rate_get(wl
,
3923 wlvif
->basic_rate_set
);
3926 wl1271_tx_enabled_rates_get(wl
,
3929 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3934 * with wl1271, we don't need to update the
3935 * beacon_int and dtim_period, because the firmware
3936 * updates it by itself when the first beacon is
3937 * received after a join.
3939 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
3944 * Get a template for hardware connection maintenance
3946 dev_kfree_skb(wlvif
->probereq
);
3947 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
3950 ieoffset
= offsetof(struct ieee80211_mgmt
,
3951 u
.probe_req
.variable
);
3952 wl1271_ssid_set(vif
, wlvif
->probereq
, ieoffset
);
3954 /* enable the connection monitoring feature */
3955 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
3959 /* use defaults when not associated */
3961 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
,
3964 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT
,
3968 /* free probe-request template */
3969 dev_kfree_skb(wlvif
->probereq
);
3970 wlvif
->probereq
= NULL
;
3972 /* revert back to minimum rates for the current band */
3973 wl1271_set_band_rate(wl
, wlvif
);
3975 wl1271_tx_min_rate_get(wl
,
3976 wlvif
->basic_rate_set
);
3977 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3981 /* disable connection monitor features */
3982 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
3984 /* Disable the keep-alive feature */
3985 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
3989 /* restore the bssid filter and go to dummy bssid */
3992 * we might have to disable roc, if there was
3993 * no IF_OPER_UP notification.
3996 ret
= wl12xx_croc(wl
, wlvif
->role_id
);
4001 * (we also need to disable roc in case of
4002 * roaming on the same channel. until we will
4003 * have a better flow...)
4005 if (test_bit(wlvif
->dev_role_id
, wl
->roc_map
)) {
4006 ret
= wl12xx_croc(wl
,
4007 wlvif
->dev_role_id
);
4012 wl1271_unjoin(wl
, wlvif
);
4013 if (!bss_conf
->idle
)
4014 wl12xx_start_dev(wl
, wlvif
);
4019 if (changed
& BSS_CHANGED_IBSS
) {
4020 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
4021 bss_conf
->ibss_joined
);
4023 if (bss_conf
->ibss_joined
) {
4024 u32 rates
= bss_conf
->basic_rates
;
4025 wlvif
->basic_rate_set
=
4026 wl1271_tx_enabled_rates_get(wl
, rates
,
4029 wl1271_tx_min_rate_get(wl
,
4030 wlvif
->basic_rate_set
);
4032 /* by default, use 11b + OFDM rates */
4033 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
4034 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4040 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4045 ret
= wl1271_join(wl
, wlvif
, set_assoc
);
4047 wl1271_warning("cmd join failed %d", ret
);
4051 /* ROC until connected (after EAPOL exchange) */
4053 ret
= wl12xx_roc(wl
, wlvif
, wlvif
->role_id
);
4057 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4058 wl12xx_set_authorized(wl
, wlvif
);
4061 * stop device role if started (we might already be in
4064 if (wl12xx_dev_role_started(wlvif
)) {
4065 ret
= wl12xx_stop_dev(wl
, wlvif
);
4071 /* Handle new association with HT. Do this after join. */
4073 if ((changed
& BSS_CHANGED_HT
) &&
4074 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
4075 ret
= wl1271_acx_set_ht_capabilities(wl
,
4080 wl1271_warning("Set ht cap true failed %d",
4085 /* handle new association without HT and disassociation */
4086 else if (changed
& BSS_CHANGED_ASSOC
) {
4087 ret
= wl1271_acx_set_ht_capabilities(wl
,
4092 wl1271_warning("Set ht cap false failed %d",
4099 /* Handle HT information change. Done after join. */
4100 if ((changed
& BSS_CHANGED_HT
) &&
4101 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
4102 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4103 bss_conf
->ht_operation_mode
);
4105 wl1271_warning("Set ht information failed %d", ret
);
4110 /* Handle arp filtering. Done after join. */
4111 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4112 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4113 __be32 addr
= bss_conf
->arp_addr_list
[0];
4114 wlvif
->sta
.qos
= bss_conf
->qos
;
4115 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4117 if (bss_conf
->arp_addr_cnt
== 1 &&
4118 bss_conf
->arp_filter_enabled
) {
4119 wlvif
->ip_addr
= addr
;
4121 * The template should have been configured only upon
4122 * association. however, it seems that the correct ip
4123 * isn't being set (when sending), so we have to
4124 * reconfigure the template upon every ip change.
4126 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4128 wl1271_warning("build arp rsp failed: %d", ret
);
4132 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4133 (ACX_ARP_FILTER_ARP_FILTERING
|
4134 ACX_ARP_FILTER_AUTO_ARP
),
4138 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4149 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4150 struct ieee80211_vif
*vif
,
4151 struct ieee80211_bss_conf
*bss_conf
,
4154 struct wl1271
*wl
= hw
->priv
;
4155 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4156 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4159 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info changed 0x%x",
4163 * make sure to cancel pending disconnections if our association
4166 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4167 cancel_delayed_work_sync(&wl
->connection_loss_work
);
4169 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4170 !bss_conf
->enable_beacon
)
4171 wl1271_tx_flush(wl
);
4173 mutex_lock(&wl
->mutex
);
4175 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4178 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4181 ret
= wl1271_ps_elp_wakeup(wl
);
4186 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4188 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4190 wl1271_ps_elp_sleep(wl
);
4193 mutex_unlock(&wl
->mutex
);
4196 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4197 struct ieee80211_vif
*vif
, u16 queue
,
4198 const struct ieee80211_tx_queue_params
*params
)
4200 struct wl1271
*wl
= hw
->priv
;
4201 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4205 mutex_lock(&wl
->mutex
);
4207 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4210 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4212 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4214 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4217 ret
= wl1271_ps_elp_wakeup(wl
);
4222 * the txop is confed in units of 32us by the mac80211,
4225 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4226 params
->cw_min
, params
->cw_max
,
4227 params
->aifs
, params
->txop
<< 5);
4231 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4232 CONF_CHANNEL_TYPE_EDCF
,
4233 wl1271_tx_get_queue(queue
),
4234 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4238 wl1271_ps_elp_sleep(wl
);
4241 mutex_unlock(&wl
->mutex
);
4246 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4247 struct ieee80211_vif
*vif
)
4250 struct wl1271
*wl
= hw
->priv
;
4251 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4252 u64 mactime
= ULLONG_MAX
;
4255 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4257 mutex_lock(&wl
->mutex
);
4259 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4262 ret
= wl1271_ps_elp_wakeup(wl
);
4266 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4271 wl1271_ps_elp_sleep(wl
);
4274 mutex_unlock(&wl
->mutex
);
4278 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4279 struct survey_info
*survey
)
4281 struct ieee80211_conf
*conf
= &hw
->conf
;
4286 survey
->channel
= conf
->channel
;
4291 static int wl1271_allocate_sta(struct wl1271
*wl
,
4292 struct wl12xx_vif
*wlvif
,
4293 struct ieee80211_sta
*sta
)
4295 struct wl1271_station
*wl_sta
;
4299 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4300 wl1271_warning("could not allocate HLID - too much stations");
4304 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4305 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4307 wl1271_warning("could not allocate HLID - too many links");
4311 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4312 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4313 wl
->active_sta_count
++;
4317 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4319 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4322 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4323 memset(wl
->links
[hlid
].addr
, 0, ETH_ALEN
);
4324 wl
->links
[hlid
].ba_bitmap
= 0;
4325 __clear_bit(hlid
, &wl
->ap_ps_map
);
4326 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4327 wl12xx_free_link(wl
, wlvif
, &hlid
);
4328 wl
->active_sta_count
--;
4331 * rearm the tx watchdog when the last STA is freed - give the FW a
4332 * chance to return STA-buffered packets before complaining.
4334 if (wl
->active_sta_count
== 0)
4335 wl12xx_rearm_tx_watchdog_locked(wl
);
4338 static int wl12xx_sta_add(struct wl1271
*wl
,
4339 struct wl12xx_vif
*wlvif
,
4340 struct ieee80211_sta
*sta
)
4342 struct wl1271_station
*wl_sta
;
4346 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4348 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4352 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4353 hlid
= wl_sta
->hlid
;
4355 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4357 wl1271_free_sta(wl
, wlvif
, hlid
);
4362 static int wl12xx_sta_remove(struct wl1271
*wl
,
4363 struct wl12xx_vif
*wlvif
,
4364 struct ieee80211_sta
*sta
)
4366 struct wl1271_station
*wl_sta
;
4369 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4371 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4373 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4376 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4380 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4384 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4385 struct wl12xx_vif
*wlvif
,
4386 struct ieee80211_sta
*sta
,
4387 enum ieee80211_sta_state old_state
,
4388 enum ieee80211_sta_state new_state
)
4390 struct wl1271_station
*wl_sta
;
4392 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4393 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4396 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4397 hlid
= wl_sta
->hlid
;
4399 /* Add station (AP mode) */
4401 old_state
== IEEE80211_STA_NOTEXIST
&&
4402 new_state
== IEEE80211_STA_NONE
)
4403 return wl12xx_sta_add(wl
, wlvif
, sta
);
4405 /* Remove station (AP mode) */
4407 old_state
== IEEE80211_STA_NONE
&&
4408 new_state
== IEEE80211_STA_NOTEXIST
) {
4410 wl12xx_sta_remove(wl
, wlvif
, sta
);
4414 /* Authorize station (AP mode) */
4416 new_state
== IEEE80211_STA_AUTHORIZED
) {
4417 ret
= wl12xx_cmd_set_peer_state(wl
, hlid
);
4421 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4426 /* Authorize station */
4428 new_state
== IEEE80211_STA_AUTHORIZED
) {
4429 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4430 return wl12xx_set_authorized(wl
, wlvif
);
4434 old_state
== IEEE80211_STA_AUTHORIZED
&&
4435 new_state
== IEEE80211_STA_ASSOC
) {
4436 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4443 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4444 struct ieee80211_vif
*vif
,
4445 struct ieee80211_sta
*sta
,
4446 enum ieee80211_sta_state old_state
,
4447 enum ieee80211_sta_state new_state
)
4449 struct wl1271
*wl
= hw
->priv
;
4450 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4453 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4454 sta
->aid
, old_state
, new_state
);
4456 mutex_lock(&wl
->mutex
);
4458 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4463 ret
= wl1271_ps_elp_wakeup(wl
);
4467 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4469 wl1271_ps_elp_sleep(wl
);
4471 mutex_unlock(&wl
->mutex
);
4472 if (new_state
< old_state
)
4477 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4478 struct ieee80211_vif
*vif
,
4479 enum ieee80211_ampdu_mlme_action action
,
4480 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4483 struct wl1271
*wl
= hw
->priv
;
4484 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4486 u8 hlid
, *ba_bitmap
;
4488 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4491 /* sanity check - the fields in FW are only 8bits wide */
4492 if (WARN_ON(tid
> 0xFF))
4495 mutex_lock(&wl
->mutex
);
4497 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4502 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4503 hlid
= wlvif
->sta
.hlid
;
4504 ba_bitmap
= &wlvif
->sta
.ba_rx_bitmap
;
4505 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4506 struct wl1271_station
*wl_sta
;
4508 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4509 hlid
= wl_sta
->hlid
;
4510 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4516 ret
= wl1271_ps_elp_wakeup(wl
);
4520 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4524 case IEEE80211_AMPDU_RX_START
:
4525 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4530 if (wl
->ba_rx_session_count
>= RX_BA_MAX_SESSIONS
) {
4532 wl1271_error("exceeded max RX BA sessions");
4536 if (*ba_bitmap
& BIT(tid
)) {
4538 wl1271_error("cannot enable RX BA session on active "
4543 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
4546 *ba_bitmap
|= BIT(tid
);
4547 wl
->ba_rx_session_count
++;
4551 case IEEE80211_AMPDU_RX_STOP
:
4552 if (!(*ba_bitmap
& BIT(tid
))) {
4554 * this happens on reconfig - so only output a debug
4555 * message for now, and don't fail the function.
4557 wl1271_debug(DEBUG_MAC80211
,
4558 "no active RX BA session on tid: %d",
4564 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
4567 *ba_bitmap
&= ~BIT(tid
);
4568 wl
->ba_rx_session_count
--;
4573 * The BA initiator session management in FW independently.
4574 * Falling break here on purpose for all TX APDU commands.
4576 case IEEE80211_AMPDU_TX_START
:
4577 case IEEE80211_AMPDU_TX_STOP
:
4578 case IEEE80211_AMPDU_TX_OPERATIONAL
:
4583 wl1271_error("Incorrect ampdu action id=%x\n", action
);
4587 wl1271_ps_elp_sleep(wl
);
4590 mutex_unlock(&wl
->mutex
);
4595 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
4596 struct ieee80211_vif
*vif
,
4597 const struct cfg80211_bitrate_mask
*mask
)
4599 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4600 struct wl1271
*wl
= hw
->priv
;
4603 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
4604 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
4605 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
4607 mutex_lock(&wl
->mutex
);
4609 for (i
= 0; i
< WLCORE_NUM_BANDS
; i
++)
4610 wlvif
->bitrate_masks
[i
] =
4611 wl1271_tx_enabled_rates_get(wl
,
4612 mask
->control
[i
].legacy
,
4615 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4618 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4619 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
4621 ret
= wl1271_ps_elp_wakeup(wl
);
4625 wl1271_set_band_rate(wl
, wlvif
);
4627 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4628 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4630 wl1271_ps_elp_sleep(wl
);
4633 mutex_unlock(&wl
->mutex
);
4638 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
4639 struct ieee80211_channel_switch
*ch_switch
)
4641 struct wl1271
*wl
= hw
->priv
;
4642 struct wl12xx_vif
*wlvif
;
4645 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
4647 wl1271_tx_flush(wl
);
4649 mutex_lock(&wl
->mutex
);
4651 if (unlikely(wl
->state
== WLCORE_STATE_OFF
)) {
4652 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4653 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4654 ieee80211_chswitch_done(vif
, false);
4657 } else if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4661 ret
= wl1271_ps_elp_wakeup(wl
);
4665 /* TODO: change mac80211 to pass vif as param */
4666 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4667 ret
= wl12xx_cmd_channel_switch(wl
, wlvif
, ch_switch
);
4670 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
4673 wl1271_ps_elp_sleep(wl
);
4676 mutex_unlock(&wl
->mutex
);
4679 static void wlcore_op_flush(struct ieee80211_hw
*hw
, bool drop
)
4681 struct wl1271
*wl
= hw
->priv
;
4683 wl1271_tx_flush(wl
);
4686 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
4688 struct wl1271
*wl
= hw
->priv
;
4691 mutex_lock(&wl
->mutex
);
4693 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4696 /* packets are considered pending if in the TX queue or the FW */
4697 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
4699 mutex_unlock(&wl
->mutex
);
4704 /* can't be const, mac80211 writes to this */
4705 static struct ieee80211_rate wl1271_rates
[] = {
4707 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
4708 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
4710 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
4711 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
4712 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4714 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
4715 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
4716 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4718 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
4719 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
4720 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4722 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4723 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4725 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4726 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4728 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4729 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4731 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4732 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4734 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4735 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4737 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4738 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4740 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4741 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4743 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4744 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4747 /* can't be const, mac80211 writes to this */
4748 static struct ieee80211_channel wl1271_channels
[] = {
4749 { .hw_value
= 1, .center_freq
= 2412, .max_power
= 25 },
4750 { .hw_value
= 2, .center_freq
= 2417, .max_power
= 25 },
4751 { .hw_value
= 3, .center_freq
= 2422, .max_power
= 25 },
4752 { .hw_value
= 4, .center_freq
= 2427, .max_power
= 25 },
4753 { .hw_value
= 5, .center_freq
= 2432, .max_power
= 25 },
4754 { .hw_value
= 6, .center_freq
= 2437, .max_power
= 25 },
4755 { .hw_value
= 7, .center_freq
= 2442, .max_power
= 25 },
4756 { .hw_value
= 8, .center_freq
= 2447, .max_power
= 25 },
4757 { .hw_value
= 9, .center_freq
= 2452, .max_power
= 25 },
4758 { .hw_value
= 10, .center_freq
= 2457, .max_power
= 25 },
4759 { .hw_value
= 11, .center_freq
= 2462, .max_power
= 25 },
4760 { .hw_value
= 12, .center_freq
= 2467, .max_power
= 25 },
4761 { .hw_value
= 13, .center_freq
= 2472, .max_power
= 25 },
4762 { .hw_value
= 14, .center_freq
= 2484, .max_power
= 25 },
4765 /* can't be const, mac80211 writes to this */
4766 static struct ieee80211_supported_band wl1271_band_2ghz
= {
4767 .channels
= wl1271_channels
,
4768 .n_channels
= ARRAY_SIZE(wl1271_channels
),
4769 .bitrates
= wl1271_rates
,
4770 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
4773 /* 5 GHz data rates for WL1273 */
4774 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
4776 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4777 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4779 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4780 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4782 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4783 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4785 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4786 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4788 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4789 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4791 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4792 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4794 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4795 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4797 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4798 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4801 /* 5 GHz band channels for WL1273 */
4802 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
4803 { .hw_value
= 7, .center_freq
= 5035, .max_power
= 25 },
4804 { .hw_value
= 8, .center_freq
= 5040, .max_power
= 25 },
4805 { .hw_value
= 9, .center_freq
= 5045, .max_power
= 25 },
4806 { .hw_value
= 11, .center_freq
= 5055, .max_power
= 25 },
4807 { .hw_value
= 12, .center_freq
= 5060, .max_power
= 25 },
4808 { .hw_value
= 16, .center_freq
= 5080, .max_power
= 25 },
4809 { .hw_value
= 34, .center_freq
= 5170, .max_power
= 25 },
4810 { .hw_value
= 36, .center_freq
= 5180, .max_power
= 25 },
4811 { .hw_value
= 38, .center_freq
= 5190, .max_power
= 25 },
4812 { .hw_value
= 40, .center_freq
= 5200, .max_power
= 25 },
4813 { .hw_value
= 42, .center_freq
= 5210, .max_power
= 25 },
4814 { .hw_value
= 44, .center_freq
= 5220, .max_power
= 25 },
4815 { .hw_value
= 46, .center_freq
= 5230, .max_power
= 25 },
4816 { .hw_value
= 48, .center_freq
= 5240, .max_power
= 25 },
4817 { .hw_value
= 52, .center_freq
= 5260, .max_power
= 25 },
4818 { .hw_value
= 56, .center_freq
= 5280, .max_power
= 25 },
4819 { .hw_value
= 60, .center_freq
= 5300, .max_power
= 25 },
4820 { .hw_value
= 64, .center_freq
= 5320, .max_power
= 25 },
4821 { .hw_value
= 100, .center_freq
= 5500, .max_power
= 25 },
4822 { .hw_value
= 104, .center_freq
= 5520, .max_power
= 25 },
4823 { .hw_value
= 108, .center_freq
= 5540, .max_power
= 25 },
4824 { .hw_value
= 112, .center_freq
= 5560, .max_power
= 25 },
4825 { .hw_value
= 116, .center_freq
= 5580, .max_power
= 25 },
4826 { .hw_value
= 120, .center_freq
= 5600, .max_power
= 25 },
4827 { .hw_value
= 124, .center_freq
= 5620, .max_power
= 25 },
4828 { .hw_value
= 128, .center_freq
= 5640, .max_power
= 25 },
4829 { .hw_value
= 132, .center_freq
= 5660, .max_power
= 25 },
4830 { .hw_value
= 136, .center_freq
= 5680, .max_power
= 25 },
4831 { .hw_value
= 140, .center_freq
= 5700, .max_power
= 25 },
4832 { .hw_value
= 149, .center_freq
= 5745, .max_power
= 25 },
4833 { .hw_value
= 153, .center_freq
= 5765, .max_power
= 25 },
4834 { .hw_value
= 157, .center_freq
= 5785, .max_power
= 25 },
4835 { .hw_value
= 161, .center_freq
= 5805, .max_power
= 25 },
4836 { .hw_value
= 165, .center_freq
= 5825, .max_power
= 25 },
4839 static struct ieee80211_supported_band wl1271_band_5ghz
= {
4840 .channels
= wl1271_channels_5ghz
,
4841 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
4842 .bitrates
= wl1271_rates_5ghz
,
4843 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
4846 static const struct ieee80211_ops wl1271_ops
= {
4847 .start
= wl1271_op_start
,
4848 .stop
= wlcore_op_stop
,
4849 .add_interface
= wl1271_op_add_interface
,
4850 .remove_interface
= wl1271_op_remove_interface
,
4851 .change_interface
= wl12xx_op_change_interface
,
4853 .suspend
= wl1271_op_suspend
,
4854 .resume
= wl1271_op_resume
,
4856 .config
= wl1271_op_config
,
4857 .prepare_multicast
= wl1271_op_prepare_multicast
,
4858 .configure_filter
= wl1271_op_configure_filter
,
4860 .set_key
= wlcore_op_set_key
,
4861 .hw_scan
= wl1271_op_hw_scan
,
4862 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
4863 .sched_scan_start
= wl1271_op_sched_scan_start
,
4864 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
4865 .bss_info_changed
= wl1271_op_bss_info_changed
,
4866 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
4867 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
4868 .conf_tx
= wl1271_op_conf_tx
,
4869 .get_tsf
= wl1271_op_get_tsf
,
4870 .get_survey
= wl1271_op_get_survey
,
4871 .sta_state
= wl12xx_op_sta_state
,
4872 .ampdu_action
= wl1271_op_ampdu_action
,
4873 .tx_frames_pending
= wl1271_tx_frames_pending
,
4874 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
4875 .channel_switch
= wl12xx_op_channel_switch
,
4876 .flush
= wlcore_op_flush
,
4877 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
4881 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
4887 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
4888 wl1271_error("Illegal RX rate from HW: %d", rate
);
4892 idx
= wl
->band_rate_to_idx
[band
][rate
];
4893 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
4894 wl1271_error("Unsupported RX rate from HW: %d", rate
);
4901 static ssize_t
wl1271_sysfs_show_bt_coex_state(struct device
*dev
,
4902 struct device_attribute
*attr
,
4905 struct wl1271
*wl
= dev_get_drvdata(dev
);
4910 mutex_lock(&wl
->mutex
);
4911 len
= snprintf(buf
, len
, "%d\n\n0 - off\n1 - on\n",
4913 mutex_unlock(&wl
->mutex
);
4919 static ssize_t
wl1271_sysfs_store_bt_coex_state(struct device
*dev
,
4920 struct device_attribute
*attr
,
4921 const char *buf
, size_t count
)
4923 struct wl1271
*wl
= dev_get_drvdata(dev
);
4927 ret
= kstrtoul(buf
, 10, &res
);
4929 wl1271_warning("incorrect value written to bt_coex_mode");
4933 mutex_lock(&wl
->mutex
);
4937 if (res
== wl
->sg_enabled
)
4940 wl
->sg_enabled
= res
;
4942 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4945 ret
= wl1271_ps_elp_wakeup(wl
);
4949 wl1271_acx_sg_enable(wl
, wl
->sg_enabled
);
4950 wl1271_ps_elp_sleep(wl
);
4953 mutex_unlock(&wl
->mutex
);
4957 static DEVICE_ATTR(bt_coex_state
, S_IRUGO
| S_IWUSR
,
4958 wl1271_sysfs_show_bt_coex_state
,
4959 wl1271_sysfs_store_bt_coex_state
);
4961 static ssize_t
wl1271_sysfs_show_hw_pg_ver(struct device
*dev
,
4962 struct device_attribute
*attr
,
4965 struct wl1271
*wl
= dev_get_drvdata(dev
);
4970 mutex_lock(&wl
->mutex
);
4971 if (wl
->hw_pg_ver
>= 0)
4972 len
= snprintf(buf
, len
, "%d\n", wl
->hw_pg_ver
);
4974 len
= snprintf(buf
, len
, "n/a\n");
4975 mutex_unlock(&wl
->mutex
);
/* hw_pg_ver: read-only sysfs attribute (no store handler). */
static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
		   wl1271_sysfs_show_hw_pg_ver, NULL);
4983 static ssize_t
wl1271_sysfs_read_fwlog(struct file
*filp
, struct kobject
*kobj
,
4984 struct bin_attribute
*bin_attr
,
4985 char *buffer
, loff_t pos
, size_t count
)
4987 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
4988 struct wl1271
*wl
= dev_get_drvdata(dev
);
4992 ret
= mutex_lock_interruptible(&wl
->mutex
);
4994 return -ERESTARTSYS
;
4996 /* Let only one thread read the log at a time, blocking others */
4997 while (wl
->fwlog_size
== 0) {
5000 prepare_to_wait_exclusive(&wl
->fwlog_waitq
,
5002 TASK_INTERRUPTIBLE
);
5004 if (wl
->fwlog_size
!= 0) {
5005 finish_wait(&wl
->fwlog_waitq
, &wait
);
5009 mutex_unlock(&wl
->mutex
);
5012 finish_wait(&wl
->fwlog_waitq
, &wait
);
5014 if (signal_pending(current
))
5015 return -ERESTARTSYS
;
5017 ret
= mutex_lock_interruptible(&wl
->mutex
);
5019 return -ERESTARTSYS
;
5022 /* Check if the fwlog is still valid */
5023 if (wl
->fwlog_size
< 0) {
5024 mutex_unlock(&wl
->mutex
);
5028 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5029 len
= min(count
, (size_t)wl
->fwlog_size
);
5030 wl
->fwlog_size
-= len
;
5031 memcpy(buffer
, wl
->fwlog
, len
);
5033 /* Make room for new messages */
5034 memmove(wl
->fwlog
, wl
->fwlog
+ len
, wl
->fwlog_size
);
5036 mutex_unlock(&wl
->mutex
);
5041 static struct bin_attribute fwlog_attr
= {
5042 .attr
= {.name
= "fwlog", .mode
= S_IRUSR
},
5043 .read
= wl1271_sysfs_read_fwlog
,
5046 static void wl1271_connection_loss_work(struct work_struct
*work
)
5048 struct delayed_work
*dwork
;
5050 struct ieee80211_vif
*vif
;
5051 struct wl12xx_vif
*wlvif
;
5053 dwork
= container_of(work
, struct delayed_work
, work
);
5054 wl
= container_of(dwork
, struct wl1271
, connection_loss_work
);
5056 wl1271_info("Connection loss work.");
5058 mutex_lock(&wl
->mutex
);
5060 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5063 /* Call mac80211 connection loss */
5064 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5065 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
5067 vif
= wl12xx_wlvif_to_vif(wlvif
);
5068 ieee80211_connection_loss(vif
);
5071 mutex_unlock(&wl
->mutex
);
5074 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
, u32 oui
, u32 nic
)
5078 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x",
5081 if (nic
+ WLCORE_NUM_MAC_ADDRESSES
- wl
->num_mac_addr
> 0xffffff)
5082 wl1271_warning("NIC part of the MAC address wraps around!");
5084 for (i
= 0; i
< wl
->num_mac_addr
; i
++) {
5085 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5086 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5087 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5088 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5089 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5090 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5094 /* we may be one address short at the most */
5095 WARN_ON(wl
->num_mac_addr
+ 1 < WLCORE_NUM_MAC_ADDRESSES
);
5098 * turn on the LAA bit in the first address and use it as
5101 if (wl
->num_mac_addr
< WLCORE_NUM_MAC_ADDRESSES
) {
5102 int idx
= WLCORE_NUM_MAC_ADDRESSES
- 1;
5103 memcpy(&wl
->addresses
[idx
], &wl
->addresses
[0],
5104 sizeof(wl
->addresses
[0]));
5106 wl
->addresses
[idx
].addr
[2] |= BIT(1);
5109 wl
->hw
->wiphy
->n_addresses
= WLCORE_NUM_MAC_ADDRESSES
;
5110 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
5113 static int wl12xx_get_hw_info(struct wl1271
*wl
)
5117 ret
= wl12xx_set_power_on(wl
);
5121 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
5125 wl
->fuse_oui_addr
= 0;
5126 wl
->fuse_nic_addr
= 0;
5128 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
5132 if (wl
->ops
->get_mac
)
5133 ret
= wl
->ops
->get_mac(wl
);
5136 wl1271_power_off(wl
);
5140 static int wl1271_register_hw(struct wl1271
*wl
)
5143 u32 oui_addr
= 0, nic_addr
= 0;
5145 if (wl
->mac80211_registered
)
5148 if (wl
->nvs_len
>= 12) {
5149 /* NOTE: The wl->nvs->nvs element must be first, in
5150 * order to simplify the casting, we assume it is at
5151 * the beginning of the wl->nvs structure.
5153 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5156 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5158 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5161 /* if the MAC address is zeroed in the NVS derive from fuse */
5162 if (oui_addr
== 0 && nic_addr
== 0) {
5163 oui_addr
= wl
->fuse_oui_addr
;
5164 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5165 nic_addr
= wl
->fuse_nic_addr
+ 1;
5168 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
);
5170 ret
= ieee80211_register_hw(wl
->hw
);
5172 wl1271_error("unable to register mac80211 hw: %d", ret
);
5176 wl
->mac80211_registered
= true;
5178 wl1271_debugfs_init(wl
);
5180 wl1271_notice("loaded");
5186 static void wl1271_unregister_hw(struct wl1271
*wl
)
5189 wl1271_plt_stop(wl
);
5191 ieee80211_unregister_hw(wl
->hw
);
5192 wl
->mac80211_registered
= false;
5196 static const struct ieee80211_iface_limit wlcore_iface_limits
[] = {
5199 .types
= BIT(NL80211_IFTYPE_STATION
),
5203 .types
= BIT(NL80211_IFTYPE_AP
) |
5204 BIT(NL80211_IFTYPE_P2P_GO
) |
5205 BIT(NL80211_IFTYPE_P2P_CLIENT
),
5209 static const struct ieee80211_iface_combination
5210 wlcore_iface_combinations
[] = {
5212 .num_different_channels
= 1,
5213 .max_interfaces
= 3,
5214 .limits
= wlcore_iface_limits
,
5215 .n_limits
= ARRAY_SIZE(wlcore_iface_limits
),
5219 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5221 static const u32 cipher_suites
[] = {
5222 WLAN_CIPHER_SUITE_WEP40
,
5223 WLAN_CIPHER_SUITE_WEP104
,
5224 WLAN_CIPHER_SUITE_TKIP
,
5225 WLAN_CIPHER_SUITE_CCMP
,
5226 WL1271_CIPHER_SUITE_GEM
,
5229 /* The tx descriptor buffer */
5230 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5232 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5233 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5236 /* FIXME: find a proper value */
5237 wl
->hw
->channel_change_time
= 10000;
5238 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5240 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5241 IEEE80211_HW_SUPPORTS_PS
|
5242 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5243 IEEE80211_HW_SUPPORTS_UAPSD
|
5244 IEEE80211_HW_HAS_RATE_CONTROL
|
5245 IEEE80211_HW_CONNECTION_MONITOR
|
5246 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5247 IEEE80211_HW_SPECTRUM_MGMT
|
5248 IEEE80211_HW_AP_LINK_PS
|
5249 IEEE80211_HW_AMPDU_AGGREGATION
|
5250 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5251 IEEE80211_HW_SCAN_WHILE_IDLE
;
5253 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5254 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5256 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5257 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5258 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5259 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5260 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5261 wl
->hw
->wiphy
->max_match_sets
= 16;
5263 * Maximum length of elements in scanning probe request templates
5264 * should be the maximum length possible for a template, without
5265 * the IEEE80211 header of the template
5267 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5268 sizeof(struct ieee80211_header
);
5270 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5271 sizeof(struct ieee80211_header
);
5273 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5274 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
;
5276 /* make sure all our channels fit in the scanned_ch bitmask */
5277 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5278 ARRAY_SIZE(wl1271_channels_5ghz
) >
5279 WL1271_MAX_CHANNELS
);
5281 * We keep local copies of the band structs because we need to
5282 * modify them on a per-device basis.
5284 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5285 sizeof(wl1271_band_2ghz
));
5286 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5287 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5288 sizeof(*wl
->ht_cap
));
5289 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5290 sizeof(wl1271_band_5ghz
));
5291 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5292 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5293 sizeof(*wl
->ht_cap
));
5295 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5296 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5297 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5298 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5301 wl
->hw
->max_rates
= 1;
5303 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5305 /* the FW answers probe-requests in AP-mode */
5306 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5307 wl
->hw
->wiphy
->probe_resp_offload
=
5308 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5309 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5310 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5312 /* allowed interface combinations */
5313 wl
->hw
->wiphy
->iface_combinations
= wlcore_iface_combinations
;
5314 wl
->hw
->wiphy
->n_iface_combinations
=
5315 ARRAY_SIZE(wlcore_iface_combinations
);
5317 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5319 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5320 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5322 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5327 #define WL1271_DEFAULT_CHANNEL 0
5329 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
, u32 aggr_buf_size
)
5331 struct ieee80211_hw
*hw
;
5336 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5338 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5340 wl1271_error("could not alloc ieee80211_hw");
5346 memset(wl
, 0, sizeof(*wl
));
5348 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5350 wl1271_error("could not alloc wl priv");
5352 goto err_priv_alloc
;
5355 INIT_LIST_HEAD(&wl
->wlvif_list
);
5359 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5360 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5361 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5363 skb_queue_head_init(&wl
->deferred_rx_queue
);
5364 skb_queue_head_init(&wl
->deferred_tx_queue
);
5366 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5367 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5368 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5369 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5370 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5371 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5372 INIT_DELAYED_WORK(&wl
->connection_loss_work
,
5373 wl1271_connection_loss_work
);
5375 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5376 if (!wl
->freezable_wq
) {
5381 wl
->channel
= WL1271_DEFAULT_CHANNEL
;
5383 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5384 wl
->band
= IEEE80211_BAND_2GHZ
;
5385 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5387 wl
->sg_enabled
= true;
5388 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
5391 wl
->ap_fw_ps_map
= 0;
5393 wl
->platform_quirks
= 0;
5394 wl
->sched_scanning
= false;
5395 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5396 wl
->active_sta_count
= 0;
5398 init_waitqueue_head(&wl
->fwlog_waitq
);
5400 /* The system link is always allocated */
5401 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5403 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5404 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5405 wl
->tx_frames
[i
] = NULL
;
5407 spin_lock_init(&wl
->wl_lock
);
5409 wl
->state
= WLCORE_STATE_OFF
;
5410 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5411 mutex_init(&wl
->mutex
);
5412 mutex_init(&wl
->flush_mutex
);
5413 init_completion(&wl
->nvs_loading_complete
);
5415 order
= get_order(aggr_buf_size
);
5416 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5417 if (!wl
->aggr_buf
) {
5421 wl
->aggr_buf_size
= aggr_buf_size
;
5423 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5424 if (!wl
->dummy_packet
) {
5429 /* Allocate one page for the FW log */
5430 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5433 goto err_dummy_packet
;
5436 wl
->mbox
= kmalloc(sizeof(*wl
->mbox
), GFP_KERNEL
| GFP_DMA
);
5445 free_page((unsigned long)wl
->fwlog
);
5448 dev_kfree_skb(wl
->dummy_packet
);
5451 free_pages((unsigned long)wl
->aggr_buf
, order
);
5454 destroy_workqueue(wl
->freezable_wq
);
5457 wl1271_debugfs_exit(wl
);
5461 ieee80211_free_hw(hw
);
5465 return ERR_PTR(ret
);
5467 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5469 int wlcore_free_hw(struct wl1271
*wl
)
5471 /* Unblock any fwlog readers */
5472 mutex_lock(&wl
->mutex
);
5473 wl
->fwlog_size
= -1;
5474 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5475 mutex_unlock(&wl
->mutex
);
5477 device_remove_bin_file(wl
->dev
, &fwlog_attr
);
5479 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5481 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5482 free_page((unsigned long)wl
->fwlog
);
5483 dev_kfree_skb(wl
->dummy_packet
);
5484 free_pages((unsigned long)wl
->aggr_buf
, get_order(wl
->aggr_buf_size
));
5486 wl1271_debugfs_exit(wl
);
5490 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5494 kfree(wl
->fw_status_1
);
5495 kfree(wl
->tx_res_if
);
5496 destroy_workqueue(wl
->freezable_wq
);
5499 ieee80211_free_hw(wl
->hw
);
5503 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
5505 static irqreturn_t
wl12xx_hardirq(int irq
, void *cookie
)
5507 struct wl1271
*wl
= cookie
;
5508 unsigned long flags
;
5510 wl1271_debug(DEBUG_IRQ
, "IRQ");
5512 /* complete the ELP completion */
5513 spin_lock_irqsave(&wl
->wl_lock
, flags
);
5514 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
5515 if (wl
->elp_compl
) {
5516 complete(wl
->elp_compl
);
5517 wl
->elp_compl
= NULL
;
5520 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
5521 /* don't enqueue a work right now. mark it as pending */
5522 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
5523 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
5524 disable_irq_nosync(wl
->irq
);
5525 pm_wakeup_event(wl
->dev
, 0);
5526 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5529 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5531 return IRQ_WAKE_THREAD
;
5534 static void wlcore_nvs_cb(const struct firmware
*fw
, void *context
)
5536 struct wl1271
*wl
= context
;
5537 struct platform_device
*pdev
= wl
->pdev
;
5538 struct wl12xx_platform_data
*pdata
= pdev
->dev
.platform_data
;
5539 unsigned long irqflags
;
5543 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
5545 wl1271_error("Could not allocate nvs data");
5548 wl
->nvs_len
= fw
->size
;
5550 wl1271_debug(DEBUG_BOOT
, "Could not get nvs file %s",
5556 ret
= wl
->ops
->setup(wl
);
5560 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
5562 /* adjust some runtime configuration parameters */
5563 wlcore_adjust_conf(wl
);
5565 wl
->irq
= platform_get_irq(pdev
, 0);
5566 wl
->platform_quirks
= pdata
->platform_quirks
;
5567 wl
->set_power
= pdata
->set_power
;
5568 wl
->if_ops
= pdata
->ops
;
5570 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
5571 irqflags
= IRQF_TRIGGER_RISING
;
5573 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
5575 ret
= request_threaded_irq(wl
->irq
, wl12xx_hardirq
, wlcore_irq
,
5579 wl1271_error("request_irq() failed: %d", ret
);
5584 ret
= enable_irq_wake(wl
->irq
);
5586 wl
->irq_wake_enabled
= true;
5587 device_init_wakeup(wl
->dev
, 1);
5588 if (pdata
->pwr_in_suspend
) {
5589 wl
->hw
->wiphy
->wowlan
.flags
= WIPHY_WOWLAN_ANY
;
5590 wl
->hw
->wiphy
->wowlan
.n_patterns
=
5591 WL1271_MAX_RX_FILTERS
;
5592 wl
->hw
->wiphy
->wowlan
.pattern_min_len
= 1;
5593 wl
->hw
->wiphy
->wowlan
.pattern_max_len
=
5594 WL1271_RX_FILTER_MAX_PATTERN_SIZE
;
5598 disable_irq(wl
->irq
);
5600 ret
= wl12xx_get_hw_info(wl
);
5602 wl1271_error("couldn't get hw info");
5606 ret
= wl
->ops
->identify_chip(wl
);
5610 ret
= wl1271_init_ieee80211(wl
);
5614 ret
= wl1271_register_hw(wl
);
5618 /* Create sysfs file to control bt coex state */
5619 ret
= device_create_file(wl
->dev
, &dev_attr_bt_coex_state
);
5621 wl1271_error("failed to create sysfs file bt_coex_state");
5625 /* Create sysfs file to get HW PG version */
5626 ret
= device_create_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5628 wl1271_error("failed to create sysfs file hw_pg_ver");
5629 goto out_bt_coex_state
;
5632 /* Create sysfs file for the FW log */
5633 ret
= device_create_bin_file(wl
->dev
, &fwlog_attr
);
5635 wl1271_error("failed to create sysfs file fwlog");
5639 wl
->initialized
= true;
5643 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5646 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5649 wl1271_unregister_hw(wl
);
5652 free_irq(wl
->irq
, wl
);
5658 release_firmware(fw
);
5659 complete_all(&wl
->nvs_loading_complete
);
5662 int __devinit
wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
5666 if (!wl
->ops
|| !wl
->ptable
)
5669 wl
->dev
= &pdev
->dev
;
5671 platform_set_drvdata(pdev
, wl
);
5673 ret
= request_firmware_nowait(THIS_MODULE
, FW_ACTION_HOTPLUG
,
5674 WL12XX_NVS_NAME
, &pdev
->dev
, GFP_KERNEL
,
5677 wl1271_error("request_firmware_nowait failed: %d", ret
);
5678 complete_all(&wl
->nvs_loading_complete
);
5683 EXPORT_SYMBOL_GPL(wlcore_probe
);
5685 int __devexit
wlcore_remove(struct platform_device
*pdev
)
5687 struct wl1271
*wl
= platform_get_drvdata(pdev
);
5689 wait_for_completion(&wl
->nvs_loading_complete
);
5690 if (!wl
->initialized
)
5693 if (wl
->irq_wake_enabled
) {
5694 device_init_wakeup(wl
->dev
, 0);
5695 disable_irq_wake(wl
->irq
);
5697 wl1271_unregister_hw(wl
);
5698 free_irq(wl
->irq
, wl
);
5703 EXPORT_SYMBOL_GPL(wlcore_remove
);
5705 u32 wl12xx_debug_level
= DEBUG_NONE
;
5706 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
5707 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
5708 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
5710 module_param_named(fwlog
, fwlog_param
, charp
, 0);
5711 MODULE_PARM_DESC(fwlog
,
5712 "FW logger options: continuous, ondemand, dbgpins or disable");
5714 module_param(bug_on_recovery
, bool, S_IRUSR
| S_IWUSR
);
5715 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
5717 module_param(no_recovery
, bool, S_IRUSR
| S_IWUSR
);
5718 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
5720 MODULE_LICENSE("GPL");
5721 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5722 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
5723 MODULE_FIRMWARE(WL12XX_NVS_NAME
);