/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/lockdep.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-debug.h"
#include "common.h"

const char *il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";
	}
}
EXPORT_SYMBOL(il_get_cmd_string);

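/*
 * A minimal sketch of the IL_CMD() helper assumed above (the usual
 * stringify-case pattern from common.h):
 *
 *	#define IL_CMD(x) case x: return #x
 *
 * so each IL_CMD(N_ALIVE)-style line expands to: case N_ALIVE: return
 * "N_ALIVE"; mapping every command id to its own name as a string.
 */
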
#define HOST_COMPLETE_TIMEOUT (HZ / 2)

static void il_generic_cmd_callback(struct il_priv *il,
				    struct il_device_cmd *cmd,
				    struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n",
		     il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}

static int
il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->flags & CMD_ASYNC));

	/* An asynchronous command can not expect an SKB to be set. */
	BUG_ON(cmd->flags & CMD_WANT_SKB);

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = il_generic_cmd_callback;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EBUSY;

	ret = il_enqueue_hcmd(il, cmd);
	if (ret < 0) {
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

int il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RF_KILL_HW, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);

int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return il_send_cmd_async(il, cmd);

	return il_send_cmd_sync(il, cmd);
}
EXPORT_SYMBOL(il_send_cmd);

int
il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return il_send_cmd_sync(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu);

int il_send_cmd_pdu_async(struct il_priv *il,
			  u8 id, u16 len, const void *data,
			  void (*callback)(struct il_priv *il,
					   struct il_device_cmd *cmd,
					   struct il_rx_pkt *pkt))
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	cmd.flags |= CMD_ASYNC;
	cmd.callback = callback;

	return il_send_cmd_async(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu_async);

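/*
 * A hedged usage sketch for the helpers above; C_BT_CONFIG and
 * struct il_bt_cmd merely stand in for any real command/payload pair:
 *
 *	struct il_bt_cmd bt_cmd = { 0 };
 *
 *	// synchronous: may sleep up to HOST_COMPLETE_TIMEOUT, so it must
 *	// run with il->mutex held and never from interrupt context
 *	ret = il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(bt_cmd), &bt_cmd);
 *
 *	// asynchronous: returns as soon as the command is enqueued; the
 *	// result arrives via the callback (il_generic_cmd_callback if NULL)
 *	ret = il_send_cmd_pdu_async(il, C_BT_CONFIG, sizeof(bt_cmd),
 *				    &bt_cmd, NULL);
 */
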
/* default: IL_LED_BLINK(0) using blinking idx table */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
		 "1=On(RF On)/Off(RF Off), 2=blinking");

/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0					SOLID ON
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{ .throughput = 0, .blink_time = 334 },
	{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
	{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
	{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
	{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
	{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
	{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
	{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
	{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
	{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
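
/*
 * Each entry's LED on/off time is blink_time / 2, which is how the table
 * above realizes the throughput mapping in the preceding comment
 * (thresholds are in Kbit/s, as used by mac80211's tpt trigger): e.g.
 * throughput above ~300 Mbit/s selects blink_time 50, i.e. 25 ms on /
 * 25 ms off, while an idle link (0) selects 334, i.e. 167 ms / 167 ms.
 */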

/*
 * Adjust LED blink rate to compensate for the MAC clock deviation of each HW.
 * LED blink rate analysis showed an average deviation of 0% on 3945 and
 * 5% on 4965 HW.
 * The LED on/off time must be compensated per HW according to that deviation
 * to achieve the desired LED frequency.
 * The calculation is: (100 - averageDeviation) / 100 * blinkTime
 * For code efficiency the calculation will be:
 *	compensation = (100 - averageDeviation) * 64 / 100
 *	NewBlinkTime = (compensation * BlinkTime) / 64
 */
static inline u8 il_blink_compensation(struct il_priv *il,
				       u8 time, u16 compensation)
{
	if (!compensation) {
		IL_ERR("undefined blink compensation: "
		       "use pre-defined blinking time\n");
		return time;
	}

	return (u8)((time * compensation) >> 6);
}

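/*
 * Worked example of the compensation math above: for the 4965's measured
 * 5% average deviation, compensation = (100 - 5) * 64 / 100 = 60, so a
 * nominal 110 ms blink becomes (110 * 60) >> 6 = 103 ms. With the 3945's
 * 0% deviation, compensation = 64 and the >> 6 leaves the time unchanged.
 */
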
/* Set led pattern command */
static int il_led_cmd(struct il_priv *il,
		      unsigned long on,
		      unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n",
	      il->cfg->base_params->led_compensation);
	led_cmd.on = il_blink_compensation(il, on,
				il->cfg->base_params->led_compensation);
	led_cmd.off = il_blink_compensation(il, off,
				il->cfg->base_params->led_compensation);

	ret = il->cfg->ops->led->cmd(il, &led_cmd);
	if (!ret) {
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}

static void il_led_brightness_set(struct led_classdev *led_cdev,
				  enum led_brightness brightness)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
	unsigned long on = 0;

	if (brightness > 0)
		on = IL_LED_SOLID;

	il_led_cmd(il, on, 0);
}

static int il_led_blink_set(struct led_classdev *led_cdev,
			    unsigned long *delay_on,
			    unsigned long *delay_off)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);

	return il_led_cmd(il, *delay_on, *delay_off);
}

void il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name = kasprintf(GFP_KERNEL, "%s-led",
				 wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		il->led.default_trigger =
			ieee80211_create_tpt_led_trigger(il->hw,
					IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
					il_blink, ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);

void il_leds_exit(struct il_priv *il)
{
	if (!il->led_registered)
		return;

	led_classdev_unregister(&il->led);
	kfree(il->led.name);
}
EXPORT_SYMBOL(il_leds_exit);

/************************** EEPROM BANDS ****************************
 *
 * The il_eeprom_band definitions below provide the mapping from the
 * EEPROM contents to the specific channel number supported for each
 * band.
 *
 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
 * The specific geography and calibration information for that channel
 * is contained in the eeprom map itself.
 *
 * During init, we copy the eeprom information and channel map
 * information into il->channel_info_24/52 and il->channel_map_24/52
 *
 * channel_map_24/52 provides the idx in the channel_info array for a
 * given channel. We have to have two separate maps because there is
 * channel number overlap between the 2.4GHz and 5.2GHz spectra, as seen
 * in band_1 and band_2.
 *
 * A value of 0xff stored in the channel_map indicates that the channel
 * is not supported by the hardware at all.
 *
 * A value of 0xfe in the channel_map indicates that the channel is not
 * valid for Tx with the current hardware. This means that
 * while the system can tune and receive on a given channel, it may not
 * be able to associate or transmit any frames on that
 * channel. There is no corresponding channel information for that
 * entry.
 *
 *********************************************************************/

/* 2.4 GHz */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};

/******************************************************************************
 *
 * EEPROM related functions
 *
 ******************************************************************************/

static int il_eeprom_verify_signature(struct il_priv *il)
{
	u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
	int ret = 0;

	D_EEPROM("EEPROM signature=0x%08x\n", gp);
	switch (gp) {
	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
		break;
	default:
		IL_ERR("bad EEPROM signature, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		break;
	}
	return ret;
}

const u8
*il_eeprom_query_addr(const struct il_priv *il, size_t offset)
{
	BUG_ON(offset >= il->cfg->base_params->eeprom_size);
	return &il->eeprom[offset];
}
EXPORT_SYMBOL(il_eeprom_query_addr);

u16 il_eeprom_query16(const struct il_priv *il, size_t offset)
{
	if (!il->eeprom)
		return 0;
	return (u16)il->eeprom[offset] | ((u16)il->eeprom[offset + 1] << 8);
}
EXPORT_SYMBOL(il_eeprom_query16);

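/*
 * A worked example of the byte order above: il_eeprom_query16() assembles
 * a little-endian 16-bit word from the raw byte image, so with
 * il->eeprom[offset] = 0x34 and il->eeprom[offset + 1] = 0x12 it returns
 * 0x1234. Callers pass byte offsets such as EEPROM_VERSION, as in
 * il_eeprom_init() below.
 */
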
/**
 * il_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into il->eeprom
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = il->cfg->base_params->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	e = (__le16 *)il->eeprom;

	il->cfg->ops->lib->apm_ops.init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret = _il_poll_bit(il, CSR_EEPROM_REG,
				   CSR_EEPROM_REG_READ_VALID_MSK,
				   CSR_EEPROM_REG_READ_VALID_MSK,
				   IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n",
		 "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->cfg->ops->lib->eeprom_ops.release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* Reset chip to save power until we load uCode during "up". */
	il_apm_stop(il);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
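
/*
 * A sketch of the register protocol behind the read loop above: the
 * 16-bit word at byte offset addr is requested by writing (addr << 1)
 * into CSR_EEPROM_REG (the low bits of that register carry control and
 * valid flags, hence the shift), CSR_EEPROM_REG_READ_VALID_MSK is then
 * polled until the EEPROM state machine finishes, and the data word comes
 * back in the upper 16 bits of the same register, which is why the loop
 * stores (r >> 16).
 */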

void il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);

static void il_init_band_reference(const struct il_priv *il,
			int eep_band, int *eeprom_ch_count,
			const struct il_eeprom_channel **eeprom_ch_info,
			const u8 **eeprom_ch_idx)
{
	u32 offset = il->cfg->ops->lib->
			eeprom_ops.regulatory_bands[eep_band - 1];
	switch (eep_band) {
	case 1:		/* 2.4GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
		*eeprom_ch_info = (struct il_eeprom_channel *)
				il_eeprom_query_addr(il, offset);
		*eeprom_ch_idx = il_eeprom_band_1;
		break;
	case 2:		/* 4.9GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
		*eeprom_ch_info = (struct il_eeprom_channel *)
				il_eeprom_query_addr(il, offset);
		*eeprom_ch_idx = il_eeprom_band_2;
		break;
	case 3:		/* 5.2GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
		*eeprom_ch_info = (struct il_eeprom_channel *)
				il_eeprom_query_addr(il, offset);
		*eeprom_ch_idx = il_eeprom_band_3;
		break;
	case 4:		/* 5.5GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
		*eeprom_ch_info = (struct il_eeprom_channel *)
				il_eeprom_query_addr(il, offset);
		*eeprom_ch_idx = il_eeprom_band_4;
		break;
	case 5:		/* 5.7GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
		*eeprom_ch_info = (struct il_eeprom_channel *)
				il_eeprom_query_addr(il, offset);
		*eeprom_ch_idx = il_eeprom_band_5;
		break;
	case 6:		/* 2.4GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
		*eeprom_ch_info = (struct il_eeprom_channel *)
				il_eeprom_query_addr(il, offset);
		*eeprom_ch_idx = il_eeprom_band_6;
		break;
	case 7:		/* 5 GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
		*eeprom_ch_info = (struct il_eeprom_channel *)
				il_eeprom_query_addr(il, offset);
		*eeprom_ch_idx = il_eeprom_band_7;
		break;
	default:
		BUG();
	}
}

#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
/**
 * il_mod_ht40_chan_info - Copy HT40 channel info into the driver's il_priv.
 *
 * Does not set up a command, or touch hardware.
 */
static int il_mod_ht40_chan_info(struct il_priv *il,
				 enum ieee80211_band band, u16 channel,
				 const struct il_eeprom_channel *eeprom_ch,
				 u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	ch_info = (struct il_channel_info *)
			il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n",
		 ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS),
		 CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR),
		 CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS),
		 eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
			~clear_ht40_extension_channel;

	return 0;
}

#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			      ? # x " " : "")

/**
 * il_init_channel_map - Set up driver's info for all possible channels
 */
int il_init_channel_map(struct il_priv *il)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_idx = NULL;
	const struct il_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct il_channel_info *ch_info;

	if (il->channel_count) {
		D_EEPROM("Channel map already initialized.\n");
		return 0;
	}

	D_EEPROM("Initializing regulatory info from EEPROM\n");

	il->channel_count =
		ARRAY_SIZE(il_eeprom_band_1) +
		ARRAY_SIZE(il_eeprom_band_2) +
		ARRAY_SIZE(il_eeprom_band_3) +
		ARRAY_SIZE(il_eeprom_band_4) +
		ARRAY_SIZE(il_eeprom_band_5);

	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);

	il->channel_info = kzalloc(sizeof(struct il_channel_info) *
				   il->channel_count, GFP_KERNEL);
	if (!il->channel_info) {
		IL_ERR("Could not allocate channel_info\n");
		il->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = il->channel_info;

	/* Loop through the 5 EEPROM bands, adding them in order to the
	 * channel map we maintain (which contains more information than
	 * just what is in the EEPROM) */
	for (band = 1; band <= 5; band++) {

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_idx[ch];
			ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
					IEEE80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 * and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then
			 * enable one by one */
			ch_info->ht40_extension_channel =
					IEEE80211_CHAN_NO_HT40;

			if (!(il_is_channel_valid(ch_info))) {
				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n",
					 ch_info->channel,
					 ch_info->flags,
					 il_is_channel_a_band(ch_info) ?
					 "5.2" : "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			D_EEPROM("Ch. %d [%sGHz] "
				 "%s%s%s%s%s%s(0x%02x %ddBm):"
				 " Ad-Hoc %ssupported\n",
				 ch_info->channel,
				 il_is_channel_a_band(ch_info) ?
				 "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(DFS),
				 eeprom_ch_info[ch].flags,
				 eeprom_ch_info[ch].max_power_avg,
				 ((eeprom_ch_info[ch].flags &
				   EEPROM_CHANNEL_IBSS) &&
				  !(eeprom_ch_info[ch].flags &
				    EEPROM_CHANNEL_RADAR))
				 ? "" : "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
	    EEPROM_REGULATORY_BAND_NO_HT40 &&
	    il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
	    EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
			(band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(il_init_channel_map);
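
/*
 * Worked example of the HT40 setup above: EEPROM band 7 entry 36 covers
 * the 40 MHz pair (36, 40). The first il_mod_ht40_chan_info() call marks
 * channel 36 as usable as the lower half (clearing NO_HT40PLUS), and the
 * second marks channel 36 + 4 = 40 as usable as the upper half (clearing
 * NO_HT40MINUS), provided the EEPROM flags declare the pair valid.
 */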

/*
 * il_free_channel_map - undo allocations in il_init_channel_map
 */
void il_free_channel_map(struct il_priv *il)
{
	kfree(il->channel_info);
	il->channel_count = 0;
}
EXPORT_SYMBOL(il_free_channel_map);

/**
 * il_get_channel_info - Find driver's private channel info
 *
 * Based on band and channel number.
 */
const struct
il_channel_info *il_get_channel_info(const struct il_priv *il,
				     enum ieee80211_band band, u16 channel)
{
	int i;

	switch (band) {
	case IEEE80211_BAND_5GHZ:
		for (i = 14; i < il->channel_count; i++) {
			if (il->channel_info[i].channel == channel)
				return &il->channel_info[i];
		}
		break;
	case IEEE80211_BAND_2GHZ:
		if (channel >= 1 && channel <= 14)
			return &il->channel_info[channel - 1];
		break;
	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL(il_get_channel_info);

/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211. In order to handle thermal throttling, we can
 * also use pre-defined power levels.
 */

/*
 * This defines the old power levels. They are still used by default
 * (level 1) and for thermal throttle (levels 3 through 5)
 */

struct il_power_vec_entry {
	struct il_powertable_cmd cmd;
	u8 no_dtim;	/* number of skip dtim */
};

static void il_power_sleep_cam_cmd(struct il_priv *il,
				   struct il_powertable_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	D_POWER("Sleep command for CAM\n");
}

static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}

int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd,
		  bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
			il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete use sleep_power_next, need to be updated */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->cfg->ops->lib->update_chain_flags && update_chains)
			il->cfg->ops->lib->update_chain_flags(il);
		else if (il->cfg->ops->lib->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}

int il_power_update_mode(struct il_priv *il, bool force)
{
	struct il_powertable_cmd cmd;

	il_power_sleep_cam_cmd(il, &cmd);
	return il_power_set_mode(il, &cmd, force);
}
EXPORT_SYMBOL(il_power_update_mode);

/* initialize to default */
void il_power_initialize(struct il_priv *il)
{
	u16 lctl = il_pcie_link_ctl(il);

	il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);

	il->power_data.debug_sleep_level_override = -1;

	memset(&il->power_data.sleep_cmd, 0,
	       sizeof(il->power_data.sleep_cmd));
}
EXPORT_SYMBOL(il_power_initialize);

/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
 * sending probe req. This should be set long enough to hear probe responses
 * from more than one AP. */
#define IL_ACTIVE_DWELL_TIME_24    (30)	/* all times in msec */
#define IL_ACTIVE_DWELL_TIME_52    (20)

#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
 * Must be set longer than active dwell time.
 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
#define IL_PASSIVE_DWELL_TIME_24   (20)	/* all times in msec */
#define IL_PASSIVE_DWELL_TIME_52   (10)
#define IL_PASSIVE_DWELL_BASE      (100)
#define IL_CHANNEL_TUNE_TIME       5

static int il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit immediately with an error when the device is not ready
	 * to receive the scan abort command, or when it is not currently
	 * performing a hardware scan */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure". A failure condition can be
		 * due to simply not being in an active scan, which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}

static void il_complete_scan(struct il_priv *il, bool aborted)
{
	/* check if scan was requested from mac80211 */
	if (il->scan_request) {
		D_SCAN("Complete scan in mac80211\n");
		ieee80211_scan_completed(il->hw, aborted);
	}

	il->scan_vif = NULL;
	il->scan_request = NULL;
}

void il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}

static void il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully sent scan abort\n");
}

/**
 * il_scan_cancel - Cancel any currently executing HW scan
 */
int il_scan_cancel(struct il_priv *il)
{
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
	return 0;
}
EXPORT_SYMBOL(il_scan_cancel);

/**
 * il_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 *
 */
int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&il->mutex);

	D_SCAN("Scan cancel timeout\n");

	il_do_scan_abort(il);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(S_SCAN_HW, &il->status))
			break;
		msleep(20);
	}

	return test_bit(S_SCAN_HW, &il->status);
}
EXPORT_SYMBOL(il_scan_cancel_timeout);

/* Service response to C_SCAN (0x80) */
static void il_hdl_scan(struct il_priv *il,
			struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}

/* Service N_SCAN_START (0x82) */
static void il_hdl_scan_start(struct il_priv *il,
			      struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;
	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: "
	       "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
	       notif->channel,
	       notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low),
	       notif->status, notif->beacon_timer);
}

/* Service N_SCAN_RESULTS (0x83) */
static void il_hdl_scan_results(struct il_priv *il,
				struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: "
	       "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n",
	       notif->channel,
	       notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}

/* Service N_SCAN_COMPLETE (0x84) */
static void il_hdl_scan_complete(struct il_priv *il,
				 struct il_rx_buf *rxb)
{

#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels,
	       scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}

void il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);

inline u16 il_get_active_dwell_time(struct il_priv *il,
				    enum ieee80211_band band,
				    u8 n_probes)
{
	if (band == IEEE80211_BAND_5GHZ)
		return IL_ACTIVE_DWELL_TIME_52 +
			IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
	else
		return IL_ACTIVE_DWELL_TIME_24 +
			IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
EXPORT_SYMBOL(il_get_active_dwell_time);
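
/*
 * Worked example: an active scan on 2.4 GHz sending n_probes = 2 probe
 * requests dwells IL_ACTIVE_DWELL_TIME_24 + IL_ACTIVE_DWELL_FACTOR_24GHZ *
 * (2 + 1) = 30 + 9 = 39 ms per channel; on 5 GHz the same scan dwells
 * 20 + 2 * 3 = 26 ms.
 */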

u16 il_get_passive_dwell_time(struct il_priv *il,
			      enum ieee80211_band band,
			      struct ieee80211_vif *vif)
{
	struct il_rxon_context *ctx = &il->ctx;
	u16 value;

	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
	    IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_24 :
	    IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
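
/*
 * Worked example: unassociated, the 2.4 GHz passive dwell is
 * IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_24 = 120 ms. Associated
 * with a typical 100 ms beacon interval, value = 100 * 98 / 100 -
 * 2 * IL_CHANNEL_TUNE_TIME = 98 - 10 = 88 ms, so the dwell is clamped to
 * 88 ms to fit between beacons.
 */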

void il_init_scan_params(struct il_priv *il)
{
	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
	if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
		il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
	if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
		il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);

static int il_scan_initiate(struct il_priv *il,
			    struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (WARN_ON(!il->cfg->ops->utils->request_scan))
		return -EOPNOTSUPP;

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->cfg->ops->utils->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}

int il_mac_hw_scan(struct ieee80211_hw *hw,
		   struct ieee80211_vif *vif,
		   struct cfg80211_scan_request *req)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	if (req->n_channels == 0)
		return -EINVAL;

	mutex_lock(&il->mutex);

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

	D_MAC80211("leave\n");

out_unlock:
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);

static void il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* If we get here, the firmware has not finished the scan and
	 * is most likely in bad shape, so don't bother sending an abort
	 * command; just force scan complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}

/**
 * il_fill_probe_req - fill in all required fields and IE for probe request
 */

u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, il_bcast_addr, ETH_ALEN);
	memcpy(frame->sa, ta, ETH_ALEN);
	memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16)len;
}
EXPORT_SYMBOL(il_fill_probe_req);
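
/*
 * The resulting frame layout, as a sketch: a 24-byte management header
 * (broadcast DA/BSSID, TA from the caller), then the 2-byte wildcard SSID
 * IE (WLAN_EID_SSID with zero length, which is what makes this a broadcast
 * probe), then the caller-supplied IEs. The returned length covers
 * whatever was written, or 0 if even the header and SSID IE did not fit
 * in "left".
 */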

static void il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep the scan_check work queued in case the firmware does not
	 * report back a scan completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}

static void il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il =
	    container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->cfg->ops->utils->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}

void il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);

void il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);

/* il->sta_lock must be held */
static void il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
{
	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
		       sta_id, il->stations[sta_id].sta.sta.addr);

	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
		D_ASSOC("STA id %u addr %pM already present"
			" in uCode (according to driver)\n",
			sta_id, il->stations[sta_id].sta.sta.addr);
	} else {
		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
		D_ASSOC("Added STA id %u addr %pM to uCode\n",
			sta_id, il->stations[sta_id].sta.sta.addr);
	}
}

static int il_process_add_sta_resp(struct il_priv *il,
				   struct il_addsta_cmd *addsta,
				   struct il_rx_pkt *pkt,
				   bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n",
		       pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n",
		       sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n",
			pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
	       sta_id, il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
	       addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}

static void il_add_sta_callback(struct il_priv *il,
				struct il_device_cmd *cmd,
				struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta =
	    (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);
}

int il_send_add_sta(struct il_priv *il,
		    struct il_addsta_cmd *sta, u8 flags)
{
	struct il_rx_pkt *pkt = NULL;
	int ret = 0;
	u8 data[sizeof(*sta)];
	struct il_host_cmd cmd = {
		.id = C_ADD_STA,
		.flags = flags,
		.data = data,
	};
	u8 sta_id __maybe_unused = sta->sta.sta_id;

	D_INFO("Adding sta %u (%pM) %ssynchronously\n",
	       sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");

	if (flags & CMD_ASYNC)
		cmd.callback = il_add_sta_callback;
	else {
		cmd.flags |= CMD_WANT_SKB;
		might_sleep();
	}

	cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
	ret = il_send_cmd(il, &cmd);

	if (ret || (flags & CMD_ASYNC))
		return ret;

	if (ret == 0) {
		pkt = (struct il_rx_pkt *)cmd.reply_page;
		ret = il_process_add_sta_resp(il, sta, pkt, true);
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}
EXPORT_SYMBOL(il_send_add_sta);

static void il_set_ht_add_station(struct il_priv *il, u8 idx,
				  struct ieee80211_sta *sta,
				  struct il_rxon_context *ctx)
{
	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
	__le32 sta_flags;
	u8 mimo_ps_mode;

	if (!sta || !sta_ht_inf->ht_supported)
		goto done;

	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
	D_ASSOC("spatial multiplexing power save mode: %s\n",
		(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
		"static" :
		(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
		"dynamic" : "disabled");

	sta_flags = il->stations[idx].sta.station_flags;

	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);

	switch (mimo_ps_mode) {
	case WLAN_HT_CAP_SM_PS_STATIC:
		sta_flags |= STA_FLG_MIMO_DIS_MSK;
		break;
	case WLAN_HT_CAP_SM_PS_DYNAMIC:
		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
		break;
	case WLAN_HT_CAP_SM_PS_DISABLED:
		break;
	default:
		IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
		break;
	}

	sta_flags |= cpu_to_le32(
	    (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);

	sta_flags |= cpu_to_le32(
	    (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);

	if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
		sta_flags |= STA_FLG_HT40_EN_MSK;
	else
		sta_flags &= ~STA_FLG_HT40_EN_MSK;

	il->stations[idx].sta.station_flags = sta_flags;
done:
	return;
}

/**
 * il_prep_station - Prepare station information for addition
 *
 * should be called with sta_lock held
 */
u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
		   const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
{
	struct il_station_entry *station;
	int i;
	u8 sta_id = IL_INVALID_STATION;
	u16 rate;

	if (is_ap)
		sta_id = ctx->ap_sta_id;
	else if (is_broadcast_ether_addr(addr))
		sta_id = ctx->bcast_sta_id;
	else
		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
			if (!compare_ether_addr(il->stations[i].sta.sta.addr,
						addr)) {
				sta_id = i;
				break;
			}

			if (!il->stations[i].used &&
			    sta_id == IL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		return sta_id;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
	    !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		return sta_id;
	}

	station = &il->stations[sta_id];
	station->used = IL_STA_DRIVER_ACTIVE;
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
	il->num_stations++;

	/* Set up the C_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = ctx->station_flags;
	station->ctxid = ctx->ctxid;

	if (sta) {
		struct il_station_priv_common *sta_priv;

		sta_priv = (void *)sta->drv_priv;
		sta_priv->ctx = ctx;
	}

	/*
	 * OK to call unconditionally, since local stations (IBSS BSSID
	 * STA and broadcast STA) pass in a NULL sta, and mac80211
	 * doesn't allow HT IBSS.
	 */
	il_set_ht_add_station(il, sta_id, sta, ctx);

	/* 3945 only */
	rate = (il->band == IEEE80211_BAND_5GHZ) ?
		RATE_6M_PLCP : RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;
}
EXPORT_SYMBOL_GPL(il_prep_station);

#define STA_WAIT_TIMEOUT (HZ/2)

/**
 * il_add_station_common - add a station and send C_ADD_STA to the uCode
 */
int
il_add_station_common(struct il_priv *il,
		      struct il_rxon_context *ctx,
		      const u8 *addr, bool is_ap,
		      struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct il_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		IL_ERR("Adding station %pM failed.\n",
		       il->stations[sta_id].sta.sta.addr);
		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(il_add_station_common);

/**
 * il_sta_ucode_deactivate - deactivate ucode status for a station
 *
 * il->sta_lock must be held
 */
static void il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
{
	/* Ucode must be active and driver must be non active */
	if ((il->stations[sta_id].used &
	     (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
	    IL_STA_UCODE_ACTIVE)
		IL_ERR("removed non active STA %u\n", sta_id);

	il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;

	memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
	D_ASSOC("Removed STA %u\n", sta_id);
}

static int il_send_remove_station(struct il_priv *il,
				  const u8 *addr, int sta_id,
				  bool temporary)
{
	struct il_rx_pkt *pkt;
	int ret;
	unsigned long flags_spin;
	struct il_rem_sta_cmd rm_sta_cmd;
	struct il_host_cmd cmd = {
		.id = C_REM_STA,
		.len = sizeof(struct il_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	cmd.flags |= CMD_WANT_SKB;

	ret = il_send_cmd(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n",
		       pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				il_sta_ucode_deactivate(il, sta_id);
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IL_ERR("C_REM_STA failed\n");
			break;
		}
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}

/**
 * il_remove_station - Remove driver's knowledge of station.
 */
int il_remove_station(struct il_priv *il, const u8 sta_id,
		      const u8 *addr)
{
	unsigned long flags;

	if (!il_is_ready(il)) {
		D_INFO("Unable to remove station %pM, device not ready.\n",
		       addr);
		/*
		 * It is typical for stations to be removed when we are
		 * going down. Return success since device will be down
		 * soon anyway
		 */
		return 0;
	}

	D_ASSOC("Removing STA from driver:%d  %pM\n", sta_id, addr);

	if (WARN_ON(sta_id == IL_INVALID_STATION))
		return -EINVAL;

	spin_lock_irqsave(&il->sta_lock, flags);

	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
		D_INFO("Removing %pM but non DRIVER active\n", addr);
		goto out_err;
	}

	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_INFO("Removing %pM but non UCODE active\n", addr);
		goto out_err;
	}

	if (il->stations[sta_id].used & IL_STA_LOCAL) {
		kfree(il->stations[sta_id].lq);
		il->stations[sta_id].lq = NULL;
	}

	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;

	il->num_stations--;

	BUG_ON(il->num_stations < 0);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_remove_station(il, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(il_remove_station);
2039
2040/**
2041 * il_clear_ucode_stations - clear ucode station table bits
2042 *
2043 * This function clears all the bits in the driver indicating
2044 * which stations are active in the ucode. Call when something
2045 * other than explicit station management would cause this in
2046 * the ucode, e.g. unassociated RXON.
2047 */
2048void il_clear_ucode_stations(struct il_priv *il,
2049 struct il_rxon_context *ctx)
2050{
2051 int i;
2052 unsigned long flags_spin;
2053 bool cleared = false;
2054
2055 D_INFO("Clearing ucode stations in driver\n");
2056
2057 spin_lock_irqsave(&il->sta_lock, flags_spin);
2058 for (i = 0; i < il->hw_params.max_stations; i++) {
2059 if (ctx && ctx->ctxid != il->stations[i].ctxid)
2060 continue;
2061
2062 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2063 D_INFO(
2064 "Clearing ucode active for station %d\n", i);
2065 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2066 cleared = true;
2067 }
2068 }
2069 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2070
2071 if (!cleared)
2072 D_INFO(
2073 "No active stations found to be cleared\n");
2074}
2075EXPORT_SYMBOL(il_clear_ucode_stations);
2076
2077/**
2078 * il_restore_stations() - Restore driver known stations to device
2079 *
2080 * All stations considered active by the driver, but not present in ucode,
2081 * are restored.
2082 *
2083 * Function sleeps.
2084 */
2085void
2086il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
2087{
2088 struct il_addsta_cmd sta_cmd;
2089 struct il_link_quality_cmd lq;
2090 unsigned long flags_spin;
2091 int i;
2092 bool found = false;
2093 int ret;
2094 bool send_lq;
2095
2096 if (!il_is_ready(il)) {
2097 D_INFO(
2098 "Not ready yet, not restoring any stations.\n");
2099 return;
2100 }
2101
2102 D_ASSOC("Restoring all known stations ... start.\n");
2103 spin_lock_irqsave(&il->sta_lock, flags_spin);
2104 for (i = 0; i < il->hw_params.max_stations; i++) {
2105 if (ctx->ctxid != il->stations[i].ctxid)
2106 continue;
2107 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
2108 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
2109 D_ASSOC("Restoring sta %pM\n",
2110 il->stations[i].sta.sta.addr);
2111 il->stations[i].sta.mode = 0;
2112 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
2113 found = true;
2114 }
2115 }
2116
2117 for (i = 0; i < il->hw_params.max_stations; i++) {
2118 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
2119 memcpy(&sta_cmd, &il->stations[i].sta,
2120 sizeof(struct il_addsta_cmd));
2121 send_lq = false;
2122 if (il->stations[i].lq) {
2123 memcpy(&lq, il->stations[i].lq,
2124 sizeof(struct il_link_quality_cmd));
2125 send_lq = true;
2126 }
2127 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2128 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2129 if (ret) {
2130 spin_lock_irqsave(&il->sta_lock, flags_spin);
2131 IL_ERR("Adding station %pM failed.\n",
2132 il->stations[i].sta.sta.addr);
2133 il->stations[i].used &=
2134 ~IL_STA_DRIVER_ACTIVE;
2135 il->stations[i].used &=
2136 ~IL_STA_UCODE_INPROGRESS;
2137 spin_unlock_irqrestore(&il->sta_lock,
2138 flags_spin);
2139 }
2140 /*
2141 * Rate scaling has already been initialized, send
2142 * current LQ command
2143 */
2144 if (send_lq)
2145 il_send_lq_cmd(il, ctx, &lq,
2146 CMD_SYNC, true);
2147 spin_lock_irqsave(&il->sta_lock, flags_spin);
2148 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
2149 }
2150 }
2151
2152 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2153 if (!found)
2154 D_INFO("Restoring all known stations"
2155 " .... no stations to be restored.\n");
2156 else
2157 D_INFO("Restoring all known stations"
2158 " .... complete.\n");
2159}
2160EXPORT_SYMBOL(il_restore_stations);
2161
2162int il_get_free_ucode_key_idx(struct il_priv *il)
2163{
2164 int i;
2165
2166 for (i = 0; i < il->sta_key_max_num; i++)
2167 if (!test_and_set_bit(i, &il->ucode_key_table))
2168 return i;
2169
2170 return WEP_INVALID_OFFSET;
2171}
2172EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2173
2174void il_dealloc_bcast_stations(struct il_priv *il)
2175{
2176 unsigned long flags;
2177 int i;
2178
2179 spin_lock_irqsave(&il->sta_lock, flags);
2180 for (i = 0; i < il->hw_params.max_stations; i++) {
2181 if (!(il->stations[i].used & IL_STA_BCAST))
2182 continue;
2183
2184 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2185 il->num_stations--;
2186 BUG_ON(il->num_stations < 0);
2187 kfree(il->stations[i].lq);
2188 il->stations[i].lq = NULL;
2189 }
2190 spin_unlock_irqrestore(&il->sta_lock, flags);
2191}
2192EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2193
2194#ifdef CONFIG_IWLEGACY_DEBUG
2195static void il_dump_lq_cmd(struct il_priv *il,
2196 struct il_link_quality_cmd *lq)
2197{
2198 int i;
2199 D_RATE("lq station id 0x%x\n", lq->sta_id);
2200 D_RATE("lq ant 0x%X 0x%X\n",
2201 lq->general_params.single_stream_ant_msk,
2202 lq->general_params.dual_stream_ant_msk);
2203
2204 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
2205 D_RATE("lq idx %d 0x%X\n",
2206 i, lq->rs_table[i].rate_n_flags);
2207}
2208#else
2209static inline void il_dump_lq_cmd(struct il_priv *il,
2210 struct il_link_quality_cmd *lq)
2211{
2212}
2213#endif
2214
2215/**
2216 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
2217 *
2218 * It sometimes happens that, when an HT rate has been in use and we
2219 * lose connectivity with the AP, mac80211 will first tell us that the
2220 * current channel is not HT anymore before removing the station. In such a
2221 * scenario the RXON flags will be updated to indicate we are not
2222 * communicating HT anymore, but the LQ command may still contain HT rates.
2223 * Test for this to prevent the driver from sending the LQ command between
2224 * the time the RXON flags are updated and the time the LQ command is updated.
2225 */
2226static bool il_is_lq_table_valid(struct il_priv *il,
2227 struct il_rxon_context *ctx,
2228 struct il_link_quality_cmd *lq)
2229{
2230 int i;
2231
2232 if (ctx->ht.enabled)
2233 return true;
2234
2235 D_INFO("Channel %u is not an HT channel\n",
2236 ctx->active.channel);
2237 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2238 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
2239 RATE_MCS_HT_MSK) {
2240 D_INFO(
2241 "idx %d of LQ expects HT channel\n",
2242 i);
2243 return false;
2244 }
2245 }
2246 return true;
2247}
2248
2249/**
2250 * il_send_lq_cmd() - Send link quality command
2251 * @init: This command is sent as part of station initialization right
2252 * after station has been added.
2253 *
2254 * The link quality command is sent as the last step of station creation.
2255 * The special case is when init is set: in that case we call a callback
2256 * to clear the state indicating that station creation is in
2257 * progress.
2258 */
2259int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
2260 struct il_link_quality_cmd *lq, u8 flags, bool init)
2261{
2262 int ret = 0;
2263 unsigned long flags_spin;
2264
2265 struct il_host_cmd cmd = {
2266 .id = C_TX_LINK_QUALITY_CMD,
2267 .len = sizeof(struct il_link_quality_cmd),
2268 .flags = flags,
2269 .data = lq,
2270 };
2271
2272 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
2273 return -EINVAL;
2274
2275
2276 spin_lock_irqsave(&il->sta_lock, flags_spin);
2277 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2278 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2279 return -EINVAL;
2280 }
2281 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2282
2283 il_dump_lq_cmd(il, lq);
2284 BUG_ON(init && (cmd.flags & CMD_ASYNC));
2285
2286 if (il_is_lq_table_valid(il, ctx, lq))
2287 ret = il_send_cmd(il, &cmd);
2288 else
2289 ret = -EINVAL;
2290
2291 if (cmd.flags & CMD_ASYNC)
2292 return ret;
2293
2294 if (init) {
2295 D_INFO("init LQ command complete,"
2296 " clearing sta addition status for sta %d\n",
2297 lq->sta_id);
2298 spin_lock_irqsave(&il->sta_lock, flags_spin);
2299 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2300 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2301 }
2302 return ret;
2303}
2304EXPORT_SYMBOL(il_send_lq_cmd);
2305
2306int il_mac_sta_remove(struct ieee80211_hw *hw,
2307 struct ieee80211_vif *vif,
2308 struct ieee80211_sta *sta)
2309{
2310 struct il_priv *il = hw->priv;
2311 struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
2312 int ret;
2313
2314 D_INFO("received request to remove station %pM\n",
2315 sta->addr);
2316 mutex_lock(&il->mutex);
2317 D_INFO("proceeding to remove station %pM\n",
2318 sta->addr);
2319 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
2320 if (ret)
2321 IL_ERR("Error removing station %pM\n",
2322 sta->addr);
2323 mutex_unlock(&il->mutex);
2324 return ret;
2325}
2326EXPORT_SYMBOL(il_mac_sta_remove);
2327
2328/************************** RX-FUNCTIONS ****************************/
2329/*
2330 * Rx theory of operation
2331 *
2332 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
2333 * each of which points to a Receive Buffer to be filled by the NIC. These get
2334 * used not only for Rx frames, but for any command response or notification
2335 * from the NIC. The driver and NIC manage the Rx buffers by means
2336 * of idxes into the circular buffer.
2337 *
2338 * Rx Queue Indexes
2339 * The host/firmware share two idx registers for managing the Rx buffers.
2340 *
2341 * The READ idx maps to the first position that the firmware may be writing
2342 * to -- the driver can read up to (but not including) this position and get
2343 * good data.
2344 * The READ idx is managed by the firmware once the card is enabled.
2345 *
2346 * The WRITE idx maps to the last position the driver has read from -- the
2347 * position preceding WRITE is the last slot the firmware can place a packet.
2348 *
2349 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2350 * WRITE = READ.
2351 *
2352 * During initialization, the host sets up the READ queue position to the first
2353 * IDX position, and WRITE to the last (READ - 1 wrapped)
2354 *
2355 * When the firmware places a packet in a buffer, it will advance the READ idx
2356 * and fire the RX interrupt. The driver can then query the READ idx and
2357 * process as many packets as possible, moving the WRITE idx forward as it
2358 * resets the Rx queue buffers with new memory.
2359 *
2360 * The management in the driver is as follows:
2361 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2362 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2363 * to replenish the iwl->rxq->rx_free.
2364 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
2365 * iwl->rxq is replenished and the READ IDX is updated (updating the
2366 * 'processed' and 'read' driver idxes as well)
2367 * + A received packet is processed and handed to the kernel network stack,
2368 * detached from the iwl->rxq. The driver 'processed' idx is updated.
2369 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2370 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2371 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
2372 * were enough free buffers and RX_STALLED is set it is cleared.
2373 *
2374 *
2375 * Driver sequence:
2376 *
2377 * il_rx_queue_alloc() Allocates rx_free
2378 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
2379 * il_rx_queue_restock
2380 * il_rx_queue_restock() Moves available buffers from rx_free into Rx
2381 * queue, updates firmware pointers, and updates
2382 * the WRITE idx. If insufficient rx_free buffers
2383 * are available, schedules il_rx_replenish
2384 *
2385 * -- enable interrupts --
2386 * ISR - il_rx() Detach il_rx_bufs from pool up to the
2387 * READ IDX, detaching the SKB from the pool.
2388 * Moves the packet buffer from queue to rx_used.
2389 * Calls il_rx_queue_restock to refill any empty
2390 * slots.
2391 * ...
2392 *
2393 */
2394
2395/**
2396 * il_rx_queue_space - Return number of free slots available in queue.
2397 */
2398int il_rx_queue_space(const struct il_rx_queue *q)
2399{
2400 int s = q->read - q->write;
2401 if (s <= 0)
2402 s += RX_QUEUE_SIZE;
2403 /* keep some buffer to not confuse full and empty queue */
2404 s -= 2;
2405 if (s < 0)
2406 s = 0;
2407 return s;
2408}
2409EXPORT_SYMBOL(il_rx_queue_space);
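/*
 * Worked trace of il_rx_queue_space() above (illustrative numbers only,
 * assuming RX_QUEUE_SIZE of 256): read = 12, write = 10 gives
 * s = 2 - 2 = 0, i.e. no slot is handed out even though the ring is not
 * literally full; read = 10, write = 12 gives s = -2 + 256 - 2 = 252
 * free slots. The two reserved slots keep "full" and "empty"
 * distinguishable, as described in the theory-of-operation above.
 */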
2410
2411/**
2412 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
2413 */
2414void
2415il_rx_queue_update_write_ptr(struct il_priv *il,
2416 struct il_rx_queue *q)
2417{
2418 unsigned long flags;
2419 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
2420 u32 reg;
2421
2422 spin_lock_irqsave(&q->lock, flags);
2423
2424 if (q->need_update == 0)
2425 goto exit_unlock;
2426
2427 /* If power-saving is in use, make sure device is awake */
2428 if (test_bit(S_POWER_PMI, &il->status)) {
2429 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2430
2431 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2432 D_INFO(
2433 "Rx queue requesting wakeup,"
2434 " GP1 = 0x%x\n", reg);
2435 il_set_bit(il, CSR_GP_CNTRL,
2436 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2437 goto exit_unlock;
2438 }
2439
2440 q->write_actual = (q->write & ~0x7);
2441 il_wr(il, rx_wrt_ptr_reg,
2442 q->write_actual);
2443
2444 /* Else device is assumed to be awake */
2445 } else {
2446 /* Device expects a multiple of 8 */
2447 q->write_actual = (q->write & ~0x7);
2448 il_wr(il, rx_wrt_ptr_reg,
2449 q->write_actual);
2450 }
2451
2452 q->need_update = 0;
2453
2454 exit_unlock:
2455 spin_unlock_irqrestore(&q->lock, flags);
2456}
2457EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2458
2459int il_rx_queue_alloc(struct il_priv *il)
2460{
2461 struct il_rx_queue *rxq = &il->rxq;
2462 struct device *dev = &il->pci_dev->dev;
2463 int i;
2464
2465 spin_lock_init(&rxq->lock);
2466 INIT_LIST_HEAD(&rxq->rx_free);
2467 INIT_LIST_HEAD(&rxq->rx_used);
2468
2469 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2470 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2471 GFP_KERNEL);
2472 if (!rxq->bd)
2473 goto err_bd;
2474
2475 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2476 &rxq->rb_stts_dma, GFP_KERNEL);
2477 if (!rxq->rb_stts)
2478 goto err_rb;
2479
2480 /* Fill the rx_used queue with _all_ of the Rx buffers */
2481 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2482 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2483
2484 /* Set us so that we have processed and used all buffers, but have
2485 * not restocked the Rx queue with fresh buffers */
2486 rxq->read = rxq->write = 0;
2487 rxq->write_actual = 0;
2488 rxq->free_count = 0;
2489 rxq->need_update = 0;
2490 return 0;
2491
2492err_rb:
2493 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2494 rxq->bd_dma);
2495err_bd:
2496 return -ENOMEM;
2497}
2498EXPORT_SYMBOL(il_rx_queue_alloc);
2499
2500
2501void il_hdl_spectrum_measurement(struct il_priv *il,
2502 struct il_rx_buf *rxb)
2503{
2504 struct il_rx_pkt *pkt = rxb_addr(rxb);
2505 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2506
2507 if (!report->state) {
2508 D_11H(
2509 "Spectrum Measure Notification: Start\n");
2510 return;
2511 }
2512
2513 memcpy(&il->measure_report, report, sizeof(*report));
2514 il->measurement_status |= MEASUREMENT_READY;
2515}
2516EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2517
2518/*
2519 * returns non-zero if packet should be dropped
2520 */
2521int il_set_decrypted_flag(struct il_priv *il,
2522 struct ieee80211_hdr *hdr,
2523 u32 decrypt_res,
2524 struct ieee80211_rx_status *stats)
2525{
2526 u16 fc = le16_to_cpu(hdr->frame_control);
2527
2528 /*
2529 * All contexts have the same setting here due to it being
2530 * a module parameter, so OK to check any context.
2531 */
2532 if (il->ctx.active.filter_flags &
2533 RXON_FILTER_DIS_DECRYPT_MSK)
2534 return 0;
2535
2536 if (!(fc & IEEE80211_FCTL_PROTECTED))
2537 return 0;
2538
2539 D_RX("decrypt_res:0x%x\n", decrypt_res);
2540 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2541 case RX_RES_STATUS_SEC_TYPE_TKIP:
2542		/* The uCode got a bad phase 1 key, so it pushes the packet up.
2543		 * Decryption will be done in SW. */
2544 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2545 RX_RES_STATUS_BAD_KEY_TTAK)
2546 break;
2547
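		/* fall through - good TTAK, check ICV/MIC like WEP below */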
2548 case RX_RES_STATUS_SEC_TYPE_WEP:
2549 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2550 RX_RES_STATUS_BAD_ICV_MIC) {
2551 /* bad ICV, the packet is destroyed since the
2552			 * decryption is done in place; drop it */
2553 D_RX("Packet destroyed\n");
2554 return -1;
2555 }
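		/* fall through - ICV/MIC OK, check if HW decryption succeeded */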
2556 case RX_RES_STATUS_SEC_TYPE_CCMP:
2557 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2558 RX_RES_STATUS_DECRYPT_OK) {
2559 D_RX("hw decrypt successfully!!!\n");
2560 stats->flag |= RX_FLAG_DECRYPTED;
2561 }
2562 break;
2563
2564 default:
2565 break;
2566 }
2567 return 0;
2568}
2569EXPORT_SYMBOL(il_set_decrypted_flag);
2570
2571/**
2572 * il_txq_update_write_ptr - Send new write idx to hardware
2573 */
2574void
2575il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
2576{
2577 u32 reg = 0;
2578 int txq_id = txq->q.id;
2579
2580 if (txq->need_update == 0)
2581 return;
2582
2583 /* if we're trying to save power */
2584 if (test_bit(S_POWER_PMI, &il->status)) {
2585 /* wake up nic if it's powered down ...
2586 * uCode will wake up, and interrupt us again, so next
2587 * time we'll skip this part. */
2588 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2589
2590 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2591 D_INFO(
2592 "Tx queue %d requesting wakeup,"
2593 " GP1 = 0x%x\n", txq_id, reg);
2594 il_set_bit(il, CSR_GP_CNTRL,
2595 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2596 return;
2597 }
2598
2599 il_wr(il, HBUS_TARG_WRPTR,
2600 txq->q.write_ptr | (txq_id << 8));
2601
2602 /*
2603 * else not in power-save mode,
2604 * uCode will never sleep when we're
2605 * trying to tx (during RFKILL, we're not trying to tx).
2606 */
2607 } else
2608 _il_wr(il, HBUS_TARG_WRPTR,
2609 txq->q.write_ptr | (txq_id << 8));
2610 txq->need_update = 0;
2611}
2612EXPORT_SYMBOL(il_txq_update_write_ptr);
2613
2614/**
2615 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
2616 */
2617void il_tx_queue_unmap(struct il_priv *il, int txq_id)
2618{
2619 struct il_tx_queue *txq = &il->txq[txq_id];
2620 struct il_queue *q = &txq->q;
2621
2622 if (q->n_bd == 0)
2623 return;
2624
2625 while (q->write_ptr != q->read_ptr) {
2626 il->cfg->ops->lib->txq_free_tfd(il, txq);
2627 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2628 }
2629}
2630EXPORT_SYMBOL(il_tx_queue_unmap);
2631
2632/**
2633 * il_tx_queue_free - Deallocate DMA queue.
2634 * @txq: Transmit queue to deallocate.
2635 *
2636 * Empty queue by removing and destroying all BD's.
2637 * Free all buffers.
2638 * 0-fill, but do not free "txq" descriptor structure.
2639 */
2640void il_tx_queue_free(struct il_priv *il, int txq_id)
2641{
2642 struct il_tx_queue *txq = &il->txq[txq_id];
2643 struct device *dev = &il->pci_dev->dev;
2644 int i;
2645
2646 il_tx_queue_unmap(il, txq_id);
2647
2648 /* De-alloc array of command/tx buffers */
2649 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2650 kfree(txq->cmd[i]);
2651
2652 /* De-alloc circular buffer of TFDs */
2653 if (txq->q.n_bd)
2654 dma_free_coherent(dev, il->hw_params.tfd_size *
2655 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2656
2657 /* De-alloc array of per-TFD driver data */
2658 kfree(txq->txb);
2659 txq->txb = NULL;
2660
2661 /* deallocate arrays */
2662 kfree(txq->cmd);
2663 kfree(txq->meta);
2664 txq->cmd = NULL;
2665 txq->meta = NULL;
2666
2667 /* 0-fill queue descriptor structure */
2668 memset(txq, 0, sizeof(*txq));
2669}
2670EXPORT_SYMBOL(il_tx_queue_free);
2671
2672/**
2673 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
2674 */
2675void il_cmd_queue_unmap(struct il_priv *il)
2676{
2677 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2678 struct il_queue *q = &txq->q;
2679 int i;
2680
2681 if (q->n_bd == 0)
2682 return;
2683
2684 while (q->read_ptr != q->write_ptr) {
2685 i = il_get_cmd_idx(q, q->read_ptr, 0);
2686
2687 if (txq->meta[i].flags & CMD_MAPPED) {
2688 pci_unmap_single(il->pci_dev,
2689 dma_unmap_addr(&txq->meta[i], mapping),
2690 dma_unmap_len(&txq->meta[i], len),
2691 PCI_DMA_BIDIRECTIONAL);
2692 txq->meta[i].flags = 0;
2693 }
2694
2695 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2696 }
2697
2698 i = q->n_win;
2699 if (txq->meta[i].flags & CMD_MAPPED) {
2700 pci_unmap_single(il->pci_dev,
2701 dma_unmap_addr(&txq->meta[i], mapping),
2702 dma_unmap_len(&txq->meta[i], len),
2703 PCI_DMA_BIDIRECTIONAL);
2704 txq->meta[i].flags = 0;
2705 }
2706}
2707EXPORT_SYMBOL(il_cmd_queue_unmap);
2708
2709/**
2710 * il_cmd_queue_free - Deallocate DMA queue.
2711 * @txq: Transmit queue to deallocate.
2712 *
2713 * Empty queue by removing and destroying all BD's.
2714 * Free all buffers.
2715 * 0-fill, but do not free "txq" descriptor structure.
2716 */
2717void il_cmd_queue_free(struct il_priv *il)
2718{
2719 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2720 struct device *dev = &il->pci_dev->dev;
2721 int i;
2722
2723 il_cmd_queue_unmap(il);
2724
2725 /* De-alloc array of command/tx buffers */
2726 for (i = 0; i <= TFD_CMD_SLOTS; i++)
2727 kfree(txq->cmd[i]);
2728
2729 /* De-alloc circular buffer of TFDs */
2730 if (txq->q.n_bd)
2731 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2732 txq->tfds, txq->q.dma_addr);
2733
2734 /* deallocate arrays */
2735 kfree(txq->cmd);
2736 kfree(txq->meta);
2737 txq->cmd = NULL;
2738 txq->meta = NULL;
2739
2740 /* 0-fill queue descriptor structure */
2741 memset(txq, 0, sizeof(*txq));
2742}
2743EXPORT_SYMBOL(il_cmd_queue_free);
2744
2745/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
2746 * DMA services
2747 *
2748 * Theory of operation
2749 *
2750 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
2751 * of buffer descriptors, each of which points to one or more data buffers for
2752 * the device to read from or fill. Driver and device exchange status of each
2753 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
2754 * entries in each circular buffer, to protect against confusing empty and full
2755 * queue states.
2756 *
2757 * The device reads or writes the data in the queues via the device's several
2758 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
2759 *
2760 * For the Tx queue, there are low mark and high mark limits. If, after
2761 * queuing the packet for Tx, the free space becomes < low mark, the Tx queue
2762 * is stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
2763 * space becomes > high mark, the Tx queue is resumed.
2764 *
2765 * See more detailed info in 4965.h.
2766 ***************************************************/
2767
2768int il_queue_space(const struct il_queue *q)
2769{
2770 int s = q->read_ptr - q->write_ptr;
2771
2772 if (q->read_ptr > q->write_ptr)
2773 s -= q->n_bd;
2774
2775 if (s <= 0)
2776 s += q->n_win;
2777 /* keep some reserve to not confuse empty and full situations */
2778 s -= 2;
2779 if (s < 0)
2780 s = 0;
2781 return s;
2782}
2783EXPORT_SYMBOL(il_queue_space);
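/*
 * Illustrative trace (assumed values): with n_bd = 256 and n_win = 64,
 * read_ptr = 0 and write_ptr = 5 give s = -5 + 64 - 2 = 57 usable
 * slots. Note that the window size (n_win), not the full BD ring,
 * bounds the space this function reports.
 */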
2784
2785
2786/**
2787 * il_queue_init - Initialize queue's high/low-water and read/write idxes
2788 */
2789static int il_queue_init(struct il_priv *il, struct il_queue *q,
2790 int count, int slots_num, u32 id)
2791{
2792 q->n_bd = count;
2793 q->n_win = slots_num;
2794 q->id = id;
2795
2796 /* count must be power-of-two size, otherwise il_queue_inc_wrap
2797 * and il_queue_dec_wrap are broken. */
2798 BUG_ON(!is_power_of_2(count));
2799
2800 /* slots_num must be power-of-two size, otherwise
2801 * il_get_cmd_idx is broken. */
2802 BUG_ON(!is_power_of_2(slots_num));
2803
2804 q->low_mark = q->n_win / 4;
2805 if (q->low_mark < 4)
2806 q->low_mark = 4;
2807
2808 q->high_mark = q->n_win / 8;
2809 if (q->high_mark < 2)
2810 q->high_mark = 2;
2811
2812 q->write_ptr = q->read_ptr = 0;
2813
2814 return 0;
2815}
2816
2817/**
2818 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
2819 */
2820static int il_tx_queue_alloc(struct il_priv *il,
2821 struct il_tx_queue *txq, u32 id)
2822{
2823 struct device *dev = &il->pci_dev->dev;
2824 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2825
2826	/* Driver private data, only for Tx (not command) queues,
2827	 * not shared with device. */
2828 if (id != il->cmd_queue) {
2829 txq->txb = kzalloc(sizeof(txq->txb[0]) *
2830 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
2831 if (!txq->txb) {
2832 IL_ERR("kmalloc for auxiliary BD "
2833 "structures failed\n");
2834 goto error;
2835 }
2836 } else {
2837 txq->txb = NULL;
2838 }
2839
2840 /* Circular buffer of transmit frame descriptors (TFDs),
2841 * shared with device */
2842 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
2843 GFP_KERNEL);
2844 if (!txq->tfds) {
2845 IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
2846 goto error;
2847 }
2848 txq->q.id = id;
2849
2850 return 0;
2851
2852 error:
2853 kfree(txq->txb);
2854 txq->txb = NULL;
2855
2856 return -ENOMEM;
2857}
2858
2859/**
2860 * il_tx_queue_init - Allocate and initialize one tx/cmd queue
2861 */
2862int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq,
2863 int slots_num, u32 txq_id)
2864{
2865 int i, len;
2866 int ret;
2867 int actual_slots = slots_num;
2868
2869 /*
2870 * Alloc buffer array for commands (Tx or other types of commands).
2871 * For the command queue (#4/#9), allocate command space + one big
2872	 * command for scan, since the scan command is very large; the system will
2873 * not have two scans at the same time, so only one is needed.
2874 * For normal Tx queues (all other queues), no super-size command
2875 * space is needed.
2876 */
2877 if (txq_id == il->cmd_queue)
2878 actual_slots++;
2879
2880 txq->meta = kzalloc(sizeof(struct il_cmd_meta) * actual_slots,
2881 GFP_KERNEL);
2882 txq->cmd = kzalloc(sizeof(struct il_device_cmd *) * actual_slots,
2883 GFP_KERNEL);
2884
2885 if (!txq->meta || !txq->cmd)
2886 goto out_free_arrays;
2887
2888 len = sizeof(struct il_device_cmd);
2889 for (i = 0; i < actual_slots; i++) {
2890 /* only happens for cmd queue */
2891 if (i == slots_num)
2892 len = IL_MAX_CMD_SIZE;
2893
2894 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
2895 if (!txq->cmd[i])
2896 goto err;
2897 }
2898
2899 /* Alloc driver data array and TFD circular buffer */
2900 ret = il_tx_queue_alloc(il, txq, txq_id);
2901 if (ret)
2902 goto err;
2903
2904 txq->need_update = 0;
2905
2906 /*
2907 * For the default queues 0-3, set up the swq_id
2908 * already -- all others need to get one later
2909 * (if they need one at all).
2910 */
2911 if (txq_id < 4)
2912 il_set_swq_id(txq, txq_id, txq_id);
2913
2914 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
2915 * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
2916 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
2917
2918 /* Initialize queue's high/low-water marks, and head/tail idxes */
2919 il_queue_init(il, &txq->q,
2920 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
2921
2922 /* Tell device where to find queue */
2923 il->cfg->ops->lib->txq_init(il, txq);
2924
2925 return 0;
2926err:
2927 for (i = 0; i < actual_slots; i++)
2928 kfree(txq->cmd[i]);
2929out_free_arrays:
2930 kfree(txq->meta);
2931 kfree(txq->cmd);
2932
2933 return -ENOMEM;
2934}
2935EXPORT_SYMBOL(il_tx_queue_init);
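/*
 * Typical call pattern (sketch only, mirroring how the HW-specific
 * 3945/4965 code is expected to set up its queues; "ret" and "txq_id"
 * are the caller's locals):
 *
 *	int slots = (txq_id == il->cmd_queue) ? TFD_CMD_SLOTS
 *					      : TFD_TX_CMD_SLOTS;
 *	ret = il_tx_queue_init(il, &il->txq[txq_id], slots, txq_id);
 */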
2936
2937void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
2938 int slots_num, u32 txq_id)
2939{
2940 int actual_slots = slots_num;
2941
2942 if (txq_id == il->cmd_queue)
2943 actual_slots++;
2944
2945 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
2946
2947 txq->need_update = 0;
2948
2949 /* Initialize queue's high/low-water marks, and head/tail idxes */
2950 il_queue_init(il, &txq->q,
2951 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
2952
2953 /* Tell device where to find queue */
2954 il->cfg->ops->lib->txq_init(il, txq);
2955}
2956EXPORT_SYMBOL(il_tx_queue_reset);
2957
2958/*************** HOST COMMAND QUEUE FUNCTIONS *****/
2959
2960/**
2961 * il_enqueue_hcmd - enqueue a uCode command
2962 * @il: device private data pointer
2963 * @cmd: a pointer to the ucode command structure
2964 *
2965 * The function returns a value < 0 to indicate that the operation
2966 * failed. On success, it returns the idx (> 0) of the command in the
2967 * command queue.
2968 */
2969int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
2970{
2971 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2972 struct il_queue *q = &txq->q;
2973 struct il_device_cmd *out_cmd;
2974 struct il_cmd_meta *out_meta;
2975 dma_addr_t phys_addr;
2976 unsigned long flags;
2977 int len;
2978 u32 idx;
2979 u16 fix_size;
2980
2981 cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
2982 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
2983
2984	/* If any of the command structures end up being larger than
2985	 * TFD_MAX_PAYLOAD_SIZE and are sent as 'small' commands, then
2986	 * we will need to increase the size of the TFD entries.
2987	 * Also, check that the command buffer does not exceed the size
2988	 * of device_cmd and max_cmd_size. */
2989 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
2990 !(cmd->flags & CMD_SIZE_HUGE));
2991 BUG_ON(fix_size > IL_MAX_CMD_SIZE);
2992
2993 if (il_is_rfkill(il) || il_is_ctkill(il)) {
2994 IL_WARN("Not sending command - %s KILL\n",
2995 il_is_rfkill(il) ? "RF" : "CT");
2996 return -EIO;
2997 }
2998
2999 spin_lock_irqsave(&il->hcmd_lock, flags);
3000
3001 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
3002 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3003
3004 IL_ERR("Restarting adapter due to command queue full\n");
3005 queue_work(il->workqueue, &il->restart);
3006 return -ENOSPC;
3007 }
3008
3009 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
3010 out_cmd = txq->cmd[idx];
3011 out_meta = &txq->meta[idx];
3012
3013 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
3014 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3015 return -ENOSPC;
3016 }
3017
3018 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
3019 out_meta->flags = cmd->flags | CMD_MAPPED;
3020 if (cmd->flags & CMD_WANT_SKB)
3021 out_meta->source = cmd;
3022 if (cmd->flags & CMD_ASYNC)
3023 out_meta->callback = cmd->callback;
3024
3025 out_cmd->hdr.cmd = cmd->id;
3026 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
3027
3028 /* At this point, the out_cmd now has all of the incoming cmd
3029 * information */
3030
3031 out_cmd->hdr.flags = 0;
3032 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) |
3033 IDX_TO_SEQ(q->write_ptr));
3034 if (cmd->flags & CMD_SIZE_HUGE)
3035 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
3036 len = sizeof(struct il_device_cmd);
3037 if (idx == TFD_CMD_SLOTS)
3038 len = IL_MAX_CMD_SIZE;
3039
3040#ifdef CONFIG_IWLEGACY_DEBUG
3041 switch (out_cmd->hdr.cmd) {
3042 case C_TX_LINK_QUALITY_CMD:
3043 case C_SENSITIVITY:
3044 D_HC_DUMP(
3045 "Sending command %s (#%x), seq: 0x%04X, "
3046 "%d bytes at %d[%d]:%d\n",
3047 il_get_cmd_string(out_cmd->hdr.cmd),
3048 out_cmd->hdr.cmd,
3049 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3050 q->write_ptr, idx, il->cmd_queue);
3051 break;
3052 default:
3053 D_HC("Sending command %s (#%x), seq: 0x%04X, "
3054 "%d bytes at %d[%d]:%d\n",
3055 il_get_cmd_string(out_cmd->hdr.cmd),
3056 out_cmd->hdr.cmd,
3057 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3058 q->write_ptr, idx, il->cmd_queue);
3059 }
3060#endif
3061 txq->need_update = 1;
3062
3063 if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
3064 /* Set up entry in queue's byte count circular buffer */
3065 il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
3066
3067 phys_addr = pci_map_single(il->pci_dev, &out_cmd->hdr,
3068 fix_size, PCI_DMA_BIDIRECTIONAL);
3069 dma_unmap_addr_set(out_meta, mapping, phys_addr);
3070 dma_unmap_len_set(out_meta, len, fix_size);
3071
3072 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq,
3073 phys_addr, fix_size, 1,
3074 U32_PAD(cmd->len));
3075
3076 /* Increment and update queue's write idx */
3077 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3078 il_txq_update_write_ptr(il, txq);
3079
3080 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3081 return idx;
3082}
3083
3084/**
3085 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
3086 *
3087 * When FW advances 'R' idx, all entries between old and new 'R' idx
3088 * need to be reclaimed. As a result, some free space becomes available.
3089 * If there is enough free space (> low mark), wake the stack that feeds us.
3090 */
3091static void il_hcmd_queue_reclaim(struct il_priv *il, int txq_id,
3092 int idx, int cmd_idx)
3093{
3094 struct il_tx_queue *txq = &il->txq[txq_id];
3095 struct il_queue *q = &txq->q;
3096 int nfreed = 0;
3097
3098 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
3099 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
3100 "is out of range [0-%d] %d %d.\n", txq_id,
3101 idx, q->n_bd, q->write_ptr, q->read_ptr);
3102 return;
3103 }
3104
3105 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
3106 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3107
3108 if (nfreed++ > 0) {
3109 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
3110 q->write_ptr, q->read_ptr);
3111 queue_work(il->workqueue, &il->restart);
3112 }
3113
3114 }
3115}
3116
3117/**
3118 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3119 * @rxb: Rx buffer to reclaim
3120 *
3121 * If an Rx buffer has an async callback associated with it, the callback
3122 * will be executed. The attached skb (if present) will only be freed
3123 * if the callback returns 1
3124 */
3125void
3126il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
3127{
3128 struct il_rx_pkt *pkt = rxb_addr(rxb);
3129 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3130 int txq_id = SEQ_TO_QUEUE(sequence);
3131 int idx = SEQ_TO_IDX(sequence);
3132 int cmd_idx;
3133 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
3134 struct il_device_cmd *cmd;
3135 struct il_cmd_meta *meta;
3136 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3137 unsigned long flags;
3138
3139 /* If a Tx command is being handled and it isn't in the actual
3140	 * command queue, then a command routing bug has been introduced
3141 * in the queue management code. */
3142 if (WARN(txq_id != il->cmd_queue,
3143 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
3144 txq_id, il->cmd_queue, sequence,
3145 il->txq[il->cmd_queue].q.read_ptr,
3146 il->txq[il->cmd_queue].q.write_ptr)) {
3147 il_print_hex_error(il, pkt, 32);
3148 return;
3149 }
3150
3151 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
3152 cmd = txq->cmd[cmd_idx];
3153 meta = &txq->meta[cmd_idx];
3154
3155 txq->time_stamp = jiffies;
3156
3157 pci_unmap_single(il->pci_dev,
3158 dma_unmap_addr(meta, mapping),
3159 dma_unmap_len(meta, len),
3160 PCI_DMA_BIDIRECTIONAL);
3161
3162 /* Input error checking is done when commands are added to queue. */
3163 if (meta->flags & CMD_WANT_SKB) {
3164 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
3165 rxb->page = NULL;
3166 } else if (meta->callback)
3167 meta->callback(il, cmd, pkt);
3168
3169 spin_lock_irqsave(&il->hcmd_lock, flags);
3170
3171 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
3172
3173 if (!(meta->flags & CMD_ASYNC)) {
3174 clear_bit(S_HCMD_ACTIVE, &il->status);
3175 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
3176 il_get_cmd_string(cmd->hdr.cmd));
3177 wake_up(&il->wait_command_queue);
3178 }
3179
3180 /* Mark as unmapped */
3181 meta->flags = 0;
3182
3183 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3184}
3185EXPORT_SYMBOL(il_tx_cmd_complete);
3186
3187MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
3188MODULE_VERSION(IWLWIFI_VERSION);
3189MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
3190MODULE_LICENSE("GPL");
3191
3192/*
3193 * If bt_coex_active is set to true, uCode will do kill/defer
3194 * every time the priority line is asserted (BT is sending signals on the
3195 * priority line in the PCIx).
3196 * If bt_coex_active is set to false, uCode will ignore BT activity and
3197 * perform normal operation.
3198 *
3199 * Users might experience transmit issues on some platforms due to a WiFi/BT
3200 * co-existence problem. The possible behaviors are:
3201 * able to scan and find all the available APs, but
3202 * not able to associate with any AP.
3203 * On those platforms, WiFi communication can be restored by setting the
3204 * "bt_coex_active" module parameter to "false".
3205 *
3206 * default: bt_coex_active = true (BT_COEX_ENABLE)
3207 */
3208static bool bt_coex_active = true;
3209module_param(bt_coex_active, bool, S_IRUGO);
3210MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
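/*
 * Load-time example (assuming the common code is built as the
 * "iwlegacy" module; S_IRUGO makes the parameter read-only at
 * runtime, so it cannot be changed via sysfs afterwards):
 *
 *	modprobe iwlegacy bt_coex_active=0
 */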
3211
3212u32 il_debug_level;
3213EXPORT_SYMBOL(il_debug_level);
3214
3215const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3216EXPORT_SYMBOL(il_bcast_addr);
3217
3218
3219/* This function both allocates and initializes hw and il. */
3220struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg)
3221{
3222	struct il_priv *il;
3223	/* mac80211 allocates memory for this device instance, including
3224	 * space for this driver's private structure */
3225 struct ieee80211_hw *hw;
3226
3227	hw = ieee80211_alloc_hw(sizeof(struct il_priv),
3228 cfg->ops->ieee80211_ops);
3229 if (hw == NULL) {
3230 pr_err("%s: Can not allocate network device\n",
3231 cfg->name);
3232 goto out;
3233 }
3234
3235 il = hw->priv;
3236 il->hw = hw;
3237
3238out:
3239 return hw;
3240}
3241EXPORT_SYMBOL(il_alloc_all);
3242
3243#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
3244#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
3245static void il_init_ht_hw_capab(const struct il_priv *il,
3246 struct ieee80211_sta_ht_cap *ht_info,
3247 enum ieee80211_band band)
3248{
3249 u16 max_bit_rate = 0;
3250 u8 rx_chains_num = il->hw_params.rx_chains_num;
3251 u8 tx_chains_num = il->hw_params.tx_chains_num;
3252
3253 ht_info->cap = 0;
3254 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
3255
3256 ht_info->ht_supported = true;
3257
3258 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
3259 max_bit_rate = MAX_BIT_RATE_20_MHZ;
3260	if (il->hw_params.ht40_channel & BIT(band)) {
3261 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
3262 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
3263 ht_info->mcs.rx_mask[4] = 0x01;
3264 max_bit_rate = MAX_BIT_RATE_40_MHZ;
3265 }
3266
3267	if (il->cfg->mod_params->amsdu_size_8K)
3268 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
3269
3270 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3271 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3272
3273 ht_info->mcs.rx_mask[0] = 0xFF;
3274 if (rx_chains_num >= 2)
3275 ht_info->mcs.rx_mask[1] = 0xFF;
3276 if (rx_chains_num >= 3)
3277 ht_info->mcs.rx_mask[2] = 0xFF;
3278
3279 /* Highest supported Rx data rate */
3280 max_bit_rate *= rx_chains_num;
3281 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
3282 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
3283
3284 /* Tx MCS capabilities */
3285 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
3286 if (tx_chains_num != rx_chains_num) {
3287 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
3288 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
3289 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
3290 }
3291}
3292
3293/**
3294 * il_init_geos - Initialize mac80211's geo/channel info based on EEPROM
3295 */
3296int il_init_geos(struct il_priv *il)
3297{
3298	struct il_channel_info *ch;
3299 struct ieee80211_supported_band *sband;
3300 struct ieee80211_channel *channels;
3301 struct ieee80211_channel *geo_ch;
3302 struct ieee80211_rate *rates;
3303 int i = 0;
3304	s8 max_tx_power = 0;
3305
3306 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3307 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
3308		D_INFO("Geography modes already initialized.\n");
3309		set_bit(S_GEO_CONFIGURED, &il->status);
3310 return 0;
3311 }
3312
3313 channels = kzalloc(sizeof(struct ieee80211_channel) *
3314			   il->channel_count, GFP_KERNEL);
3315 if (!channels)
3316 return -ENOMEM;
3317
3318	rates = kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
3319 GFP_KERNEL);
3320 if (!rates) {
3321 kfree(channels);
3322 return -ENOMEM;
3323 }
3324
3325 /* 5.2GHz channels start after the 2.4GHz channels */
3326	sband = &il->bands[IEEE80211_BAND_5GHZ];
3327	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3328	/* just OFDM */
3329	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
3330	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
3331
3332 if (il->cfg->sku & IL_SKU_N)
3333 il_init_ht_hw_capab(il, &sband->ht_cap,
3334 IEEE80211_BAND_5GHZ);
3335
3336	sband = &il->bands[IEEE80211_BAND_2GHZ];
3337 sband->channels = channels;
3338 /* OFDM & CCK */
3339 sband->bitrates = rates;
3340	sband->n_bitrates = RATE_COUNT_LEGACY;
3341
3342 if (il->cfg->sku & IL_SKU_N)
3343 il_init_ht_hw_capab(il, &sband->ht_cap,
3344 IEEE80211_BAND_2GHZ);
3345
3346 il->ieee_channels = channels;
3347 il->ieee_rates = rates;
3348
3349 for (i = 0; i < il->channel_count; i++) {
3350 ch = &il->channel_info[i];
3351
3352		if (!il_is_channel_valid(ch))
3353 continue;
3354
3355		sband = &il->bands[ch->band];
3356
3357 geo_ch = &sband->channels[sband->n_channels++];
3358
3359 geo_ch->center_freq =
3360 ieee80211_channel_to_frequency(ch->channel, ch->band);
3361 geo_ch->max_power = ch->max_power_avg;
3362 geo_ch->max_antenna_gain = 0xff;
3363 geo_ch->hw_value = ch->channel;
3364
3365		if (il_is_channel_valid(ch)) {
3366 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3367 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
3368
3369 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3370 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
3371
3372 if (ch->flags & EEPROM_CHANNEL_RADAR)
3373 geo_ch->flags |= IEEE80211_CHAN_RADAR;
3374
3375 geo_ch->flags |= ch->ht40_extension_channel;
3376
3377 if (ch->max_power_avg > max_tx_power)
3378 max_tx_power = ch->max_power_avg;
3379 } else {
3380 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
3381 }
3382
3383		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
3384			ch->channel, geo_ch->center_freq,
3385			il_is_channel_a_band(ch) ? "5.2" : "2.4",
3386 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
3387 "restricted" : "valid",
3388 geo_ch->flags);
3389 }
3390
3391 il->tx_power_device_lmt = max_tx_power;
3392 il->tx_power_user_lmt = max_tx_power;
3393 il->tx_power_next = max_tx_power;
3394
3395 if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
3396 (il->cfg->sku & IL_SKU_A)) {
3397		IL_INFO("Incorrectly detected BG card as ABG. "
3398			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
3399 il->pci_dev->device,
3400 il->pci_dev->subsystem_device);
3401 il->cfg->sku &= ~IL_SKU_A;
3402 }
3403
3404	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
3405 il->bands[IEEE80211_BAND_2GHZ].n_channels,
3406 il->bands[IEEE80211_BAND_5GHZ].n_channels);
3407
3408	set_bit(S_GEO_CONFIGURED, &il->status);
3409
3410 return 0;
3411}
3412EXPORT_SYMBOL(il_init_geos);
3413
3414/*
3415 * il_free_geos - undo allocations in il_init_geos
3416 */
3417void il_free_geos(struct il_priv *il)
3418{
3419 kfree(il->ieee_channels);
3420 kfree(il->ieee_rates);
3421	clear_bit(S_GEO_CONFIGURED, &il->status);
3422}
3423EXPORT_SYMBOL(il_free_geos);
3424
3425static bool il_is_channel_extension(struct il_priv *il,
3426 enum ieee80211_band band,
3427 u16 channel, u8 extension_chan_offset)
3428{
3429	const struct il_channel_info *ch_info;
3430
3431	ch_info = il_get_channel_info(il, band, channel);
3432	if (!il_is_channel_valid(ch_info))
3433 return false;
3434
3435 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3436 return !(ch_info->ht40_extension_channel &
3437 IEEE80211_CHAN_NO_HT40PLUS);
3438 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3439 return !(ch_info->ht40_extension_channel &
3440 IEEE80211_CHAN_NO_HT40MINUS);
3441
3442 return false;
3443}
3444
3445bool il_is_ht40_tx_allowed(struct il_priv *il,
3446			   struct il_rxon_context *ctx,
3447 struct ieee80211_sta_ht_cap *ht_cap)
3448{
3449 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
3450 return false;
3451
3452 /*
3453	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40;
3454	 * the bit will not be set in the pure 40MHz case
3455 */
3456 if (ht_cap && !ht_cap->ht_supported)
3457 return false;
3458
3459#ifdef CONFIG_IWLEGACY_DEBUGFS
3460	if (il->disable_ht40)
3461 return false;
3462#endif
3463
3464	return il_is_channel_extension(il, il->band,
3465 le16_to_cpu(ctx->staging.channel),
3466 ctx->ht.extension_chan_offset);
3467}
3468EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3469
3470static u16 il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3471{
3472 u16 new_val;
3473 u16 beacon_factor;
3474
3475 /*
3476 * If mac80211 hasn't given us a beacon interval, program
3477 * the default into the device.
3478 */
3479 if (!beacon_val)
3480 return DEFAULT_BEACON_INTERVAL;
3481
3482 /*
3483 * If the beacon interval we obtained from the peer
3484 * is too large, we'll have to wake up more often
3485 * (and in IBSS case, we'll beacon too much)
3486 *
3487 * For example, if max_beacon_val is 4096, and the
3488 * requested beacon interval is 7000, we'll have to
3489 * use 3500 to be able to wake up on the beacons.
3490 *
3491 * This could badly influence beacon detection stats.
3492 */
3493
3494 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3495 new_val = beacon_val / beacon_factor;
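	/* e.g. beacon_val = 7000, max_beacon_val = 4096:
	 * beacon_factor = (7000 + 4096) / 4096 = 2, new_val = 3500 */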
3496
3497 if (!new_val)
3498 new_val = max_beacon_val;
3499
3500 return new_val;
3501}
3502
3503int
3504il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
3505{
3506 u64 tsf;
3507 s32 interval_tm, rem;
3508 struct ieee80211_conf *conf = NULL;
3509 u16 beacon_int;
3510 struct ieee80211_vif *vif = ctx->vif;
3511
3512	conf = &il->hw->conf;
3513
3514	lockdep_assert_held(&il->mutex);
3515
3516	memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));
3517
3518	ctx->timing.timestamp = cpu_to_le64(il->timestamp);
3519 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
3520
3521 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
3522
3523 /*
3524	 * TODO: For IBSS we need to get atim_win from mac80211,
3525 * for now just always use 0
3526 */
3527	ctx->timing.atim_win = 0;
3528
3529	beacon_int = il_adjust_beacon_interval(beacon_int,
3530			il->hw_params.max_beacon_itrvl * TIME_UNIT);
3531 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
3532
3533	tsf = il->timestamp; /* tsf is modified by do_div: copy it */
3534 interval_tm = beacon_int * TIME_UNIT;
3535 rem = do_div(tsf, interval_tm);
3536 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
3537
3538 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
3539
58de00a4 3540 D_ASSOC(
be663ab6
WYG
3541 "beacon interval %d beacon timer %d beacon tim %d\n",
3542 le16_to_cpu(ctx->timing.beacon_interval),
3543 le32_to_cpu(ctx->timing.beacon_init_val),
3544			le16_to_cpu(ctx->timing.atim_win));
3545
3546	return il_send_cmd_pdu(il, ctx->rxon_timing_cmd,
3547 sizeof(ctx->timing), &ctx->timing);
3548}
3549EXPORT_SYMBOL(il_send_rxon_timing);
3550
3551void
3552il_set_rxon_hwcrypto(struct il_priv *il,
3553		     struct il_rxon_context *ctx,
3554 int hw_decrypt)
3555{
3556	struct il_rxon_cmd *rxon = &ctx->staging;
3557
3558 if (hw_decrypt)
3559 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3560 else
3561 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3562
3563}
3564EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3565
3566/* validate RXON structure is valid */
3567int
3568il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
3569{
3570	struct il_rxon_cmd *rxon = &ctx->staging;
3571 bool error = false;
3572
3573 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3574 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3575			IL_WARN("check 2.4G: wrong narrow\n");
3576 error = true;
3577 }
3578 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3579			IL_WARN("check 2.4G: wrong radar\n");
3580 error = true;
3581 }
3582 } else {
3583 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3584			IL_WARN("check 5.2G: not short slot!\n");
3585 error = true;
3586 }
3587 if (rxon->flags & RXON_FLG_CCK_MSK) {
3588			IL_WARN("check 5.2G: CCK!\n");
3589 error = true;
3590 }
3591 }
3592 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3593		IL_WARN("mac/bssid mcast!\n");
3594 error = true;
3595 }
3596
3597 /* make sure basic rates 6Mbps and 1Mbps are supported */
3598 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3599 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3600		IL_WARN("neither 1 nor 6 are basic\n");
3601 error = true;
3602 }
3603
3604 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3605		IL_WARN("aid > 2007\n");
3606 error = true;
3607 }
3608
3609 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
3610 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3611		IL_WARN("CCK and short slot\n");
3612 error = true;
3613 }
3614
3615 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
3616 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3617		IL_WARN("CCK and auto detect\n");
3618 error = true;
3619 }
3620
3621 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
3622 RXON_FLG_TGG_PROTECT_MSK)) ==
3623 RXON_FLG_TGG_PROTECT_MSK) {
3624		IL_WARN("TGg but no auto-detect\n");
3625 error = true;
3626 }
3627
3628 if (error)
3629		IL_WARN("Tuning to channel %d\n",
3630 le16_to_cpu(rxon->channel));
3631
3632 if (error) {
3633		IL_ERR("Invalid RXON\n");
3634 return -EINVAL;
3635 }
3636 return 0;
3637}
3638EXPORT_SYMBOL(il_check_rxon_cmd);
3639
3640/**
3641 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
3642 * @il: staging_rxon is compared to active_rxon
3643 *
3644 * If the RXON structure is changing enough to require a new tune,
3645 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
3646 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
3647 */
3648int il_full_rxon_required(struct il_priv *il,
3649			  struct il_rxon_context *ctx)
3650{
3651 const struct il_rxon_cmd *staging = &ctx->staging;
3652 const struct il_rxon_cmd *active = &ctx->active;
3653
3654#define CHK(cond) \
3655 if ((cond)) { \
3656		D_INFO("need full RXON - " #cond "\n"); \
3657 return 1; \
3658 }
3659
3660#define CHK_NEQ(c1, c2) \
3661 if ((c1) != (c2)) { \
58de00a4 3662 D_INFO("need full RXON - " \
be663ab6
WYG
3663 #c1 " != " #c2 " - %d != %d\n", \
3664 (c1), (c2)); \
3665 return 1; \
3666 }
3667
3668 /* These items are only settable from the full RXON command */
3669	CHK(!il_is_associated_ctx(ctx));
3670 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
3671 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
3672 CHK(compare_ether_addr(staging->wlap_bssid_addr,
3673 active->wlap_bssid_addr));
3674 CHK_NEQ(staging->dev_type, active->dev_type);
3675 CHK_NEQ(staging->channel, active->channel);
3676 CHK_NEQ(staging->air_propagation, active->air_propagation);
3677 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
3678 active->ofdm_ht_single_stream_basic_rates);
3679 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
3680 active->ofdm_ht_dual_stream_basic_rates);
3681 CHK_NEQ(staging->assoc_id, active->assoc_id);
3682
3683 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
3684 * be updated with the RXON_ASSOC command -- however only some
3685 * flag transitions are allowed using RXON_ASSOC */
3686
3687 /* Check if we are not switching bands */
3688 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
3689 active->flags & RXON_FLG_BAND_24G_MSK);
3690
3691 /* Check if we are switching association toggle */
3692 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
3693 active->filter_flags & RXON_FILTER_ASSOC_MSK);
3694
3695#undef CHK
3696#undef CHK_NEQ
3697
3698 return 0;
3699}
3700EXPORT_SYMBOL(il_full_rxon_required);
3701
3702u8 il_get_lowest_plcp(struct il_priv *il,
3703		      struct il_rxon_context *ctx)
3704{
3705 /*
3706 * Assign the lowest rate -- should really get this from
3707 * the beacon skb from mac80211.
3708 */
3709 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
3710		return RATE_1M_PLCP;
3711	else
3712		return RATE_6M_PLCP;
3713}
3714EXPORT_SYMBOL(il_get_lowest_plcp);
3715
3716static void _il_set_rxon_ht(struct il_priv *il,
3717 struct il_ht_config *ht_conf,
3718 struct il_rxon_context *ctx)
3719{
3720	struct il_rxon_cmd *rxon = &ctx->staging;
3721
3722 if (!ctx->ht.enabled) {
3723 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
3724 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
3725 RXON_FLG_HT40_PROT_MSK |
3726 RXON_FLG_HT_PROT_MSK);
3727 return;
3728 }
3729
3730 rxon->flags |= cpu_to_le32(ctx->ht.protection <<
3731 RXON_FLG_HT_OPERATING_MODE_POS);
3732
3733 /* Set up channel bandwidth:
3734 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
3735 /* clear the HT channel mode before set the mode */
3736 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
3737 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3738	if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
3739 /* pure ht40 */
3740 if (ctx->ht.protection ==
3741 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
3742 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
3743 /* Note: control channel is opposite of extension channel */
3744 switch (ctx->ht.extension_chan_offset) {
3745 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3746 rxon->flags &=
3747 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3748 break;
3749 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3750 rxon->flags |=
3751 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3752 break;
3753 }
3754 } else {
3755 /* Note: control channel is opposite of extension channel */
3756 switch (ctx->ht.extension_chan_offset) {
3757 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3758 rxon->flags &=
3759 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3760 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
3761 break;
3762 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3763 rxon->flags |=
3764 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3765 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
3766 break;
3767 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
3768 default:
3769 /* channel location only valid if in Mixed mode */
9406f797 3770 IL_ERR(
be663ab6
WYG
3771 "invalid extension channel offset\n");
3772 break;
3773 }
3774 }
3775 } else {
3776 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
3777 }
3778
46bc8d4b
SG
3779 if (il->cfg->ops->hcmd->set_rxon_chain)
3780 il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
be663ab6 3781
58de00a4 3782 D_ASSOC("rxon flags 0x%X operation mode :0x%X "
be663ab6
WYG
3783 "extension channel offset 0x%x\n",
3784 le32_to_cpu(rxon->flags), ctx->ht.protection,
3785 ctx->ht.extension_chan_offset);
3786}
3787
46bc8d4b 3788void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
be663ab6 3789{
17d6e557 3790 _il_set_rxon_ht(il, ht_conf, &il->ctx);
be663ab6 3791}
e2ebc833 3792EXPORT_SYMBOL(il_set_rxon_ht);
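
/*
 * Summary of the channel-mode decision in _il_set_rxon_ht() above:
 *
 *	ht.enabled  ht40 allowed  protection        resulting mode
 *	----------  ------------  ----------------  --------------------
 *	no          -             -                 all HT flags cleared
 *	yes         no            -                 CHANNEL_MODE_LEGACY
 *	yes         yes           PROTECTION_20MHZ  CHANNEL_MODE_PURE_40
 *	yes         yes           anything else     CHANNEL_MODE_MIXED
 *
 * In the 40 MHz modes the control-channel location bit is derived from
 * ht.extension_chan_offset (the control channel sits opposite the
 * extension channel, as the inline comments note).
 */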

/* Return a valid, unused channel for a passive scan to reset the RF */
u8 il_get_single_channel_number(struct il_priv *il,
				enum ieee80211_band band)
{
	const struct il_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;

	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = il->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		channel = il->channel_info[i].channel;
		if (channel == le16_to_cpu(il->ctx.staging.channel))
			continue;

		ch_info = il_get_channel_info(il, band, channel);
		if (il_is_channel_valid(ch_info))
			break;
	}

	return channel;
}
EXPORT_SYMBOL(il_get_single_channel_number);

/**
 * il_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
int
il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
		    struct il_rxon_context *ctx)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ch->hw_value;

	if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
		return 0;

	ctx->staging.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	il->band = band;

	D_INFO("Staging channel set to %d [%d]\n", channel, band);

	return 0;
}
EXPORT_SYMBOL(il_set_rxon_channel);

void il_set_flags_for_band(struct il_priv *il,
			   struct il_rxon_context *ctx,
			   enum ieee80211_band band,
			   struct ieee80211_vif *vif)
{
	if (band == IEEE80211_BAND_5GHZ) {
		ctx->staging.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
		      | RXON_FLG_CCK_MSK);
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		/* Copied from il_post_associate() */
		if (vif && vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
	}
}
EXPORT_SYMBOL(il_set_flags_for_band);

/*
 * initialize rxon structure with default values from eeprom
 */
void il_connection_init_rx_config(struct il_priv *il,
				  struct il_rxon_context *ctx)
{
	const struct il_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
		switch (ctx->vif->type) {

		case NL80211_IFTYPE_STATION:
			ctx->staging.dev_type = ctx->station_devtype;
			ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
			break;

		case NL80211_IFTYPE_ADHOC:
			ctx->staging.dev_type = ctx->ibss_devtype;
			ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
			ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						    RXON_FILTER_ACCEPT_GRP_MSK;
			break;

		default:
			IL_ERR("Unsupported interface type %d\n",
			       ctx->vif->type);
			break;
		}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(il->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info = il_get_channel_info(il, il->band,
				      le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &il->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, ctx, il->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
				RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);

void il_set_rate(struct il_priv *il)
{
	const struct ieee80211_supported_band *hw = NULL;
	struct ieee80211_rate *rate;
	int i;

	hw = il_get_hw_mode(il, il->band);
	if (!hw) {
		IL_ERR("Failed to set rate: unable to get hw mode\n");
		return;
	}

	il->active_rate = 0;

	for (i = 0; i < hw->n_bitrates; i++) {
		rate = &(hw->bitrates[i]);
		if (rate->hw_value < RATE_COUNT_LEGACY)
			il->active_rate |= (1 << rate->hw_value);
	}

	D_RATE("Set active_rate = %0x\n", il->active_rate);

	il->ctx.staging.cck_basic_rates =
	    (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	il->ctx.staging.ofdm_basic_rates =
	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
}
EXPORT_SYMBOL(il_set_rate);

void il_chswitch_done(struct il_priv *il, bool is_success)
{
	struct il_rxon_context *ctx = &il->ctx;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		ieee80211_chswitch_done(ctx->vif, is_success);
}
EXPORT_SYMBOL(il_chswitch_done);

void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_csa_notification *csa = &(pkt->u.csa_notif);

	struct il_rxon_context *ctx = &il->ctx;
	struct il_rxon_cmd *rxon = (void *)&ctx->active;

	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		return;

	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
		rxon->channel = csa->channel;
		ctx->staging.channel = csa->channel;
		D_11H("CSA notif: channel %d\n",
		      le16_to_cpu(csa->channel));
		il_chswitch_done(il, true);
	} else {
		IL_ERR("CSA notif (fail) : channel %d\n",
		       le16_to_cpu(csa->channel));
		il_chswitch_done(il, false);
	}
}
EXPORT_SYMBOL(il_hdl_csa);

#ifdef CONFIG_IWLEGACY_DEBUG
void il_print_rx_config_cmd(struct il_priv *il,
			    struct il_rxon_context *ctx)
{
	struct il_rxon_cmd *rxon = &ctx->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n",
		le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
		rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n",
		rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n",
		le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
#endif
/**
 * il_irq_handle_error - called for HW or SW error interrupt from card
 */
void il_irq_handle_error(struct il_priv *il)
{
	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);

	/* Cancel currently queued command. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n",
	       il->hw->wiphy->fw_version);

	il->cfg->ops->lib->dump_nic_error_log(il);
	if (il->cfg->ops->lib->dump_fh)
		il->cfg->ops->lib->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il, &il->ctx);
#endif

	wake_up(&il->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);

static int il_apm_stop_master(struct il_priv *il)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IL_WARN("Master Disable Timed Out, 100 usec\n");

	D_INFO("stop master\n");

	return ret;
}

void il_apm_stop(struct il_priv *il)
{
	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	il_apm_stop_master(il);

	/* Reset the entire device */
	il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	il_clear_bit(il, CSR_GP_CNTRL,
		     CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(il_apm_stop);


/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via il_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
int il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG,
		   CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is a no-op for 3945 (non-existent bit)
	 */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (il->cfg->base_params->set_l0s) {
		lctl = il_pcie_link_ctl(il);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
		    PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (il->cfg->base_params->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG,
			   il->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. il_wr_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = _il_poll_bit(il, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (il->cfg->base_params->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);


int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct il_rxon_context *ctx = &il->ctx;

	lockdep_assert_held(&il->mutex);

	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm means 1 milliwatt */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n",
			tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always needs to be updated with the newest request */
	il->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(S_SCANNING, &il->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->cfg->ops->lib->send_tx_power(il);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);
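
/*
 * Worked example (illustrative numbers): tx_power is in dBm, where
 * 0 dBm = 1 mW and every +3 dB roughly doubles the power, so a request
 * of 14 dBm asks for about 25 mW (10^(14/10) ~= 25.1).  The request is
 * accepted only inside the window 0 <= tx_power <= tx_power_device_lmt;
 * anything outside returns -EINVAL above.
 */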

void il_send_bt_config(struct il_priv *il)
{
	struct il_bt_cmd bt_cmd = {
		.lead_time = BT_LEAD_TIME_DEF,
		.max_kill = BT_MAX_KILL_DEF,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	if (!bt_coex_active)
		bt_cmd.flags = BT_COEX_DISABLE;
	else
		bt_cmd.flags = BT_COEX_ENABLE;

	D_INFO("BT coex %s\n",
	       (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");

	if (il_send_cmd_pdu(il, C_BT_CONFIG,
			    sizeof(struct il_bt_cmd), &bt_cmd))
		IL_ERR("failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(il_send_bt_config);

int il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
{
	struct il_stats_cmd stats_cmd = {
		.configuration_flags =
		    clear ? IL_STATS_CONF_CLEAR_STATS : 0,
	};

	if (flags & CMD_ASYNC)
		return il_send_cmd_pdu_async(il, C_STATS,
					     sizeof(struct il_stats_cmd),
					     &stats_cmd, NULL);
	else
		return il_send_cmd_pdu(il, C_STATS,
				       sizeof(struct il_stats_cmd),
				       &stats_cmd);
}
EXPORT_SYMBOL(il_send_stats_request);

void il_hdl_pm_sleep(struct il_priv *il,
		     struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);

void il_hdl_pm_debug_stats(struct il_priv *il,
			   struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
	D_RADIO("Dumping %d bytes of unhandled "
		"notification for %s:\n", len,
		il_get_cmd_string(pkt->hdr.cmd));
	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(il_hdl_pm_debug_stats);

void il_hdl_error(struct il_priv *il,
		  struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);

void il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}

int il_mac_conf_tx(struct ieee80211_hw *hw,
		   struct ieee80211_vif *vif, u16 queue,
		   const struct ieee80211_tx_queue_params *params)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	int q;

	D_MAC80211("enter\n");

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&il->lock, flags);

	il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
		cpu_to_le16(params->cw_min);
	il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
		cpu_to_le16(params->cw_max);
	il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
		cpu_to_le16((params->txop * 32));

	il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	D_MAC80211("leave\n");
	return 0;
}
EXPORT_SYMBOL(il_mac_conf_tx);
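
/*
 * Note on the mapping above (assuming AC_NUM == 4, as in this driver
 * family): mac80211 numbers its queues from 0 (highest priority, AC_VO)
 * to 3 (lowest, AC_BK), while the uCode EDCA parameter table is indexed
 * the other way round, hence q = AC_NUM - 1 - queue.  params->txop
 * arrives in units of 32 usec, so the "* 32" hands the firmware a value
 * in plain microseconds.
 */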

int il_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	return il->ibss_manager == IL_IBSS_MANAGER;
}
EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);

static int
il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
{
	il_connection_init_rx_config(il, ctx);

	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	return il_commit_rxon(il, ctx);
}

static int il_setup_interface(struct il_priv *il,
			      struct il_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err;

	lockdep_assert_held(&il->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	il->iw_mode = vif->type;

	ctx->is_active = true;

	err = il_set_mode(il, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	return 0;
}

int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	int err;
	u32 modes;

	D_MAC80211("enter: type %d, addr %pM\n",
		   vif->type, vif->addr);

	mutex_lock(&il->mutex);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/* check if busy context is exclusive */
	if (il->ctx.vif &&
	    (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
		err = -EINVAL;
		goto out;
	}

	modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
	if (!(modes & BIT(vif->type))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = &il->ctx;
	il->ctx.vif = vif;

	err = il_setup_interface(il, &il->ctx);
	if (err) {
		il->ctx.vif = NULL;
		il->iw_mode = NL80211_IFTYPE_STATION;
	}

out:
	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);

static void il_teardown_interface(struct il_priv *il,
				  struct ieee80211_vif *vif,
				  bool mode_change)
{
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&il->mutex);

	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	if (!mode_change) {
		il_set_mode(il, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}
}

void il_mac_remove_interface(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	il_teardown_interface(il, vif, false);

	memset(il->bssid, 0, ETH_ALEN);
	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
}
EXPORT_SYMBOL(il_mac_remove_interface);

int il_alloc_txq_mem(struct il_priv *il)
{
	if (!il->txq)
		il->txq = kzalloc(
			sizeof(struct il_tx_queue) *
				il->cfg->base_params->num_of_queues,
			GFP_KERNEL);
	if (!il->txq) {
		IL_ERR("Not enough memory for txq\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(il_alloc_txq_mem);

void il_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_txq_mem);

#ifdef CONFIG_IWLEGACY_DEBUGFS

#define IL_TRAFFIC_DUMP_SIZE	(IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)

void il_reset_traffic_log(struct il_priv *il)
{
	il->tx_traffic_idx = 0;
	il->rx_traffic_idx = 0;
	if (il->tx_traffic)
		memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
	if (il->rx_traffic)
		memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
}

int il_alloc_traffic_mem(struct il_priv *il)
{
	u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;

	if (il_debug_level & IL_DL_TX) {
		if (!il->tx_traffic) {
			il->tx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!il->tx_traffic)
				return -ENOMEM;
		}
	}
	if (il_debug_level & IL_DL_RX) {
		if (!il->rx_traffic) {
			il->rx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!il->rx_traffic)
				return -ENOMEM;
		}
	}
	il_reset_traffic_log(il);
	return 0;
}
EXPORT_SYMBOL(il_alloc_traffic_mem);

void il_free_traffic_mem(struct il_priv *il)
{
	kfree(il->tx_traffic);
	il->tx_traffic = NULL;

	kfree(il->rx_traffic);
	il->rx_traffic = NULL;
}
EXPORT_SYMBOL(il_free_traffic_mem);

void il_dbg_log_tx_data_frame(struct il_priv *il,
			      u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(il_debug_level & IL_DL_TX)))
		return;

	if (!il->tx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		len = (length > IL_TRAFFIC_ENTRY_SIZE)
		       ? IL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((il->tx_traffic +
		       (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		il->tx_traffic_idx =
			(il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(il_dbg_log_tx_data_frame);

void il_dbg_log_rx_data_frame(struct il_priv *il,
			      u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(il_debug_level & IL_DL_RX)))
		return;

	if (!il->rx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		len = (length > IL_TRAFFIC_ENTRY_SIZE)
		       ? IL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((il->rx_traffic +
		       (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		il->rx_traffic_idx =
			(il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(il_dbg_log_rx_data_frame);

const char *il_get_mgmt_string(int cmd)
{
	switch (cmd) {
		IL_CMD(MANAGEMENT_ASSOC_REQ);
		IL_CMD(MANAGEMENT_ASSOC_RESP);
		IL_CMD(MANAGEMENT_REASSOC_REQ);
		IL_CMD(MANAGEMENT_REASSOC_RESP);
		IL_CMD(MANAGEMENT_PROBE_REQ);
		IL_CMD(MANAGEMENT_PROBE_RESP);
		IL_CMD(MANAGEMENT_BEACON);
		IL_CMD(MANAGEMENT_ATIM);
		IL_CMD(MANAGEMENT_DISASSOC);
		IL_CMD(MANAGEMENT_AUTH);
		IL_CMD(MANAGEMENT_DEAUTH);
		IL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";
	}
}

const char *il_get_ctrl_string(int cmd)
{
	switch (cmd) {
		IL_CMD(CONTROL_BACK_REQ);
		IL_CMD(CONTROL_BACK);
		IL_CMD(CONTROL_PSPOLL);
		IL_CMD(CONTROL_RTS);
		IL_CMD(CONTROL_CTS);
		IL_CMD(CONTROL_ACK);
		IL_CMD(CONTROL_CFEND);
		IL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";
	}
}

void il_clear_traffic_stats(struct il_priv *il)
{
	memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
	memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
}

/*
 * If CONFIG_IWLEGACY_DEBUGFS is defined, il_update_stats records all
 * MGMT, CTRL and DATA packets for both the TX and RX paths; use debugfs
 * to display the tx/rx stats.  If CONFIG_IWLEGACY_DEBUGFS is not
 * defined, no MGMT or CTRL information is recorded, but DATA packets
 * are still counted, because il_led.c controls the LED blinking based
 * on the number of tx and rx data frames.
 */
void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	if (is_tx)
		stats = &il->tx_stats;
	else
		stats = &il->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(il_update_stats);
#endif

int il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * If the request came from outside the driver (e.g. debugfs),
	 * always perform it, regardless of the module parameter setting.
	 * If the request is internal (uCode error or a failure detected
	 * by the driver), check the fw_restart module parameter before
	 * reloading the firmware.
	 */

	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit
	 */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
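
/*
 * Behaviour summary for il_force_reset(): external requests (e.g. from
 * debugfs) are always honoured; internal ones are rate-limited -- a
 * request arriving within reset_duration jiffies of the previous reset
 * is rejected with -EAGAIN -- and are additionally gated on the
 * restart_fw module parameter before the firmware is reloaded.
 */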

int
il_mac_change_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
	u32 modes;
	int err;

	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&il->mutex);

	if (!ctx->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	modes = ctx->interface_modes | ctx->exclusive_interface_modes;
	if (!(modes & BIT(newtype))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
	    (il->ctx.exclusive_interface_modes & BIT(newtype))) {
		err = -EINVAL;
		goto out;
	}

	/* success */
	il_teardown_interface(il, vif, true);
	vif->type = newtype;
	vif->p2p = newp2p;
	err = il_setup_interface(il, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

out:
	mutex_unlock(&il->mutex);
	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);

/*
 * On every watchdog tick we check the (latest) time stamp. If it does
 * not change during the timeout period and the queue is not empty, we
 * reset the firmware.
 */
static int il_check_stuck_queue(struct il_priv *il, int cnt)
{
	struct il_tx_queue *txq = &il->txq[cnt];
	struct il_queue *q = &txq->q;
	unsigned long timeout;
	int ret;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = jiffies;
		return 0;
	}

	timeout = txq->time_stamp +
		  msecs_to_jiffies(il->cfg->base_params->wd_timeout);

	if (time_after(jiffies, timeout)) {
		IL_ERR("Queue %d stuck for %u ms.\n",
		       q->id, il->cfg->base_params->wd_timeout);
		ret = il_force_reset(il, false);
		return (ret == -EAGAIN) ? 0 : 1;
	}

	return 0;
}

/*
 * Making the watchdog tick a quarter of the timeout ensures we will
 * discover a hung queue between timeout and 1.25*timeout
 */
#define IL_WD_TICK(timeout) ((timeout) / 4)

/*
 * Watchdog timer callback: we check each tx queue for a stuck
 * condition; if one is hung we reset the firmware. If everything is
 * fine we just rearm the timer.
 */
void il_bg_watchdog(unsigned long data)
{
	struct il_priv *il = (struct il_priv *)data;
	int cnt;
	unsigned long timeout;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	timeout = il->cfg->base_params->wd_timeout;
	if (timeout == 0)
		return;

	/* monitor and check for stuck cmd queue */
	if (il_check_stuck_queue(il, il->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	if (il_is_any_associated(il)) {
		for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
			/* skip as we already checked the command queue */
			if (cnt == il->cmd_queue)
				continue;
			if (il_check_stuck_queue(il, cnt))
				return;
		}
	}

	mod_timer(&il->watchdog, jiffies +
		  msecs_to_jiffies(IL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(il_bg_watchdog);

void il_setup_watchdog(struct il_priv *il)
{
	unsigned int timeout = il->cfg->base_params->wd_timeout;

	if (timeout)
		mod_timer(&il->watchdog,
			  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
	else
		del_timer(&il->watchdog);
}
EXPORT_SYMBOL(il_setup_watchdog);
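
/*
 * Worked example (hypothetical wd_timeout of 2000 ms): the watchdog
 * ticks every IL_WD_TICK(2000) = 500 ms.  A queue whose time stamp
 * stops advancing is caught by the first tick that falls after the
 * 2000 ms deadline, i.e. at most 2500 ms after the last activity --
 * the "between timeout and 1.25*timeout" window mentioned above.
 */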

/*
 * extended beacon time format
 * time in usec will be changed into a 32-bit value in extended:internal format
 * the extended part is the beacon counts
 * the internal part is the time in usec within one beacon interval
 */
u32
il_usecs_to_beacons(struct il_priv *il,
		    u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) &
	       (il_beacon_time_mask_high(il,
		il->hw_params.beacon_time_tsf_bits) >>
		il->hw_params.beacon_time_tsf_bits);
	rem = (usec % interval) & il_beacon_time_mask_low(il,
		il->hw_params.beacon_time_tsf_bits);

	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(il_usecs_to_beacons);
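
/*
 * Worked example (hypothetical values, assuming TIME_UNIT is 1024 usec
 * per TU): with a beacon interval of 100 TU one interval spans
 * 102400 usec.  Converting usec = 250000 then gives
 * quot = 250000 / 102400 = 2 elapsed beacons and
 * rem = 250000 - 2 * 102400 = 45200 usec into the current interval,
 * so the packed result is (2 << beacon_time_tsf_bits) + 45200.
 */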

/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 */
__le32 il_add_beacon_time(struct il_priv *il, u32 base,
			  u32 addon, u32 beacon_interval)
{
	u32 base_low = base & il_beacon_time_mask_low(il,
				il->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & il_beacon_time_mask_low(il,
				il->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & il_beacon_time_mask_high(il,
				il->hw_params.beacon_time_tsf_bits)) +
		  (addon & il_beacon_time_mask_high(il,
				il->hw_params.beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << il->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << il->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(il_add_beacon_time);
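
/*
 * Reading the arithmetic above: the beacon-count (high) fields of base
 * and addon are summed directly, while the low fields contribute
 * base_low - addon_low, wrapped modulo the beacon interval; whenever
 * base_low <= addon_low the wrap carries one extra beacon into the
 * high part.  The result stays in the same extended:internal format.
 */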

#ifdef CONFIG_PM

int il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call il_mac_stop() from the mac80211 suspend
	 * function first, but since il_mac_stop() has no knowledge of who
	 * the caller is, it will not call apm_ops.stop() to stop the DMA
	 * operation. Call apm_ops.stop here to make sure we stop the DMA.
	 */
	il_apm_stop(il);

	return 0;
}
EXPORT_SYMBOL(il_pci_suspend);

int il_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il_enable_interrupts(il);

	if (!(_il_rd(il, CSR_GP_CNTRL) &
	      CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);

	return 0;
}
EXPORT_SYMBOL(il_pci_resume);

const struct dev_pm_ops il_pm_ops = {
	.suspend = il_pci_suspend,
	.resume = il_pci_resume,
	.freeze = il_pci_suspend,
	.thaw = il_pci_resume,
	.poweroff = il_pci_suspend,
	.restore = il_pci_resume,
};
EXPORT_SYMBOL(il_pm_ops);

#endif /* CONFIG_PM */
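
/*
 * Note on il_pm_ops above: freeze/thaw and poweroff/restore are the
 * hibernation counterparts of suspend/resume, so the same two functions
 * can service all six dev_pm_ops callbacks.  A PCI driver built on this
 * core is expected to hook the table up through its .driver.pm pointer.
 */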

static void
il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
	      ctx->qos_data.qos_active,
	      ctx->qos_data.def_qos_parm.qos_flags);

	il_send_cmd_pdu_async(il, ctx->qos_cmd,
			      sizeof(struct il_qosparam_cmd),
			      &ctx->qos_data.def_qos_parm, NULL);
}

/**
 * il_mac_config - mac80211 config callback
 */
int il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct il_rxon_context *ctx = &il->ctx;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	if (WARN_ON(!il->cfg->ops->legacy))
		return -EOPNOTSUPP;

	mutex_lock(&il->mutex);

	D_MAC80211("enter to channel %d changed 0x%X\n",
		   channel->hw_value, changed);

	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		il->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
	}

	/* during scanning mac80211 will delay channel setting until
	 * scan finish with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Configure HT40 channels */
		if (ctx->ht.enabled != conf_is_ht(conf)) {
			ctx->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (ctx->ht.enabled) {
			if (conf_is_ht40_minus(conf)) {
				ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				ctx->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				ctx->ht.is_40mhz = true;
			} else {
				ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
				ctx->ht.is_40mhz = false;
			}
		} else
			ctx->ht.is_40mhz = false;

		/*
		 * Default to no protection. Protection mode will
		 * later be set from BSS config in il_ht_conf
		 */
		ctx->ht.protection =
			IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* if we are switching from ht to 2.4 clear flags
		 * from any ht related info since 2.4 does not
		 * support ht */
		if ((le16_to_cpu(ctx->staging.channel) != ch))
			ctx->staging.flags = 0;

		il_set_rxon_channel(il, channel, ctx);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, ctx, channel->band,
				      ctx->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->cfg->ops->legacy->update_bcast_stations)
			ret =
			il->cfg->ops->legacy->update_bcast_stations(il);

set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
		       IEEE80211_CONF_CHANGE_IDLE)) {
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n",
			   il->tx_power_user_lmt, conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
		il_commit_rxon(il, ctx);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il, ctx);

out:
	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
	return ret;
}
EXPORT_SYMBOL(il_mac_config);

void il_mac_reset_tsf(struct ieee80211_hw *hw,
		      struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	struct il_rxon_context *ctx = &il->ctx;

	if (WARN_ON(!il->cfg->ops->legacy))
		return;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	spin_lock_irqsave(&il->lock, flags);
	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
	spin_unlock_irqrestore(&il->lock, flags);

	spin_lock_irqsave(&il->lock, flags);

	/* new association get rid of ibss beacon skb */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = NULL;

	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il, ctx);

	il_set_rate(il);

	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
}
EXPORT_SYMBOL(il_mac_reset_tsf);

static void il_ht_conf(struct il_priv *il,
		       struct ieee80211_vif *vif)
{
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	D_ASSOC("enter:\n");

	if (!ctx->ht.enabled)
		return;

	ctx->ht.protection =
		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ctx->ht.non_gf_sta_present =
		!!(bss_conf->ht_operation_mode &
		   IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			maxstreams = (ht_cap->mcs.tx_params &
				      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
				     >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (ht_cap->mcs.rx_mask[1] == 0 &&
			    ht_cap->mcs.rx_mask[2] == 0)
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	D_ASSOC("leave\n");
}
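
/*
 * Background for the stream check above: the two-bit "max streams"
 * field in ht_cap->mcs.tx_params encodes (streams - 1), hence the
 * "+ 1" after shifting.  rx_mask[1] and rx_mask[2] cover MCS 8-15 and
 * MCS 16-23, the two- and three-stream rate sets; if both are zero the
 * peer can only receive single-stream rates, so one chain suffices.
 */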

static inline void il_set_no_assoc(struct il_priv *il,
				   struct ieee80211_vif *vif)
{
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	/*
	 * inform the ucode that there is no longer an
	 * association and that no more packets should be
	 * sent
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ctx->staging.assoc_id = 0;
	il_commit_rxon(il, ctx);
}

static void il_beacon_update(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_ctx) {
		IL_ERR("update beacon but no beacon context!\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->cfg->ops->legacy->post_associate(il);
}
5480
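/*
 * Editor's note (hedged): two locks are deliberately in play in
 * il_beacon_update() -- il->mutex guards beacon_ctx and is asserted
 * rather than taken, because the mac80211 callbacks that reach this
 * function already hold it, while the il->lock spinlock only covers
 * swapping beacon_skb and updating the timestamp.
 */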
void il_mac_bss_info_changed(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf,
			     u32 changes)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!il->cfg->ops->legacy))
		return;

	D_MAC80211("changes = 0x%X\n", changes);

	mutex_lock(&il->mutex);

	if (!il_is_alive(il)) {
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il, ctx);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * The add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			il->beacon_ctx = ctx;
		else
			il->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background, we need to cancel it, else the RXON
		 * below / in post_associate will fail.
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			IL_WARN("Aborted scan still in progress after 100ms\n");
			D_MAC80211("leaving - scan abort failed.\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			ctx->staging.filter_flags &=
				~RXON_FILTER_ASSOC_MSK;
		}
	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once, because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n",
			   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

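	/*
	 * Editor's note (hedged): RXON_FLG_TGG_PROTECT_MSK is the
	 * 802.11g protection mechanism and thus only meaningful on
	 * 2.4 GHz, hence the band check above; RXON_FLG_SELF_CTS_EN
	 * tracks use_cts_prot on either band.
	 */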
	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from il_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
		ctx->staging.cck_basic_rates =
			bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->timestamp;

			if (!il_is_rfkill(il))
				il->cfg->ops->legacy->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n",
			   changes);
		ret = il_send_rxon_assoc(il, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
			       &ctx->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->cfg->ops->legacy->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = il->cfg->ops->legacy->manage_ibss_station(il, vif,
						bss_conf->ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
}
EXPORT_SYMBOL(il_mac_bss_info_changed);

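/*
 * Usage sketch (illustrative, not from this file; names are
 * assumptions): the 3945/4965 front ends wire the exported mac80211
 * callbacks above into their struct ieee80211_ops, along these lines:
 *
 *	static struct ieee80211_ops il3945_mac_ops = {
 *		.tx = il3945_mac_tx,
 *		.bss_info_changed = il_mac_bss_info_changed,
 *		.reset_tsf = il_mac_reset_tsf,
 *		...
 *	};
 */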
irqreturn_t il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;

	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* Ignore the interrupt if there's nothing in the NIC to service.
	 * This may be due to an IRQ shared with another device,
	 * or due to sporadic interrupts thrown by our NIC. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt. */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
	      inta, inta_mask, inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* Re-enable interrupts here since we don't have anything to service,
	 * but only if they were disabled by this irq. */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);

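/*
 * Usage sketch (hedged; error label is an assumption): a PCI front end
 * would install il_isr() on a shared IRQ line -- the "nothing to
 * service" path above exists precisely to tolerate interrupts raised
 * by another device on that line:
 *
 *	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED,
 *			  DRV_NAME, il);
 *	if (err)
 *		goto out_disable_msi;
 */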
/*
 * il_tx_cmd_protection: set RTS/CTS. Only the 3945 and 4965 share this
 * function.
 */
void il_tx_cmd_protection(struct il_priv *il,
			  struct ieee80211_tx_info *info,
			  __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

		if (!ieee80211_is_mgmt(fc))
			return;

		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			*tx_flags |= TX_CMD_FLG_CTS_MSK;
			break;
		}
	} else if (info->control.rates[0].flags &
		   IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
	}
}
EXPORT_SYMBOL(il_tx_cmd_protection);
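
/*
 * Usage sketch (hedged; tx_cmd is an assumption standing in for the
 * per-device TX command structure): the 3945/4965 TX paths call this
 * while building the device TX command, roughly:
 *
 *	__le32 tx_flags = tx_cmd->tx_flags;
 *
 *	il_tx_cmd_protection(il, info, fc, &tx_flags);
 *	tx_cmd->tx_flags = tx_flags;
 */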