1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/dma-mapping.h>
36#include <linux/delay.h>
37#include <linux/skbuff.h>
38#include <linux/netdevice.h>
39#include <linux/wireless.h>
40#include <linux/firmware.h>
41#include <linux/etherdevice.h>
42#include <linux/if_arp.h>
43
44#include <net/mac80211.h>
45
46#include <asm/div64.h>
47
48#include "iwl-4965.h"
49#include "iwl-helpers.h"
50
c8b0e6e1 51#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 52u32 iwl4965_debug_level;
53#endif
54
55static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
56 struct iwl4965_tx_queue *txq);
416e1438 57
58/******************************************************************************
59 *
60 * module boiler plate
61 *
62 ******************************************************************************/
63
64/* module parameters */
65static int iwl4965_param_disable_hw_scan; /* def: 0 = use 4965's h/w scan */
66static int iwl4965_param_debug; /* def: 0 = minimal debug log messages */
67static int iwl4965_param_disable; /* def: enable radio */
68static int iwl4965_param_antenna; /* def: 0 = both antennas (use diversity) */
69int iwl4965_param_hwcrypto; /* def: using software encryption */
70static int iwl4965_param_qos_enable = 1; /* def: 1 = use quality of service */
71int iwl4965_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 16 Tx queues */
9ee1ba47 72int iwl4965_param_amsdu_size_8K; /* def: 0 = disable 8K amsdu size */
73
74/*
75 * module name, copyright, version, etc.
76 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
77 */
78
79#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
80
c8b0e6e1 81#ifdef CONFIG_IWL4965_DEBUG
82#define VD "d"
83#else
84#define VD
85#endif
86
c8b0e6e1 87#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
88#define VS "s"
89#else
90#define VS
91#endif
92
d1283948 93#define IWLWIFI_VERSION "1.2.22k" VD VS
94#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
95#define DRV_VERSION IWLWIFI_VERSION
96
97/* Change firmware file name, using "-" and incrementing number,
98 * *only* when uCode interface or architecture changes so that it
99 * is not compatible with earlier drivers.
 100 * This number will also appear at the << 8 bit position in the 1st dword of the uCode file */
101#define IWL4965_UCODE_API "-1"
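/* For reference: the API number above is appended to the firmware file name
 * requested from userspace (e.g. a name of the form "iwlwifi-4965-1.ucode");
 * the exact prefix is built elsewhere in the driver, so treat that name as
 * illustrative only. */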
102
103MODULE_DESCRIPTION(DRV_DESCRIPTION);
104MODULE_VERSION(DRV_VERSION);
105MODULE_AUTHOR(DRV_COPYRIGHT);
106MODULE_LICENSE("GPL");
107
108__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
109{
110 u16 fc = le16_to_cpu(hdr->frame_control);
111 int hdr_len = ieee80211_get_hdrlen(fc);
112
113 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
114 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
115 return NULL;
116}
117
118static const struct ieee80211_hw_mode *iwl4965_get_hw_mode(
119 struct iwl4965_priv *priv, int mode)
120{
121 int i;
122
123 for (i = 0; i < 3; i++)
124 if (priv->modes[i].mode == mode)
125 return &priv->modes[i];
126
127 return NULL;
128}
129
bb8c093b 130static int iwl4965_is_empty_essid(const char *essid, int essid_len)
131{
132 /* Single white space is for Linksys APs */
133 if (essid_len == 1 && essid[0] == ' ')
134 return 1;
135
136 /* Otherwise, if the entire essid is 0, we assume it is hidden */
137 while (essid_len) {
138 essid_len--;
139 if (essid[essid_len] != '\0')
140 return 0;
141 }
142
143 return 1;
144}
145
bb8c093b 146static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
147{
148 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
149 const char *s = essid;
150 char *d = escaped;
151
bb8c093b 152 if (iwl4965_is_empty_essid(essid, essid_len)) {
153 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
154 return escaped;
155 }
156
157 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
158 while (essid_len--) {
159 if (*s == '\0') {
160 *d++ = '\\';
161 *d++ = '0';
162 s++;
163 } else
164 *d++ = *s++;
165 }
166 *d = '\0';
167 return escaped;
168}
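/* Example: a 5-byte ESSID of 'a' 'b' NUL 'c' 'd' is escaped to "ab\0cd"
 * (each embedded NUL becomes a literal backslash followed by '0'), while an
 * all-zero or single-space ESSID is reported as "<hidden>". */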
169
bb8c093b 170static void iwl4965_print_hex_dump(int level, void *p, u32 len)
b481de9c 171{
c8b0e6e1 172#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 173 if (!(iwl4965_debug_level & level))
174 return;
175
176 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
177 p, len, 1);
178#endif
179}
180
181/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
182 * DMA services
183 *
184 * Theory of operation
185 *
186 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
187 * of buffer descriptors, each of which points to one or more data buffers for
188 * the device to read from or fill. Driver and device exchange status of each
189 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
190 * entries in each circular buffer, to protect against confusing empty and full
191 * queue states.
192 *
193 * The device reads or writes the data in the queues via the device's several
194 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
195 *
 196 * For the Tx queue, there are low-mark and high-mark limits. If, after queuing
 197 * a packet for Tx, the free space becomes less than the low mark, the Tx queue is
 198 * stopped. When packets are reclaimed (in the 'tx done' IRQ), the Tx queue is
 199 * resumed once the free space rises above the high mark.
200 *
201 * The 4965 operates with up to 17 queues: One receive queue, one transmit
202 * queue (#4) for sending commands to the device firmware, and 15 other
203 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
204 *
205 * See more detailed info in iwl-4965-hw.h.
206 ***************************************************/
207
bb8c093b 208static int iwl4965_queue_space(const struct iwl4965_queue *q)
b481de9c 209{
fc4b6853 210 int s = q->read_ptr - q->write_ptr;
b481de9c 211
fc4b6853 212 if (q->read_ptr > q->write_ptr)
213 s -= q->n_bd;
214
215 if (s <= 0)
216 s += q->n_window;
217 /* keep some reserve to not confuse empty and full situations */
218 s -= 2;
219 if (s < 0)
220 s = 0;
221 return s;
222}
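/* Example: with n_bd = n_window = 256, read_ptr = 10 and write_ptr = 250,
 * s starts at -240, is brought back into range (+256 = 16), and the 2-entry
 * reserve leaves 14 slots reported as free. */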
223
224/**
225 * iwl4965_queue_inc_wrap - increment queue index, wrap back to beginning
226 * @index -- current index
227 * @n_bd -- total number of entries in queue (must be power of 2)
228 */
bb8c093b 229static inline int iwl4965_queue_inc_wrap(int index, int n_bd)
230{
231 return ++index & (n_bd - 1);
232}
233
234/**
235 * iwl4965_queue_dec_wrap - decrement queue index, wrap back to end
236 * @index -- current index
237 * @n_bd -- total number of entries in queue (must be power of 2)
238 */
bb8c093b 239static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
240{
241 return --index & (n_bd - 1);
242}
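/* Example: with n_bd = 256 the mask is 0xFF, so iwl4965_queue_inc_wrap(255, 256)
 * returns 0 and iwl4965_queue_dec_wrap(0, 256) returns 255; this is why n_bd
 * must be a power of 2. */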
243
bb8c093b 244static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
b481de9c 245{
246 return q->write_ptr > q->read_ptr ?
247 (i >= q->read_ptr && i < q->write_ptr) :
248 !(i < q->read_ptr && i >= q->write_ptr);
249}
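/* Example: with read_ptr = 250 and write_ptr = 10 (wrapped), indexes 250..255
 * and 0..9 count as used, so x2_queue_used(q, 5) is true while
 * x2_queue_used(q, 100) is false. */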
250
bb8c093b 251static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
b481de9c 252{
6440adb5 253 /* This is for the scan command, the big buffer at the end of the command array */
b481de9c 254 if (is_huge)
6440adb5 255 return q->n_window; /* must be power of 2 */
b481de9c 256
6440adb5 257 /* Otherwise, use normal size buffers */
258 return index & (q->n_window - 1);
259}
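/* Example: for the command queue, regular commands cycle through slots
 * 0 .. n_window-1 (index masked with n_window-1), while the oversized scan
 * command always lands in the extra slot at index n_window that is allocated
 * after the normal command array. */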
260
261/**
262 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
263 */
bb8c093b 264static int iwl4965_queue_init(struct iwl4965_priv *priv, struct iwl4965_queue *q,
265 int count, int slots_num, u32 id)
266{
267 q->n_bd = count;
268 q->n_window = slots_num;
269 q->id = id;
270
271 /* count must be power-of-two size, otherwise iwl4965_queue_inc_wrap
272 * and iwl4965_queue_dec_wrap are broken. */
273 BUG_ON(!is_power_of_2(count));
274
275 /* slots_num must be power-of-two size, otherwise
276 * get_cmd_index is broken. */
277 BUG_ON(!is_power_of_2(slots_num));
278
279 q->low_mark = q->n_window / 4;
280 if (q->low_mark < 4)
281 q->low_mark = 4;
282
283 q->high_mark = q->n_window / 8;
284 if (q->high_mark < 2)
285 q->high_mark = 2;
286
fc4b6853 287 q->write_ptr = q->read_ptr = 0;
288
289 return 0;
290}
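/* Example: with slots_num = 64 this gives low_mark = 16 and high_mark = 8;
 * with slots_num = 8 both are clamped to their minimums (4 and 2). */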
291
292/**
293 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
294 */
295static int iwl4965_tx_queue_alloc(struct iwl4965_priv *priv,
296 struct iwl4965_tx_queue *txq, u32 id)
297{
298 struct pci_dev *dev = priv->pci_dev;
299
300 /* Driver private data, only for Tx (not command) queues,
301 * not shared with device. */
302 if (id != IWL_CMD_QUEUE_NUM) {
303 txq->txb = kmalloc(sizeof(txq->txb[0]) *
304 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
305 if (!txq->txb) {
01ebd063 306 IWL_ERROR("kmalloc for auxiliary BD "
307 "structures failed\n");
308 goto error;
309 }
310 } else
311 txq->txb = NULL;
312
313 /* Circular buffer of transmit frame descriptors (TFDs),
314 * shared with device */
315 txq->bd = pci_alloc_consistent(dev,
316 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
317 &txq->q.dma_addr);
318
319 if (!txq->bd) {
320 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
321 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
322 goto error;
323 }
324 txq->q.id = id;
325
326 return 0;
327
328 error:
329 if (txq->txb) {
330 kfree(txq->txb);
331 txq->txb = NULL;
332 }
333
334 return -ENOMEM;
335}
336
337/**
338 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
339 */
340int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
341 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
342{
343 struct pci_dev *dev = priv->pci_dev;
344 int len;
345 int rc = 0;
346
347 /*
348 * Alloc buffer array for commands (Tx or other types of commands).
349 * For the command queue (#4), allocate command space + one big
 350 * command for scan, since the scan command is very large; the system will
351 * not have two scans at the same time, so only one is needed.
bb54244b 352 * For normal Tx queues (all other queues), no super-size command
353 * space is needed.
354 */
bb8c093b 355 len = sizeof(struct iwl4965_cmd) * slots_num;
356 if (txq_id == IWL_CMD_QUEUE_NUM)
357 len += IWL_MAX_SCAN_SIZE;
358 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
359 if (!txq->cmd)
360 return -ENOMEM;
361
8b6eaea8 362 /* Alloc driver data array and TFD circular buffer */
bb8c093b 363 rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
364 if (rc) {
365 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
366
367 return -ENOMEM;
368 }
369 txq->need_update = 0;
370
371 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
bb8c093b 372 * iwl4965_queue_inc_wrap and iwl4965_queue_dec_wrap are broken. */
b481de9c 373 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
374
375 /* Initialize queue's high/low-water marks, and head/tail indexes */
bb8c093b 376 iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
b481de9c 377
8b6eaea8 378 /* Tell device where to find queue */
bb8c093b 379 iwl4965_hw_tx_queue_init(priv, txq);
380
381 return 0;
382}
383
384/**
bb8c093b 385 * iwl4965_tx_queue_free - Deallocate DMA queue.
386 * @txq: Transmit queue to deallocate.
387 *
388 * Empty queue by removing and destroying all BD's.
389 * Free all buffers.
390 * 0-fill, but do not free "txq" descriptor structure.
b481de9c 391 */
bb8c093b 392void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
b481de9c 393{
bb8c093b 394 struct iwl4965_queue *q = &txq->q;
395 struct pci_dev *dev = priv->pci_dev;
396 int len;
397
398 if (q->n_bd == 0)
399 return;
400
401 /* first, empty all BD's */
fc4b6853 402 for (; q->write_ptr != q->read_ptr;
403 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd))
404 iwl4965_hw_txq_free_tfd(priv, txq);
b481de9c 405
bb8c093b 406 len = sizeof(struct iwl4965_cmd) * q->n_window;
407 if (q->id == IWL_CMD_QUEUE_NUM)
408 len += IWL_MAX_SCAN_SIZE;
409
6440adb5 410 /* De-alloc array of command/tx buffers */
411 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
412
6440adb5 413 /* De-alloc circular buffer of TFDs */
b481de9c 414 if (txq->q.n_bd)
bb8c093b 415 pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
416 txq->q.n_bd, txq->bd, txq->q.dma_addr);
417
6440adb5 418 /* De-alloc array of per-TFD driver data */
419 if (txq->txb) {
420 kfree(txq->txb);
421 txq->txb = NULL;
422 }
423
6440adb5 424 /* 0-fill queue descriptor structure */
425 memset(txq, 0, sizeof(*txq));
426}
427
bb8c093b 428const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
429
430/*************** STATION TABLE MANAGEMENT ****
9fbab516 431 * mac80211 should be examined to determine if sta_info is duplicating
432 * the functionality provided here
433 */
434
435/**************************************************************/
436
01ebd063 437#if 0 /* temporary disable till we add real remove station */
438/**
439 * iwl4965_remove_station - Remove driver's knowledge of station.
440 *
441 * NOTE: This does not remove station from device's station table.
442 */
bb8c093b 443static u8 iwl4965_remove_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
444{
445 int index = IWL_INVALID_STATION;
446 int i;
447 unsigned long flags;
448
449 spin_lock_irqsave(&priv->sta_lock, flags);
450
451 if (is_ap)
452 index = IWL_AP_ID;
453 else if (is_broadcast_ether_addr(addr))
454 index = priv->hw_setting.bcast_sta_id;
455 else
456 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
457 if (priv->stations[i].used &&
458 !compare_ether_addr(priv->stations[i].sta.sta.addr,
459 addr)) {
460 index = i;
461 break;
462 }
463
464 if (unlikely(index == IWL_INVALID_STATION))
465 goto out;
466
467 if (priv->stations[index].used) {
468 priv->stations[index].used = 0;
469 priv->num_stations--;
470 }
471
472 BUG_ON(priv->num_stations < 0);
473
474out:
475 spin_unlock_irqrestore(&priv->sta_lock, flags);
476 return 0;
477}
556f8db7 478#endif
b481de9c 479
480/**
481 * iwl4965_clear_stations_table - Clear the driver's station table
482 *
483 * NOTE: This does not clear or otherwise alter the device's station table.
484 */
bb8c093b 485static void iwl4965_clear_stations_table(struct iwl4965_priv *priv)
486{
487 unsigned long flags;
488
489 spin_lock_irqsave(&priv->sta_lock, flags);
490
491 priv->num_stations = 0;
492 memset(priv->stations, 0, sizeof(priv->stations));
493
494 spin_unlock_irqrestore(&priv->sta_lock, flags);
495}
496
497/**
498 * iwl4965_add_station_flags - Add station to tables in driver and device
499 */
500u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr,
501 int is_ap, u8 flags, void *ht_data)
502{
503 int i;
504 int index = IWL_INVALID_STATION;
bb8c093b 505 struct iwl4965_station_entry *station;
b481de9c 506 unsigned long flags_spin;
0795af57 507 DECLARE_MAC_BUF(mac);
508
509 spin_lock_irqsave(&priv->sta_lock, flags_spin);
510 if (is_ap)
511 index = IWL_AP_ID;
512 else if (is_broadcast_ether_addr(addr))
513 index = priv->hw_setting.bcast_sta_id;
514 else
515 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
516 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
517 addr)) {
518 index = i;
519 break;
520 }
521
522 if (!priv->stations[i].used &&
523 index == IWL_INVALID_STATION)
524 index = i;
525 }
526
527
528 /* These two conditions have the same outcome, but keep them separate
529 since they have different meanings */
530 if (unlikely(index == IWL_INVALID_STATION)) {
531 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
532 return index;
533 }
534
535 if (priv->stations[index].used &&
536 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
537 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
538 return index;
539 }
540
541
0795af57 542 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
543 station = &priv->stations[index];
544 station->used = 1;
545 priv->num_stations++;
546
6440adb5 547 /* Set up the REPLY_ADD_STA command to send to device */
bb8c093b 548 memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
549 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
550 station->sta.mode = 0;
551 station->sta.sta.sta_id = index;
552 station->sta.station_flags = 0;
553
c8b0e6e1 554#ifdef CONFIG_IWL4965_HT
555 /* BCAST station and IBSS stations do not work in HT mode */
556 if (index != priv->hw_setting.bcast_sta_id &&
557 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
558 iwl4965_set_ht_add_station(priv, index,
559 (struct ieee80211_ht_info *) ht_data);
c8b0e6e1 560#endif /*CONFIG_IWL4965_HT*/
561
562 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
563
564 /* Add station to device's station table */
bb8c093b 565 iwl4965_send_add_station(priv, &station->sta, flags);
566 return index;
567
568}
569
570/*************** DRIVER STATUS FUNCTIONS *****/
571
bb8c093b 572static inline int iwl4965_is_ready(struct iwl4965_priv *priv)
573{
574 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
575 * set but EXIT_PENDING is not */
576 return test_bit(STATUS_READY, &priv->status) &&
577 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
578 !test_bit(STATUS_EXIT_PENDING, &priv->status);
579}
580
bb8c093b 581static inline int iwl4965_is_alive(struct iwl4965_priv *priv)
582{
583 return test_bit(STATUS_ALIVE, &priv->status);
584}
585
bb8c093b 586static inline int iwl4965_is_init(struct iwl4965_priv *priv)
587{
588 return test_bit(STATUS_INIT, &priv->status);
589}
590
bb8c093b 591static inline int iwl4965_is_rfkill(struct iwl4965_priv *priv)
592{
593 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
594 test_bit(STATUS_RF_KILL_SW, &priv->status);
595}
596
bb8c093b 597static inline int iwl4965_is_ready_rf(struct iwl4965_priv *priv)
598{
599
bb8c093b 600 if (iwl4965_is_rfkill(priv))
601 return 0;
602
bb8c093b 603 return iwl4965_is_ready(priv);
604}
605
606/*************** HOST COMMAND QUEUE FUNCTIONS *****/
607
608#define IWL_CMD(x) case x : return #x
609
610static const char *get_cmd_string(u8 cmd)
611{
612 switch (cmd) {
613 IWL_CMD(REPLY_ALIVE);
614 IWL_CMD(REPLY_ERROR);
615 IWL_CMD(REPLY_RXON);
616 IWL_CMD(REPLY_RXON_ASSOC);
617 IWL_CMD(REPLY_QOS_PARAM);
618 IWL_CMD(REPLY_RXON_TIMING);
619 IWL_CMD(REPLY_ADD_STA);
620 IWL_CMD(REPLY_REMOVE_STA);
621 IWL_CMD(REPLY_REMOVE_ALL_STA);
622 IWL_CMD(REPLY_TX);
623 IWL_CMD(REPLY_RATE_SCALE);
624 IWL_CMD(REPLY_LEDS_CMD);
625 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
626 IWL_CMD(RADAR_NOTIFICATION);
627 IWL_CMD(REPLY_QUIET_CMD);
628 IWL_CMD(REPLY_CHANNEL_SWITCH);
629 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
630 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
631 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
632 IWL_CMD(POWER_TABLE_CMD);
633 IWL_CMD(PM_SLEEP_NOTIFICATION);
634 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
635 IWL_CMD(REPLY_SCAN_CMD);
636 IWL_CMD(REPLY_SCAN_ABORT_CMD);
637 IWL_CMD(SCAN_START_NOTIFICATION);
638 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
639 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
640 IWL_CMD(BEACON_NOTIFICATION);
641 IWL_CMD(REPLY_TX_BEACON);
642 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
643 IWL_CMD(QUIET_NOTIFICATION);
644 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
645 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
646 IWL_CMD(REPLY_BT_CONFIG);
647 IWL_CMD(REPLY_STATISTICS_CMD);
648 IWL_CMD(STATISTICS_NOTIFICATION);
649 IWL_CMD(REPLY_CARD_STATE_CMD);
650 IWL_CMD(CARD_STATE_NOTIFICATION);
651 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
652 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
653 IWL_CMD(SENSITIVITY_CMD);
654 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
655 IWL_CMD(REPLY_RX_PHY_CMD);
656 IWL_CMD(REPLY_RX_MPDU_CMD);
657 IWL_CMD(REPLY_4965_RX);
658 IWL_CMD(REPLY_COMPRESSED_BA);
659 default:
660 return "UNKNOWN";
661
662 }
663}
664
665#define HOST_COMPLETE_TIMEOUT (HZ / 2)
666
667/**
bb8c093b 668 * iwl4965_enqueue_hcmd - enqueue a uCode command
 669 * @priv: device private data pointer
 670 * @cmd: a pointer to the uCode command structure
 671 *
 672 * The function returns < 0 values to indicate that the operation
 673 * failed. On success, it returns the index (> 0) of the command in the
 674 * command queue.
675 */
bb8c093b 676static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
b481de9c 677{
678 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
679 struct iwl4965_queue *q = &txq->q;
680 struct iwl4965_tfd_frame *tfd;
b481de9c 681 u32 *control_flags;
bb8c093b 682 struct iwl4965_cmd *out_cmd;
683 u32 idx;
684 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
685 dma_addr_t phys_addr;
686 int ret;
687 unsigned long flags;
688
689 /* If any of the command structures end up being larger than
 690 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
691 * we will need to increase the size of the TFD entries */
692 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
693 !(cmd->meta.flags & CMD_SIZE_HUGE));
694
bb8c093b 695 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
696 IWL_ERROR("No space for Tx\n");
697 return -ENOSPC;
698 }
699
700 spin_lock_irqsave(&priv->hcmd_lock, flags);
701
fc4b6853 702 tfd = &txq->bd[q->write_ptr];
703 memset(tfd, 0, sizeof(*tfd));
704
705 control_flags = (u32 *) tfd;
706
fc4b6853 707 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
708 out_cmd = &txq->cmd[idx];
709
710 out_cmd->hdr.cmd = cmd->id;
711 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
712 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
713
714 /* At this point, the out_cmd now has all of the incoming cmd
715 * information */
716
717 out_cmd->hdr.flags = 0;
718 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
fc4b6853 719 INDEX_TO_SEQ(q->write_ptr));
720 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
721 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
722
723 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
724 offsetof(struct iwl4965_cmd, hdr);
725 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
726
727 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
728 "%d bytes at %d[%d]:%d\n",
729 get_cmd_string(out_cmd->hdr.cmd),
730 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
fc4b6853 731 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
732
733 txq->need_update = 1;
734
735 /* Set up entry in queue's byte count circular buffer */
b481de9c 736 ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
737
738 /* Increment and update queue's write index */
739 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
740 iwl4965_tx_queue_update_write_ptr(priv, txq);
741
742 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
743 return ret ? ret : idx;
744}
745
bb8c093b 746static int iwl4965_send_cmd_async(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
747{
748 int ret;
749
750 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
751
752 /* An asynchronous command can not expect an SKB to be set. */
753 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
754
755 /* An asynchronous command MUST have a callback. */
756 BUG_ON(!cmd->meta.u.callback);
757
758 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
759 return -EBUSY;
760
bb8c093b 761 ret = iwl4965_enqueue_hcmd(priv, cmd);
b481de9c 762 if (ret < 0) {
bb8c093b 763 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
764 get_cmd_string(cmd->id), ret);
765 return ret;
766 }
767 return 0;
768}
769
bb8c093b 770static int iwl4965_send_cmd_sync(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
b481de9c
ZY
771{
772 int cmd_idx;
773 int ret;
774 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
775
776 BUG_ON(cmd->meta.flags & CMD_ASYNC);
777
778 /* A synchronous command can not have a callback set. */
779 BUG_ON(cmd->meta.u.callback != NULL);
780
781 if (atomic_xchg(&entry, 1)) {
782 IWL_ERROR("Error sending %s: Already sending a host command\n",
783 get_cmd_string(cmd->id));
784 return -EBUSY;
785 }
786
787 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
788
789 if (cmd->meta.flags & CMD_WANT_SKB)
790 cmd->meta.source = &cmd->meta;
791
bb8c093b 792 cmd_idx = iwl4965_enqueue_hcmd(priv, cmd);
793 if (cmd_idx < 0) {
794 ret = cmd_idx;
bb8c093b 795 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
796 get_cmd_string(cmd->id), ret);
797 goto out;
798 }
799
800 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
801 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
802 HOST_COMPLETE_TIMEOUT);
803 if (!ret) {
804 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
805 IWL_ERROR("Error sending %s: time out after %dms.\n",
806 get_cmd_string(cmd->id),
807 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
808
809 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
810 ret = -ETIMEDOUT;
811 goto cancel;
812 }
813 }
814
815 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
816 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
817 get_cmd_string(cmd->id));
818 ret = -ECANCELED;
819 goto fail;
820 }
821 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
822 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
823 get_cmd_string(cmd->id));
824 ret = -EIO;
825 goto fail;
826 }
827 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
828 IWL_ERROR("Error: Response NULL in '%s'\n",
829 get_cmd_string(cmd->id));
830 ret = -EIO;
831 goto out;
832 }
833
834 ret = 0;
835 goto out;
836
837cancel:
838 if (cmd->meta.flags & CMD_WANT_SKB) {
bb8c093b 839 struct iwl4965_cmd *qcmd;
840
841 /* Cancel the CMD_WANT_SKB flag for the cmd in the
842 * TX cmd queue. Otherwise in case the cmd comes
843 * in later, it will possibly set an invalid
844 * address (cmd->meta.source). */
845 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
846 qcmd->meta.flags &= ~CMD_WANT_SKB;
847 }
848fail:
849 if (cmd->meta.u.skb) {
850 dev_kfree_skb_any(cmd->meta.u.skb);
851 cmd->meta.u.skb = NULL;
852 }
853out:
854 atomic_set(&entry, 0);
855 return ret;
856}
857
bb8c093b 858int iwl4965_send_cmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
b481de9c 859{
b481de9c 860 if (cmd->meta.flags & CMD_ASYNC)
bb8c093b 861 return iwl4965_send_cmd_async(priv, cmd);
b481de9c 862
bb8c093b 863 return iwl4965_send_cmd_sync(priv, cmd);
864}
865
bb8c093b 866int iwl4965_send_cmd_pdu(struct iwl4965_priv *priv, u8 id, u16 len, const void *data)
b481de9c 867{
bb8c093b 868 struct iwl4965_host_cmd cmd = {
869 .id = id,
870 .len = len,
871 .data = data,
872 };
873
bb8c093b 874 return iwl4965_send_cmd_sync(priv, &cmd);
875}
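/* Note: the payload passed to iwl4965_send_cmd_pdu() is copied into the command
 * queue slot by iwl4965_enqueue_hcmd(), so the caller's buffer may live on the
 * stack; see iwl4965_send_bt_config() below for a typical use. */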
876
bb8c093b 877static int __must_check iwl4965_send_cmd_u32(struct iwl4965_priv *priv, u8 id, u32 val)
b481de9c 878{
bb8c093b 879 struct iwl4965_host_cmd cmd = {
880 .id = id,
881 .len = sizeof(val),
882 .data = &val,
883 };
884
bb8c093b 885 return iwl4965_send_cmd_sync(priv, &cmd);
886}
887
bb8c093b 888int iwl4965_send_statistics_request(struct iwl4965_priv *priv)
b481de9c 889{
bb8c093b 890 return iwl4965_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
891}
892
893/**
bb8c093b 894 * iwl4965_rxon_add_station - add station into station table.
895 *
896 * there is only one AP station with id= IWL_AP_ID
 897 * NOTE: mutex must be held before calling this function
898 */
bb8c093b 899static int iwl4965_rxon_add_station(struct iwl4965_priv *priv,
900 const u8 *addr, int is_ap)
901{
556f8db7 902 u8 sta_id;
b481de9c 903
6440adb5 904 /* Add station to device's station table */
905#ifdef CONFIG_IWL4965_HT
906 struct ieee80211_conf *conf = &priv->hw->conf;
907 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
908
909 if ((is_ap) &&
910 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
911 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
912 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
913 0, cur_ht_config);
914 else
915#endif /* CONFIG_IWL4965_HT */
916 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
917 0, NULL);
918
919 /* Set up default rate scaling table in device's station table */
920 iwl4965_add_station(priv, addr, is_ap);
921
556f8db7 922 return sta_id;
923}
924
925/**
bb8c093b 926 * iwl4965_set_rxon_channel - Set the phymode and channel values in staging RXON
927 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
928 * @channel: Any channel valid for the requested phymode
929
930 * In addition to setting the staging RXON, priv->phymode is also set.
931 *
932 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
933 * in the staging RXON flag structure based on the phymode
934 */
935static int iwl4965_set_rxon_channel(struct iwl4965_priv *priv, u8 phymode,
936 u16 channel)
b481de9c 937{
bb8c093b 938 if (!iwl4965_get_channel_info(priv, phymode, channel)) {
939 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
940 channel, phymode);
941 return -EINVAL;
942 }
943
944 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
945 (priv->phymode == phymode))
946 return 0;
947
948 priv->staging_rxon.channel = cpu_to_le16(channel);
949 if (phymode == MODE_IEEE80211A)
950 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
951 else
952 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
953
954 priv->phymode = phymode;
955
956 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
957
958 return 0;
959}
960
961/**
bb8c093b 962 * iwl4965_check_rxon_cmd - validate RXON structure is valid
963 *
964 * NOTE: This is really only useful during development and can eventually
965 * be #ifdef'd out once the driver is stable and folks aren't actively
966 * making changes
967 */
bb8c093b 968static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon)
969{
970 int error = 0;
971 int counter = 1;
972
973 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
974 error |= le32_to_cpu(rxon->flags &
975 (RXON_FLG_TGJ_NARROW_BAND_MSK |
976 RXON_FLG_RADAR_DETECT_MSK));
977 if (error)
978 IWL_WARNING("check 24G fields %d | %d\n",
979 counter++, error);
980 } else {
981 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
982 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
983 if (error)
984 IWL_WARNING("check 52 fields %d | %d\n",
985 counter++, error);
986 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
987 if (error)
988 IWL_WARNING("check 52 CCK %d | %d\n",
989 counter++, error);
990 }
991 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
992 if (error)
993 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
994
995 /* make sure basic rates 6Mbps and 1Mbps are supported */
996 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
997 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
998 if (error)
999 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
1000
1001 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
1002 if (error)
1003 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
1004
1005 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
1006 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
1007 if (error)
1008 IWL_WARNING("check CCK and short slot %d | %d\n",
1009 counter++, error);
1010
1011 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
1012 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
1013 if (error)
1014 IWL_WARNING("check CCK & auto detect %d | %d\n",
1015 counter++, error);
1016
1017 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
1018 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
1019 if (error)
1020 IWL_WARNING("check TGG and auto detect %d | %d\n",
1021 counter++, error);
1022
1023 if (error)
1024 IWL_WARNING("Tuning to channel %d\n",
1025 le16_to_cpu(rxon->channel));
1026
1027 if (error) {
bb8c093b 1028 IWL_ERROR("Invalid iwl4965_rxon_assoc_cmd field values\n");
1029 return -1;
1030 }
1031 return 0;
1032}
1033
1034/**
9fbab516 1035 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
01ebd063 1036 * @priv: staging_rxon is compared to active_rxon
b481de9c 1037 *
1038 * If the RXON structure is changing enough to require a new tune,
1039 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
1040 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
b481de9c 1041 */
bb8c093b 1042static int iwl4965_full_rxon_required(struct iwl4965_priv *priv)
1043{
1044
1045 /* These items are only settable from the full RXON command */
1046 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
1047 compare_ether_addr(priv->staging_rxon.bssid_addr,
1048 priv->active_rxon.bssid_addr) ||
1049 compare_ether_addr(priv->staging_rxon.node_addr,
1050 priv->active_rxon.node_addr) ||
1051 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
1052 priv->active_rxon.wlap_bssid_addr) ||
1053 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
1054 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
1055 (priv->staging_rxon.air_propagation !=
1056 priv->active_rxon.air_propagation) ||
1057 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
1058 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
1059 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
1060 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
1061 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
1062 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
1063 return 1;
1064
1065 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
1066 * be updated with the RXON_ASSOC command -- however only some
1067 * flag transitions are allowed using RXON_ASSOC */
1068
1069 /* Check if we are not switching bands */
1070 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1071 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1072 return 1;
1073
1074 /* Check if we are switching association toggle */
1075 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1076 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1077 return 1;
1078
1079 return 0;
1080}
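/* Example: changing staging_rxon.channel (a new tune) or toggling
 * RXON_FILTER_ASSOC_MSK forces a full RXON command, whereas a change limited
 * to basic rates or other filter_flags bits can be sent as the lighter
 * RXON_ASSOC update handled by iwl4965_send_rxon_assoc(). */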
1081
bb8c093b 1082static int iwl4965_send_rxon_assoc(struct iwl4965_priv *priv)
1083{
1084 int rc = 0;
1085 struct iwl4965_rx_packet *res = NULL;
1086 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1087 struct iwl4965_host_cmd cmd = {
1088 .id = REPLY_RXON_ASSOC,
1089 .len = sizeof(rxon_assoc),
1090 .meta.flags = CMD_WANT_SKB,
1091 .data = &rxon_assoc,
1092 };
1093 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
1094 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
1095
1096 if ((rxon1->flags == rxon2->flags) &&
1097 (rxon1->filter_flags == rxon2->filter_flags) &&
1098 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1099 (rxon1->ofdm_ht_single_stream_basic_rates ==
1100 rxon2->ofdm_ht_single_stream_basic_rates) &&
1101 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1102 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1103 (rxon1->rx_chain == rxon2->rx_chain) &&
1104 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1105 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1106 return 0;
1107 }
1108
1109 rxon_assoc.flags = priv->staging_rxon.flags;
1110 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1111 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1112 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1113 rxon_assoc.reserved = 0;
1114 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1115 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1116 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1117 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1118 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1119
bb8c093b 1120 rc = iwl4965_send_cmd_sync(priv, &cmd);
1121 if (rc)
1122 return rc;
1123
bb8c093b 1124 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1125 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1126 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1127 rc = -EIO;
1128 }
1129
1130 priv->alloc_rxb_skb--;
1131 dev_kfree_skb_any(cmd.meta.u.skb);
1132
1133 return rc;
1134}
1135
1136/**
bb8c093b 1137 * iwl4965_commit_rxon - commit staging_rxon to hardware
b481de9c 1138 *
01ebd063 1139 * The RXON command in staging_rxon is committed to the hardware and
1140 * the active_rxon structure is updated with the new data. This
1141 * function correctly transitions out of the RXON_ASSOC_MSK state if
1142 * a HW tune is required based on the RXON structure changes.
1143 */
bb8c093b 1144static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1145{
1146 /* cast away the const for active_rxon in this function */
bb8c093b 1147 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
0795af57 1148 DECLARE_MAC_BUF(mac);
1149 int rc = 0;
1150
bb8c093b 1151 if (!iwl4965_is_alive(priv))
1152 return -1;
1153
1154 /* always get timestamp with Rx frame */
1155 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1156
bb8c093b 1157 rc = iwl4965_check_rxon_cmd(&priv->staging_rxon);
1158 if (rc) {
1159 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
1160 return -EINVAL;
1161 }
1162
1163 /* If we don't need to send a full RXON, we can use
bb8c093b 1164 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
b481de9c 1165 * and other flags for the current radio configuration. */
1166 if (!iwl4965_full_rxon_required(priv)) {
1167 rc = iwl4965_send_rxon_assoc(priv);
1168 if (rc) {
1169 IWL_ERROR("Error setting RXON_ASSOC "
1170 "configuration (%d).\n", rc);
1171 return rc;
1172 }
1173
1174 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1175
1176 return 0;
1177 }
1178
1179 /* station table will be cleared */
1180 priv->assoc_station_added = 0;
1181
c8b0e6e1 1182#ifdef CONFIG_IWL4965_SENSITIVITY
1183 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1184 if (!priv->error_recovering)
1185 priv->start_calib = 0;
1186
1187 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
c8b0e6e1 1188#endif /* CONFIG_IWL4965_SENSITIVITY */
1189
1190 /* If we are currently associated and the new config requires
1191 * an RXON_ASSOC and the new config wants the associated mask enabled,
1192 * we must clear the associated from the active configuration
1193 * before we apply the new config */
bb8c093b 1194 if (iwl4965_is_associated(priv) &&
1195 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1196 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1197 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1198
1199 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1200 sizeof(struct iwl4965_rxon_cmd),
1201 &priv->active_rxon);
1202
1203 /* If the mask clearing failed then we set
1204 * active_rxon back to what it was previously */
1205 if (rc) {
1206 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1207 IWL_ERROR("Error clearing ASSOC_MSK on current "
1208 "configuration (%d).\n", rc);
1209 return rc;
1210 }
1211 }
1212
1213 IWL_DEBUG_INFO("Sending RXON\n"
1214 "* with%s RXON_FILTER_ASSOC_MSK\n"
1215 "* channel = %d\n"
0795af57 1216 "* bssid = %s\n",
1217 ((priv->staging_rxon.filter_flags &
1218 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1219 le16_to_cpu(priv->staging_rxon.channel),
0795af57 1220 print_mac(mac, priv->staging_rxon.bssid_addr));
1221
1222 /* Apply the new configuration */
1223 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1224 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon);
1225 if (rc) {
1226 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1227 return rc;
1228 }
1229
bb8c093b 1230 iwl4965_clear_stations_table(priv);
556f8db7 1231
c8b0e6e1 1232#ifdef CONFIG_IWL4965_SENSITIVITY
1233 if (!priv->error_recovering)
1234 priv->start_calib = 0;
1235
1236 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1237 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
c8b0e6e1 1238#endif /* CONFIG_IWL4965_SENSITIVITY */
1239
1240 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1241
1242 /* If we issue a new RXON command which required a tune then we must
1243 * send a new TXPOWER command or we won't be able to Tx any frames */
bb8c093b 1244 rc = iwl4965_hw_reg_send_txpower(priv);
1245 if (rc) {
1246 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1247 return rc;
1248 }
1249
1250 /* Add the broadcast address so we can send broadcast frames */
bb8c093b 1251 if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) ==
1252 IWL_INVALID_STATION) {
1253 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1254 return -EIO;
1255 }
1256
1257 /* If we have set the ASSOC_MSK and we are in BSS mode then
1258 * add the IWL_AP_ID to the station rate table */
bb8c093b 1259 if (iwl4965_is_associated(priv) &&
b481de9c 1260 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
bb8c093b 1261 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1262 == IWL_INVALID_STATION) {
1263 IWL_ERROR("Error adding AP address for transmit.\n");
1264 return -EIO;
1265 }
1266 priv->assoc_station_added = 1;
1267 }
1268
1269 return 0;
1270}
1271
bb8c093b 1272static int iwl4965_send_bt_config(struct iwl4965_priv *priv)
b481de9c 1273{
bb8c093b 1274 struct iwl4965_bt_cmd bt_cmd = {
1275 .flags = 3,
1276 .lead_time = 0xAA,
1277 .max_kill = 1,
1278 .kill_ack_mask = 0,
1279 .kill_cts_mask = 0,
1280 };
1281
1282 return iwl4965_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1283 sizeof(struct iwl4965_bt_cmd), &bt_cmd);
1284}
1285
bb8c093b 1286static int iwl4965_send_scan_abort(struct iwl4965_priv *priv)
1287{
1288 int rc = 0;
1289 struct iwl4965_rx_packet *res;
1290 struct iwl4965_host_cmd cmd = {
1291 .id = REPLY_SCAN_ABORT_CMD,
1292 .meta.flags = CMD_WANT_SKB,
1293 };
1294
1295 /* If there isn't a scan actively going on in the hardware
1296 * then we are in between scan bands and not actually
1297 * actively scanning, so don't send the abort command */
1298 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1299 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1300 return 0;
1301 }
1302
bb8c093b 1303 rc = iwl4965_send_cmd_sync(priv, &cmd);
1304 if (rc) {
1305 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1306 return rc;
1307 }
1308
bb8c093b 1309 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1310 if (res->u.status != CAN_ABORT_STATUS) {
1311 /* The scan abort will return 1 for success or
1312 * 2 for "failure". A failure condition can be
1313 * due to simply not being in an active scan which
 1314 * can occur if we send the scan abort before the
 1315 * microcode has notified us that a scan is
1316 * completed. */
1317 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1318 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1319 clear_bit(STATUS_SCAN_HW, &priv->status);
1320 }
1321
1322 dev_kfree_skb_any(cmd.meta.u.skb);
1323
1324 return rc;
1325}
1326
1327static int iwl4965_card_state_sync_callback(struct iwl4965_priv *priv,
1328 struct iwl4965_cmd *cmd,
1329 struct sk_buff *skb)
1330{
1331 return 1;
1332}
1333
1334/*
1335 * CARD_STATE_CMD
1336 *
9fbab516 1337 * Use: Sets the device's internal card state to enable, disable, or halt
1338 *
1339 * When in the 'enable' state the card operates as normal.
1340 * When in the 'disable' state, the card enters into a low power mode.
1341 * When in the 'halt' state, the card is shut down and must be fully
1342 * restarted to come back on.
1343 */
bb8c093b 1344static int iwl4965_send_card_state(struct iwl4965_priv *priv, u32 flags, u8 meta_flag)
b481de9c 1345{
bb8c093b 1346 struct iwl4965_host_cmd cmd = {
1347 .id = REPLY_CARD_STATE_CMD,
1348 .len = sizeof(u32),
1349 .data = &flags,
1350 .meta.flags = meta_flag,
1351 };
1352
1353 if (meta_flag & CMD_ASYNC)
bb8c093b 1354 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
b481de9c 1355
bb8c093b 1356 return iwl4965_send_cmd(priv, &cmd);
1357}
1358
1359static int iwl4965_add_sta_sync_callback(struct iwl4965_priv *priv,
1360 struct iwl4965_cmd *cmd, struct sk_buff *skb)
b481de9c 1361{
bb8c093b 1362 struct iwl4965_rx_packet *res = NULL;
1363
1364 if (!skb) {
1365 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1366 return 1;
1367 }
1368
bb8c093b 1369 res = (struct iwl4965_rx_packet *)skb->data;
1370 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1371 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1372 res->hdr.flags);
1373 return 1;
1374 }
1375
1376 switch (res->u.add_sta.status) {
1377 case ADD_STA_SUCCESS_MSK:
1378 break;
1379 default:
1380 break;
1381 }
1382
1383 /* We didn't cache the SKB; let the caller free it */
1384 return 1;
1385}
1386
1387int iwl4965_send_add_station(struct iwl4965_priv *priv,
1388 struct iwl4965_addsta_cmd *sta, u8 flags)
b481de9c 1389{
bb8c093b 1390 struct iwl4965_rx_packet *res = NULL;
b481de9c 1391 int rc = 0;
bb8c093b 1392 struct iwl4965_host_cmd cmd = {
b481de9c 1393 .id = REPLY_ADD_STA,
bb8c093b 1394 .len = sizeof(struct iwl4965_addsta_cmd),
1395 .meta.flags = flags,
1396 .data = sta,
1397 };
1398
1399 if (flags & CMD_ASYNC)
bb8c093b 1400 cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
1401 else
1402 cmd.meta.flags |= CMD_WANT_SKB;
1403
bb8c093b 1404 rc = iwl4965_send_cmd(priv, &cmd);
1405
1406 if (rc || (flags & CMD_ASYNC))
1407 return rc;
1408
bb8c093b 1409 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1410 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1411 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1412 res->hdr.flags);
1413 rc = -EIO;
1414 }
1415
1416 if (rc == 0) {
1417 switch (res->u.add_sta.status) {
1418 case ADD_STA_SUCCESS_MSK:
1419 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1420 break;
1421 default:
1422 rc = -EIO;
1423 IWL_WARNING("REPLY_ADD_STA failed\n");
1424 break;
1425 }
1426 }
1427
1428 priv->alloc_rxb_skb--;
1429 dev_kfree_skb_any(cmd.meta.u.skb);
1430
1431 return rc;
1432}
1433
bb8c093b 1434static int iwl4965_update_sta_key_info(struct iwl4965_priv *priv,
1435 struct ieee80211_key_conf *keyconf,
1436 u8 sta_id)
1437{
1438 unsigned long flags;
1439 __le16 key_flags = 0;
1440
1441 switch (keyconf->alg) {
1442 case ALG_CCMP:
1443 key_flags |= STA_KEY_FLG_CCMP;
1444 key_flags |= cpu_to_le16(
1445 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1446 key_flags &= ~STA_KEY_FLG_INVALID;
1447 break;
1448 case ALG_TKIP:
1449 case ALG_WEP:
1450 default:
1451 return -EINVAL;
1452 }
1453 spin_lock_irqsave(&priv->sta_lock, flags);
1454 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1455 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1456 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1457 keyconf->keylen);
1458
1459 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1460 keyconf->keylen);
1461 priv->stations[sta_id].sta.key.key_flags = key_flags;
1462 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1463 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1464
1465 spin_unlock_irqrestore(&priv->sta_lock, flags);
1466
1467 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
bb8c093b 1468 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1469 return 0;
1470}
1471
bb8c093b 1472static int iwl4965_clear_sta_key_info(struct iwl4965_priv *priv, u8 sta_id)
1473{
1474 unsigned long flags;
1475
1476 spin_lock_irqsave(&priv->sta_lock, flags);
1477 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl4965_hw_key));
1478 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo));
1479 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1480 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1481 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1482 spin_unlock_irqrestore(&priv->sta_lock, flags);
1483
1484 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
bb8c093b 1485 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1486 return 0;
1487}
1488
bb8c093b 1489static void iwl4965_clear_free_frames(struct iwl4965_priv *priv)
1490{
1491 struct list_head *element;
1492
1493 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1494 priv->frames_count);
1495
1496 while (!list_empty(&priv->free_frames)) {
1497 element = priv->free_frames.next;
1498 list_del(element);
bb8c093b 1499 kfree(list_entry(element, struct iwl4965_frame, list));
1500 priv->frames_count--;
1501 }
1502
1503 if (priv->frames_count) {
1504 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1505 priv->frames_count);
1506 priv->frames_count = 0;
1507 }
1508}
1509
bb8c093b 1510static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl4965_priv *priv)
b481de9c 1511{
bb8c093b 1512 struct iwl4965_frame *frame;
1513 struct list_head *element;
1514 if (list_empty(&priv->free_frames)) {
1515 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1516 if (!frame) {
1517 IWL_ERROR("Could not allocate frame!\n");
1518 return NULL;
1519 }
1520
1521 priv->frames_count++;
1522 return frame;
1523 }
1524
1525 element = priv->free_frames.next;
1526 list_del(element);
bb8c093b 1527 return list_entry(element, struct iwl4965_frame, list);
1528}
1529
bb8c093b 1530static void iwl4965_free_frame(struct iwl4965_priv *priv, struct iwl4965_frame *frame)
1531{
1532 memset(frame, 0, sizeof(*frame));
1533 list_add(&frame->list, &priv->free_frames);
1534}
1535
bb8c093b 1536unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv,
1537 struct ieee80211_hdr *hdr,
1538 const u8 *dest, int left)
1539{
1540
bb8c093b 1541 if (!iwl4965_is_associated(priv) || !priv->ibss_beacon ||
1542 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1543 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1544 return 0;
1545
1546 if (priv->ibss_beacon->len > left)
1547 return 0;
1548
1549 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1550
1551 return priv->ibss_beacon->len;
1552}
1553
bb8c093b 1554int iwl4965_rate_index_from_plcp(int plcp)
1555{
1556 int i = 0;
1557
77626355 1558 /* 4965 HT rate format */
1559 if (plcp & RATE_MCS_HT_MSK) {
1560 i = (plcp & 0xff);
1561
1562 if (i >= IWL_RATE_MIMO_6M_PLCP)
1563 i = i - IWL_RATE_MIMO_6M_PLCP;
1564
1565 i += IWL_FIRST_OFDM_RATE;
 1566 /* skip 9M; not supported in HT */
1567 if (i >= IWL_RATE_9M_INDEX)
1568 i += 1;
1569 if ((i >= IWL_FIRST_OFDM_RATE) &&
1570 (i <= IWL_LAST_OFDM_RATE))
1571 return i;
1572
1573 /* 4965 legacy rate format, search for match in table */
b481de9c 1574 } else {
1575 for (i = 0; i < ARRAY_SIZE(iwl4965_rates); i++)
 1576 if (iwl4965_rates[i].plcp == (plcp & 0xFF))
1577 return i;
1578 }
1579 return -1;
1580}
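/* Example: an HT PLCP value with RATE_MCS_HT_MSK set maps its low byte back to
 * a legacy OFDM index starting at IWL_FIRST_OFDM_RATE (skipping the 9M slot,
 * which HT does not support); legacy PLCP values are simply matched against
 * the iwl4965_rates[] table. */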
1581
bb8c093b 1582static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
1583{
1584 u8 i;
1585
1586 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
bb8c093b 1587 i = iwl4965_rates[i].next_ieee) {
b481de9c 1588 if (rate_mask & (1 << i))
bb8c093b 1589 return iwl4965_rates[i].plcp;
1590 }
1591
1592 return IWL_RATE_INVALID;
1593}
1594
bb8c093b 1595static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv)
b481de9c 1596{
bb8c093b 1597 struct iwl4965_frame *frame;
1598 unsigned int frame_size;
1599 int rc;
1600 u8 rate;
1601
bb8c093b 1602 frame = iwl4965_get_free_frame(priv);
1603
1604 if (!frame) {
1605 IWL_ERROR("Could not obtain free frame buffer for beacon "
1606 "command.\n");
1607 return -ENOMEM;
1608 }
1609
1610 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
bb8c093b 1611 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
1612 0xFF0);
1613 if (rate == IWL_INVALID_RATE)
1614 rate = IWL_RATE_6M_PLCP;
1615 } else {
bb8c093b 1616 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1617 if (rate == IWL_INVALID_RATE)
1618 rate = IWL_RATE_1M_PLCP;
1619 }
1620
bb8c093b 1621 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
b481de9c 1622
bb8c093b 1623 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1624 &frame->u.cmd[0]);
1625
bb8c093b 1626 iwl4965_free_frame(priv, frame);
1627
1628 return rc;
1629}
1630
1631/******************************************************************************
1632 *
1633 * EEPROM related functions
1634 *
1635 ******************************************************************************/
1636
bb8c093b 1637static void get_eeprom_mac(struct iwl4965_priv *priv, u8 *mac)
1638{
1639 memcpy(mac, priv->eeprom.mac_address, 6);
1640}
1641
1642/**
bb8c093b 1643 * iwl4965_eeprom_init - read EEPROM contents
b481de9c 1644 *
6440adb5 1645 * Load the EEPROM contents from adapter into priv->eeprom
1646 *
1647 * NOTE: This routine uses the non-debug IO access functions.
1648 */
bb8c093b 1649int iwl4965_eeprom_init(struct iwl4965_priv *priv)
b481de9c 1650{
0e5ce1f3 1651 __le16 *e = (__le16 *)&priv->eeprom;
bb8c093b 1652 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
1653 u32 r;
1654 int sz = sizeof(priv->eeprom);
1655 int rc;
1656 int i;
1657 u16 addr;
1658
 1659 /* The EEPROM structure has several padding buffers within it,
 1660 * and adding new EEPROM maps is subject to programmer errors
 1661 * which may be very difficult to identify without explicitly
 1662 * checking the resulting size of the eeprom map. */
1663 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1664
1665 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1666 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1667 return -ENOENT;
1668 }
1669
6440adb5 1670 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
bb8c093b 1671 rc = iwl4965_eeprom_acquire_semaphore(priv);
b481de9c 1672 if (rc < 0) {
91e17473 1673 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1674 return -ENOENT;
1675 }
1676
1677 /* eeprom is an array of 16bit values */
1678 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1679 _iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
1680 _iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1681
1682 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1683 i += IWL_EEPROM_ACCESS_DELAY) {
bb8c093b 1684 r = _iwl4965_read_direct32(priv, CSR_EEPROM_REG);
1685 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1686 break;
1687 udelay(IWL_EEPROM_ACCESS_DELAY);
1688 }
1689
1690 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1691 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1692 rc = -ETIMEDOUT;
1693 goto done;
1694 }
0e5ce1f3 1695 e[addr / 2] = cpu_to_le16(r >> 16);
1696 }
1697 rc = 0;
1698
1699done:
bb8c093b 1700 iwl4965_eeprom_release_semaphore(priv);
1701 return rc;
1702}
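/*
 * Illustrative example (added for clarity, the offset is hypothetical):
 * each loop iteration above fetches one 16-bit EEPROM word.  To read the
 * word at byte offset 0x2A, the driver writes (0x2A << 1) to
 * CSR_EEPROM_REG, polls until CSR_EEPROM_REG_READ_VALID_MSK is set, and
 * stores the data returned in the upper 16 bits of the register at
 * e[0x2A / 2], keeping priv->eeprom in little-endian byte order.
 */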
1703
1704/******************************************************************************
1705 *
1706 * Misc. internal state and helper functions
1707 *
1708 ******************************************************************************/
c8b0e6e1 1709#ifdef CONFIG_IWL4965_DEBUG
1710
1711/**
bb8c093b 1712 * iwl4965_report_frame - dump frame to syslog during debug sessions
b481de9c 1713 *
9fbab516 1714 * You may hack this function to show different aspects of received frames,
1715 * including selective frame dumps.
1716 * group100 parameter selects whether to show 1 out of 100 good frames.
1717 *
1718 * TODO: This was originally written for 3945, need to audit for
1719 * proper operation with 4965.
b481de9c 1720 */
1721void iwl4965_report_frame(struct iwl4965_priv *priv,
1722 struct iwl4965_rx_packet *pkt,
1723 struct ieee80211_hdr *header, int group100)
1724{
1725 u32 to_us;
1726 u32 print_summary = 0;
1727 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1728 u32 hundred = 0;
1729 u32 dataframe = 0;
1730 u16 fc;
1731 u16 seq_ctl;
1732 u16 channel;
1733 u16 phy_flags;
1734 int rate_sym;
1735 u16 length;
1736 u16 status;
1737 u16 bcn_tmr;
1738 u32 tsf_low;
1739 u64 tsf;
1740 u8 rssi;
1741 u8 agc;
1742 u16 sig_avg;
1743 u16 noise_diff;
1744 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1745 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1746 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
1747 u8 *data = IWL_RX_DATA(pkt);
1748
1749 /* MAC header */
1750 fc = le16_to_cpu(header->frame_control);
1751 seq_ctl = le16_to_cpu(header->seq_ctrl);
1752
1753 /* metadata */
1754 channel = le16_to_cpu(rx_hdr->channel);
1755 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1756 rate_sym = rx_hdr->rate;
1757 length = le16_to_cpu(rx_hdr->len);
1758
1759 /* end-of-frame status and timestamp */
1760 status = le32_to_cpu(rx_end->status);
1761 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1762 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1763 tsf = le64_to_cpu(rx_end->timestamp);
1764
1765 /* signal statistics */
1766 rssi = rx_stats->rssi;
1767 agc = rx_stats->agc;
1768 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1769 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1770
1771 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1772
1773 /* if data frame is to us and all is good,
1774 * (optionally) print summary for only 1 out of every 100 */
1775 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1776 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1777 dataframe = 1;
1778 if (!group100)
1779 print_summary = 1; /* print each frame */
1780 else if (priv->framecnt_to_us < 100) {
1781 priv->framecnt_to_us++;
1782 print_summary = 0;
1783 } else {
1784 priv->framecnt_to_us = 0;
1785 print_summary = 1;
1786 hundred = 1;
1787 }
1788 } else {
1789 /* print summary for all other frames */
1790 print_summary = 1;
1791 }
1792
1793 if (print_summary) {
1794 char *title;
1795 u32 rate;
1796
1797 if (hundred)
1798 title = "100Frames";
1799 else if (fc & IEEE80211_FCTL_RETRY)
1800 title = "Retry";
1801 else if (ieee80211_is_assoc_response(fc))
1802 title = "AscRsp";
1803 else if (ieee80211_is_reassoc_response(fc))
1804 title = "RasRsp";
1805 else if (ieee80211_is_probe_response(fc)) {
1806 title = "PrbRsp";
1807 print_dump = 1; /* dump frame contents */
1808 } else if (ieee80211_is_beacon(fc)) {
1809 title = "Beacon";
1810 print_dump = 1; /* dump frame contents */
1811 } else if (ieee80211_is_atim(fc))
1812 title = "ATIM";
1813 else if (ieee80211_is_auth(fc))
1814 title = "Auth";
1815 else if (ieee80211_is_deauth(fc))
1816 title = "DeAuth";
1817 else if (ieee80211_is_disassoc(fc))
1818 title = "DisAssoc";
1819 else
1820 title = "Frame";
1821
bb8c093b 1822 rate = iwl4965_rate_index_from_plcp(rate_sym);
1823 if (rate == -1)
1824 rate = 0;
1825 else
bb8c093b 1826 rate = iwl4965_rates[rate].ieee / 2;
1827
1828 /* print frame summary.
1829 * MAC addresses show just the last byte (for brevity),
1830 * but you can hack it to show more, if you'd like to. */
1831 if (dataframe)
1832 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1833 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1834 title, fc, header->addr1[5],
1835 length, rssi, channel, rate);
1836 else {
1837 /* src/dst addresses assume managed mode */
1838 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1839 "src=0x%02x, rssi=%u, tim=%lu usec, "
1840 "phy=0x%02x, chnl=%d\n",
1841 title, fc, header->addr1[5],
1842 header->addr3[5], rssi,
1843 tsf_low - priv->scan_start_tsf,
1844 phy_flags, channel);
1845 }
1846 }
1847 if (print_dump)
bb8c093b 1848 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
1849}
1850#endif
1851
bb8c093b 1852static void iwl4965_unset_hw_setting(struct iwl4965_priv *priv)
1853{
1854 if (priv->hw_setting.shared_virt)
1855 pci_free_consistent(priv->pci_dev,
bb8c093b 1856 sizeof(struct iwl4965_shared),
1857 priv->hw_setting.shared_virt,
1858 priv->hw_setting.shared_phys);
1859}
1860
1861/**
bb8c093b 1862 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
1863 *
1864 * return : set the bit for each supported rate insert in ie
1865 */
bb8c093b 1866static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
c7c46676 1867 u16 basic_rate, int *left)
1868{
1869 u16 ret_rates = 0, bit;
1870 int i;
1871 u8 *cnt = ie;
1872 u8 *rates = ie + 1;
1873
1874 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1875 if (bit & supported_rate) {
1876 ret_rates |= bit;
bb8c093b 1877 rates[*cnt] = iwl4965_rates[i].ieee |
1878 ((bit & basic_rate) ? 0x80 : 0x00);
1879 (*cnt)++;
1880 (*left)--;
1881 if ((*left <= 0) ||
1882 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
1883 break;
1884 }
1885 }
1886
1887 return ret_rates;
1888}
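/*
 * Illustrative example (added for clarity): the Supported Rates IE octets
 * built above encode each rate in 500 kbps units, with bit 0x80 set when
 * the rate is also in basic_rate.  So a basic 1 Mbps rate becomes 0x82, a
 * non-basic 54 Mbps rate becomes 0x6C, and ret_rates reports which bits
 * of supported_rate were consumed so the caller can emit the remainder in
 * an Extended Supported Rates IE.
 */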
1889
c8b0e6e1 1890#ifdef CONFIG_IWL4965_HT
bb8c093b 1891void static iwl4965_set_ht_capab(struct ieee80211_hw *hw,
1892 struct ieee80211_ht_cap *ht_cap,
1893 u8 use_current_config);
1894#endif
1895
1896/**
bb8c093b 1897 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
b481de9c 1898 */
bb8c093b 1899static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv,
1900 struct ieee80211_mgmt *frame,
1901 int left, int is_direct)
1902{
1903 int len = 0;
1904 u8 *pos = NULL;
bee488db 1905 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
1906#ifdef CONFIG_IWL4965_HT
1907 struct ieee80211_hw_mode *mode;
1908#endif /* CONFIG_IWL4965_HT */
1909
1910 /* Make sure there is enough space for the probe request,
1911 * two mandatory IEs and the data */
1912 left -= 24;
1913 if (left < 0)
1914 return 0;
1915 len += 24;
1916
1917 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
bb8c093b 1918 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c 1919 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
bb8c093b 1920 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN);
1921 frame->seq_ctrl = 0;
1922
1923 /* fill in our indirect SSID IE */
1924 /* ...next IE... */
1925
1926 left -= 2;
1927 if (left < 0)
1928 return 0;
1929 len += 2;
1930 pos = &(frame->u.probe_req.variable[0]);
1931 *pos++ = WLAN_EID_SSID;
1932 *pos++ = 0;
1933
1934 /* fill in our direct SSID IE... */
1935 if (is_direct) {
1936 /* ...next IE... */
1937 left -= 2 + priv->essid_len;
1938 if (left < 0)
1939 return 0;
1940 /* ... fill it in... */
1941 *pos++ = WLAN_EID_SSID;
1942 *pos++ = priv->essid_len;
1943 memcpy(pos, priv->essid, priv->essid_len);
1944 pos += priv->essid_len;
1945 len += 2 + priv->essid_len;
1946 }
1947
1948 /* fill in supported rate */
1949 /* ...next IE... */
1950 left -= 2;
1951 if (left < 0)
1952 return 0;
c7c46676 1953
1954 /* ... fill it in... */
1955 *pos++ = WLAN_EID_SUPP_RATES;
1956 *pos = 0;
c7c46676 1957
bee488db 1958 /* exclude 60M rate */
1959 active_rates = priv->rates_mask;
1960 active_rates &= ~IWL_RATE_60M_MASK;
1961
1962 active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;
b481de9c 1963
c7c46676 1964 cck_rates = IWL_CCK_RATES_MASK & active_rates;
bb8c093b 1965 ret_rates = iwl4965_supported_rate_to_ie(pos, cck_rates,
bee488db 1966 active_rate_basic, &left);
1967 active_rates &= ~ret_rates;
1968
bb8c093b 1969 ret_rates = iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1970 active_rate_basic, &left);
1971 active_rates &= ~ret_rates;
1972
1973 len += 2 + *pos;
1974 pos += (*pos) + 1;
c7c46676 1975 if (active_rates == 0)
1976 goto fill_end;
1977
1978 /* fill in supported extended rate */
1979 /* ...next IE... */
1980 left -= 2;
1981 if (left < 0)
1982 return 0;
1983 /* ... fill it in... */
1984 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1985 *pos = 0;
bb8c093b 1986 iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1987 active_rate_basic, &left);
1988 if (*pos > 0)
1989 len += 2 + *pos;
1990
c8b0e6e1 1991#ifdef CONFIG_IWL4965_HT
1992 mode = priv->hw->conf.mode;
1993 if (mode->ht_info.ht_supported) {
1994 pos += (*pos) + 1;
1995 *pos++ = WLAN_EID_HT_CAPABILITY;
1996 *pos++ = sizeof(struct ieee80211_ht_cap);
1997 iwl4965_set_ht_capab(priv->hw,
1998 (struct ieee80211_ht_cap *)pos, 0);
1999 len += 2 + sizeof(struct ieee80211_ht_cap);
b481de9c 2000 }
c8b0e6e1 2001#endif /*CONFIG_IWL4965_HT */
2002
2003 fill_end:
2004 return (u16)len;
2005}
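/*
 * Illustrative layout (added for clarity): a directed probe request built
 * above ends up as [24-byte mgmt header][SSID IE, len 0 (wildcard)]
 * [SSID IE with priv->essid][Supported Rates IE][Extended Supported Rates
 * IE, if any rates remain][HT Capabilities IE, when HT is compiled in and
 * enabled], with `left` checked before each IE so the caller's buffer is
 * never overrun and the returned len giving the bytes actually written.
 */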
2006
2007/*
2008 * QoS support
2009*/
c8b0e6e1 2010#ifdef CONFIG_IWL4965_QOS
2011static int iwl4965_send_qos_params_command(struct iwl4965_priv *priv,
2012 struct iwl4965_qosparam_cmd *qos)
2013{
2014
2015 return iwl4965_send_cmd_pdu(priv, REPLY_QOS_PARAM,
2016 sizeof(struct iwl4965_qosparam_cmd), qos);
2017}
2018
bb8c093b 2019static void iwl4965_reset_qos(struct iwl4965_priv *priv)
2020{
2021 u16 cw_min = 15;
2022 u16 cw_max = 1023;
2023 u8 aifs = 2;
2024 u8 is_legacy = 0;
2025 unsigned long flags;
2026 int i;
2027
2028 spin_lock_irqsave(&priv->lock, flags);
2029 priv->qos_data.qos_active = 0;
2030
2031 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
2032 if (priv->qos_data.qos_enable)
2033 priv->qos_data.qos_active = 1;
2034 if (!(priv->active_rate & 0xfff0)) {
2035 cw_min = 31;
2036 is_legacy = 1;
2037 }
2038 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2039 if (priv->qos_data.qos_enable)
2040 priv->qos_data.qos_active = 1;
2041 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
2042 cw_min = 31;
2043 is_legacy = 1;
2044 }
2045
2046 if (priv->qos_data.qos_active)
2047 aifs = 3;
2048
2049 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
2050 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
2051 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
2052 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
2053 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
2054
2055 if (priv->qos_data.qos_active) {
2056 i = 1;
2057 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
2058 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
2059 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
2060 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2061 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2062
2063 i = 2;
2064 priv->qos_data.def_qos_parm.ac[i].cw_min =
2065 cpu_to_le16((cw_min + 1) / 2 - 1);
2066 priv->qos_data.def_qos_parm.ac[i].cw_max =
2067 cpu_to_le16(cw_max);
2068 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2069 if (is_legacy)
2070 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2071 cpu_to_le16(6016);
2072 else
2073 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2074 cpu_to_le16(3008);
2075 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2076
2077 i = 3;
2078 priv->qos_data.def_qos_parm.ac[i].cw_min =
2079 cpu_to_le16((cw_min + 1) / 4 - 1);
2080 priv->qos_data.def_qos_parm.ac[i].cw_max =
2081 cpu_to_le16((cw_max + 1) / 2 - 1);
2082 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2083 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2084 if (is_legacy)
2085 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2086 cpu_to_le16(3264);
2087 else
2088 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2089 cpu_to_le16(1504);
2090 } else {
2091 for (i = 1; i < 4; i++) {
2092 priv->qos_data.def_qos_parm.ac[i].cw_min =
2093 cpu_to_le16(cw_min);
2094 priv->qos_data.def_qos_parm.ac[i].cw_max =
2095 cpu_to_le16(cw_max);
2096 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2097 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2098 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2099 }
2100 }
2101 IWL_DEBUG_QOS("set QoS to default \n");
2102
2103 spin_unlock_irqrestore(&priv->lock, flags);
2104}
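/*
 * Illustrative example (added for clarity; the AC labels are an
 * inference from the aifsn/txop pattern, not spelled out in the code):
 * with the defaults above (cw_min = 15, cw_max = 1023) and QoS active,
 * the four access categories come out roughly as the 802.11e EDCA set:
 *   ac[0] (best effort): aifsn 3, cw 15..1023, txop 0
 *   ac[1] (background):  aifsn 7, cw 15..1023, txop 0
 *   ac[2] (video):       aifsn 2, cw 7..1023,  txop 3008 us (6016 us CCK-only)
 *   ac[3] (voice):       aifsn 2, cw 3..511,   txop 1504 us (3264 us CCK-only)
 */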
2105
bb8c093b 2106static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
2107{
2108 unsigned long flags;
2109
2110 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2111 return;
2112
2113 if (!priv->qos_data.qos_enable)
2114 return;
2115
2116 spin_lock_irqsave(&priv->lock, flags);
2117 priv->qos_data.def_qos_parm.qos_flags = 0;
2118
2119 if (priv->qos_data.qos_cap.q_AP.queue_request &&
2120 !priv->qos_data.qos_cap.q_AP.txop_request)
2121 priv->qos_data.def_qos_parm.qos_flags |=
2122 QOS_PARAM_FLG_TXOP_TYPE_MSK;
2123 if (priv->qos_data.qos_active)
2124 priv->qos_data.def_qos_parm.qos_flags |=
2125 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2126
c8b0e6e1 2127#ifdef CONFIG_IWL4965_HT
fd105e79 2128 if (priv->current_ht_config.is_ht)
f1f1f5c7 2129 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
c8b0e6e1 2130#endif /* CONFIG_IWL4965_HT */
f1f1f5c7 2131
2132 spin_unlock_irqrestore(&priv->lock, flags);
2133
bb8c093b 2134 if (force || iwl4965_is_associated(priv)) {
2135 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2136 priv->qos_data.qos_active,
2137 priv->qos_data.def_qos_parm.qos_flags);
b481de9c 2138
bb8c093b 2139 iwl4965_send_qos_params_command(priv,
2140 &(priv->qos_data.def_qos_parm));
2141 }
2142}
2143
c8b0e6e1 2144#endif /* CONFIG_IWL4965_QOS */
2145/*
2146 * Power management (not Tx power!) functions
2147 */
2148#define MSEC_TO_USEC 1024
2149
2150#define NOSLP __constant_cpu_to_le16(0), 0, 0
2151#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2152#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2153#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2154 __constant_cpu_to_le32(X1), \
2155 __constant_cpu_to_le32(X2), \
2156 __constant_cpu_to_le32(X3), \
2157 __constant_cpu_to_le32(X4)}
2158
2159
2160/* default power management (not Tx power) table values */
2161/* for tim 0-10 */
bb8c093b 2162static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
2163 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2164 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2165 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2166 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2167 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2168 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2169};
2170
2171/* for tim > 10 */
bb8c093b 2172static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
2173 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2174 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2175 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2176 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2177 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2178 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2179 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2180 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2181 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2182 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2183};
2184
bb8c093b 2185int iwl4965_power_init_handle(struct iwl4965_priv *priv)
2186{
2187 int rc = 0, i;
2188 struct iwl4965_power_mgr *pow_data;
2189 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
2190 u16 pci_pm;
2191
2192 IWL_DEBUG_POWER("Initialize power \n");
2193
2194 pow_data = &(priv->power_data);
2195
2196 memset(pow_data, 0, sizeof(*pow_data));
2197
2198 pow_data->active_index = IWL_POWER_RANGE_0;
2199 pow_data->dtim_val = 0xffff;
2200
2201 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2202 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2203
2204 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2205 if (rc != 0)
2206 return 0;
2207 else {
bb8c093b 2208 struct iwl4965_powertable_cmd *cmd;
2209
2210 IWL_DEBUG_POWER("adjust power command flags\n");
2211
2212 for (i = 0; i < IWL_POWER_AC; i++) {
2213 cmd = &pow_data->pwr_range_0[i].cmd;
2214
2215 if (pci_pm & 0x1)
2216 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2217 else
2218 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2219 }
2220 }
2221 return rc;
2222}
2223
2224static int iwl4965_update_power_cmd(struct iwl4965_priv *priv,
2225 struct iwl4965_powertable_cmd *cmd, u32 mode)
2226{
2227 int rc = 0, i;
2228 u8 skip;
2229 u32 max_sleep = 0;
bb8c093b 2230 struct iwl4965_power_vec_entry *range;
b481de9c 2231 u8 period = 0;
bb8c093b 2232 struct iwl4965_power_mgr *pow_data;
2233
2234 if (mode > IWL_POWER_INDEX_5) {
2235 IWL_DEBUG_POWER("Error invalid power mode \n");
2236 return -1;
2237 }
2238 pow_data = &(priv->power_data);
2239
2240 if (pow_data->active_index == IWL_POWER_RANGE_0)
2241 range = &pow_data->pwr_range_0[0];
2242 else
2243 range = &pow_data->pwr_range_1[1];
2244
bb8c093b 2245 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
2246
2247#ifdef IWL_MAC80211_DISABLE
2248 if (priv->assoc_network != NULL) {
2249 unsigned long flags;
2250
2251 period = priv->assoc_network->tim.tim_period;
2252 }
2253#endif /*IWL_MAC80211_DISABLE */
2254 skip = range[mode].no_dtim;
2255
2256 if (period == 0) {
2257 period = 1;
2258 skip = 0;
2259 }
2260
2261 if (skip == 0) {
2262 max_sleep = period;
2263 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2264 } else {
2265 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2266 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2267 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2268 }
2269
2270 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2271 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2272 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2273 }
2274
2275 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2276 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2277 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2278 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2279 le32_to_cpu(cmd->sleep_interval[0]),
2280 le32_to_cpu(cmd->sleep_interval[1]),
2281 le32_to_cpu(cmd->sleep_interval[2]),
2282 le32_to_cpu(cmd->sleep_interval[3]),
2283 le32_to_cpu(cmd->sleep_interval[4]));
2284
2285 return rc;
2286}
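/*
 * Illustrative example (added for clarity, numbers hypothetical): when
 * sleeping over DTIM is allowed above, max_sleep is rounded down to a
 * whole number of DTIM periods.  With a DTIM period of 3 beacons and a
 * largest table interval of 10, max_sleep becomes (10 / 3) * 3 = 9, and
 * every entry of cmd->sleep_interval[] larger than 9 is clamped to 9 so
 * the uCode never sleeps across a DTIM beacon it must receive.
 */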
2287
bb8c093b 2288static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode)
b481de9c 2289{
9a62f73b 2290 u32 uninitialized_var(final_mode);
b481de9c 2291 int rc;
bb8c093b 2292 struct iwl4965_powertable_cmd cmd;
2293
2294 /* If on battery, set to 3,
01ebd063 2295 * if plugged into AC power, set to CAM ("continuously aware mode"),
2296 * else user level */
2297 switch (mode) {
2298 case IWL_POWER_BATTERY:
2299 final_mode = IWL_POWER_INDEX_3;
2300 break;
2301 case IWL_POWER_AC:
2302 final_mode = IWL_POWER_MODE_CAM;
2303 break;
2304 default:
2305 final_mode = mode;
2306 break;
2307 }
2308
2309 cmd.keep_alive_beacons = 0;
2310
bb8c093b 2311 iwl4965_update_power_cmd(priv, &cmd, final_mode);
b481de9c 2312
bb8c093b 2313 rc = iwl4965_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
2314
2315 if (final_mode == IWL_POWER_MODE_CAM)
2316 clear_bit(STATUS_POWER_PMI, &priv->status);
2317 else
2318 set_bit(STATUS_POWER_PMI, &priv->status);
2319
2320 return rc;
2321}
2322
bb8c093b 2323int iwl4965_is_network_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
2324{
2325 /* Filter incoming packets to determine if they are targeted toward
2326 * this network, discarding packets coming from ourselves */
2327 switch (priv->iw_mode) {
2328 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2329 /* packets from our adapter are dropped (echo) */
2330 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2331 return 0;
2332 /* {broad,multi}cast packets to our IBSS go through */
2333 if (is_multicast_ether_addr(header->addr1))
2334 return !compare_ether_addr(header->addr3, priv->bssid);
2335 /* packets to our adapter go through */
2336 return !compare_ether_addr(header->addr1, priv->mac_addr);
2337 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2338 /* packets from our adapter are dropped (echo) */
2339 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2340 return 0;
2341 /* {broad,multi}cast packets to our BSS go through */
2342 if (is_multicast_ether_addr(header->addr1))
2343 return !compare_ether_addr(header->addr2, priv->bssid);
2344 /* packets to our adapter go through */
2345 return !compare_ether_addr(header->addr1, priv->mac_addr);
2346 }
2347
2348 return 1;
2349}
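/*
 * Illustrative example (added for clarity): in STA mode the 802.11 header
 * carries addr1 = destination, addr2 = BSSID and addr3 = source, so a
 * frame from the associated AP passes the checks above, our own
 * transmitted frames echoed back (addr3 == priv->mac_addr) are dropped,
 * and group-addressed frames are accepted only when addr2 matches
 * priv->bssid.
 */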
2350
2351#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2352
bb8c093b 2353static const char *iwl4965_get_tx_fail_reason(u32 status)
2354{
2355 switch (status & TX_STATUS_MSK) {
2356 case TX_STATUS_SUCCESS:
2357 return "SUCCESS";
2358 TX_STATUS_ENTRY(SHORT_LIMIT);
2359 TX_STATUS_ENTRY(LONG_LIMIT);
2360 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2361 TX_STATUS_ENTRY(MGMNT_ABORT);
2362 TX_STATUS_ENTRY(NEXT_FRAG);
2363 TX_STATUS_ENTRY(LIFE_EXPIRE);
2364 TX_STATUS_ENTRY(DEST_PS);
2365 TX_STATUS_ENTRY(ABORTED);
2366 TX_STATUS_ENTRY(BT_RETRY);
2367 TX_STATUS_ENTRY(STA_INVALID);
2368 TX_STATUS_ENTRY(FRAG_DROPPED);
2369 TX_STATUS_ENTRY(TID_DISABLE);
2370 TX_STATUS_ENTRY(FRAME_FLUSHED);
2371 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2372 TX_STATUS_ENTRY(TX_LOCKED);
2373 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2374 }
2375
2376 return "UNKNOWN";
2377}
2378
2379/**
bb8c093b 2380 * iwl4965_scan_cancel - Cancel any currently executing HW scan
2381 *
2382 * NOTE: priv->mutex is not required before calling this function
2383 */
bb8c093b 2384static int iwl4965_scan_cancel(struct iwl4965_priv *priv)
2385{
2386 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2387 clear_bit(STATUS_SCANNING, &priv->status);
2388 return 0;
2389 }
2390
2391 if (test_bit(STATUS_SCANNING, &priv->status)) {
2392 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2393 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2394 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2395 queue_work(priv->workqueue, &priv->abort_scan);
2396
2397 } else
2398 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2399
2400 return test_bit(STATUS_SCANNING, &priv->status);
2401 }
2402
2403 return 0;
2404}
2405
2406/**
bb8c093b 2407 * iwl4965_scan_cancel_timeout - Cancel any currently executing HW scan
2408 * @ms: amount of time to wait (in milliseconds) for scan to abort
2409 *
2410 * NOTE: priv->mutex must be held before calling this function
2411 */
bb8c093b 2412static int iwl4965_scan_cancel_timeout(struct iwl4965_priv *priv, unsigned long ms)
2413{
2414 unsigned long now = jiffies;
2415 int ret;
2416
bb8c093b 2417 ret = iwl4965_scan_cancel(priv);
2418 if (ret && ms) {
2419 mutex_unlock(&priv->mutex);
2420 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2421 test_bit(STATUS_SCANNING, &priv->status))
2422 msleep(1);
2423 mutex_lock(&priv->mutex);
2424
2425 return test_bit(STATUS_SCANNING, &priv->status);
2426 }
2427
2428 return ret;
2429}
2430
bb8c093b 2431static void iwl4965_sequence_reset(struct iwl4965_priv *priv)
2432{
2433 /* Reset ieee stats */
2434
2435 /* We don't reset the net_device_stats (ieee->stats) on
2436 * re-association */
2437
2438 priv->last_seq_num = -1;
2439 priv->last_frag_num = -1;
2440 priv->last_packet_time = 0;
2441
bb8c093b 2442 iwl4965_scan_cancel(priv);
2443}
2444
2445#define MAX_UCODE_BEACON_INTERVAL 4096
2446#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2447
bb8c093b 2448static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
2449{
2450 u16 new_val = 0;
2451 u16 beacon_factor = 0;
2452
2453 beacon_factor =
2454 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2455 / MAX_UCODE_BEACON_INTERVAL;
2456 new_val = beacon_val / beacon_factor;
2457
2458 return cpu_to_le16(new_val);
2459}
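/*
 * Illustrative example (added for clarity): the uCode cannot be handed a
 * beacon interval larger than MAX_UCODE_BEACON_INTERVAL (4096 TU), so the
 * helper above divides it down.  A 5000 TU interval gives beacon_factor =
 * (5000 + 4096) / 4096 = 2 and a programmed value of 2500 TU, while an
 * ordinary 100 TU interval is passed through unchanged.
 */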
2460
bb8c093b 2461static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv)
2462{
2463 u64 interval_tm_unit;
2464 u64 tsf, result;
2465 unsigned long flags;
2466 struct ieee80211_conf *conf = NULL;
2467 u16 beacon_int = 0;
2468
2469 conf = ieee80211_get_hw_conf(priv->hw);
2470
2471 spin_lock_irqsave(&priv->lock, flags);
2472 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2473 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2474
2475 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2476
2477 tsf = priv->timestamp1;
2478 tsf = ((tsf << 32) | priv->timestamp0);
2479
2480 beacon_int = priv->beacon_int;
2481 spin_unlock_irqrestore(&priv->lock, flags);
2482
2483 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2484 if (beacon_int == 0) {
2485 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2486 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2487 } else {
2488 priv->rxon_timing.beacon_interval =
2489 cpu_to_le16(beacon_int);
2490 priv->rxon_timing.beacon_interval =
bb8c093b 2491 iwl4965_adjust_beacon_interval(
2492 le16_to_cpu(priv->rxon_timing.beacon_interval));
2493 }
2494
2495 priv->rxon_timing.atim_window = 0;
2496 } else {
2497 priv->rxon_timing.beacon_interval =
bb8c093b 2498 iwl4965_adjust_beacon_interval(conf->beacon_int);
2499 /* TODO: we need to get atim_window from upper stack
2500 * for now we set to 0 */
2501 priv->rxon_timing.atim_window = 0;
2502 }
2503
2504 interval_tm_unit =
2505 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2506 result = do_div(tsf, interval_tm_unit);
2507 priv->rxon_timing.beacon_init_val =
2508 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2509
2510 IWL_DEBUG_ASSOC
2511 ("beacon interval %d beacon timer %d beacon tim %d\n",
2512 le16_to_cpu(priv->rxon_timing.beacon_interval),
2513 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2514 le16_to_cpu(priv->rxon_timing.atim_window));
2515}
2516
bb8c093b 2517static int iwl4965_scan_initiate(struct iwl4965_priv *priv)
2518{
2519 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2520 IWL_ERROR("APs don't scan.\n");
2521 return 0;
2522 }
2523
bb8c093b 2524 if (!iwl4965_is_ready_rf(priv)) {
2525 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2526 return -EIO;
2527 }
2528
2529 if (test_bit(STATUS_SCANNING, &priv->status)) {
2530 IWL_DEBUG_SCAN("Scan already in progress.\n");
2531 return -EAGAIN;
2532 }
2533
2534 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2535 IWL_DEBUG_SCAN("Scan request while abort pending. "
2536 "Queuing.\n");
2537 return -EAGAIN;
2538 }
2539
2540 IWL_DEBUG_INFO("Starting scan...\n");
2541 priv->scan_bands = 2;
2542 set_bit(STATUS_SCANNING, &priv->status);
2543 priv->scan_start = jiffies;
2544 priv->scan_pass_start = priv->scan_start;
2545
2546 queue_work(priv->workqueue, &priv->request_scan);
2547
2548 return 0;
2549}
2550
bb8c093b 2551static int iwl4965_set_rxon_hwcrypto(struct iwl4965_priv *priv, int hw_decrypt)
b481de9c 2552{
bb8c093b 2553 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
2554
2555 if (hw_decrypt)
2556 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2557 else
2558 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2559
2560 return 0;
2561}
2562
bb8c093b 2563static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode)
2564{
2565 if (phymode == MODE_IEEE80211A) {
2566 priv->staging_rxon.flags &=
2567 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2568 | RXON_FLG_CCK_MSK);
2569 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2570 } else {
bb8c093b 2571 /* Copied from iwl4965_bg_post_associate() */
2572 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2573 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2574 else
2575 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2576
2577 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2578 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2579
2580 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2581 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2582 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2583 }
2584}
2585
2586/*
01ebd063 2587 * initialize rxon structure with default values from eeprom
b481de9c 2588 */
bb8c093b 2589static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
b481de9c 2590{
bb8c093b 2591 const struct iwl4965_channel_info *ch_info;
2592
2593 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2594
2595 switch (priv->iw_mode) {
2596 case IEEE80211_IF_TYPE_AP:
2597 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2598 break;
2599
2600 case IEEE80211_IF_TYPE_STA:
2601 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2602 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2603 break;
2604
2605 case IEEE80211_IF_TYPE_IBSS:
2606 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2607 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2608 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2609 RXON_FILTER_ACCEPT_GRP_MSK;
2610 break;
2611
2612 case IEEE80211_IF_TYPE_MNTR:
2613 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2614 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2615 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2616 break;
2617 }
2618
2619#if 0
2620 /* TODO: Figure out when short_preamble would be set and cache from
2621 * that */
2622 if (!hw_to_local(priv->hw)->short_preamble)
2623 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2624 else
2625 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2626#endif
2627
bb8c093b 2628 ch_info = iwl4965_get_channel_info(priv, priv->phymode,
2629 le16_to_cpu(priv->staging_rxon.channel));
2630
2631 if (!ch_info)
2632 ch_info = &priv->channel_info[0];
2633
2634 /*
2635 * in some case A channels are all non IBSS
2636 * in this case force B/G channel
2637 */
2638 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2639 !(is_channel_ibss(ch_info)))
2640 ch_info = &priv->channel_info[0];
2641
2642 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2643 if (is_channel_a_band(ch_info))
2644 priv->phymode = MODE_IEEE80211A;
2645 else
2646 priv->phymode = MODE_IEEE80211G;
2647
bb8c093b 2648 iwl4965_set_flags_for_phymode(priv, priv->phymode);
2649
2650 priv->staging_rxon.ofdm_basic_rates =
2651 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2652 priv->staging_rxon.cck_basic_rates =
2653 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2654
2655 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2656 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2657 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2658 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2659 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2660 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2661 iwl4965_set_rxon_chain(priv);
2662}
2663
bb8c093b 2664static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode)
b481de9c 2665{
b481de9c 2666 if (mode == IEEE80211_IF_TYPE_IBSS) {
bb8c093b 2667 const struct iwl4965_channel_info *ch_info;
b481de9c 2668
bb8c093b 2669 ch_info = iwl4965_get_channel_info(priv,
2670 priv->phymode,
2671 le16_to_cpu(priv->staging_rxon.channel));
2672
2673 if (!ch_info || !is_channel_ibss(ch_info)) {
2674 IWL_ERROR("channel %d not IBSS channel\n",
2675 le16_to_cpu(priv->staging_rxon.channel));
2676 return -EINVAL;
2677 }
2678 }
2679
2680 priv->iw_mode = mode;
2681
bb8c093b 2682 iwl4965_connection_init_rx_config(priv);
2683 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2684
bb8c093b 2685 iwl4965_clear_stations_table(priv);
b481de9c 2686
 2687 	/* don't commit RXON if RF-kill is on */
2688 if (!iwl4965_is_ready_rf(priv))
2689 return -EAGAIN;
2690
2691 cancel_delayed_work(&priv->scan_check);
2692 if (iwl4965_scan_cancel_timeout(priv, 100)) {
2693 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2694 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2695 return -EAGAIN;
2696 }
2697
bb8c093b 2698 iwl4965_commit_rxon(priv);
2699
2700 return 0;
2701}
2702
bb8c093b 2703static void iwl4965_build_tx_cmd_hwcrypto(struct iwl4965_priv *priv,
b481de9c 2704 struct ieee80211_tx_control *ctl,
bb8c093b 2705 struct iwl4965_cmd *cmd,
2706 struct sk_buff *skb_frag,
2707 int last_frag)
2708{
bb8c093b 2709 struct iwl4965_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
2710
2711 switch (keyinfo->alg) {
2712 case ALG_CCMP:
2713 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2714 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2715 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2716 break;
2717
2718 case ALG_TKIP:
2719#if 0
2720 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2721
2722 if (last_frag)
2723 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2724 8);
2725 else
2726 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2727#endif
2728 break;
2729
2730 case ALG_WEP:
2731 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2732 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2733
2734 if (keyinfo->keylen == 13)
2735 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2736
2737 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2738
2739 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2740 "with key %d\n", ctl->key_idx);
2741 break;
2742
b481de9c
ZY
2743 default:
2744 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2745 break;
2746 }
2747}
2748
2749/*
2750 * handle build REPLY_TX command notification.
2751 */
2752static void iwl4965_build_tx_cmd_basic(struct iwl4965_priv *priv,
2753 struct iwl4965_cmd *cmd,
2754 struct ieee80211_tx_control *ctrl,
2755 struct ieee80211_hdr *hdr,
2756 int is_unicast, u8 std_id)
2757{
2758 __le16 *qc;
2759 u16 fc = le16_to_cpu(hdr->frame_control);
2760 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2761
2762 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2763 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2764 tx_flags |= TX_CMD_FLG_ACK_MSK;
2765 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2766 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2767 if (ieee80211_is_probe_response(fc) &&
2768 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2769 tx_flags |= TX_CMD_FLG_TSF_MSK;
2770 } else {
2771 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2772 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2773 }
2774
2775 cmd->cmd.tx.sta_id = std_id;
2776 if (ieee80211_get_morefrag(hdr))
2777 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2778
2779 qc = ieee80211_get_qos_ctrl(hdr);
2780 if (qc) {
2781 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2782 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2783 } else
2784 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2785
2786 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2787 tx_flags |= TX_CMD_FLG_RTS_MSK;
2788 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2789 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2790 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2791 tx_flags |= TX_CMD_FLG_CTS_MSK;
2792 }
2793
2794 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2795 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2796
2797 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2798 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2799 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2800 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
bc434dd2 2801 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 2802 else
bc434dd2 2803 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
2804 } else
2805 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2806
2807 cmd->cmd.tx.driver_txop = 0;
2808 cmd->cmd.tx.tx_flags = tx_flags;
2809 cmd->cmd.tx.next_frame_len = 0;
2810}
2811
2812/**
2813 * iwl4965_get_sta_id - Find station's index within station table
2814 *
2815 * If new IBSS station, create new entry in station table
2816 */
2817static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2818 struct ieee80211_hdr *hdr)
2819{
2820 int sta_id;
2821 u16 fc = le16_to_cpu(hdr->frame_control);
0795af57 2822 DECLARE_MAC_BUF(mac);
b481de9c 2823
6440adb5 2824 /* If this frame is broadcast or management, use broadcast station id */
2825 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2826 is_multicast_ether_addr(hdr->addr1))
2827 return priv->hw_setting.bcast_sta_id;
2828
2829 switch (priv->iw_mode) {
2830
2831 /* If we are a client station in a BSS network, use the special
2832 * AP station entry (that's the only station we communicate with) */
2833 case IEEE80211_IF_TYPE_STA:
2834 return IWL_AP_ID;
2835
2836 /* If we are an AP, then find the station, or use BCAST */
2837 case IEEE80211_IF_TYPE_AP:
bb8c093b 2838 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2839 if (sta_id != IWL_INVALID_STATION)
2840 return sta_id;
2841 return priv->hw_setting.bcast_sta_id;
2842
2843 /* If this frame is going out to an IBSS network, find the station,
2844 * or create a new station table entry */
b481de9c 2845 case IEEE80211_IF_TYPE_IBSS:
bb8c093b 2846 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2847 if (sta_id != IWL_INVALID_STATION)
2848 return sta_id;
2849
6440adb5 2850 /* Create new station table entry */
2851 sta_id = iwl4965_add_station_flags(priv, hdr->addr1,
2852 0, CMD_ASYNC, NULL);
2853
2854 if (sta_id != IWL_INVALID_STATION)
2855 return sta_id;
2856
0795af57 2857 IWL_DEBUG_DROP("Station %s not in station map. "
b481de9c 2858 "Defaulting to broadcast...\n",
0795af57 2859 print_mac(mac, hdr->addr1));
bb8c093b 2860 iwl4965_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2861 return priv->hw_setting.bcast_sta_id;
2862
2863 default:
01ebd063 2864 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
2865 return priv->hw_setting.bcast_sta_id;
2866 }
2867}
2868
2869/*
2870 * start REPLY_TX command process
2871 */
bb8c093b 2872static int iwl4965_tx_skb(struct iwl4965_priv *priv,
2873 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2874{
2875 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bb8c093b 2876 struct iwl4965_tfd_frame *tfd;
2877 u32 *control_flags;
2878 int txq_id = ctl->queue;
2879 struct iwl4965_tx_queue *txq = NULL;
2880 struct iwl4965_queue *q = NULL;
2881 dma_addr_t phys_addr;
2882 dma_addr_t txcmd_phys;
bb8c093b 2883 struct iwl4965_cmd *out_cmd = NULL;
2884 u16 len, idx, len_org;
2885 u8 id, hdr_len, unicast;
2886 u8 sta_id;
2887 u16 seq_number = 0;
2888 u16 fc;
2889 __le16 *qc;
2890 u8 wait_write_ptr = 0;
2891 unsigned long flags;
2892 int rc;
2893
2894 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 2895 if (iwl4965_is_rfkill(priv)) {
2896 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2897 goto drop_unlock;
2898 }
2899
2900 if (!priv->vif) {
2901 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
2902 goto drop_unlock;
2903 }
2904
2905 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2906 IWL_ERROR("ERROR: No TX rate available.\n");
2907 goto drop_unlock;
2908 }
2909
2910 unicast = !is_multicast_ether_addr(hdr->addr1);
2911 id = 0;
2912
2913 fc = le16_to_cpu(hdr->frame_control);
2914
c8b0e6e1 2915#ifdef CONFIG_IWL4965_DEBUG
2916 if (ieee80211_is_auth(fc))
2917 IWL_DEBUG_TX("Sending AUTH frame\n");
2918 else if (ieee80211_is_assoc_request(fc))
2919 IWL_DEBUG_TX("Sending ASSOC frame\n");
2920 else if (ieee80211_is_reassoc_request(fc))
2921 IWL_DEBUG_TX("Sending REASSOC frame\n");
2922#endif
2923
2924 /* drop all data frame if we are not associated */
2925 if (!iwl4965_is_associated(priv) && !priv->assoc_id &&
b481de9c 2926 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
bb8c093b 2927 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n");
2928 goto drop_unlock;
2929 }
2930
2931 spin_unlock_irqrestore(&priv->lock, flags);
2932
2933 hdr_len = ieee80211_get_hdrlen(fc);
2934
2935 /* Find (or create) index into station table for destination station */
bb8c093b 2936 sta_id = iwl4965_get_sta_id(priv, hdr);
b481de9c 2937 if (sta_id == IWL_INVALID_STATION) {
2938 DECLARE_MAC_BUF(mac);
2939
2940 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2941 print_mac(mac, hdr->addr1));
2942 goto drop;
2943 }
2944
2945 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2946
2947 qc = ieee80211_get_qos_ctrl(hdr);
2948 if (qc) {
2949 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2950 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2951 IEEE80211_SCTL_SEQ;
2952 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2953 (hdr->seq_ctrl &
2954 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2955 seq_number += 0x10;
2956#ifdef CONFIG_IWL4965_HT
2957#ifdef CONFIG_IWL4965_HT_AGG
2958 /* aggregation is on for this <sta,tid> */
2959 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2960 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
2961#endif /* CONFIG_IWL4965_HT_AGG */
2962#endif /* CONFIG_IWL4965_HT */
b481de9c 2963 }
2964
2965 /* Descriptor for chosen Tx queue */
2966 txq = &priv->txq[txq_id];
2967 q = &txq->q;
2968
2969 spin_lock_irqsave(&priv->lock, flags);
2970
6440adb5 2971 /* Set up first empty TFD within this queue's circular TFD buffer */
fc4b6853 2972 tfd = &txq->bd[q->write_ptr];
2973 memset(tfd, 0, sizeof(*tfd));
2974 control_flags = (u32 *) tfd;
fc4b6853 2975 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 2976
6440adb5 2977 /* Set up driver data for this TFD */
bb8c093b 2978 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
2979 txq->txb[q->write_ptr].skb[0] = skb;
2980 memcpy(&(txq->txb[q->write_ptr].status.control),
b481de9c 2981 ctl, sizeof(struct ieee80211_tx_control));
2982
2983 /* Set up first empty entry in queue's array of Tx/cmd buffers */
2984 out_cmd = &txq->cmd[idx];
2985 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2986 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2987
2988 /*
2989 * Set up the Tx-command (not MAC!) header.
2990 * Store the chosen Tx queue and TFD index within the sequence field;
2991 * after Tx, uCode's Tx response will return this value so driver can
2992 * locate the frame within the tx queue and do post-tx processing.
2993 */
2994 out_cmd->hdr.cmd = REPLY_TX;
2995 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 2996 INDEX_TO_SEQ(q->write_ptr)));
2997
2998 /* Copy MAC header from skb into command buffer */
2999 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
3000
3001 /*
3002 * Use the first empty entry in this queue's command buffer array
3003 * to contain the Tx command and MAC header concatenated together
3004 * (payload data will be in another buffer).
3005 * Size of this varies, due to varying MAC header length.
3006 * If end is not dword aligned, we'll have 2 extra bytes at the end
3007 * of the MAC header (device reads on dword boundaries).
3008 * We'll tell device about this padding later.
3009 */
b481de9c 3010 len = priv->hw_setting.tx_cmd_len +
bb8c093b 3011 sizeof(struct iwl4965_cmd_header) + hdr_len;
3012
3013 len_org = len;
3014 len = (len + 3) & ~3;
3015
3016 if (len_org != len)
3017 len_org = 1;
3018 else
3019 len_org = 0;
3020
3021 /* Physical address of this Tx command's header (not MAC header!),
3022 * within command buffer array. */
3023 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl4965_cmd) * idx +
3024 offsetof(struct iwl4965_cmd, hdr);
b481de9c 3025
3026 /* Add buffer containing Tx command and MAC(!) header to TFD's
3027 * first entry */
bb8c093b 3028 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
3029
3030 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
bb8c093b 3031 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
b481de9c 3032
3033 /* Set up TFD's 2nd entry to point directly to remainder of skb,
3034 * if any (802.11 null frames have no payload). */
3035 len = skb->len - hdr_len;
3036 if (len) {
3037 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
3038 len, PCI_DMA_TODEVICE);
bb8c093b 3039 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
3040 }
3041
6440adb5 3042 /* Tell 4965 about any 2-byte padding after MAC header */
3043 if (len_org)
3044 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
3045
6440adb5 3046 /* Total # bytes to be transmitted */
3047 len = (u16)skb->len;
3048 out_cmd->cmd.tx.len = cpu_to_le16(len);
3049
3050 /* TODO need this for burst mode later on */
bb8c093b 3051 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
3052
3053 /* set is_hcca to 0; it probably will never be implemented */
bb8c093b 3054 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
3055
3056 iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys,
3057 hdr, hdr_len, ctl, NULL);
3058
3059 if (!ieee80211_get_morefrag(hdr)) {
3060 txq->need_update = 1;
3061 if (qc) {
3062 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
3063 priv->stations[sta_id].tid[tid].seq_number = seq_number;
3064 }
3065 } else {
3066 wait_write_ptr = 1;
3067 txq->need_update = 0;
3068 }
3069
bb8c093b 3070 iwl4965_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
3071 sizeof(out_cmd->cmd.tx));
3072
bb8c093b 3073 iwl4965_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
3074 ieee80211_get_hdrlen(fc));
3075
6440adb5 3076 /* Set up entry for this TFD in Tx byte-count array */
3077 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
3078
6440adb5 3079 /* Tell device the write index *just past* this latest filled TFD */
3080 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
3081 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
3082 spin_unlock_irqrestore(&priv->lock, flags);
3083
3084 if (rc)
3085 return rc;
3086
bb8c093b 3087 if ((iwl4965_queue_space(q) < q->high_mark)
3088 && priv->mac80211_registered) {
3089 if (wait_write_ptr) {
3090 spin_lock_irqsave(&priv->lock, flags);
3091 txq->need_update = 1;
bb8c093b 3092 iwl4965_tx_queue_update_write_ptr(priv, txq);
3093 spin_unlock_irqrestore(&priv->lock, flags);
3094 }
3095
3096 ieee80211_stop_queue(priv->hw, ctl->queue);
3097 }
3098
3099 return 0;
3100
3101drop_unlock:
3102 spin_unlock_irqrestore(&priv->lock, flags);
3103drop:
3104 return -1;
3105}
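/*
 * Illustrative note (added for clarity, sizes hypothetical): the
 * dword-alignment fixup above means the Tx command plus a MAC header
 * (e.g. a 26-byte QoS data header) may end short of a 4-byte boundary;
 * the buffer length is then rounded up, len_org flags that padding was
 * added, and TX_CMD_FLG_MH_PAD_MSK later tells the 4965 to ignore the
 * pad bytes between the MAC header and the payload buffer.
 */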
3106
bb8c093b 3107static void iwl4965_set_rate(struct iwl4965_priv *priv)
3108{
3109 const struct ieee80211_hw_mode *hw = NULL;
3110 struct ieee80211_rate *rate;
3111 int i;
3112
bb8c093b 3113 hw = iwl4965_get_hw_mode(priv, priv->phymode);
3114 if (!hw) {
3115 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
3116 return;
3117 }
3118
3119 priv->active_rate = 0;
3120 priv->active_rate_basic = 0;
3121
3122 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3123 hw->mode == MODE_IEEE80211A ?
3124 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3125
3126 for (i = 0; i < hw->num_rates; i++) {
3127 rate = &(hw->rates[i]);
3128 if ((rate->val < IWL_RATE_COUNT) &&
3129 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3130 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
bb8c093b 3131 rate->val, iwl4965_rates[rate->val].plcp,
3132 (rate->flags & IEEE80211_RATE_BASIC) ?
3133 "*" : "");
3134 priv->active_rate |= (1 << rate->val);
3135 if (rate->flags & IEEE80211_RATE_BASIC)
3136 priv->active_rate_basic |= (1 << rate->val);
3137 } else
3138 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
bb8c093b 3139 rate->val, iwl4965_rates[rate->val].plcp);
3140 }
3141
3142 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3143 priv->active_rate, priv->active_rate_basic);
3144
3145 /*
3146 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
3147 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
3148 * OFDM
3149 */
3150 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3151 priv->staging_rxon.cck_basic_rates =
3152 ((priv->active_rate_basic &
3153 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3154 else
3155 priv->staging_rxon.cck_basic_rates =
3156 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3157
3158 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3159 priv->staging_rxon.ofdm_basic_rates =
3160 ((priv->active_rate_basic &
3161 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3162 IWL_FIRST_OFDM_RATE) & 0xFF;
3163 else
3164 priv->staging_rxon.ofdm_basic_rates =
3165 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3166}
3167
bb8c093b 3168static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
3169{
3170 unsigned long flags;
3171
3172 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3173 return;
3174
3175 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3176 disable_radio ? "OFF" : "ON");
3177
3178 if (disable_radio) {
bb8c093b 3179 iwl4965_scan_cancel(priv);
3180 /* FIXME: This is a workaround for AP */
3181 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3182 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3183 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
3184 CSR_UCODE_SW_BIT_RFKILL);
3185 spin_unlock_irqrestore(&priv->lock, flags);
bb8c093b 3186 iwl4965_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
3187 set_bit(STATUS_RF_KILL_SW, &priv->status);
3188 }
3189 return;
3190 }
3191
3192 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3193 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3194
3195 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3196 spin_unlock_irqrestore(&priv->lock, flags);
3197
3198 /* wake up ucode */
3199 msleep(10);
3200
3201 spin_lock_irqsave(&priv->lock, flags);
3202 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
3203 if (!iwl4965_grab_nic_access(priv))
3204 iwl4965_release_nic_access(priv);
3205 spin_unlock_irqrestore(&priv->lock, flags);
3206
3207 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3208 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3209 "disabled by HW switch\n");
3210 return;
3211 }
3212
3213 queue_work(priv->workqueue, &priv->restart);
3214 return;
3215}
3216
bb8c093b 3217void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
3218 u32 decrypt_res, struct ieee80211_rx_status *stats)
3219{
3220 u16 fc =
3221 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3222
3223 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3224 return;
3225
3226 if (!(fc & IEEE80211_FCTL_PROTECTED))
3227 return;
3228
3229 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3230 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3231 case RX_RES_STATUS_SEC_TYPE_TKIP:
3232 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3233 RX_RES_STATUS_BAD_ICV_MIC)
3234 stats->flag |= RX_FLAG_MMIC_ERROR;
3235 case RX_RES_STATUS_SEC_TYPE_WEP:
3236 case RX_RES_STATUS_SEC_TYPE_CCMP:
3237 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3238 RX_RES_STATUS_DECRYPT_OK) {
3239 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
3240 stats->flag |= RX_FLAG_DECRYPTED;
3241 }
3242 break;
3243
3244 default:
3245 break;
3246 }
3247}
3248
3249
3250#define IWL_PACKET_RETRY_TIME HZ
3251
bb8c093b 3252int iwl4965_is_duplicate_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
3253{
3254 u16 sc = le16_to_cpu(header->seq_ctrl);
3255 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3256 u16 frag = sc & IEEE80211_SCTL_FRAG;
3257 u16 *last_seq, *last_frag;
3258 unsigned long *last_time;
3259
3260 switch (priv->iw_mode) {
3261 case IEEE80211_IF_TYPE_IBSS:{
3262 struct list_head *p;
bb8c093b 3263 struct iwl4965_ibss_seq *entry = NULL;
3264 u8 *mac = header->addr2;
3265 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3266
3267 __list_for_each(p, &priv->ibss_mac_hash[index]) {
bb8c093b 3268 entry = list_entry(p, struct iwl4965_ibss_seq, list);
3269 if (!compare_ether_addr(entry->mac, mac))
3270 break;
3271 }
3272 if (p == &priv->ibss_mac_hash[index]) {
3273 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3274 if (!entry) {
bc434dd2 3275 IWL_ERROR("Cannot malloc new mac entry\n");
3276 return 0;
3277 }
3278 memcpy(entry->mac, mac, ETH_ALEN);
3279 entry->seq_num = seq;
3280 entry->frag_num = frag;
3281 entry->packet_time = jiffies;
bc434dd2 3282 list_add(&entry->list, &priv->ibss_mac_hash[index]);
3283 return 0;
3284 }
3285 last_seq = &entry->seq_num;
3286 last_frag = &entry->frag_num;
3287 last_time = &entry->packet_time;
3288 break;
3289 }
3290 case IEEE80211_IF_TYPE_STA:
3291 last_seq = &priv->last_seq_num;
3292 last_frag = &priv->last_frag_num;
3293 last_time = &priv->last_packet_time;
3294 break;
3295 default:
3296 return 0;
3297 }
3298 if ((*last_seq == seq) &&
3299 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3300 if (*last_frag == frag)
3301 goto drop;
3302 if (*last_frag + 1 != frag)
3303 /* out-of-order fragment */
3304 goto drop;
3305 } else
3306 *last_seq = seq;
3307
3308 *last_frag = frag;
3309 *last_time = jiffies;
3310 return 0;
3311
3312 drop:
3313 return 1;
3314}
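/*
 * Illustrative example (added for clarity): the sequence-control word is
 * split above into a 12-bit sequence number and a 4-bit fragment number,
 * e.g. seq_ctrl 0x01A5 -> seq 0x1A (26), frag 5.  A frame is dropped as a
 * duplicate only when it repeats the last sequence number within
 * IWL_PACKET_RETRY_TIME and its fragment number either repeats the
 * previous one or is not the next fragment in order.
 */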
3315
c8b0e6e1 3316#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
3317
3318#include "iwl-spectrum.h"
3319
3320#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3321#define BEACON_TIME_MASK_HIGH 0xFF000000
3322#define TIME_UNIT 1024
3323
3324/*
3325 * extended beacon time format
3326 * time in usec will be changed into a 32-bit value in 8:24 format
3327 * the high 1 byte is the beacon counts
3328 * the lower 3 bytes is the time in usec within one beacon interval
3329 */
3330
bb8c093b 3331static u32 iwl4965_usecs_to_beacons(u32 usec, u32 beacon_interval)
3332{
3333 u32 quot;
3334 u32 rem;
3335 u32 interval = beacon_interval * 1024;
3336
3337 if (!interval || !usec)
3338 return 0;
3339
3340 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3341 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3342
3343 return (quot << 24) + rem;
3344}
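/*
 * Illustrative example (added for clarity): with a 100 TU beacon interval
 * (102400 usec), an offset of 250000 usec is encoded above as quot = 2
 * full beacon intervals in the top 8 bits and rem = 45200 usec in the low
 * 24 bits, i.e. (2 << 24) + 45200; iwl4965_add_beacon_time() below adds
 * two such values, carrying overflow of the usec part into the beacon
 * count.
 */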
3345
3346/* base is usually what we get from ucode with each received frame,
3347 * the same as HW timer counter counting down
3348 */
3349
bb8c093b 3350static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
3351{
3352 u32 base_low = base & BEACON_TIME_MASK_LOW;
3353 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3354 u32 interval = beacon_interval * TIME_UNIT;
3355 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3356 (addon & BEACON_TIME_MASK_HIGH);
3357
3358 if (base_low > addon_low)
3359 res += base_low - addon_low;
3360 else if (base_low < addon_low) {
3361 res += interval + base_low - addon_low;
3362 res += (1 << 24);
3363 } else
3364 res += (1 << 24);
3365
3366 return cpu_to_le32(res);
3367}
3368
bb8c093b 3369static int iwl4965_get_measurement(struct iwl4965_priv *priv,
3370 struct ieee80211_measurement_params *params,
3371 u8 type)
3372{
3373 struct iwl4965_spectrum_cmd spectrum;
3374 struct iwl4965_rx_packet *res;
3375 struct iwl4965_host_cmd cmd = {
b481de9c
ZY
3376 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3377 .data = (void *)&spectrum,
3378 .meta.flags = CMD_WANT_SKB,
3379 };
3380 u32 add_time = le64_to_cpu(params->start_time);
3381 int rc;
3382 int spectrum_resp_status;
3383 int duration = le16_to_cpu(params->duration);
3384
bb8c093b 3385 if (iwl4965_is_associated(priv))
b481de9c 3386 add_time =
bb8c093b 3387 iwl4965_usecs_to_beacons(
b481de9c
ZY
3388 le64_to_cpu(params->start_time) - priv->last_tsf,
3389 le16_to_cpu(priv->rxon_timing.beacon_interval));
3390
3391 memset(&spectrum, 0, sizeof(spectrum));
3392
3393 spectrum.channel_count = cpu_to_le16(1);
3394 spectrum.flags =
3395 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3396 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3397 cmd.len = sizeof(spectrum);
3398 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3399
bb8c093b 3400 if (iwl4965_is_associated(priv))
b481de9c 3401 spectrum.start_time =
bb8c093b 3402 iwl4965_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
3403 add_time,
3404 le16_to_cpu(priv->rxon_timing.beacon_interval));
3405 else
3406 spectrum.start_time = 0;
3407
3408 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3409 spectrum.channels[0].channel = params->channel;
3410 spectrum.channels[0].type = type;
3411 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3412 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3413 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3414
bb8c093b 3415 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
3416 if (rc)
3417 return rc;
3418
bb8c093b 3419 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
b481de9c
ZY
3420 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3421 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
3422 rc = -EIO;
3423 }
3424
3425 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3426 switch (spectrum_resp_status) {
3427 case 0: /* Command will be handled */
3428 if (res->u.spectrum.id != 0xff) {
3429 IWL_DEBUG_INFO
3430 ("Replaced existing measurement: %d\n",
3431 res->u.spectrum.id);
3432 priv->measurement_status &= ~MEASUREMENT_READY;
3433 }
3434 priv->measurement_status |= MEASUREMENT_ACTIVE;
3435 rc = 0;
3436 break;
3437
3438 case 1: /* Command will not be handled */
3439 rc = -EAGAIN;
3440 break;
3441 }
3442
3443 dev_kfree_skb_any(cmd.meta.u.skb);
3444
3445 return rc;
3446}
3447#endif
3448
bb8c093b
CH
3449static void iwl4965_txstatus_to_ieee(struct iwl4965_priv *priv,
3450 struct iwl4965_tx_info *tx_sta)
b481de9c
ZY
3451{
3452
3453 tx_sta->status.ack_signal = 0;
3454 tx_sta->status.excessive_retries = 0;
3455 tx_sta->status.queue_length = 0;
3456 tx_sta->status.queue_number = 0;
3457
3458 if (in_interrupt())
3459 ieee80211_tx_status_irqsafe(priv->hw,
3460 tx_sta->skb[0], &(tx_sta->status));
3461 else
3462 ieee80211_tx_status(priv->hw,
3463 tx_sta->skb[0], &(tx_sta->status));
3464
3465 tx_sta->skb[0] = NULL;
3466}
3467
3468/**
6440adb5 3469 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
b481de9c 3470 *
6440adb5
BC
3471 * When FW advances 'R' index, all entries between old and new 'R' index
3472 * need to be reclaimed. As a result, some free space forms. If there is
3473 * enough free space (> low mark), wake the stack that feeds us.
b481de9c 3474 */
bb8c093b 3475int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
b481de9c 3476{
bb8c093b
CH
3477 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
3478 struct iwl4965_queue *q = &txq->q;
b481de9c
ZY
3479 int nfreed = 0;
3480
3481 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3482 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3483 "is out of range [0-%d] %d %d.\n", txq_id,
fc4b6853 3484 index, q->n_bd, q->write_ptr, q->read_ptr);
b481de9c
ZY
3485 return 0;
3486 }
3487
bb8c093b 3488 for (index = iwl4965_queue_inc_wrap(index, q->n_bd);
fc4b6853 3489 q->read_ptr != index;
bb8c093b 3490 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd)) {
b481de9c 3491 if (txq_id != IWL_CMD_QUEUE_NUM) {
bb8c093b 3492 iwl4965_txstatus_to_ieee(priv,
fc4b6853 3493 &(txq->txb[txq->q.read_ptr]));
bb8c093b 3494 iwl4965_hw_txq_free_tfd(priv, txq);
b481de9c
ZY
3495 } else if (nfreed > 1) {
3496 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
fc4b6853 3497 q->write_ptr, q->read_ptr);
b481de9c
ZY
3498 queue_work(priv->workqueue, &priv->restart);
3499 }
3500 nfreed++;
3501 }
3502
bb8c093b 3503 if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
b481de9c
ZY
3504 (txq_id != IWL_CMD_QUEUE_NUM) &&
3505 priv->mac80211_registered)
3506 ieee80211_wake_queue(priv->hw, txq_id);
3507
3508
3509 return nfreed;
3510}
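/*
 * Example of the reclaim loop above (assuming a 256-entry ring, i.e.
 * q->n_bd == 256): if q->read_ptr is 250 and the firmware reports index 3
 * as the last completed entry, the loop frees entries 250..255 and 0..3,
 * leaves read_ptr at 4, and returns nfreed == 10.
 */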
3511
bb8c093b 3512static int iwl4965_is_tx_success(u32 status)
b481de9c
ZY
3513{
3514 status &= TX_STATUS_MSK;
3515 return (status == TX_STATUS_SUCCESS)
3516 || (status == TX_STATUS_DIRECT_DONE);
3517}
3518
3519/******************************************************************************
3520 *
3521 * Generic RX handler implementations
3522 *
3523 ******************************************************************************/
c8b0e6e1
CH
3524#ifdef CONFIG_IWL4965_HT
3525#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3526
bb8c093b 3527static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv,
b481de9c
ZY
3528 struct ieee80211_hdr *hdr)
3529{
3530 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
3531 return IWL_AP_ID;
3532 else {
3533 u8 *da = ieee80211_get_DA(hdr);
bb8c093b 3534 return iwl4965_hw_find_station(priv, da);
b481de9c
ZY
3535 }
3536}
3537
bb8c093b
CH
3538static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
3539 struct iwl4965_priv *priv, int txq_id, int idx)
b481de9c
ZY
3540{
3541 if (priv->txq[txq_id].txb[idx].skb[0])
3542 return (struct ieee80211_hdr *)priv->txq[txq_id].
3543 txb[idx].skb[0]->data;
3544 return NULL;
3545}
3546
bb8c093b 3547static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
b481de9c
ZY
3548{
3549 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
3550 tx_resp->frame_count);
3551 return le32_to_cpu(*scd_ssn) & MAX_SN;
3552
3553}
6440adb5
BC
3554
3555/**
3556 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
3557 */
bb8c093b
CH
3558static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3559 struct iwl4965_ht_agg *agg,
3560 struct iwl4965_tx_resp *tx_resp,
b481de9c
ZY
3561 u16 start_idx)
3562{
3563 u32 status;
3564 __le32 *frame_status = &tx_resp->status;
3565 struct ieee80211_tx_status *tx_status = NULL;
3566 struct ieee80211_hdr *hdr = NULL;
3567 int i, sh;
3568 int txq_id, idx;
3569 u16 seq;
3570
3571 if (agg->wait_for_ba)
6440adb5 3572 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
b481de9c
ZY
3573
3574 agg->frame_count = tx_resp->frame_count;
3575 agg->start_idx = start_idx;
3576 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3577 agg->bitmap0 = agg->bitmap1 = 0;
3578
6440adb5 3579 /* # frames attempted by Tx command */
b481de9c 3580 if (agg->frame_count == 1) {
6440adb5 3581 /* Only one frame was attempted; no block-ack will arrive */
bb8c093b 3582 struct iwl4965_tx_queue *txq;
b481de9c
ZY
3583 status = le32_to_cpu(frame_status[0]);
3584
3585 txq_id = agg->txq_id;
3586 txq = &priv->txq[txq_id];
3587 /* FIXME: code repetition */
3588 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
3589 agg->frame_count, agg->start_idx);
3590
fc4b6853 3591 tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status);
b481de9c
ZY
3592 tx_status->retry_count = tx_resp->failure_frame;
3593 tx_status->queue_number = status & 0xff;
3594 tx_status->queue_length = tx_resp->bt_kill_count;
3595 tx_status->queue_length |= tx_resp->failure_rts;
3596
bb8c093b 3597 tx_status->flags = iwl4965_is_tx_success(status) ?
b481de9c
ZY
3598 IEEE80211_TX_STATUS_ACK : 0;
3599 tx_status->control.tx_rate =
bb8c093b 3600 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3601 /* FIXME: code repetition end */
3602
3603 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3604 status & 0xff, tx_resp->failure_frame);
3605 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
bb8c093b 3606 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
b481de9c
ZY
3607
3608 agg->wait_for_ba = 0;
3609 } else {
6440adb5 3610 /* Two or more frames were attempted; expect block-ack */
b481de9c
ZY
3611 u64 bitmap = 0;
3612 int start = agg->start_idx;
3613
6440adb5 3614 /* Construct bit-map of pending frames within Tx window */
b481de9c
ZY
3615 for (i = 0; i < agg->frame_count; i++) {
3616 u16 sc;
3617 status = le32_to_cpu(frame_status[i]);
3618 seq = status >> 16;
3619 idx = SEQ_TO_INDEX(seq);
3620 txq_id = SEQ_TO_QUEUE(seq);
3621
3622 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3623 AGG_TX_STATE_ABORT_MSK))
3624 continue;
3625
3626 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3627 agg->frame_count, txq_id, idx);
3628
bb8c093b 3629 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
b481de9c
ZY
3630
3631 sc = le16_to_cpu(hdr->seq_ctrl);
3632 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3633 IWL_ERROR("BUG_ON idx doesn't match seq control"
3634 " idx=%d, seq_idx=%d, seq=%d\n",
3635 idx, SEQ_TO_SN(sc),
3636 hdr->seq_ctrl);
3637 return -1;
3638 }
3639
3640 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3641 i, idx, SEQ_TO_SN(sc));
3642
3643 sh = idx - start;
3644 if (sh > 64) {
3645 sh = (start - idx) + 0xff;
3646 bitmap = bitmap << sh;
3647 sh = 0;
3648 start = idx;
3649 } else if (sh < -64)
3650 sh = 0xff - (start - idx);
3651 else if (sh < 0) {
3652 sh = start - idx;
3653 start = idx;
3654 bitmap = bitmap << sh;
3655 sh = 0;
3656 }
3657 bitmap |= (1 << sh);
3658 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3659 start, (u32)(bitmap & 0xFFFFFFFF));
3660 }
3661
3662 agg->bitmap0 = bitmap & 0xFFFFFFFF;
3663 agg->bitmap1 = bitmap >> 32;
3664 agg->start_idx = start;
3665 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3666 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
3667 agg->frame_count, agg->start_idx,
3668 agg->bitmap0);
3669
3670 if (bitmap)
3671 agg->wait_for_ba = 1;
3672 }
3673 return 0;
3674}
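/*
 * Worked example of the block-ack bitmap construction in
 * iwl4965_tx_status_reply_tx() above (illustrative indexes): with
 * agg->start_idx = 10 and frame statuses reporting idx 10, 11 and 13,
 * the shifts sh are 0, 1 and 3, so bitmap ends up as 0x0B -- each set bit
 * is the offset of a pending frame from start_idx.  Since the bitmap is
 * non-zero, wait_for_ba is set and the driver expects a compressed
 * block-ack for these frames.
 */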
3675#endif
3676#endif
3677
6440adb5
BC
3678/**
3679 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3680 */
bb8c093b
CH
3681static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3682 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3683{
bb8c093b 3684 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3685 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3686 int txq_id = SEQ_TO_QUEUE(sequence);
3687 int index = SEQ_TO_INDEX(sequence);
bb8c093b 3688 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
b481de9c 3689 struct ieee80211_tx_status *tx_status;
bb8c093b 3690 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
b481de9c 3691 u32 status = le32_to_cpu(tx_resp->status);
c8b0e6e1
CH
3692#ifdef CONFIG_IWL4965_HT
3693#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
3694 int tid, sta_id;
3695#endif
3696#endif
3697
3698 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3699 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3700 "is out of range [0-%d] %d %d\n", txq_id,
fc4b6853
TW
3701 index, txq->q.n_bd, txq->q.write_ptr,
3702 txq->q.read_ptr);
b481de9c
ZY
3703 return;
3704 }
3705
c8b0e6e1
CH
3706#ifdef CONFIG_IWL4965_HT
3707#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3708 if (txq->sched_retry) {
bb8c093b 3709 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
b481de9c 3710 struct ieee80211_hdr *hdr =
bb8c093b
CH
3711 iwl4965_tx_queue_get_hdr(priv, txq_id, index);
3712 struct iwl4965_ht_agg *agg = NULL;
b481de9c
ZY
3713 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3714
3715 if (qc == NULL) {
3716 IWL_ERROR("BUG_ON qc is null!!!!\n");
3717 return;
3718 }
3719
3720 tid = le16_to_cpu(*qc) & 0xf;
3721
bb8c093b 3722 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
b481de9c
ZY
3723 if (unlikely(sta_id == IWL_INVALID_STATION)) {
3724 IWL_ERROR("Station not known for\n");
3725 return;
3726 }
3727
3728 agg = &priv->stations[sta_id].tid[tid].agg;
3729
3730 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
3731
3732 if ((tx_resp->frame_count == 1) &&
bb8c093b 3733 !iwl4965_is_tx_success(status)) {
b481de9c
ZY
3734 /* TODO: send BAR */
3735 }
3736
fc4b6853 3737 if ((txq->q.read_ptr != (scd_ssn & 0xff))) {
bb8c093b 3738 index = iwl4965_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
b481de9c
ZY
3739 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3740 "%d index %d\n", scd_ssn , index);
bb8c093b 3741 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
3742 }
3743 } else {
c8b0e6e1
CH
3744#endif /* CONFIG_IWL4965_HT_AGG */
3745#endif /* CONFIG_IWL4965_HT */
fc4b6853 3746 tx_status = &(txq->txb[txq->q.read_ptr].status);
b481de9c
ZY
3747
3748 tx_status->retry_count = tx_resp->failure_frame;
3749 tx_status->queue_number = status;
3750 tx_status->queue_length = tx_resp->bt_kill_count;
3751 tx_status->queue_length |= tx_resp->failure_rts;
3752
3753 tx_status->flags =
bb8c093b 3754 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
b481de9c
ZY
3755
3756 tx_status->control.tx_rate =
bb8c093b 3757 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3758
3759 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
bb8c093b 3760 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
b481de9c
ZY
3761 status, le32_to_cpu(tx_resp->rate_n_flags),
3762 tx_resp->failure_frame);
3763
3764 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3765 if (index != -1)
bb8c093b 3766 iwl4965_tx_queue_reclaim(priv, txq_id, index);
c8b0e6e1
CH
3767#ifdef CONFIG_IWL4965_HT
3768#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3769 }
c8b0e6e1
CH
3770#endif /* CONFIG_IWL4965_HT_AGG */
3771#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
3772
3773 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3774 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3775}
3776
3777
bb8c093b
CH
3778static void iwl4965_rx_reply_alive(struct iwl4965_priv *priv,
3779 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3780{
bb8c093b
CH
3781 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3782 struct iwl4965_alive_resp *palive;
b481de9c
ZY
3783 struct delayed_work *pwork;
3784
3785 palive = &pkt->u.alive_frame;
3786
3787 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3788 "0x%01X 0x%01X\n",
3789 palive->is_valid, palive->ver_type,
3790 palive->ver_subtype);
3791
3792 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3793 IWL_DEBUG_INFO("Initialization Alive received.\n");
3794 memcpy(&priv->card_alive_init,
3795 &pkt->u.alive_frame,
bb8c093b 3796 sizeof(struct iwl4965_init_alive_resp));
b481de9c
ZY
3797 pwork = &priv->init_alive_start;
3798 } else {
3799 IWL_DEBUG_INFO("Runtime Alive received.\n");
3800 memcpy(&priv->card_alive, &pkt->u.alive_frame,
bb8c093b 3801 sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
3802 pwork = &priv->alive_start;
3803 }
3804
3805 /* We delay the ALIVE response by 5ms to
3806 * give the HW RF Kill time to activate... */
3807 if (palive->is_valid == UCODE_VALID_OK)
3808 queue_delayed_work(priv->workqueue, pwork,
3809 msecs_to_jiffies(5));
3810 else
3811 IWL_WARNING("uCode did not respond OK.\n");
3812}
3813
bb8c093b
CH
3814static void iwl4965_rx_reply_add_sta(struct iwl4965_priv *priv,
3815 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3816{
bb8c093b 3817 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3818
3819 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3820 return;
3821}
3822
bb8c093b
CH
3823static void iwl4965_rx_reply_error(struct iwl4965_priv *priv,
3824 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3825{
bb8c093b 3826 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3827
3828 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3829 "seq 0x%04X ser 0x%08X\n",
3830 le32_to_cpu(pkt->u.err_resp.error_type),
3831 get_cmd_string(pkt->u.err_resp.cmd_id),
3832 pkt->u.err_resp.cmd_id,
3833 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3834 le32_to_cpu(pkt->u.err_resp.error_info));
3835}
3836
3837#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3838
bb8c093b 3839static void iwl4965_rx_csa(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3840{
bb8c093b
CH
3841 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3842 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
3843 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
3844 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3845 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3846 rxon->channel = csa->channel;
3847 priv->staging_rxon.channel = csa->channel;
3848}
3849
bb8c093b
CH
3850static void iwl4965_rx_spectrum_measure_notif(struct iwl4965_priv *priv,
3851 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3852{
c8b0e6e1 3853#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
bb8c093b
CH
3854 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3855 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
3856
3857 if (!report->state) {
3858 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3859 "Spectrum Measure Notification: Start\n");
3860 return;
3861 }
3862
3863 memcpy(&priv->measure_report, report, sizeof(*report));
3864 priv->measurement_status |= MEASUREMENT_READY;
3865#endif
3866}
3867
bb8c093b
CH
3868static void iwl4965_rx_pm_sleep_notif(struct iwl4965_priv *priv,
3869 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3870{
c8b0e6e1 3871#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3872 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3873 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
3874 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3875 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3876#endif
3877}
3878
bb8c093b
CH
3879static void iwl4965_rx_pm_debug_statistics_notif(struct iwl4965_priv *priv,
3880 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3881{
bb8c093b 3882 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3883 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3884 "notification for %s:\n",
3885 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
bb8c093b 3886 iwl4965_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
b481de9c
ZY
3887}
3888
bb8c093b 3889static void iwl4965_bg_beacon_update(struct work_struct *work)
b481de9c 3890{
bb8c093b
CH
3891 struct iwl4965_priv *priv =
3892 container_of(work, struct iwl4965_priv, beacon_update);
b481de9c
ZY
3893 struct sk_buff *beacon;
3894
3895 /* Pull updated AP beacon from mac80211; will fail if not in AP mode */
32bfd35d 3896 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL);
b481de9c
ZY
3897
3898 if (!beacon) {
3899 IWL_ERROR("update beacon failed\n");
3900 return;
3901 }
3902
3903 mutex_lock(&priv->mutex);
3904 /* A new beacon skb is allocated every time; dispose of the previous one. */
3905 if (priv->ibss_beacon)
3906 dev_kfree_skb(priv->ibss_beacon);
3907
3908 priv->ibss_beacon = beacon;
3909 mutex_unlock(&priv->mutex);
3910
bb8c093b 3911 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
3912}
3913
bb8c093b
CH
3914static void iwl4965_rx_beacon_notif(struct iwl4965_priv *priv,
3915 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3916{
c8b0e6e1 3917#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3918 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3919 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3920 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
b481de9c
ZY
3921
3922 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3923 "tsf %d %d rate %d\n",
3924 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3925 beacon->beacon_notify_hdr.failure_frame,
3926 le32_to_cpu(beacon->ibss_mgr_status),
3927 le32_to_cpu(beacon->high_tsf),
3928 le32_to_cpu(beacon->low_tsf), rate);
3929#endif
3930
3931 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3932 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3933 queue_work(priv->workqueue, &priv->beacon_update);
3934}
3935
3936/* Service response to REPLY_SCAN_CMD (0x80) */
bb8c093b
CH
3937static void iwl4965_rx_reply_scan(struct iwl4965_priv *priv,
3938 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3939{
c8b0e6e1 3940#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3941 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3942 struct iwl4965_scanreq_notification *notif =
3943 (struct iwl4965_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
3944
3945 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3946#endif
3947}
3948
3949/* Service SCAN_START_NOTIFICATION (0x82) */
bb8c093b
CH
3950static void iwl4965_rx_scan_start_notif(struct iwl4965_priv *priv,
3951 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3952{
bb8c093b
CH
3953 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3954 struct iwl4965_scanstart_notification *notif =
3955 (struct iwl4965_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
3956 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3957 IWL_DEBUG_SCAN("Scan start: "
3958 "%d [802.11%s] "
3959 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3960 notif->channel,
3961 notif->band ? "bg" : "a",
3962 notif->tsf_high,
3963 notif->tsf_low, notif->status, notif->beacon_timer);
3964}
3965
3966/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
bb8c093b
CH
3967static void iwl4965_rx_scan_results_notif(struct iwl4965_priv *priv,
3968 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3969{
bb8c093b
CH
3970 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3971 struct iwl4965_scanresults_notification *notif =
3972 (struct iwl4965_scanresults_notification *)pkt->u.raw;
b481de9c
ZY
3973
3974 IWL_DEBUG_SCAN("Scan ch.res: "
3975 "%d [802.11%s] "
3976 "(TSF: 0x%08X:%08X) - %d "
3977 "elapsed=%lu usec (%dms since last)\n",
3978 notif->channel,
3979 notif->band ? "bg" : "a",
3980 le32_to_cpu(notif->tsf_high),
3981 le32_to_cpu(notif->tsf_low),
3982 le32_to_cpu(notif->statistics[0]),
3983 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3984 jiffies_to_msecs(elapsed_jiffies
3985 (priv->last_scan_jiffies, jiffies)));
3986
3987 priv->last_scan_jiffies = jiffies;
7878a5a4 3988 priv->next_scan_jiffies = 0;
b481de9c
ZY
3989}
3990
3991/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
bb8c093b
CH
3992static void iwl4965_rx_scan_complete_notif(struct iwl4965_priv *priv,
3993 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3994{
bb8c093b
CH
3995 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3996 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
b481de9c
ZY
3997
3998 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3999 scan_notif->scanned_channels,
4000 scan_notif->tsf_low,
4001 scan_notif->tsf_high, scan_notif->status);
4002
4003 /* The HW is no longer scanning */
4004 clear_bit(STATUS_SCAN_HW, &priv->status);
4005
4006 /* The scan completion notification came in, so kill that timer... */
4007 cancel_delayed_work(&priv->scan_check);
4008
4009 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
4010 (priv->scan_bands == 2) ? "2.4" : "5.2",
4011 jiffies_to_msecs(elapsed_jiffies
4012 (priv->scan_pass_start, jiffies)));
4013
4014 /* Remove this scanned band from the list
4015 * of pending bands to scan */
4016 priv->scan_bands--;
4017
4018 /* If a request to abort was given, or the scan did not succeed
4019 * then we reset the scan state machine and terminate,
4020 * re-queuing another scan if one has been requested */
4021 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4022 IWL_DEBUG_INFO("Aborted scan completed.\n");
4023 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
4024 } else {
4025 /* If there are more bands on this scan pass reschedule */
4026 if (priv->scan_bands > 0)
4027 goto reschedule;
4028 }
4029
4030 priv->last_scan_jiffies = jiffies;
7878a5a4 4031 priv->next_scan_jiffies = 0;
b481de9c
ZY
4032 IWL_DEBUG_INFO("Setting scan to off\n");
4033
4034 clear_bit(STATUS_SCANNING, &priv->status);
4035
4036 IWL_DEBUG_INFO("Scan took %dms\n",
4037 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
4038
4039 queue_work(priv->workqueue, &priv->scan_completed);
4040
4041 return;
4042
4043reschedule:
4044 priv->scan_pass_start = jiffies;
4045 queue_work(priv->workqueue, &priv->request_scan);
4046}
4047
4048/* Handle notification from uCode that card's power state is changing
4049 * due to software, hardware, or critical temperature RFKILL */
bb8c093b
CH
4050static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv,
4051 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4052{
bb8c093b 4053 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
4054 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4055 unsigned long status = priv->status;
4056
4057 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
4058 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4059 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
4060
4061 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4062 RF_CARD_DISABLED)) {
4063
bb8c093b 4064 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
4065 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4066
bb8c093b
CH
4067 if (!iwl4965_grab_nic_access(priv)) {
4068 iwl4965_write_direct32(
b481de9c
ZY
4069 priv, HBUS_TARG_MBX_C,
4070 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4071
bb8c093b 4072 iwl4965_release_nic_access(priv);
b481de9c
ZY
4073 }
4074
4075 if (!(flags & RXON_CARD_DISABLED)) {
bb8c093b 4076 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 4077 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
bb8c093b
CH
4078 if (!iwl4965_grab_nic_access(priv)) {
4079 iwl4965_write_direct32(
b481de9c
ZY
4080 priv, HBUS_TARG_MBX_C,
4081 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4082
bb8c093b 4083 iwl4965_release_nic_access(priv);
b481de9c
ZY
4084 }
4085 }
4086
4087 if (flags & RF_CARD_DISABLED) {
bb8c093b 4088 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c 4089 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
bb8c093b
CH
4090 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
4091 if (!iwl4965_grab_nic_access(priv))
4092 iwl4965_release_nic_access(priv);
b481de9c
ZY
4093 }
4094 }
4095
4096 if (flags & HW_CARD_DISABLED)
4097 set_bit(STATUS_RF_KILL_HW, &priv->status);
4098 else
4099 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4100
4101
4102 if (flags & SW_CARD_DISABLED)
4103 set_bit(STATUS_RF_KILL_SW, &priv->status);
4104 else
4105 clear_bit(STATUS_RF_KILL_SW, &priv->status);
4106
4107 if (!(flags & RXON_CARD_DISABLED))
bb8c093b 4108 iwl4965_scan_cancel(priv);
b481de9c
ZY
4109
4110 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
4111 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
4112 (test_bit(STATUS_RF_KILL_SW, &status) !=
4113 test_bit(STATUS_RF_KILL_SW, &priv->status)))
4114 queue_work(priv->workqueue, &priv->rf_kill);
4115 else
4116 wake_up_interruptible(&priv->wait_command_queue);
4117}
4118
4119/**
bb8c093b 4120 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
4121 *
4122 * Set up the RX handlers for each of the reply types sent from the uCode
4123 * to the host.
4124 *
4125 * This function chains into the hardware specific files for them to setup
4126 * any hardware specific handlers as well.
4127 */
bb8c093b 4128static void iwl4965_setup_rx_handlers(struct iwl4965_priv *priv)
b481de9c 4129{
bb8c093b
CH
4130 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
4131 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
4132 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
4133 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
b481de9c 4134 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
4135 iwl4965_rx_spectrum_measure_notif;
4136 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl4965_rx_pm_sleep_notif;
b481de9c 4137 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
4138 iwl4965_rx_pm_debug_statistics_notif;
4139 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
b481de9c 4140
9fbab516
BC
4141 /*
4142 * The same handler is used for both the REPLY to a discrete
4143 * statistics request from the host as well as for the periodic
4144 * statistics notifications (after received beacons) from the uCode.
b481de9c 4145 */
bb8c093b
CH
4146 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
4147 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
b481de9c 4148
bb8c093b
CH
4149 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
4150 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
b481de9c 4151 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 4152 iwl4965_rx_scan_results_notif;
b481de9c 4153 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
4154 iwl4965_rx_scan_complete_notif;
4155 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
4156 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
b481de9c 4157
9fbab516 4158 /* Set up hardware specific Rx handlers */
bb8c093b 4159 iwl4965_hw_rx_handler_setup(priv);
b481de9c
ZY
4160}
4161
4162/**
bb8c093b 4163 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
4164 * @rxb: Rx buffer to reclaim
4165 *
4166 * If an Rx buffer has an async callback associated with it the callback
4167 * will be executed. The attached skb (if present) will only be freed
4168 * if the callback returns 1
4169 */
bb8c093b
CH
4170static void iwl4965_tx_cmd_complete(struct iwl4965_priv *priv,
4171 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4172{
bb8c093b 4173 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4174 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4175 int txq_id = SEQ_TO_QUEUE(sequence);
4176 int index = SEQ_TO_INDEX(sequence);
4177 int huge = sequence & SEQ_HUGE_FRAME;
4178 int cmd_index;
bb8c093b 4179 struct iwl4965_cmd *cmd;
b481de9c
ZY
4180
4181 /* If a Tx command is being handled and it isn't in the actual
4182 * command queue, then a command routing bug has been introduced
4183 * in the queue management code. */
4184 if (txq_id != IWL_CMD_QUEUE_NUM)
4185 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
4186 txq_id, pkt->hdr.cmd);
4187 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
4188
4189 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
4190 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
4191
4192 /* Input error checking is done when commands are added to queue. */
4193 if (cmd->meta.flags & CMD_WANT_SKB) {
4194 cmd->meta.source->u.skb = rxb->skb;
4195 rxb->skb = NULL;
4196 } else if (cmd->meta.u.callback &&
4197 !cmd->meta.u.callback(priv, cmd, rxb->skb))
4198 rxb->skb = NULL;
4199
bb8c093b 4200 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
4201
4202 if (!(cmd->meta.flags & CMD_ASYNC)) {
4203 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4204 wake_up_interruptible(&priv->wait_command_queue);
4205 }
4206}
4207
4208/************************** RX-FUNCTIONS ****************************/
4209/*
4210 * Rx theory of operation
4211 *
9fbab516
BC
4212 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
4213 * each of which point to Receive Buffers to be filled by 4965. These get
4214 * used not only for Rx frames, but for any command response or notification
4215 * from the 4965. The driver and 4965 manage the Rx buffers by means
4216 * of indexes into the circular buffer.
b481de9c
ZY
4217 *
4218 * Rx Queue Indexes
4219 * The host/firmware share two index registers for managing the Rx buffers.
4220 *
4221 * The READ index maps to the first position that the firmware may be writing
4222 * to -- the driver can read up to (but not including) this position and get
4223 * good data.
4224 * The READ index is managed by the firmware once the card is enabled.
4225 *
4226 * The WRITE index maps to the last position the driver has read from -- the
4227 * position preceding WRITE is the last slot the firmware can place a packet.
4228 *
4229 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4230 * WRITE = READ.
4231 *
9fbab516 4232 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
4233 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4234 *
9fbab516 4235 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
4236 * and fire the RX interrupt. The driver can then query the READ index and
4237 * process as many packets as possible, moving the WRITE index forward as it
4238 * resets the Rx queue buffers with new memory.
4239 *
4240 * The management in the driver is as follows:
4241 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
4242 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 4243 * to replenish the iwl->rxq->rx_free.
bb8c093b 4244 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
4245 * iwl->rxq is replenished and the READ INDEX is updated (updating the
4246 * 'processed' and 'read' driver indexes as well)
4247 * + A received packet is processed and handed to the kernel network stack,
4248 * detached from the iwl->rxq. The driver 'processed' index is updated.
4249 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
4250 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
4251 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
4252 * were enough free buffers and RX_STALLED is set it is cleared.
4253 *
4254 *
4255 * Driver sequence:
4256 *
9fbab516
BC
4257 * iwl4965_rx_queue_alloc() Allocates rx_free
4258 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 4259 * iwl4965_rx_queue_restock
9fbab516 4260 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
4261 * queue, updates firmware pointers, and updates
4262 * the WRITE index. If insufficient rx_free buffers
bb8c093b 4263 * are available, schedules iwl4965_rx_replenish
b481de9c
ZY
4264 *
4265 * -- enable interrupts --
9fbab516 4266 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
b481de9c
ZY
4267 * READ INDEX, detaching the SKB from the pool.
4268 * Moves the packet buffer from queue to rx_used.
bb8c093b 4269 * Calls iwl4965_rx_queue_restock to refill any empty
b481de9c
ZY
4270 * slots.
4271 * ...
4272 *
4273 */
4274
4275/**
bb8c093b 4276 * iwl4965_rx_queue_space - Return number of free slots available in queue.
b481de9c 4277 */
bb8c093b 4278static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
b481de9c
ZY
4279{
4280 int s = q->read - q->write;
4281 if (s <= 0)
4282 s += RX_QUEUE_SIZE;
4283 /* keep some buffer to not confuse full and empty queue */
4284 s -= 2;
4285 if (s < 0)
4286 s = 0;
4287 return s;
4288}
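/*
 * Example (illustrative values): with q->read = 200 and q->write = 8 the
 * function above returns 200 - 8 - 2 = 190 free slots; if write were ahead
 * of read, the difference would first be wrapped by adding RX_QUEUE_SIZE.
 * The "- 2" keeps a full queue distinguishable from an empty one.
 */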
4289
4290/**
bb8c093b 4291 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
b481de9c 4292 */
bb8c093b 4293int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_rx_queue *q)
b481de9c
ZY
4294{
4295 u32 reg = 0;
4296 int rc = 0;
4297 unsigned long flags;
4298
4299 spin_lock_irqsave(&q->lock, flags);
4300
4301 if (q->need_update == 0)
4302 goto exit_unlock;
4303
6440adb5 4304 /* If power-saving is in use, make sure device is awake */
b481de9c 4305 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
bb8c093b 4306 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4307
4308 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
bb8c093b 4309 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4310 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4311 goto exit_unlock;
4312 }
4313
bb8c093b 4314 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4315 if (rc)
4316 goto exit_unlock;
4317
6440adb5 4318 /* Device expects a multiple of 8 */
bb8c093b 4319 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
b481de9c 4320 q->write & ~0x7);
bb8c093b 4321 iwl4965_release_nic_access(priv);
6440adb5
BC
4322
4323 /* Else device is assumed to be awake */
b481de9c 4324 } else
6440adb5 4325 /* Device expects a multiple of 8 */
bb8c093b 4326 iwl4965_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
b481de9c
ZY
4327
4328
4329 q->need_update = 0;
4330
4331 exit_unlock:
4332 spin_unlock_irqrestore(&q->lock, flags);
4333 return rc;
4334}
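/*
 * Note on the "multiple of 8" writes above (illustrative value): if
 * rxq->write is 13, the value actually written to FH_RSCSR_CHNL0_WPTR is
 * 13 & ~0x7 = 8, i.e. the write pointer is only advanced in steps of
 * 8 buffers.
 */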
4335
4336/**
9fbab516 4337 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 4338 */
bb8c093b 4339static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl4965_priv *priv,
b481de9c
ZY
4340 dma_addr_t dma_addr)
4341{
4342 return cpu_to_le32((u32)(dma_addr >> 8));
4343}
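/*
 * Example (illustrative address): a receive buffer at DMA address
 * 0x12345600 is handed to the device as the RBD pointer 0x00123456,
 * i.e. the address shifted right by 8 bits.  This appears to assume the
 * low 8 bits of the buffer address are zero (256-byte alignment).
 */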
4344
4345
4346/**
bb8c093b 4347 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 4348 *
9fbab516 4349 * If there are slots in the RX queue that need to be restocked,
b481de9c 4350 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 4351 * as we can, pulling from rx_free.
b481de9c
ZY
4352 *
4353 * This moves the 'write' index forward to catch up with 'processed', and
4354 * also updates the memory address in the firmware to reference the new
4355 * target buffer.
4356 */
bb8c093b 4357static int iwl4965_rx_queue_restock(struct iwl4965_priv *priv)
b481de9c 4358{
bb8c093b 4359 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4360 struct list_head *element;
bb8c093b 4361 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4362 unsigned long flags;
4363 int write, rc;
4364
4365 spin_lock_irqsave(&rxq->lock, flags);
4366 write = rxq->write & ~0x7;
bb8c093b 4367 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 4368 /* Get next free Rx buffer, remove from free list */
b481de9c 4369 element = rxq->rx_free.next;
bb8c093b 4370 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
b481de9c 4371 list_del(element);
6440adb5
BC
4372
4373 /* Point to Rx buffer via next RBD in circular buffer */
bb8c093b 4374 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
b481de9c
ZY
4375 rxq->queue[rxq->write] = rxb;
4376 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4377 rxq->free_count--;
4378 }
4379 spin_unlock_irqrestore(&rxq->lock, flags);
4380 /* If the pre-allocated buffer pool is dropping low, schedule to
4381 * refill it */
4382 if (rxq->free_count <= RX_LOW_WATERMARK)
4383 queue_work(priv->workqueue, &priv->rx_replenish);
4384
4385
6440adb5
BC
4386 /* If we've added more space for the firmware to place data, tell it.
4387 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
4388 if ((write != (rxq->write & ~0x7))
4389 || (abs(rxq->write - rxq->read) > 7)) {
4390 spin_lock_irqsave(&rxq->lock, flags);
4391 rxq->need_update = 1;
4392 spin_unlock_irqrestore(&rxq->lock, flags);
bb8c093b 4393 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
4394 if (rc)
4395 return rc;
4396 }
4397
4398 return 0;
4399}
4400
4401/**
bb8c093b 4402 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
4403 *
4404 * When moving to rx_free an SKB is allocated for the slot.
4405 *
bb8c093b 4406 * Also restock the Rx queue via iwl4965_rx_queue_restock.
01ebd063 4407 * This is called as a scheduled work item (except during initialization)
b481de9c 4408 */
5c0eef96 4409static void iwl4965_rx_allocate(struct iwl4965_priv *priv)
b481de9c 4410{
bb8c093b 4411 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4412 struct list_head *element;
bb8c093b 4413 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4414 unsigned long flags;
4415 spin_lock_irqsave(&rxq->lock, flags);
4416 while (!list_empty(&rxq->rx_used)) {
4417 element = rxq->rx_used.next;
bb8c093b 4418 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
6440adb5
BC
4419
4420 /* Alloc a new receive buffer */
b481de9c 4421 rxb->skb =
9ee1ba47
RR
4422 alloc_skb(priv->hw_setting.rx_buf_size,
4423 __GFP_NOWARN | GFP_ATOMIC);
b481de9c
ZY
4424 if (!rxb->skb) {
4425 if (net_ratelimit())
4426 printk(KERN_CRIT DRV_NAME
4427 ": Can not allocate SKB buffers\n");
4428 /* We don't reschedule replenish work here -- we will
4429 * call the restock method and if it still needs
4430 * more buffers it will schedule replenish */
4431 break;
4432 }
4433 priv->alloc_rxb_skb++;
4434 list_del(element);
6440adb5
BC
4435
4436 /* Get physical address of RB/SKB */
b481de9c
ZY
4437 rxb->dma_addr =
4438 pci_map_single(priv->pci_dev, rxb->skb->data,
9ee1ba47 4439 priv->hw_setting.rx_buf_size, PCI_DMA_FROMDEVICE);
b481de9c
ZY
4440 list_add_tail(&rxb->list, &rxq->rx_free);
4441 rxq->free_count++;
4442 }
4443 spin_unlock_irqrestore(&rxq->lock, flags);
5c0eef96
MA
4444}
4445
4446/*
4447 * this should be called while priv->lock is locked
4448 */
4fd1f841 4449static void __iwl4965_rx_replenish(void *data)
5c0eef96
MA
4450{
4451 struct iwl4965_priv *priv = data;
4452
4453 iwl4965_rx_allocate(priv);
4454 iwl4965_rx_queue_restock(priv);
4455}
4456
4457
4458void iwl4965_rx_replenish(void *data)
4459{
4460 struct iwl4965_priv *priv = data;
4461 unsigned long flags;
4462
4463 iwl4965_rx_allocate(priv);
b481de9c
ZY
4464
4465 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 4466 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4467 spin_unlock_irqrestore(&priv->lock, flags);
4468}
4469
4470/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
9fbab516 4471 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
b481de9c
ZY
4472 * This free routine walks the list of POOL entries and if SKB is set to
4473 * non NULL it is unmapped and freed
4474 */
bb8c093b 4475static void iwl4965_rx_queue_free(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4476{
4477 int i;
4478 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4479 if (rxq->pool[i].skb != NULL) {
4480 pci_unmap_single(priv->pci_dev,
4481 rxq->pool[i].dma_addr,
9ee1ba47
RR
4482 priv->hw_setting.rx_buf_size,
4483 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4484 dev_kfree_skb(rxq->pool[i].skb);
4485 }
4486 }
4487
4488 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4489 rxq->dma_addr);
4490 rxq->bd = NULL;
4491}
4492
bb8c093b 4493int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv)
b481de9c 4494{
bb8c093b 4495 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4496 struct pci_dev *dev = priv->pci_dev;
4497 int i;
4498
4499 spin_lock_init(&rxq->lock);
4500 INIT_LIST_HEAD(&rxq->rx_free);
4501 INIT_LIST_HEAD(&rxq->rx_used);
6440adb5
BC
4502
4503 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
b481de9c
ZY
4504 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4505 if (!rxq->bd)
4506 return -ENOMEM;
6440adb5 4507
b481de9c
ZY
4508 /* Fill the rx_used queue with _all_ of the Rx buffers */
4509 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4510 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
6440adb5 4511
b481de9c
ZY
4512 /* Set us so that we have processed and used all buffers, but have
4513 * not restocked the Rx queue with fresh buffers */
4514 rxq->read = rxq->write = 0;
4515 rxq->free_count = 0;
4516 rxq->need_update = 0;
4517 return 0;
4518}
4519
bb8c093b 4520void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4521{
4522 unsigned long flags;
4523 int i;
4524 spin_lock_irqsave(&rxq->lock, flags);
4525 INIT_LIST_HEAD(&rxq->rx_free);
4526 INIT_LIST_HEAD(&rxq->rx_used);
4527 /* Fill the rx_used queue with _all_ of the Rx buffers */
4528 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4529 /* In the reset function, these buffers may have been allocated
4530 * to an SKB, so we need to unmap and free potential storage */
4531 if (rxq->pool[i].skb != NULL) {
4532 pci_unmap_single(priv->pci_dev,
4533 rxq->pool[i].dma_addr,
9ee1ba47
RR
4534 priv->hw_setting.rx_buf_size,
4535 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4536 priv->alloc_rxb_skb--;
4537 dev_kfree_skb(rxq->pool[i].skb);
4538 rxq->pool[i].skb = NULL;
4539 }
4540 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4541 }
4542
4543 /* Set us so that we have processed and used all buffers, but have
4544 * not restocked the Rx queue with fresh buffers */
4545 rxq->read = rxq->write = 0;
4546 rxq->free_count = 0;
4547 spin_unlock_irqrestore(&rxq->lock, flags);
4548}
4549
4550/* Convert linear signal-to-noise ratio into dB */
4551static u8 ratio2dB[100] = {
4552/* 0 1 2 3 4 5 6 7 8 9 */
4553 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4554 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4555 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4556 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4557 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4558 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4559 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4560 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4561 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4562 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4563};
4564
4565/* Calculates a relative dB value from a ratio of linear
4566 * (i.e. not dB) signal levels.
4567 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 4568int iwl4965_calc_db_from_ratio(int sig_ratio)
b481de9c 4569{
c899a575
AB
4570 /* 1000:1 or higher just report as 60 dB */
4571 if (sig_ratio >= 1000)
b481de9c
ZY
4572 return 60;
4573
c899a575 4574 /* 100:1 or higher, divide by 10 and use table,
b481de9c 4575 * add 20 dB to make up for divide by 10 */
c899a575 4576 if (sig_ratio >= 100)
b481de9c
ZY
4577 return (20 + (int)ratio2dB[sig_ratio/10]);
4578
4579 /* We shouldn't see this */
4580 if (sig_ratio < 1)
4581 return 0;
4582
4583 /* Use table for ratios 1:1 - 99:1 */
4584 return (int)ratio2dB[sig_ratio];
4585}
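/*
 * Worked examples for the conversion above (illustrative ratios):
 * sig_ratio = 9    -> ratio2dB[9] = 19 dB
 * sig_ratio = 250  -> 20 + ratio2dB[25] = 20 + 28 = 48 dB
 * sig_ratio = 2000 -> clamped to 60 dB
 */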
4586
4587#define PERFECT_RSSI (-20) /* dBm */
4588#define WORST_RSSI (-95) /* dBm */
4589#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4590
4591/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4592 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4593 * about formulas used below. */
bb8c093b 4594int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
4595{
4596 int sig_qual;
4597 int degradation = PERFECT_RSSI - rssi_dbm;
4598
4599 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4600 * as indicator; formula is (signal dbm - noise dbm).
4601 * SNR at or above 40 is a great signal (100%).
4602 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4603 * Weakest usable signal is usually 10 - 15 dB SNR. */
4604 if (noise_dbm) {
4605 if (rssi_dbm - noise_dbm >= 40)
4606 return 100;
4607 else if (rssi_dbm < noise_dbm)
4608 return 0;
4609 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4610
4611 /* Else use just the signal level.
4612 * This formula is a least squares fit of data points collected and
4613 * compared with a reference system that had a percentage (%) display
4614 * for signal quality. */
4615 } else
4616 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4617 (15 * RSSI_RANGE + 62 * degradation)) /
4618 (RSSI_RANGE * RSSI_RANGE);
4619
4620 if (sig_qual > 100)
4621 sig_qual = 100;
4622 else if (sig_qual < 1)
4623 sig_qual = 0;
4624
4625 return sig_qual;
4626}
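/*
 * Worked examples for the quality indicator above (illustrative levels):
 * with rssi_dbm = -60 and noise_dbm = -85 the SNR is 25 dB, giving
 * sig_qual = (25 * 5) / 2 = 62%.  With no noise measurement and
 * rssi_dbm = -60, degradation = 40 and the least-squares formula yields
 * (100 * 75 * 75 - 40 * (15 * 75 + 62 * 40)) / (75 * 75) = 74%.
 */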
4627
4628/**
9fbab516 4629 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
4630 *
4631 * Uses the priv->rx_handlers callback function array to invoke
4632 * the appropriate handlers, including command responses,
4633 * frame-received notifications, and other notifications.
4634 */
bb8c093b 4635static void iwl4965_rx_handle(struct iwl4965_priv *priv)
b481de9c 4636{
bb8c093b
CH
4637 struct iwl4965_rx_mem_buffer *rxb;
4638 struct iwl4965_rx_packet *pkt;
4639 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4640 u32 r, i;
4641 int reclaim;
4642 unsigned long flags;
5c0eef96
MA
4643 u8 fill_rx = 0;
4644 u32 count = 0;
b481de9c 4645
6440adb5
BC
4646 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4647 * buffer that the driver may process (last buffer filled by ucode). */
bb8c093b 4648 r = iwl4965_hw_get_rx_read(priv);
b481de9c
ZY
4649 i = rxq->read;
4650
4651 /* Rx interrupt, but nothing sent from uCode */
4652 if (i == r)
4653 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4654
5c0eef96
MA
4655 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
4656 fill_rx = 1;
4657
b481de9c
ZY
4658 while (i != r) {
4659 rxb = rxq->queue[i];
4660
9fbab516 4661 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
4662 * then a bug has been introduced in the queue refilling
4663 * routines -- catch it here */
4664 BUG_ON(rxb == NULL);
4665
4666 rxq->queue[i] = NULL;
4667
4668 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
9ee1ba47 4669 priv->hw_setting.rx_buf_size,
b481de9c 4670 PCI_DMA_FROMDEVICE);
bb8c093b 4671 pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4672
4673 /* Reclaim a command buffer only if this packet is a response
4674 * to a (driver-originated) command.
4675 * If the packet (e.g. Rx frame) originated from uCode,
4676 * there is no command buffer to reclaim.
4677 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4678 * but apparently a few don't get set; catch them here. */
4679 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4680 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4681 (pkt->hdr.cmd != REPLY_4965_RX) &&
cfe01709 4682 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
b481de9c
ZY
4683 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4684 (pkt->hdr.cmd != REPLY_TX);
4685
4686 /* Based on type of command response or notification,
4687 * handle those that need handling via function in
bb8c093b 4688 * rx_handlers table. See iwl4965_setup_rx_handlers() */
b481de9c
ZY
4689 if (priv->rx_handlers[pkt->hdr.cmd]) {
4690 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4691 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4692 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4693 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4694 } else {
4695 /* No handling needed */
4696 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4697 "r %d i %d No handler needed for %s, 0x%02x\n",
4698 r, i, get_cmd_string(pkt->hdr.cmd),
4699 pkt->hdr.cmd);
4700 }
4701
4702 if (reclaim) {
9fbab516
BC
4703 /* Invoke any callbacks, transfer the skb to caller, and
4704 * fire off the (possibly) blocking iwl4965_send_cmd()
b481de9c
ZY
4705 * as we reclaim the driver command queue */
4706 if (rxb && rxb->skb)
bb8c093b 4707 iwl4965_tx_cmd_complete(priv, rxb);
b481de9c
ZY
4708 else
4709 IWL_WARNING("Claim null rxb?\n");
4710 }
4711
4712 /* For now we just don't re-use anything. We can tweak this
4713 * later to try and re-use notification packets and SKBs that
4714 * fail to Rx correctly */
4715 if (rxb->skb != NULL) {
4716 priv->alloc_rxb_skb--;
4717 dev_kfree_skb_any(rxb->skb);
4718 rxb->skb = NULL;
4719 }
4720
4721 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
9ee1ba47
RR
4722 priv->hw_setting.rx_buf_size,
4723 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4724 spin_lock_irqsave(&rxq->lock, flags);
4725 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4726 spin_unlock_irqrestore(&rxq->lock, flags);
4727 i = (i + 1) & RX_QUEUE_MASK;
5c0eef96
MA
4728 /* If there are a lot of unused frames,
4729 * restock the Rx queue so the uCode won't assert. */
4730 if (fill_rx) {
4731 count++;
4732 if (count >= 8) {
4733 priv->rxq.read = i;
4734 __iwl4965_rx_replenish(priv);
4735 count = 0;
4736 }
4737 }
b481de9c
ZY
4738 }
4739
4740 /* Backtrack one entry */
4741 priv->rxq.read = i;
bb8c093b 4742 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4743}
4744
6440adb5
BC
4745/**
4746 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4747 */
bb8c093b
CH
4748static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
4749 struct iwl4965_tx_queue *txq)
b481de9c
ZY
4750{
4751 u32 reg = 0;
4752 int rc = 0;
4753 int txq_id = txq->q.id;
4754
4755 if (txq->need_update == 0)
4756 return rc;
4757
4758 /* if we're trying to save power */
4759 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4760 /* wake up nic if it's powered down ...
4761 * uCode will wake up, and interrupt us again, so next
4762 * time we'll skip this part. */
bb8c093b 4763 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4764
4765 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4766 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
bb8c093b 4767 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4768 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4769 return rc;
4770 }
4771
4772 /* restore this queue's parameters in nic hardware. */
bb8c093b 4773 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4774 if (rc)
4775 return rc;
bb8c093b 4776 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
fc4b6853 4777 txq->q.write_ptr | (txq_id << 8));
bb8c093b 4778 iwl4965_release_nic_access(priv);
b481de9c
ZY
4779
4780 /* else not in power-save mode, uCode will never sleep when we're
4781 * trying to tx (during RFKILL, we're not trying to tx). */
4782 } else
bb8c093b 4783 iwl4965_write32(priv, HBUS_TARG_WRPTR,
fc4b6853 4784 txq->q.write_ptr | (txq_id << 8));
b481de9c
ZY
4785
4786 txq->need_update = 0;
4787
4788 return rc;
4789}
4790
c8b0e6e1 4791#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 4792static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
b481de9c 4793{
0795af57
JP
4794 DECLARE_MAC_BUF(mac);
4795
b481de9c 4796 IWL_DEBUG_RADIO("RX CONFIG:\n");
bb8c093b 4797 iwl4965_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
4798 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4799 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4800 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4801 le32_to_cpu(rxon->filter_flags));
4802 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4803 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4804 rxon->ofdm_basic_rates);
4805 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
0795af57
JP
4806 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
4807 print_mac(mac, rxon->node_addr));
4808 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4809 print_mac(mac, rxon->bssid_addr));
b481de9c
ZY
4810 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4811}
4812#endif
4813
bb8c093b 4814static void iwl4965_enable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4815{
4816 IWL_DEBUG_ISR("Enabling interrupts\n");
4817 set_bit(STATUS_INT_ENABLED, &priv->status);
bb8c093b 4818 iwl4965_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
4819}
4820
bb8c093b 4821static inline void iwl4965_disable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4822{
4823 clear_bit(STATUS_INT_ENABLED, &priv->status);
4824
4825 /* disable interrupts from uCode/NIC to host */
bb8c093b 4826 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
4827
4828 /* acknowledge/clear/reset any interrupts still pending
4829 * from uCode or flow handler (Rx/Tx DMA) */
bb8c093b
CH
4830 iwl4965_write32(priv, CSR_INT, 0xffffffff);
4831 iwl4965_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
4832 IWL_DEBUG_ISR("Disabled interrupts\n");
4833}
4834
4835static const char *desc_lookup(int i)
4836{
4837 switch (i) {
4838 case 1:
4839 return "FAIL";
4840 case 2:
4841 return "BAD_PARAM";
4842 case 3:
4843 return "BAD_CHECKSUM";
4844 case 4:
4845 return "NMI_INTERRUPT";
4846 case 5:
4847 return "SYSASSERT";
4848 case 6:
4849 return "FATAL_ERROR";
4850 }
4851
4852 return "UNKNOWN";
4853}
4854
4855#define ERROR_START_OFFSET (1 * sizeof(u32))
4856#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4857
bb8c093b 4858static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
b481de9c
ZY
4859{
4860 u32 data2, line;
4861 u32 desc, time, count, base, data1;
4862 u32 blink1, blink2, ilink1, ilink2;
4863 int rc;
4864
4865 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4866
bb8c093b 4867 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4868 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4869 return;
4870 }
4871
bb8c093b 4872 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4873 if (rc) {
4874 IWL_WARNING("Can not read from adapter at this time.\n");
4875 return;
4876 }
4877
bb8c093b 4878 count = iwl4965_read_targ_mem(priv, base);
b481de9c
ZY
4879
4880 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4881 IWL_ERROR("Start IWL Error Log Dump:\n");
4882 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4883 priv->status, priv->config, count);
4884 }
4885
bb8c093b
CH
4886 desc = iwl4965_read_targ_mem(priv, base + 1 * sizeof(u32));
4887 blink1 = iwl4965_read_targ_mem(priv, base + 3 * sizeof(u32));
4888 blink2 = iwl4965_read_targ_mem(priv, base + 4 * sizeof(u32));
4889 ilink1 = iwl4965_read_targ_mem(priv, base + 5 * sizeof(u32));
4890 ilink2 = iwl4965_read_targ_mem(priv, base + 6 * sizeof(u32));
4891 data1 = iwl4965_read_targ_mem(priv, base + 7 * sizeof(u32));
4892 data2 = iwl4965_read_targ_mem(priv, base + 8 * sizeof(u32));
4893 line = iwl4965_read_targ_mem(priv, base + 9 * sizeof(u32));
4894 time = iwl4965_read_targ_mem(priv, base + 11 * sizeof(u32));
b481de9c
ZY
4895
4896 IWL_ERROR("Desc Time "
4897 "data1 data2 line\n");
4898 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4899 desc_lookup(desc), desc, time, data1, data2, line);
4900 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4901 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4902 ilink1, ilink2);
4903
bb8c093b 4904 iwl4965_release_nic_access(priv);
b481de9c
ZY
4905}
4906
4907#define EVENT_START_OFFSET (4 * sizeof(u32))
4908
4909/**
bb8c093b 4910 * iwl4965_print_event_log - Dump error event log to syslog
b481de9c 4911 *
bb8c093b 4912 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained!
b481de9c 4913 */
bb8c093b 4914static void iwl4965_print_event_log(struct iwl4965_priv *priv, u32 start_idx,
b481de9c
ZY
4915 u32 num_events, u32 mode)
4916{
4917 u32 i;
4918 u32 base; /* SRAM byte address of event log header */
4919 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4920 u32 ptr; /* SRAM byte address of log data */
4921 u32 ev, time, data; /* event log data */
4922
4923 if (num_events == 0)
4924 return;
4925
4926 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4927
4928 if (mode == 0)
4929 event_size = 2 * sizeof(u32);
4930 else
4931 event_size = 3 * sizeof(u32);
4932
4933 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4934
4935 /* "time" is actually "data" for mode 0 (no timestamp).
4936 * place event id # at far right for easier visual parsing. */
4937 for (i = 0; i < num_events; i++) {
bb8c093b 4938 ev = iwl4965_read_targ_mem(priv, ptr);
b481de9c 4939 ptr += sizeof(u32);
bb8c093b 4940 time = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4941 ptr += sizeof(u32);
4942 if (mode == 0)
4943 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4944 else {
bb8c093b 4945 data = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4946 ptr += sizeof(u32);
4947 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4948 }
4949 }
4950}
4951
bb8c093b 4952static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv)
b481de9c
ZY
4953{
4954 int rc;
4955 u32 base; /* SRAM byte address of event log header */
4956 u32 capacity; /* event log capacity in # entries */
4957 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4958 u32 num_wraps; /* # times uCode wrapped to top of log */
4959 u32 next_entry; /* index of next entry to be written by uCode */
4960 u32 size; /* # entries that we'll print */
4961
4962 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 4963 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4964 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4965 return;
4966 }
4967
bb8c093b 4968 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4969 if (rc) {
4970 IWL_WARNING("Can not read from adapter at this time.\n");
4971 return;
4972 }
4973
4974 /* event log header */
bb8c093b
CH
4975 capacity = iwl4965_read_targ_mem(priv, base);
4976 mode = iwl4965_read_targ_mem(priv, base + (1 * sizeof(u32)));
4977 num_wraps = iwl4965_read_targ_mem(priv, base + (2 * sizeof(u32)));
4978 next_entry = iwl4965_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
4979
4980 size = num_wraps ? capacity : next_entry;
4981
4982 /* bail out if nothing in log */
4983 if (size == 0) {
583fab37 4984 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
bb8c093b 4985 iwl4965_release_nic_access(priv);
b481de9c
ZY
4986 return;
4987 }
4988
583fab37 4989 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
4990 size, num_wraps);
4991
4992 /* if uCode has wrapped back to top of log, start at the oldest entry,
4993 	 * i.e. the next one that uCode would fill. */
4994 if (num_wraps)
bb8c093b 4995 iwl4965_print_event_log(priv, next_entry,
b481de9c
ZY
4996 capacity - next_entry, mode);
4997
4998 /* (then/else) start at top of log */
bb8c093b 4999 iwl4965_print_event_log(priv, 0, next_entry, mode);
b481de9c 5000
bb8c093b 5001 iwl4965_release_nic_access(priv);
b481de9c
ZY
5002}
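/*
 * Worked example, illustrative only, of the wrap handling in
 * iwl4965_dump_nic_event_log() above: with capacity = 128, next_entry = 10
 * and num_wraps = 1, size becomes capacity (128, the "display count"), and
 * the two iwl4965_print_event_log() calls dump entries 10..127 first (the
 * oldest data, next in line to be overwritten) and then entries 0..9 (the
 * newest), i.e. the whole log in chronological order.
 */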
5003
5004/**
bb8c093b 5005 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 5006 */
bb8c093b 5007static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
b481de9c 5008{
bb8c093b 5009 /* Set the FW error flag -- cleared on iwl4965_down */
b481de9c
ZY
5010 set_bit(STATUS_FW_ERROR, &priv->status);
5011
5012 /* Cancel currently queued command. */
5013 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
5014
c8b0e6e1 5015#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5016 if (iwl4965_debug_level & IWL_DL_FW_ERRORS) {
5017 iwl4965_dump_nic_error_log(priv);
5018 iwl4965_dump_nic_event_log(priv);
5019 iwl4965_print_rx_config_cmd(&priv->staging_rxon);
b481de9c
ZY
5020 }
5021#endif
5022
5023 wake_up_interruptible(&priv->wait_command_queue);
5024
5025 /* Keep the restart process from trying to send host
5026 * commands by clearing the INIT status bit */
5027 clear_bit(STATUS_READY, &priv->status);
5028
5029 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5030 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
5031 "Restarting adapter due to uCode error.\n");
5032
bb8c093b 5033 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5034 memcpy(&priv->recovery_rxon, &priv->active_rxon,
5035 sizeof(priv->recovery_rxon));
5036 priv->error_recovering = 1;
5037 }
5038 queue_work(priv->workqueue, &priv->restart);
5039 }
5040}
5041
bb8c093b 5042static void iwl4965_error_recovery(struct iwl4965_priv *priv)
b481de9c
ZY
5043{
5044 unsigned long flags;
5045
5046 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
5047 sizeof(priv->staging_rxon));
5048 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5049 iwl4965_commit_rxon(priv);
b481de9c 5050
bb8c093b 5051 iwl4965_rxon_add_station(priv, priv->bssid, 1);
b481de9c
ZY
5052
5053 spin_lock_irqsave(&priv->lock, flags);
5054 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
5055 priv->error_recovering = 0;
5056 spin_unlock_irqrestore(&priv->lock, flags);
5057}
5058
bb8c093b 5059static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
b481de9c
ZY
5060{
5061 u32 inta, handled = 0;
5062 u32 inta_fh;
5063 unsigned long flags;
c8b0e6e1 5064#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
5065 u32 inta_mask;
5066#endif
5067
5068 spin_lock_irqsave(&priv->lock, flags);
5069
5070 /* Ack/clear/reset pending uCode interrupts.
5071 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
5072 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
bb8c093b
CH
5073 inta = iwl4965_read32(priv, CSR_INT);
5074 iwl4965_write32(priv, CSR_INT, inta);
b481de9c
ZY
5075
5076 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
5077 * Any new interrupts that happen after this, either while we're
5078 	 * in this tasklet, or later, will show up in the next ISR/tasklet. */
bb8c093b
CH
5079 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
5080 iwl4965_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 5081
c8b0e6e1 5082#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5083 if (iwl4965_debug_level & IWL_DL_ISR) {
9fbab516
BC
5084 /* just for debug */
5085 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
b481de9c
ZY
5086 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5087 inta, inta_mask, inta_fh);
5088 }
5089#endif
5090
5091 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
5092 * atomic, make sure that inta covers all the interrupts that
5093 * we've discovered, even if FH interrupt came in just after
5094 * reading CSR_INT. */
5095 if (inta_fh & CSR_FH_INT_RX_MASK)
5096 inta |= CSR_INT_BIT_FH_RX;
5097 if (inta_fh & CSR_FH_INT_TX_MASK)
5098 inta |= CSR_INT_BIT_FH_TX;
5099
5100 /* Now service all interrupt bits discovered above. */
5101 if (inta & CSR_INT_BIT_HW_ERR) {
5102 IWL_ERROR("Microcode HW error detected. Restarting.\n");
5103
5104 /* Tell the device to stop sending interrupts */
bb8c093b 5105 iwl4965_disable_interrupts(priv);
b481de9c 5106
bb8c093b 5107 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5108
5109 handled |= CSR_INT_BIT_HW_ERR;
5110
5111 spin_unlock_irqrestore(&priv->lock, flags);
5112
5113 return;
5114 }
5115
c8b0e6e1 5116#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5117 if (iwl4965_debug_level & (IWL_DL_ISR)) {
b481de9c
ZY
5118 /* NIC fires this, but we don't use it, redundant with WAKEUP */
5119 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
5120 IWL_DEBUG_ISR("Microcode started or stopped.\n");
5121
5122 /* Alive notification via Rx interrupt will do the real work */
5123 if (inta & CSR_INT_BIT_ALIVE)
5124 IWL_DEBUG_ISR("Alive interrupt\n");
5125 }
5126#endif
5127 /* Safely ignore these bits for debug checks below */
5128 inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
5129
9fbab516 5130 /* HW RF KILL switch toggled */
b481de9c
ZY
5131 if (inta & CSR_INT_BIT_RF_KILL) {
5132 int hw_rf_kill = 0;
bb8c093b 5133 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
5134 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5135 hw_rf_kill = 1;
5136
5137 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
5138 "RF_KILL bit toggled to %s.\n",
5139 hw_rf_kill ? "disable radio":"enable radio");
5140
5141 /* Queue restart only if RF_KILL switch was set to "kill"
5142 * when we loaded driver, and is now set to "enable".
5143 * After we're Alive, RF_KILL gets handled by
5144 * iwl_rx_card_state_notif() */
53e49093
ZY
5145 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
5146 clear_bit(STATUS_RF_KILL_HW, &priv->status);
b481de9c 5147 queue_work(priv->workqueue, &priv->restart);
53e49093 5148 }
b481de9c
ZY
5149
5150 handled |= CSR_INT_BIT_RF_KILL;
5151 }
5152
9fbab516 5153 /* Chip got too hot and stopped itself */
b481de9c
ZY
5154 if (inta & CSR_INT_BIT_CT_KILL) {
5155 IWL_ERROR("Microcode CT kill error detected.\n");
5156 handled |= CSR_INT_BIT_CT_KILL;
5157 }
5158
5159 /* Error detected by uCode */
5160 if (inta & CSR_INT_BIT_SW_ERR) {
5161 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
5162 inta);
bb8c093b 5163 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5164 handled |= CSR_INT_BIT_SW_ERR;
5165 }
5166
5167 /* uCode wakes up after power-down sleep */
5168 if (inta & CSR_INT_BIT_WAKEUP) {
5169 IWL_DEBUG_ISR("Wakeup interrupt\n");
bb8c093b
CH
5170 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq);
5171 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]);
5172 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]);
5173 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]);
5174 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]);
5175 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]);
5176 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
5177
5178 handled |= CSR_INT_BIT_WAKEUP;
5179 }
5180
5181 /* All uCode command responses, including Tx command responses,
5182 * Rx "responses" (frame-received notification), and other
5183 	 * notifications from uCode come through here */
5184 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 5185 iwl4965_rx_handle(priv);
b481de9c
ZY
5186 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
5187 }
5188
5189 if (inta & CSR_INT_BIT_FH_TX) {
5190 IWL_DEBUG_ISR("Tx interrupt\n");
5191 handled |= CSR_INT_BIT_FH_TX;
5192 }
5193
5194 if (inta & ~handled)
5195 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
5196
5197 if (inta & ~CSR_INI_SET_MASK) {
5198 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
5199 inta & ~CSR_INI_SET_MASK);
5200 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
5201 }
5202
5203 /* Re-enable all interrupts */
bb8c093b 5204 iwl4965_enable_interrupts(priv);
b481de9c 5205
c8b0e6e1 5206#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5207 if (iwl4965_debug_level & (IWL_DL_ISR)) {
5208 inta = iwl4965_read32(priv, CSR_INT);
5209 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
5210 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5211 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5212 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5213 }
5214#endif
5215 spin_unlock_irqrestore(&priv->lock, flags);
5216}
5217
bb8c093b 5218static irqreturn_t iwl4965_isr(int irq, void *data)
b481de9c 5219{
bb8c093b 5220 struct iwl4965_priv *priv = data;
b481de9c
ZY
5221 u32 inta, inta_mask;
5222 u32 inta_fh;
5223 if (!priv)
5224 return IRQ_NONE;
5225
5226 spin_lock(&priv->lock);
5227
5228 /* Disable (but don't clear!) interrupts here to avoid
5229 * back-to-back ISRs and sporadic interrupts from our NIC.
5230 * If we have something to service, the tasklet will re-enable ints.
5231 * If we *don't* have something, we'll re-enable before leaving here. */
bb8c093b
CH
5232 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); /* just for debug */
5233 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
5234
5235 /* Discover which interrupts are active/pending */
bb8c093b
CH
5236 inta = iwl4965_read32(priv, CSR_INT);
5237 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5238
5239 /* Ignore interrupt if there's nothing in NIC to service.
5240 * This may be due to IRQ shared with another device,
5241 * or due to sporadic interrupts thrown from our NIC. */
5242 if (!inta && !inta_fh) {
5243 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5244 goto none;
5245 }
5246
5247 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
66fbb541
ON
5248 /* Hardware disappeared. It might have already raised
5249 * an interrupt */
b481de9c 5250 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
66fbb541 5251 goto unplugged;
b481de9c
ZY
5252 }
5253
5254 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5255 inta, inta_mask, inta_fh);
5256
bb8c093b 5257 /* iwl4965_irq_tasklet() will service interrupts and re-enable them */
b481de9c 5258 tasklet_schedule(&priv->irq_tasklet);
b481de9c 5259
66fbb541
ON
5260 unplugged:
5261 spin_unlock(&priv->lock);
b481de9c
ZY
5262 return IRQ_HANDLED;
5263
5264 none:
5265 /* re-enable interrupts here since we don't have anything to service. */
bb8c093b 5266 iwl4965_enable_interrupts(priv);
b481de9c
ZY
5267 spin_unlock(&priv->lock);
5268 return IRQ_NONE;
5269}
5270
5271/************************** EEPROM BANDS ****************************
5272 *
bb8c093b 5273 * The iwl4965_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
5274 * EEPROM contents to the specific channel number supported for each
5275 * band.
5276 *
bb8c093b 5277 * For example, iwl4965_priv->eeprom.band_3_channels[4] from the band_3
b481de9c
ZY
5278 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5279 * The specific geography and calibration information for that channel
5280 * is contained in the eeprom map itself.
5281 *
5282 * During init, we copy the eeprom information and channel map
5283 * information into priv->channel_info_24/52 and priv->channel_map_24/52
5284 *
5285 * channel_map_24/52 provides the index in the channel_info array for a
5286 * given channel. We have to have two separate maps as there is channel
5287 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5288 * band_2
5289 *
5290 * A value of 0xff stored in the channel_map indicates that the channel
5291 * is not supported by the hardware at all.
5292 *
5293 * A value of 0xfe in the channel_map indicates that the channel is not
5294 * valid for Tx with the current hardware. This means that
5295 * while the system can tune and receive on a given channel, it may not
5296 * be able to associate or transmit any frames on that
5297 * channel. There is no corresponding channel information for that
5298 * entry.
5299 *
5300 *********************************************************************/
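/*
 * Illustrative reading of the mapping described above, not driver code:
 * iwl4965_eeprom_band_3[] below holds 42 at index 4, so
 * priv->eeprom.band_3_channels[4] carries the EEPROM data for physical
 * channel 42 in the 5.2GHz spectrum.  Per the description above, the
 * 5.2GHz channel map then gives, for channel 42, its index into
 * priv->channel_info, with 0xff meaning "not supported at all" and 0xfe
 * meaning "can tune and receive, but no Tx".
 */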
5301
5302/* 2.4 GHz */
bb8c093b 5303static const u8 iwl4965_eeprom_band_1[14] = {
b481de9c
ZY
5304 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5305};
5306
5307/* 5.2 GHz bands */
9fbab516 5308static const u8 iwl4965_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
5309 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5310};
5311
9fbab516 5312static const u8 iwl4965_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
5313 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5314};
5315
bb8c093b 5316static const u8 iwl4965_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
5317 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5318};
5319
bb8c093b 5320static const u8 iwl4965_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
5321 145, 149, 153, 157, 161, 165
5322};
5323
bb8c093b 5324static u8 iwl4965_eeprom_band_6[] = { /* 2.4 FAT channel */
b481de9c
ZY
5325 1, 2, 3, 4, 5, 6, 7
5326};
5327
bb8c093b 5328static u8 iwl4965_eeprom_band_7[] = { /* 5.2 FAT channel */
b481de9c
ZY
5329 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5330};
5331
9fbab516
BC
5332static void iwl4965_init_band_reference(const struct iwl4965_priv *priv,
5333 int band,
b481de9c 5334 int *eeprom_ch_count,
bb8c093b 5335 const struct iwl4965_eeprom_channel
b481de9c
ZY
5336 **eeprom_ch_info,
5337 const u8 **eeprom_ch_index)
5338{
5339 switch (band) {
5340 case 1: /* 2.4GHz band */
bb8c093b 5341 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_1);
b481de9c 5342 *eeprom_ch_info = priv->eeprom.band_1_channels;
bb8c093b 5343 *eeprom_ch_index = iwl4965_eeprom_band_1;
b481de9c 5344 break;
9fbab516 5345 case 2: /* 4.9GHz band */
bb8c093b 5346 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_2);
b481de9c 5347 *eeprom_ch_info = priv->eeprom.band_2_channels;
bb8c093b 5348 *eeprom_ch_index = iwl4965_eeprom_band_2;
b481de9c
ZY
5349 break;
5350 case 3: /* 5.2GHz band */
bb8c093b 5351 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_3);
b481de9c 5352 *eeprom_ch_info = priv->eeprom.band_3_channels;
bb8c093b 5353 *eeprom_ch_index = iwl4965_eeprom_band_3;
b481de9c 5354 break;
9fbab516 5355 case 4: /* 5.5GHz band */
bb8c093b 5356 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_4);
b481de9c 5357 *eeprom_ch_info = priv->eeprom.band_4_channels;
bb8c093b 5358 *eeprom_ch_index = iwl4965_eeprom_band_4;
b481de9c 5359 break;
9fbab516 5360 case 5: /* 5.7GHz band */
bb8c093b 5361 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c 5362 *eeprom_ch_info = priv->eeprom.band_5_channels;
bb8c093b 5363 *eeprom_ch_index = iwl4965_eeprom_band_5;
b481de9c 5364 break;
9fbab516 5365 case 6: /* 2.4GHz FAT channels */
bb8c093b 5366 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_6);
b481de9c 5367 *eeprom_ch_info = priv->eeprom.band_24_channels;
bb8c093b 5368 *eeprom_ch_index = iwl4965_eeprom_band_6;
b481de9c 5369 break;
9fbab516 5370 case 7: /* 5 GHz FAT channels */
bb8c093b 5371 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_7);
b481de9c 5372 *eeprom_ch_info = priv->eeprom.band_52_channels;
bb8c093b 5373 *eeprom_ch_index = iwl4965_eeprom_band_7;
b481de9c
ZY
5374 break;
5375 default:
5376 BUG();
5377 return;
5378 }
5379}
5380
6440adb5
BC
5381/**
5382 * iwl4965_get_channel_info - Find driver's private channel info
5383 *
5384 * Based on band and channel number.
5385 */
bb8c093b 5386const struct iwl4965_channel_info *iwl4965_get_channel_info(const struct iwl4965_priv *priv,
b481de9c
ZY
5387 int phymode, u16 channel)
5388{
5389 int i;
5390
5391 switch (phymode) {
5392 case MODE_IEEE80211A:
5393 for (i = 14; i < priv->channel_count; i++) {
5394 if (priv->channel_info[i].channel == channel)
5395 return &priv->channel_info[i];
5396 }
5397 break;
5398
5399 case MODE_IEEE80211B:
5400 case MODE_IEEE80211G:
5401 if (channel >= 1 && channel <= 14)
5402 return &priv->channel_info[channel - 1];
5403 break;
5404
5405 }
5406
5407 return NULL;
5408}
5409
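/*
 * Illustrative usage of the lookup above; the function name below is
 * hypothetical and the block is never compiled.  2.4GHz channel numbers
 * index the table directly, while 5.2GHz channels are found by a linear
 * search past the 14 2.4GHz entries.
 */
#if 0	/* illustration, never compiled */
static void iwl4965_channel_lookup_example(struct iwl4965_priv *priv)
{
	const struct iwl4965_channel_info *ch_24, *ch_52;

	/* 2.4GHz channel 6 maps straight to priv->channel_info[5] */
	ch_24 = iwl4965_get_channel_info(priv, MODE_IEEE80211G, 6);

	/* 5.2GHz channel 36 is searched for from index 14 upward */
	ch_52 = iwl4965_get_channel_info(priv, MODE_IEEE80211A, 36);

	if (!ch_24 || !ch_52 || !is_channel_valid(ch_52))
		IWL_DEBUG_INFO("channel unknown or not usable for this SKU\n");
}
#endif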
5410#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5411 ? # x " " : "")
5412
6440adb5
BC
5413/**
5414 * iwl4965_init_channel_map - Set up driver's info for all possible channels
5415 */
bb8c093b 5416static int iwl4965_init_channel_map(struct iwl4965_priv *priv)
b481de9c
ZY
5417{
5418 int eeprom_ch_count = 0;
5419 const u8 *eeprom_ch_index = NULL;
bb8c093b 5420 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 5421 int band, ch;
bb8c093b 5422 struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5423
5424 if (priv->channel_count) {
5425 IWL_DEBUG_INFO("Channel map already initialized.\n");
5426 return 0;
5427 }
5428
5429 if (priv->eeprom.version < 0x2f) {
5430 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5431 priv->eeprom.version);
5432 return -EINVAL;
5433 }
5434
5435 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5436
5437 priv->channel_count =
bb8c093b
CH
5438 ARRAY_SIZE(iwl4965_eeprom_band_1) +
5439 ARRAY_SIZE(iwl4965_eeprom_band_2) +
5440 ARRAY_SIZE(iwl4965_eeprom_band_3) +
5441 ARRAY_SIZE(iwl4965_eeprom_band_4) +
5442 ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c
ZY
5443
5444 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5445
bb8c093b 5446 priv->channel_info = kzalloc(sizeof(struct iwl4965_channel_info) *
b481de9c
ZY
5447 priv->channel_count, GFP_KERNEL);
5448 if (!priv->channel_info) {
5449 IWL_ERROR("Could not allocate channel_info\n");
5450 priv->channel_count = 0;
5451 return -ENOMEM;
5452 }
5453
5454 ch_info = priv->channel_info;
5455
5456 /* Loop through the 5 EEPROM bands adding them in order to the
5457 	 * channel map we maintain (which contains more information than
5458 	 * just what is in the EEPROM) */
5459 for (band = 1; band <= 5; band++) {
5460
bb8c093b 5461 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5462 &eeprom_ch_info, &eeprom_ch_index);
5463
5464 /* Loop through each band adding each of the channels */
5465 for (ch = 0; ch < eeprom_ch_count; ch++) {
5466 ch_info->channel = eeprom_ch_index[ch];
5467 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5468 MODE_IEEE80211A;
5469
5470 /* permanently store EEPROM's channel regulatory flags
5471 * and max power in channel info database. */
5472 ch_info->eeprom = eeprom_ch_info[ch];
5473
5474 /* Copy the run-time flags so they are there even on
5475 * invalid channels */
5476 ch_info->flags = eeprom_ch_info[ch].flags;
5477
5478 if (!(is_channel_valid(ch_info))) {
5479 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5480 "No traffic\n",
5481 ch_info->channel,
5482 ch_info->flags,
5483 is_channel_a_band(ch_info) ?
5484 "5.2" : "2.4");
5485 ch_info++;
5486 continue;
5487 }
5488
5489 /* Initialize regulatory-based run-time data */
5490 ch_info->max_power_avg = ch_info->curr_txpow =
5491 eeprom_ch_info[ch].max_power_avg;
5492 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5493 ch_info->min_power = 0;
5494
5495 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5496 " %ddBm): Ad-Hoc %ssupported\n",
5497 ch_info->channel,
5498 is_channel_a_band(ch_info) ?
5499 "5.2" : "2.4",
5500 CHECK_AND_PRINT(IBSS),
5501 CHECK_AND_PRINT(ACTIVE),
5502 CHECK_AND_PRINT(RADAR),
5503 CHECK_AND_PRINT(WIDE),
5504 CHECK_AND_PRINT(NARROW),
5505 CHECK_AND_PRINT(DFS),
5506 eeprom_ch_info[ch].flags,
5507 eeprom_ch_info[ch].max_power_avg,
5508 ((eeprom_ch_info[ch].
5509 flags & EEPROM_CHANNEL_IBSS)
5510 && !(eeprom_ch_info[ch].
5511 flags & EEPROM_CHANNEL_RADAR))
5512 ? "" : "not ");
5513
5514 /* Set the user_txpower_limit to the highest power
5515 * supported by any channel */
5516 if (eeprom_ch_info[ch].max_power_avg >
5517 priv->user_txpower_limit)
5518 priv->user_txpower_limit =
5519 eeprom_ch_info[ch].max_power_avg;
5520
5521 ch_info++;
5522 }
5523 }
5524
6440adb5 5525 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
b481de9c
ZY
5526 for (band = 6; band <= 7; band++) {
5527 int phymode;
5528 u8 fat_extension_chan;
5529
bb8c093b 5530 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5531 &eeprom_ch_info, &eeprom_ch_index);
5532
6440adb5 5533 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
b481de9c 5534 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
6440adb5 5535
b481de9c
ZY
5536 /* Loop through each band adding each of the channels */
5537 for (ch = 0; ch < eeprom_ch_count; ch++) {
5538
5539 if ((band == 6) &&
5540 ((eeprom_ch_index[ch] == 5) ||
5541 (eeprom_ch_index[ch] == 6) ||
5542 (eeprom_ch_index[ch] == 7)))
5543 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5544 else
5545 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5546
6440adb5 5547 /* Set up driver's info for lower half */
b481de9c
ZY
5548 iwl4965_set_fat_chan_info(priv, phymode,
5549 eeprom_ch_index[ch],
5550 &(eeprom_ch_info[ch]),
5551 fat_extension_chan);
5552
6440adb5 5553 /* Set up driver's info for upper half */
b481de9c
ZY
5554 iwl4965_set_fat_chan_info(priv, phymode,
5555 (eeprom_ch_index[ch] + 4),
5556 &(eeprom_ch_info[ch]),
5557 HT_IE_EXT_CHANNEL_BELOW);
5558 }
5559 }
5560
5561 return 0;
5562}
5563
5564/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5565 * sending probe req. This should be set long enough to hear probe responses
5566 * from more than one AP. */
5567#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5568#define IWL_ACTIVE_DWELL_TIME_52 (10)
5569
5570/* For faster active scanning, scan will move to the next channel if fewer than
5571 * PLCP_QUIET_THRESH packets are heard on this channel within
5572 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5573 * time if it's a quiet channel (nothing responded to our probe, and there's
5574 * no other traffic).
5575 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5576#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5577#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5578
5579/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5580 * Must be set longer than active dwell time.
5581 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5582#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5583#define IWL_PASSIVE_DWELL_TIME_52 (10)
5584#define IWL_PASSIVE_DWELL_BASE (100)
5585#define IWL_CHANNEL_TUNE_TIME 5
5586
bb8c093b 5587static inline u16 iwl4965_get_active_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c
ZY
5588{
5589 if (phymode == MODE_IEEE80211A)
5590 return IWL_ACTIVE_DWELL_TIME_52;
5591 else
5592 return IWL_ACTIVE_DWELL_TIME_24;
5593}
5594
bb8c093b 5595static u16 iwl4965_get_passive_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c 5596{
bb8c093b 5597 u16 active = iwl4965_get_active_dwell_time(priv, phymode);
b481de9c
ZY
5598 u16 passive = (phymode != MODE_IEEE80211A) ?
5599 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5600 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5601
bb8c093b 5602 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5603 /* If we're associated, we clamp the maximum passive
5604 * dwell time to be 98% of the beacon interval (minus
5605 * 2 * channel tune time) */
5606 passive = priv->beacon_int;
5607 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5608 passive = IWL_PASSIVE_DWELL_BASE;
5609 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5610 }
5611
5612 if (passive <= active)
5613 passive = active + 1;
5614
5615 return passive;
5616}
5617
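/*
 * Worked example, illustrative only, of the clamping done in
 * iwl4965_get_passive_dwell_time() above: when associated with an AP whose
 * beacon interval is the typical 100, passive is set to 100 inside the
 * associated branch; 100 is not greater than IWL_PASSIVE_DWELL_BASE (100),
 * so it is kept, and the result is 100 * 98 / 100 - 2 * IWL_CHANNEL_TUNE_TIME
 * = 98 - 10 = 88.  Since 88 still exceeds the active dwell time (10 or 20),
 * it is returned unchanged.
 */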
bb8c093b 5618static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
b481de9c 5619 u8 is_active, u8 direct_mask,
bb8c093b 5620 struct iwl4965_scan_channel *scan_ch)
b481de9c
ZY
5621{
5622 const struct ieee80211_channel *channels = NULL;
5623 const struct ieee80211_hw_mode *hw_mode;
bb8c093b 5624 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5625 u16 passive_dwell = 0;
5626 u16 active_dwell = 0;
5627 int added, i;
5628
bb8c093b 5629 hw_mode = iwl4965_get_hw_mode(priv, phymode);
b481de9c
ZY
5630 if (!hw_mode)
5631 return 0;
5632
5633 channels = hw_mode->channels;
5634
bb8c093b
CH
5635 active_dwell = iwl4965_get_active_dwell_time(priv, phymode);
5636 passive_dwell = iwl4965_get_passive_dwell_time(priv, phymode);
b481de9c
ZY
5637
5638 for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5639 if (channels[i].chan ==
5640 le16_to_cpu(priv->active_rxon.channel)) {
bb8c093b 5641 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5642 IWL_DEBUG_SCAN
5643 ("Skipping current channel %d\n",
5644 le16_to_cpu(priv->active_rxon.channel));
5645 continue;
5646 }
5647 } else if (priv->only_active_channel)
5648 continue;
5649
5650 scan_ch->channel = channels[i].chan;
5651
9fbab516
BC
5652 ch_info = iwl4965_get_channel_info(priv, phymode,
5653 scan_ch->channel);
b481de9c
ZY
5654 if (!is_channel_valid(ch_info)) {
5655 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5656 scan_ch->channel);
5657 continue;
5658 }
5659
5660 if (!is_active || is_channel_passive(ch_info) ||
5661 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5662 scan_ch->type = 0; /* passive */
5663 else
5664 scan_ch->type = 1; /* active */
5665
5666 if (scan_ch->type & 1)
5667 scan_ch->type |= (direct_mask << 1);
5668
5669 if (is_channel_narrow(ch_info))
5670 scan_ch->type |= (1 << 7);
5671
5672 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5673 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5674
9fbab516 5675 /* Set txpower levels to defaults */
b481de9c
ZY
5676 scan_ch->tpc.dsp_atten = 110;
5677 /* scan_pwr_info->tpc.dsp_atten; */
5678
5679 /*scan_pwr_info->tpc.tx_gain; */
5680 if (phymode == MODE_IEEE80211A)
5681 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5682 else {
5683 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5684 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516
BC
5685 * power level:
5686 * scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
b481de9c
ZY
5687 */
5688 }
5689
5690 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5691 scan_ch->channel,
5692 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5693 (scan_ch->type & 1) ?
5694 active_dwell : passive_dwell);
5695
5696 scan_ch++;
5697 added++;
5698 }
5699
5700 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
5701 return added;
5702}
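/*
 * Worked example, illustrative only, of the scan_ch->type encoding built
 * above: an active-scan channel with direct_mask = 1 (one directed SSID)
 * gets type = 1 | (1 << 1) = 0x03, and 0x83 if the channel is also
 * "narrow" (bit 7).  A passive channel keeps type = 0, since the
 * direct_mask bits are only OR'd in when bit 0 is set, plus the optional
 * narrow bit.
 */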
5703
bb8c093b 5704static void iwl4965_reset_channel_flag(struct iwl4965_priv *priv)
b481de9c
ZY
5705{
5706 int i, j;
5707 for (i = 0; i < 3; i++) {
5708 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5709 for (j = 0; j < hw_mode->num_channels; j++)
5710 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5711 }
5712}
5713
bb8c093b 5714static void iwl4965_init_hw_rates(struct iwl4965_priv *priv,
b481de9c
ZY
5715 struct ieee80211_rate *rates)
5716{
5717 int i;
5718
5719 for (i = 0; i < IWL_RATE_COUNT; i++) {
bb8c093b 5720 rates[i].rate = iwl4965_rates[i].ieee * 5;
b481de9c
ZY
5721 rates[i].val = i; /* Rate scaling will work on indexes */
5722 rates[i].val2 = i;
5723 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5724 /* Only OFDM have the bits-per-symbol set */
5725 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5726 rates[i].flags |= IEEE80211_RATE_OFDM;
5727 else {
5728 /*
5729 * If CCK 1M then set rate flag to CCK else CCK_2
5730 * which is CCK | PREAMBLE2
5731 */
bb8c093b 5732 rates[i].flags |= (iwl4965_rates[i].plcp == 10) ?
b481de9c
ZY
5733 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5734 }
5735
5736 /* Set up which ones are basic rates... */
5737 if (IWL_BASIC_RATES_MASK & (1 << i))
5738 rates[i].flags |= IEEE80211_RATE_BASIC;
5739 }
b481de9c
ZY
5740}
5741
5742/**
bb8c093b 5743 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
b481de9c 5744 */
bb8c093b 5745static int iwl4965_init_geos(struct iwl4965_priv *priv)
b481de9c 5746{
bb8c093b 5747 struct iwl4965_channel_info *ch;
b481de9c
ZY
5748 struct ieee80211_hw_mode *modes;
5749 struct ieee80211_channel *channels;
5750 struct ieee80211_channel *geo_ch;
5751 struct ieee80211_rate *rates;
5752 int i = 0;
5753 enum {
5754 A = 0,
5755 B = 1,
5756 G = 2,
b481de9c 5757 };
326eeee8 5758 int mode_count = 3;
b481de9c
ZY
5759
5760 if (priv->modes) {
5761 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5762 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5763 return 0;
5764 }
5765
5766 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5767 GFP_KERNEL);
5768 if (!modes)
5769 return -ENOMEM;
5770
5771 channels = kzalloc(sizeof(struct ieee80211_channel) *
5772 priv->channel_count, GFP_KERNEL);
5773 if (!channels) {
5774 kfree(modes);
5775 return -ENOMEM;
5776 }
5777
5778 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5779 GFP_KERNEL);
5780 if (!rates) {
5781 kfree(modes);
5782 kfree(channels);
5783 return -ENOMEM;
5784 }
5785
5786 /* 0 = 802.11a
5787 * 1 = 802.11b
5788 * 2 = 802.11g
5789 */
5790
5791 /* 5.2GHz channels start after the 2.4GHz channels */
5792 modes[A].mode = MODE_IEEE80211A;
bb8c093b 5793 modes[A].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)];
b481de9c
ZY
5794 modes[A].rates = rates;
5795 modes[A].num_rates = 8; /* just OFDM */
5796 modes[A].rates = &rates[4];
5797 modes[A].num_channels = 0;
326eeee8
RR
5798#ifdef CONFIG_IWL4965_HT
5799 iwl4965_init_ht_hw_capab(&modes[A].ht_info, MODE_IEEE80211A);
5800#endif
b481de9c
ZY
5801
5802 modes[B].mode = MODE_IEEE80211B;
5803 modes[B].channels = channels;
5804 modes[B].rates = rates;
5805 modes[B].num_rates = 4; /* just CCK */
5806 modes[B].num_channels = 0;
5807
5808 modes[G].mode = MODE_IEEE80211G;
5809 modes[G].channels = channels;
5810 modes[G].rates = rates;
5811 modes[G].num_rates = 12; /* OFDM & CCK */
5812 modes[G].num_channels = 0;
326eeee8
RR
5813#ifdef CONFIG_IWL4965_HT
5814 iwl4965_init_ht_hw_capab(&modes[G].ht_info, MODE_IEEE80211G);
5815#endif
b481de9c
ZY
5816
5817 priv->ieee_channels = channels;
5818 priv->ieee_rates = rates;
5819
bb8c093b 5820 iwl4965_init_hw_rates(priv, rates);
b481de9c
ZY
5821
5822 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5823 ch = &priv->channel_info[i];
5824
5825 if (!is_channel_valid(ch)) {
5826 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5827 "skipping.\n",
5828 ch->channel, is_channel_a_band(ch) ?
5829 "5.2" : "2.4");
5830 continue;
5831 }
5832
5833 if (is_channel_a_band(ch)) {
5834 geo_ch = &modes[A].channels[modes[A].num_channels++];
b481de9c
ZY
5835 } else {
5836 geo_ch = &modes[B].channels[modes[B].num_channels++];
5837 modes[G].num_channels++;
b481de9c
ZY
5838 }
5839
5840 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5841 geo_ch->chan = ch->channel;
5842 geo_ch->power_level = ch->max_power_avg;
5843 geo_ch->antenna_max = 0xff;
5844
5845 if (is_channel_valid(ch)) {
5846 geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5847 if (ch->flags & EEPROM_CHANNEL_IBSS)
5848 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5849
5850 if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5851 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5852
5853 if (ch->flags & EEPROM_CHANNEL_RADAR)
5854 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5855
5856 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5857 priv->max_channel_txpower_limit =
5858 ch->max_power_avg;
5859 }
5860
5861 geo_ch->val = geo_ch->flag;
5862 }
5863
5864 if ((modes[A].num_channels == 0) && priv->is_abg) {
5865 printk(KERN_INFO DRV_NAME
5866 ": Incorrectly detected BG card as ABG. Please send "
5867 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5868 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5869 priv->is_abg = 0;
5870 }
5871
5872 printk(KERN_INFO DRV_NAME
5873 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5874 modes[G].num_channels, modes[A].num_channels);
5875
5876 /*
5877 	 * NOTE: We register these in order of preference -- the
5878 	 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5879 	 * a phymode based on rates or AP capabilities, but seems to
5880 	 * configure it purely on whether the channel being configured
5881 	 * is supported by a mode -- and the first match is taken
5882 */
5883
5884 if (modes[G].num_channels)
5885 ieee80211_register_hwmode(priv->hw, &modes[G]);
5886 if (modes[B].num_channels)
5887 ieee80211_register_hwmode(priv->hw, &modes[B]);
5888 if (modes[A].num_channels)
5889 ieee80211_register_hwmode(priv->hw, &modes[A]);
5890
5891 priv->modes = modes;
5892 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5893
5894 return 0;
5895}
5896
5897/******************************************************************************
5898 *
5899 * uCode download functions
5900 *
5901 ******************************************************************************/
5902
bb8c093b 5903static void iwl4965_dealloc_ucode_pci(struct iwl4965_priv *priv)
b481de9c
ZY
5904{
5905 if (priv->ucode_code.v_addr != NULL) {
5906 pci_free_consistent(priv->pci_dev,
5907 priv->ucode_code.len,
5908 priv->ucode_code.v_addr,
5909 priv->ucode_code.p_addr);
5910 priv->ucode_code.v_addr = NULL;
5911 }
5912 if (priv->ucode_data.v_addr != NULL) {
5913 pci_free_consistent(priv->pci_dev,
5914 priv->ucode_data.len,
5915 priv->ucode_data.v_addr,
5916 priv->ucode_data.p_addr);
5917 priv->ucode_data.v_addr = NULL;
5918 }
5919 if (priv->ucode_data_backup.v_addr != NULL) {
5920 pci_free_consistent(priv->pci_dev,
5921 priv->ucode_data_backup.len,
5922 priv->ucode_data_backup.v_addr,
5923 priv->ucode_data_backup.p_addr);
5924 priv->ucode_data_backup.v_addr = NULL;
5925 }
5926 if (priv->ucode_init.v_addr != NULL) {
5927 pci_free_consistent(priv->pci_dev,
5928 priv->ucode_init.len,
5929 priv->ucode_init.v_addr,
5930 priv->ucode_init.p_addr);
5931 priv->ucode_init.v_addr = NULL;
5932 }
5933 if (priv->ucode_init_data.v_addr != NULL) {
5934 pci_free_consistent(priv->pci_dev,
5935 priv->ucode_init_data.len,
5936 priv->ucode_init_data.v_addr,
5937 priv->ucode_init_data.p_addr);
5938 priv->ucode_init_data.v_addr = NULL;
5939 }
5940 if (priv->ucode_boot.v_addr != NULL) {
5941 pci_free_consistent(priv->pci_dev,
5942 priv->ucode_boot.len,
5943 priv->ucode_boot.v_addr,
5944 priv->ucode_boot.p_addr);
5945 priv->ucode_boot.v_addr = NULL;
5946 }
5947}
5948
5949/**
bb8c093b 5950 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
5951 * looking at all data.
5952 */
4fd1f841 5953static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 *image,
9fbab516 5954 u32 len)
b481de9c
ZY
5955{
5956 u32 val;
5957 u32 save_len = len;
5958 int rc = 0;
5959 u32 errcnt;
5960
5961 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5962
bb8c093b 5963 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
5964 if (rc)
5965 return rc;
5966
bb8c093b 5967 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
b481de9c
ZY
5968
5969 errcnt = 0;
5970 for (; len > 0; len -= sizeof(u32), image++) {
5971 /* read data comes through single port, auto-incr addr */
5972 /* NOTE: Use the debugless read so we don't flood kernel log
5973 * if IWL_DL_IO is set */
bb8c093b 5974 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
5975 if (val != le32_to_cpu(*image)) {
5976 IWL_ERROR("uCode INST section is invalid at "
5977 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5978 save_len - len, val, le32_to_cpu(*image));
5979 rc = -EIO;
5980 errcnt++;
5981 if (errcnt >= 20)
5982 break;
5983 }
5984 }
5985
bb8c093b 5986 iwl4965_release_nic_access(priv);
b481de9c
ZY
5987
5988 if (!errcnt)
5989 IWL_DEBUG_INFO
5990 ("ucode image in INSTRUCTION memory is good\n");
5991
5992 return rc;
5993}
5994
5995
5996/**
bb8c093b 5997 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
5998 * using sample data 100 bytes apart. If these sample points are good,
5999 * it's a pretty good bet that everything between them is good, too.
6000 */
bb8c093b 6001static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
6002{
6003 u32 val;
6004 int rc = 0;
6005 u32 errcnt = 0;
6006 u32 i;
6007
6008 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
6009
bb8c093b 6010 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6011 if (rc)
6012 return rc;
6013
6014 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
6015 /* read data comes through single port, auto-incr addr */
6016 /* NOTE: Use the debugless read so we don't flood kernel log
6017 * if IWL_DL_IO is set */
bb8c093b 6018 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR,
b481de9c 6019 i + RTC_INST_LOWER_BOUND);
bb8c093b 6020 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
6021 if (val != le32_to_cpu(*image)) {
6022#if 0 /* Enable this if you want to see details */
6023 IWL_ERROR("uCode INST section is invalid at "
6024 "offset 0x%x, is 0x%x, s/b 0x%x\n",
6025 i, val, *image);
6026#endif
6027 rc = -EIO;
6028 errcnt++;
6029 if (errcnt >= 3)
6030 break;
6031 }
6032 }
6033
bb8c093b 6034 iwl4965_release_nic_access(priv);
b481de9c
ZY
6035
6036 return rc;
6037}
6038
6039
6040/**
bb8c093b 6041 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
6042 * and verify its contents
6043 */
bb8c093b 6044static int iwl4965_verify_ucode(struct iwl4965_priv *priv)
b481de9c
ZY
6045{
6046 __le32 *image;
6047 u32 len;
6048 int rc = 0;
6049
6050 /* Try bootstrap */
6051 image = (__le32 *)priv->ucode_boot.v_addr;
6052 len = priv->ucode_boot.len;
bb8c093b 6053 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6054 if (rc == 0) {
6055 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
6056 return 0;
6057 }
6058
6059 /* Try initialize */
6060 image = (__le32 *)priv->ucode_init.v_addr;
6061 len = priv->ucode_init.len;
bb8c093b 6062 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6063 if (rc == 0) {
6064 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
6065 return 0;
6066 }
6067
6068 /* Try runtime/protocol */
6069 image = (__le32 *)priv->ucode_code.v_addr;
6070 len = priv->ucode_code.len;
bb8c093b 6071 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6072 if (rc == 0) {
6073 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
6074 return 0;
6075 }
6076
6077 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
6078
9fbab516
BC
6079 	/* Since nothing seems to match, show the first several data entries in
6080 * instruction SRAM, so maybe visual inspection will give a clue.
6081 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
6082 image = (__le32 *)priv->ucode_boot.v_addr;
6083 len = priv->ucode_boot.len;
bb8c093b 6084 rc = iwl4965_verify_inst_full(priv, image, len);
b481de9c
ZY
6085
6086 return rc;
6087}
6088
6089
6090/* check contents of special bootstrap uCode SRAM */
bb8c093b 6091static int iwl4965_verify_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6092{
6093 __le32 *image = priv->ucode_boot.v_addr;
6094 u32 len = priv->ucode_boot.len;
6095 u32 reg;
6096 u32 val;
6097
6098 IWL_DEBUG_INFO("Begin verify bsm\n");
6099
6100 /* verify BSM SRAM contents */
bb8c093b 6101 val = iwl4965_read_prph(priv, BSM_WR_DWCOUNT_REG);
b481de9c
ZY
6102 for (reg = BSM_SRAM_LOWER_BOUND;
6103 reg < BSM_SRAM_LOWER_BOUND + len;
6104 reg += sizeof(u32), image ++) {
bb8c093b 6105 val = iwl4965_read_prph(priv, reg);
b481de9c
ZY
6106 if (val != le32_to_cpu(*image)) {
6107 IWL_ERROR("BSM uCode verification failed at "
6108 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
6109 BSM_SRAM_LOWER_BOUND,
6110 reg - BSM_SRAM_LOWER_BOUND, len,
6111 val, le32_to_cpu(*image));
6112 return -EIO;
6113 }
6114 }
6115
6116 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
6117
6118 return 0;
6119}
6120
6121/**
bb8c093b 6122 * iwl4965_load_bsm - Load bootstrap instructions
b481de9c
ZY
6123 *
6124 * BSM operation:
6125 *
6126 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
6127 * in special SRAM that does not power down during RFKILL. When powering back
6128 * up after power-saving sleeps (or during initial uCode load), the BSM loads
6129 * the bootstrap program into the on-board processor, and starts it.
6130 *
6131 * The bootstrap program loads (via DMA) instructions and data for a new
6132 * program from host DRAM locations indicated by the host driver in the
6133 * BSM_DRAM_* registers. Once the new program is loaded, it starts
6134 * automatically.
6135 *
6136 * When initializing the NIC, the host driver points the BSM to the
6137 * "initialize" uCode image. This uCode sets up some internal data, then
6138 * notifies host via "initialize alive" that it is complete.
6139 *
6140 * The host then replaces the BSM_DRAM_* pointer values to point to the
6141 * normal runtime uCode instructions and a backup uCode data cache buffer
6142 * (filled initially with starting data values for the on-board processor),
6143 * then triggers the "initialize" uCode to load and launch the runtime uCode,
6144 * which begins normal operation.
6145 *
6146 * When doing a power-save shutdown, runtime uCode saves data SRAM into
6147 * the backup data cache in DRAM before SRAM is powered down.
6148 *
6149 * When powering back up, the BSM loads the bootstrap program. This reloads
6150 * the runtime uCode instructions and the backup data cache into SRAM,
6151 * and re-launches the runtime uCode from where it left off.
6152 */
bb8c093b 6153static int iwl4965_load_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6154{
6155 __le32 *image = priv->ucode_boot.v_addr;
6156 u32 len = priv->ucode_boot.len;
6157 dma_addr_t pinst;
6158 dma_addr_t pdata;
6159 u32 inst_len;
6160 u32 data_len;
6161 int rc;
6162 int i;
6163 u32 done;
6164 u32 reg_offset;
6165
6166 IWL_DEBUG_INFO("Begin load bsm\n");
6167
6168 /* make sure bootstrap program is no larger than BSM's SRAM size */
6169 if (len > IWL_MAX_BSM_SIZE)
6170 return -EINVAL;
6171
6172 /* Tell bootstrap uCode where to find the "Initialize" uCode
9fbab516 6173 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
bb8c093b 6174 * NOTE: iwl4965_initialize_alive_start() will replace these values,
b481de9c
ZY
6175 * after the "initialize" uCode has run, to point to
6176 * runtime/protocol instructions and backup data cache. */
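	/*
	 * Illustrative note on the ">> 4" below: assuming the DMA buffers
	 * are at least 16-byte aligned (bits 3:0 zero), the shift packs
	 * address bits 35:4 into a 32-bit value, e.g. a hypothetical bus
	 * address above 4 GB such as 0x320000000 becomes 0x32000000, which
	 * fits the BSM_DRAM_*_PTR_REG registers written below.
	 */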
6177 pinst = priv->ucode_init.p_addr >> 4;
6178 pdata = priv->ucode_init_data.p_addr >> 4;
6179 inst_len = priv->ucode_init.len;
6180 data_len = priv->ucode_init_data.len;
6181
bb8c093b 6182 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6183 if (rc)
6184 return rc;
6185
bb8c093b
CH
6186 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6187 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6188 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
6189 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
b481de9c
ZY
6190
6191 /* Fill BSM memory with bootstrap instructions */
6192 for (reg_offset = BSM_SRAM_LOWER_BOUND;
6193 reg_offset < BSM_SRAM_LOWER_BOUND + len;
6194 reg_offset += sizeof(u32), image++)
bb8c093b 6195 _iwl4965_write_prph(priv, reg_offset,
b481de9c
ZY
6196 le32_to_cpu(*image));
6197
bb8c093b 6198 rc = iwl4965_verify_bsm(priv);
b481de9c 6199 if (rc) {
bb8c093b 6200 iwl4965_release_nic_access(priv);
b481de9c
ZY
6201 return rc;
6202 }
6203
6204 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
bb8c093b
CH
6205 iwl4965_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
6206 iwl4965_write_prph(priv, BSM_WR_MEM_DST_REG,
b481de9c 6207 RTC_INST_LOWER_BOUND);
bb8c093b 6208 iwl4965_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
b481de9c
ZY
6209
6210 /* Load bootstrap code into instruction SRAM now,
6211 * to prepare to load "initialize" uCode */
bb8c093b 6212 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6213 BSM_WR_CTRL_REG_BIT_START);
6214
6215 /* Wait for load of bootstrap uCode to finish */
6216 for (i = 0; i < 100; i++) {
bb8c093b 6217 done = iwl4965_read_prph(priv, BSM_WR_CTRL_REG);
b481de9c
ZY
6218 if (!(done & BSM_WR_CTRL_REG_BIT_START))
6219 break;
6220 udelay(10);
6221 }
6222 if (i < 100)
6223 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
6224 else {
6225 IWL_ERROR("BSM write did not complete!\n");
6226 return -EIO;
6227 }
6228
6229 /* Enable future boot loads whenever power management unit triggers it
6230 * (e.g. when powering back up after power-save shutdown) */
bb8c093b 6231 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6232 BSM_WR_CTRL_REG_BIT_START_EN);
6233
bb8c093b 6234 iwl4965_release_nic_access(priv);
b481de9c
ZY
6235
6236 return 0;
6237}
6238
bb8c093b 6239static void iwl4965_nic_start(struct iwl4965_priv *priv)
b481de9c
ZY
6240{
6241 /* Remove all resets to allow NIC to operate */
bb8c093b 6242 iwl4965_write32(priv, CSR_RESET, 0);
b481de9c
ZY
6243}
6244
90e759d1
TW
6245static int iwl4965_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
6246{
6247 desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
6248 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
6249}
6250
b481de9c 6251/**
bb8c093b 6252 * iwl4965_read_ucode - Read uCode images from disk file.
b481de9c
ZY
6253 *
6254 * Copy into buffers for card to fetch via bus-mastering
6255 */
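/*
 * On-disk layout assumed by this function, restated for readability and
 * derived purely from the little-endian header reads and the
 * &ucode->data[...] offsets used below (not from a separate spec): a small
 * header carrying ver, inst_size, data_size, init_size, init_data_size and
 * boot_size, followed by the five images packed back to back in this
 * order: runtime instructions, runtime data, init instructions, init data,
 * bootstrap instructions, each with the size given in the header.
 */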
bb8c093b 6256static int iwl4965_read_ucode(struct iwl4965_priv *priv)
b481de9c 6257{
bb8c093b 6258 struct iwl4965_ucode *ucode;
90e759d1 6259 int ret;
b481de9c
ZY
6260 const struct firmware *ucode_raw;
6261 const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode";
6262 u8 *src;
6263 size_t len;
6264 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
6265
6266 /* Ask kernel firmware_class module to get the boot firmware off disk.
6267 * request_firmware() is synchronous, file is in memory on return. */
90e759d1
TW
6268 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
6269 if (ret < 0) {
6270 IWL_ERROR("%s firmware file req failed: Reason %d\n",
6271 name, ret);
b481de9c
ZY
6272 goto error;
6273 }
6274
6275 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
6276 name, ucode_raw->size);
6277
6278 /* Make sure that we got at least our header! */
6279 if (ucode_raw->size < sizeof(*ucode)) {
6280 IWL_ERROR("File size way too small!\n");
90e759d1 6281 ret = -EINVAL;
b481de9c
ZY
6282 goto err_release;
6283 }
6284
6285 /* Data from ucode file: header followed by uCode images */
6286 ucode = (void *)ucode_raw->data;
6287
6288 ver = le32_to_cpu(ucode->ver);
6289 inst_size = le32_to_cpu(ucode->inst_size);
6290 data_size = le32_to_cpu(ucode->data_size);
6291 init_size = le32_to_cpu(ucode->init_size);
6292 init_data_size = le32_to_cpu(ucode->init_data_size);
6293 boot_size = le32_to_cpu(ucode->boot_size);
6294
6295 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
6296 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
6297 inst_size);
6298 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
6299 data_size);
6300 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
6301 init_size);
6302 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
6303 init_data_size);
6304 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
6305 boot_size);
6306
6307 /* Verify size of file vs. image size info in file's header */
6308 if (ucode_raw->size < sizeof(*ucode) +
6309 inst_size + data_size + init_size +
6310 init_data_size + boot_size) {
6311
6312 IWL_DEBUG_INFO("uCode file size %d too small\n",
6313 (int)ucode_raw->size);
90e759d1 6314 ret = -EINVAL;
b481de9c
ZY
6315 goto err_release;
6316 }
6317
6318 /* Verify that uCode images will fit in card's SRAM */
6319 if (inst_size > IWL_MAX_INST_SIZE) {
90e759d1
TW
6320 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
6321 inst_size);
6322 ret = -EINVAL;
b481de9c
ZY
6323 goto err_release;
6324 }
6325
6326 if (data_size > IWL_MAX_DATA_SIZE) {
90e759d1
TW
6327 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
6328 data_size);
6329 ret = -EINVAL;
b481de9c
ZY
6330 goto err_release;
6331 }
6332 if (init_size > IWL_MAX_INST_SIZE) {
6333 IWL_DEBUG_INFO
90e759d1
TW
6334 ("uCode init instr len %d too large to fit in\n",
6335 init_size);
6336 ret = -EINVAL;
b481de9c
ZY
6337 goto err_release;
6338 }
6339 if (init_data_size > IWL_MAX_DATA_SIZE) {
6340 IWL_DEBUG_INFO
90e759d1
TW
6341 ("uCode init data len %d too large to fit in\n",
6342 init_data_size);
6343 ret = -EINVAL;
b481de9c
ZY
6344 goto err_release;
6345 }
6346 if (boot_size > IWL_MAX_BSM_SIZE) {
6347 IWL_DEBUG_INFO
90e759d1
TW
6348 ("uCode boot instr len %d too large to fit in\n",
6349 boot_size);
6350 ret = -EINVAL;
b481de9c
ZY
6351 goto err_release;
6352 }
6353
6354 /* Allocate ucode buffers for card's bus-master loading ... */
6355
6356 /* Runtime instructions and 2 copies of data:
6357 * 1) unmodified from disk
6358 * 2) backup cache for save/restore during power-downs */
6359 priv->ucode_code.len = inst_size;
90e759d1 6360 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
6361
6362 priv->ucode_data.len = data_size;
90e759d1 6363 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
6364
6365 priv->ucode_data_backup.len = data_size;
90e759d1 6366 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c
ZY
6367
6368 /* Initialization instructions and data */
90e759d1
TW
6369 if (init_size && init_data_size) {
6370 priv->ucode_init.len = init_size;
6371 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
6372
6373 priv->ucode_init_data.len = init_data_size;
6374 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
6375
6376 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
6377 goto err_pci_alloc;
6378 }
b481de9c
ZY
6379
6380 /* Bootstrap (instructions only, no data) */
90e759d1
TW
6381 if (boot_size) {
6382 priv->ucode_boot.len = boot_size;
6383 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 6384
90e759d1
TW
6385 if (!priv->ucode_boot.v_addr)
6386 goto err_pci_alloc;
6387 }
b481de9c
ZY
6388
6389 /* Copy images into buffers for card's bus-master reads ... */
6390
6391 /* Runtime instructions (first block of data in file) */
6392 src = &ucode->data[0];
6393 len = priv->ucode_code.len;
90e759d1 6394 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
6395 memcpy(priv->ucode_code.v_addr, src, len);
6396 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
6397 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
6398
6399 /* Runtime data (2nd block)
bb8c093b 6400 * NOTE: Copy into backup buffer will be done in iwl4965_up() */
b481de9c
ZY
6401 src = &ucode->data[inst_size];
6402 len = priv->ucode_data.len;
90e759d1 6403 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
6404 memcpy(priv->ucode_data.v_addr, src, len);
6405 memcpy(priv->ucode_data_backup.v_addr, src, len);
6406
6407 /* Initialization instructions (3rd block) */
6408 if (init_size) {
6409 src = &ucode->data[inst_size + data_size];
6410 len = priv->ucode_init.len;
90e759d1
TW
6411 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
6412 len);
b481de9c
ZY
6413 memcpy(priv->ucode_init.v_addr, src, len);
6414 }
6415
6416 /* Initialization data (4th block) */
6417 if (init_data_size) {
6418 src = &ucode->data[inst_size + data_size + init_size];
6419 len = priv->ucode_init_data.len;
90e759d1
TW
6420 IWL_DEBUG_INFO("Copying (but not loading) init data len %Zd\n",
6421 len);
b481de9c
ZY
6422 memcpy(priv->ucode_init_data.v_addr, src, len);
6423 }
6424
6425 /* Bootstrap instructions (5th block) */
6426 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
6427 len = priv->ucode_boot.len;
90e759d1 6428 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %Zd\n", len);
b481de9c
ZY
6429 memcpy(priv->ucode_boot.v_addr, src, len);
6430
6431 	/* We have our copies now, allow the OS to release its copy */
6432 release_firmware(ucode_raw);
6433 return 0;
6434
6435 err_pci_alloc:
6436 IWL_ERROR("failed to allocate pci memory\n");
90e759d1 6437 ret = -ENOMEM;
bb8c093b 6438 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
6439
6440 err_release:
6441 release_firmware(ucode_raw);
6442
6443 error:
90e759d1 6444 return ret;
b481de9c
ZY
6445}
6446
6447
6448/**
bb8c093b 6449 * iwl4965_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
6450 *
6451 * Tell initialization uCode where to find runtime uCode.
6452 *
6453 * BSM registers initially contain pointers to initialization uCode.
6454 * We need to replace them to load runtime uCode inst and data,
6455 * and to save runtime data when powering down.
6456 */
bb8c093b 6457static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv)
b481de9c
ZY
6458{
6459 dma_addr_t pinst;
6460 dma_addr_t pdata;
6461 int rc = 0;
6462 unsigned long flags;
6463
6464 /* bits 35:4 for 4965 */
6465 pinst = priv->ucode_code.p_addr >> 4;
6466 pdata = priv->ucode_data_backup.p_addr >> 4;
6467
6468 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 6469 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6470 if (rc) {
6471 spin_unlock_irqrestore(&priv->lock, flags);
6472 return rc;
6473 }
6474
6475 /* Tell bootstrap uCode where to find image to load */
bb8c093b
CH
6476 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6477 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6478 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
6479 priv->ucode_data.len);
6480
6481 /* Inst bytecount must be last to set up, bit 31 signals uCode
6482 * that all new ptr/size info is in place */
bb8c093b 6483 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
6484 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6485
bb8c093b 6486 iwl4965_release_nic_access(priv);
b481de9c
ZY
6487
6488 spin_unlock_irqrestore(&priv->lock, flags);
6489
6490 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6491
6492 return rc;
6493}
6494
6495/**
bb8c093b 6496 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
6497 *
6498 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6499 *
6500 * The 4965 "initialize" ALIVE reply contains calibration data for:
6501 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6502 * (3945 does not contain this data).
6503 *
6504 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6505 */
bb8c093b 6506static void iwl4965_init_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6507{
6508 /* Check alive response for "valid" sign from uCode */
6509 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6510 /* We had an error bringing up the hardware, so take it
6511 * all the way back down so we can try again */
6512 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6513 goto restart;
6514 }
6515
6516 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6517 * This is a paranoid check, because we would not have gotten the
6518 * "initialize" alive if code weren't properly loaded. */
bb8c093b 6519 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6520 /* Runtime instruction load was bad;
6521 * take it all the way back down so we can try again */
6522 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6523 goto restart;
6524 }
6525
6526 /* Calculate temperature */
6527 priv->temperature = iwl4965_get_temperature(priv);
6528
6529 /* Send pointers to protocol/runtime uCode image ... init code will
6530 * load and launch runtime uCode, which will send us another "Alive"
6531 * notification. */
6532 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 6533 if (iwl4965_set_ucode_ptrs(priv)) {
b481de9c
ZY
6534 /* Runtime instruction load won't happen;
6535 * take it all the way back down so we can try again */
6536 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6537 goto restart;
6538 }
6539 return;
6540
6541 restart:
6542 queue_work(priv->workqueue, &priv->restart);
6543}
6544
6545
6546/**
bb8c093b 6547 * iwl4965_alive_start - called after REPLY_ALIVE notification received
b481de9c 6548 * from protocol/runtime uCode (initialization uCode's
bb8c093b 6549 * Alive gets handled by iwl4965_init_alive_start()).
b481de9c 6550 */
bb8c093b 6551static void iwl4965_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6552{
6553 int rc = 0;
6554
6555 IWL_DEBUG_INFO("Runtime Alive received.\n");
6556
6557 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
6558 /* We had an error bringing up the hardware, so take it
6559 * all the way back down so we can try again */
6560 IWL_DEBUG_INFO("Alive failed.\n");
6561 goto restart;
6562 }
6563
6564 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
6565 * This is a paranoid check, because we would not have gotten the
6566 * "runtime" alive if code weren't properly loaded. */
bb8c093b 6567 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6568 /* Runtime instruction load was bad;
6569 * take it all the way back down so we can try again */
6570 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
6571 goto restart;
6572 }
6573
bb8c093b 6574 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6575
6576 rc = iwl4965_alive_notify(priv);
6577 if (rc) {
6578 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
6579 rc);
6580 goto restart;
6581 }
6582
9fbab516 6583 /* After the ALIVE response, we can send host commands to 4965 uCode */
b481de9c
ZY
6584 set_bit(STATUS_ALIVE, &priv->status);
6585
6586 /* Clear out the uCode error bit if it is set */
6587 clear_bit(STATUS_FW_ERROR, &priv->status);
6588
bb8c093b 6589 rc = iwl4965_init_channel_map(priv);
b481de9c
ZY
6590 if (rc) {
6591 IWL_ERROR("initializing regulatory failed: %d\n", rc);
6592 return;
6593 }
6594
bb8c093b 6595 iwl4965_init_geos(priv);
b481de9c 6596
bb8c093b 6597 if (iwl4965_is_rfkill(priv))
b481de9c
ZY
6598 return;
6599
6600 if (!priv->mac80211_registered) {
6601 /* Unlock so any user space entry points can call back into
6602 * the driver without a deadlock... */
6603 mutex_unlock(&priv->mutex);
bb8c093b 6604 iwl4965_rate_control_register(priv->hw);
b481de9c
ZY
6605 rc = ieee80211_register_hw(priv->hw);
6606 priv->hw->conf.beacon_int = 100;
6607 mutex_lock(&priv->mutex);
6608
6609 if (rc) {
bb8c093b 6610 iwl4965_rate_control_unregister(priv->hw);
b481de9c
ZY
6611 IWL_ERROR("Failed to register network "
6612 "device (error %d)\n", rc);
6613 return;
6614 }
6615
6616 priv->mac80211_registered = 1;
6617
bb8c093b 6618 iwl4965_reset_channel_flag(priv);
b481de9c
ZY
6619 } else
6620 ieee80211_start_queues(priv->hw);
6621
6622 priv->active_rate = priv->rates_mask;
6623 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
6624
bb8c093b 6625 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 6626
bb8c093b
CH
6627 if (iwl4965_is_associated(priv)) {
6628 struct iwl4965_rxon_cmd *active_rxon =
6629 (struct iwl4965_rxon_cmd *)(&priv->active_rxon);
b481de9c
ZY
6630
6631 memcpy(&priv->staging_rxon, &priv->active_rxon,
6632 sizeof(priv->staging_rxon));
6633 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6634 } else {
6635 /* Initialize our rx_config data */
bb8c093b 6636 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
6637 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
6638 }
6639
9fbab516 6640 /* Configure Bluetooth device coexistence support */
bb8c093b 6641 iwl4965_send_bt_config(priv);
b481de9c
ZY
6642
6643 /* Configure the adapter for unassociated operation */
bb8c093b 6644 iwl4965_commit_rxon(priv);
b481de9c
ZY
6645
6646 /* At this point, the NIC is initialized and operational */
6647 priv->notif_missed_beacons = 0;
6648 set_bit(STATUS_READY, &priv->status);
6649
6650 iwl4965_rf_kill_ct_config(priv);
6651 IWL_DEBUG_INFO("ALIVE processing complete.\n");
6652
6653 if (priv->error_recovering)
bb8c093b 6654 iwl4965_error_recovery(priv);
b481de9c
ZY
6655
6656 return;
6657
6658 restart:
6659 queue_work(priv->workqueue, &priv->restart);
6660}
6661
bb8c093b 6662static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv);
b481de9c 6663
bb8c093b 6664static void __iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6665{
6666 unsigned long flags;
6667 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
6668 struct ieee80211_conf *conf = NULL;
6669
6670 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
6671
6672 conf = ieee80211_get_hw_conf(priv->hw);
6673
6674 if (!exit_pending)
6675 set_bit(STATUS_EXIT_PENDING, &priv->status);
6676
bb8c093b 6677 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6678
6679 /* Unblock any waiting calls */
6680 wake_up_interruptible_all(&priv->wait_command_queue);
6681
b481de9c
ZY
6682 /* Wipe out the EXIT_PENDING status bit if we are not actually
6683 * exiting the module */
6684 if (!exit_pending)
6685 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6686
6687 /* stop and reset the on-board processor */
bb8c093b 6688 iwl4965_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
6689
6690 /* tell the device to stop sending interrupts */
bb8c093b 6691 iwl4965_disable_interrupts(priv);
b481de9c
ZY
6692
6693 if (priv->mac80211_registered)
6694 ieee80211_stop_queues(priv->hw);
6695
bb8c093b 6696 /* If we have not previously called iwl4965_init() then
b481de9c 6697 * clear all bits but the RF Kill and SUSPEND bits and return */
bb8c093b 6698 if (!iwl4965_is_init(priv)) {
b481de9c
ZY
6699 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6700 STATUS_RF_KILL_HW |
6701 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6702 STATUS_RF_KILL_SW |
6703 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6704 STATUS_IN_SUSPEND;
6705 goto exit;
6706 }
6707
6708 /* ...otherwise clear out all the status bits but the RF Kill and
6709 * SUSPEND bits and continue taking the NIC down. */
6710 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6711 STATUS_RF_KILL_HW |
6712 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6713 STATUS_RF_KILL_SW |
6714 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6715 STATUS_IN_SUSPEND |
6716 test_bit(STATUS_FW_ERROR, &priv->status) <<
6717 STATUS_FW_ERROR;
6718
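/*
 * Editor's note (illustrative, not part of the original source): test_bit()
 * returns 0 or 1, so each term above rebuilds exactly one flag at its own
 * bit position and every other bit of priv->status is dropped.  If, say,
 * only STATUS_RF_KILL_HW and STATUS_FW_ERROR were set, the expression
 * reduces to (1 << STATUS_RF_KILL_HW) | (1 << STATUS_FW_ERROR).
 */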
6719 spin_lock_irqsave(&priv->lock, flags);
9fbab516
BC
6720 iwl4965_clear_bit(priv, CSR_GP_CNTRL,
6721 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
6722 spin_unlock_irqrestore(&priv->lock, flags);
6723
bb8c093b
CH
6724 iwl4965_hw_txq_ctx_stop(priv);
6725 iwl4965_hw_rxq_stop(priv);
b481de9c
ZY
6726
6727 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
6728 if (!iwl4965_grab_nic_access(priv)) {
6729 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 6730 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 6731 iwl4965_release_nic_access(priv);
b481de9c
ZY
6732 }
6733 spin_unlock_irqrestore(&priv->lock, flags);
6734
6735 udelay(5);
6736
bb8c093b
CH
6737 iwl4965_hw_nic_stop_master(priv);
6738 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6739 iwl4965_hw_nic_reset(priv);
b481de9c
ZY
6740
6741 exit:
bb8c093b 6742 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
6743
6744 if (priv->ibss_beacon)
6745 dev_kfree_skb(priv->ibss_beacon);
6746 priv->ibss_beacon = NULL;
6747
6748 /* clear out any free frames */
bb8c093b 6749 iwl4965_clear_free_frames(priv);
b481de9c
ZY
6750}
6751
bb8c093b 6752static void iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6753{
6754 mutex_lock(&priv->mutex);
bb8c093b 6755 __iwl4965_down(priv);
b481de9c 6756 mutex_unlock(&priv->mutex);
b24d22b1 6757
bb8c093b 6758 iwl4965_cancel_deferred_work(priv);
b481de9c
ZY
6759}
6760
6761#define MAX_HW_RESTARTS 5
6762
bb8c093b 6763static int __iwl4965_up(struct iwl4965_priv *priv)
b481de9c 6764{
0795af57 6765 DECLARE_MAC_BUF(mac);
b481de9c
ZY
6766 int rc, i;
6767 u32 hw_rf_kill = 0;
6768
6769 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6770 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6771 return -EIO;
6772 }
6773
6774 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6775 IWL_WARNING("Radio disabled by SW RF kill (module "
6776 "parameter)\n");
6777 return 0;
6778 }
6779
a781cf94
RC
6780 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
6781 IWL_ERROR("ucode not available for device bringup\n");
6782 return -EIO;
6783 }
6784
bb8c093b 6785 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 6786
bb8c093b 6787 rc = iwl4965_hw_nic_init(priv);
b481de9c
ZY
6788 if (rc) {
6789 IWL_ERROR("Unable to int nic\n");
6790 return rc;
6791 }
6792
6793 /* make sure rfkill handshake bits are cleared */
bb8c093b
CH
6794 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6795 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
6796 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6797
6798 /* clear (again), then enable host interrupts */
bb8c093b
CH
6799 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
6800 iwl4965_enable_interrupts(priv);
b481de9c
ZY
6801
6802 /* really make sure rfkill handshake bits are cleared */
bb8c093b
CH
6803 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6804 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
6805
6806 /* Copy original ucode data image from disk into backup cache.
6807 * This will be used to initialize the on-board processor's
6808 * data SRAM for a clean start when the runtime program first loads. */
6809 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
6810 priv->ucode_data.len);
6811
6812 /* If platform's RF_KILL switch is set to KILL,
6813 * wait for BIT_INT_RF_KILL interrupt before loading uCode
6814 * and getting things started */
bb8c093b 6815 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
6816 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
6817 hw_rf_kill = 1;
6818
6819 if (test_bit(STATUS_RF_KILL_HW, &priv->status) || hw_rf_kill) {
6820 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
6821 return 0;
6822 }
6823
6824 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6825
bb8c093b 6826 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6827
6828 /* load bootstrap state machine,
6829 * load bootstrap program into processor's memory,
6830 * prepare to load the "initialize" uCode */
bb8c093b 6831 rc = iwl4965_load_bsm(priv);
b481de9c
ZY
6832
6833 if (rc) {
6834 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6835 continue;
6836 }
6837
6838 /* start card; "initialize" will load runtime ucode */
bb8c093b 6839 iwl4965_nic_start(priv);
b481de9c 6840
9fbab516 6841 /* MAC Address location in EEPROM is same for 3945/4965 */
b481de9c 6842 get_eeprom_mac(priv, priv->mac_addr);
0795af57
JP
6843 IWL_DEBUG_INFO("MAC address: %s\n",
6844 print_mac(mac, priv->mac_addr));
b481de9c
ZY
6845
6846 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
6847
6848 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6849
6850 return 0;
6851 }
6852
6853 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 6854 __iwl4965_down(priv);
b481de9c
ZY
6855
6856 /* tried to restart and configure the device for as long as our
6857 * patience could withstand */
6858 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6859 return -EIO;
6860}
6861
6862
6863/*****************************************************************************
6864 *
6865 * Workqueue callbacks
6866 *
6867 *****************************************************************************/
6868
bb8c093b 6869static void iwl4965_bg_init_alive_start(struct work_struct *data)
b481de9c 6870{
bb8c093b
CH
6871 struct iwl4965_priv *priv =
6872 container_of(data, struct iwl4965_priv, init_alive_start.work);
b481de9c
ZY
6873
6874 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6875 return;
6876
6877 mutex_lock(&priv->mutex);
bb8c093b 6878 iwl4965_init_alive_start(priv);
b481de9c
ZY
6879 mutex_unlock(&priv->mutex);
6880}
6881
bb8c093b 6882static void iwl4965_bg_alive_start(struct work_struct *data)
b481de9c 6883{
bb8c093b
CH
6884 struct iwl4965_priv *priv =
6885 container_of(data, struct iwl4965_priv, alive_start.work);
b481de9c
ZY
6886
6887 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6888 return;
6889
6890 mutex_lock(&priv->mutex);
bb8c093b 6891 iwl4965_alive_start(priv);
b481de9c
ZY
6892 mutex_unlock(&priv->mutex);
6893}
6894
bb8c093b 6895static void iwl4965_bg_rf_kill(struct work_struct *work)
b481de9c 6896{
bb8c093b 6897 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, rf_kill);
b481de9c
ZY
6898
6899 wake_up_interruptible(&priv->wait_command_queue);
6900
6901 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6902 return;
6903
6904 mutex_lock(&priv->mutex);
6905
bb8c093b 6906 if (!iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6907 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6908 "HW and/or SW RF Kill no longer active, restarting "
6909 "device\n");
6910 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6911 queue_work(priv->workqueue, &priv->restart);
6912 } else {
6913
6914 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6915 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6916 "disabled by SW switch\n");
6917 else
6918 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6919 "Kill switch must be turned off for "
6920 "wireless networking to work.\n");
6921 }
6922 mutex_unlock(&priv->mutex);
6923}
6924
6925#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6926
bb8c093b 6927static void iwl4965_bg_scan_check(struct work_struct *data)
b481de9c 6928{
bb8c093b
CH
6929 struct iwl4965_priv *priv =
6930 container_of(data, struct iwl4965_priv, scan_check.work);
b481de9c
ZY
6931
6932 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6933 return;
6934
6935 mutex_lock(&priv->mutex);
6936 if (test_bit(STATUS_SCANNING, &priv->status) ||
6937 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6938 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6939 "Scan completion watchdog resetting adapter (%dms)\n",
6940 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
052c4b9f 6941
b481de9c 6942 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 6943 iwl4965_send_scan_abort(priv);
b481de9c
ZY
6944 }
6945 mutex_unlock(&priv->mutex);
6946}
6947
bb8c093b 6948static void iwl4965_bg_request_scan(struct work_struct *data)
b481de9c 6949{
bb8c093b
CH
6950 struct iwl4965_priv *priv =
6951 container_of(data, struct iwl4965_priv, request_scan);
6952 struct iwl4965_host_cmd cmd = {
b481de9c 6953 .id = REPLY_SCAN_CMD,
bb8c093b 6954 .len = sizeof(struct iwl4965_scan_cmd),
b481de9c
ZY
6955 .meta.flags = CMD_SIZE_HUGE,
6956 };
6957 int rc = 0;
bb8c093b 6958 struct iwl4965_scan_cmd *scan;
b481de9c
ZY
6959 struct ieee80211_conf *conf = NULL;
6960 u8 direct_mask;
6961 int phymode;
6962
6963 conf = ieee80211_get_hw_conf(priv->hw);
6964
6965 mutex_lock(&priv->mutex);
6966
bb8c093b 6967 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
6968 IWL_WARNING("request scan called when driver not ready.\n");
6969 goto done;
6970 }
6971
6972 /* Make sure the scan wasn't cancelled before this queued work
6973 * was given the chance to run... */
6974 if (!test_bit(STATUS_SCANNING, &priv->status))
6975 goto done;
6976
6977 /* This should never be called or scheduled if there is currently
6978 * a scan active in the hardware. */
6979 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6980 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
6981 "Ignoring second request.\n");
6982 rc = -EIO;
6983 goto done;
6984 }
6985
6986 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6987 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6988 goto done;
6989 }
6990
6991 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6992 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6993 goto done;
6994 }
6995
bb8c093b 6996 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6997 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6998 goto done;
6999 }
7000
7001 if (!test_bit(STATUS_READY, &priv->status)) {
7002 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
7003 goto done;
7004 }
7005
7006 if (!priv->scan_bands) {
7007 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
7008 goto done;
7009 }
7010
7011 if (!priv->scan) {
bb8c093b 7012 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
b481de9c
ZY
7013 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
7014 if (!priv->scan) {
7015 rc = -ENOMEM;
7016 goto done;
7017 }
7018 }
7019 scan = priv->scan;
bb8c093b 7020 memset(scan, 0, sizeof(struct iwl4965_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
7021
7022 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
7023 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
7024
bb8c093b 7025 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
7026 u16 interval = 0;
7027 u32 extra;
7028 u32 suspend_time = 100;
7029 u32 scan_suspend_time = 100;
7030 unsigned long flags;
7031
7032 IWL_DEBUG_INFO("Scanning while associated...\n");
7033
7034 spin_lock_irqsave(&priv->lock, flags);
7035 interval = priv->beacon_int;
7036 spin_unlock_irqrestore(&priv->lock, flags);
7037
7038 scan->suspend_time = 0;
052c4b9f 7039 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
7040 if (!interval)
7041 interval = suspend_time;
7042
7043 extra = (suspend_time / interval) << 22;
7044 scan_suspend_time = (extra |
7045 ((suspend_time % interval) * 1024));
7046 scan->suspend_time = cpu_to_le32(scan_suspend_time);
7047 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
7048 scan_suspend_time, interval);
7049 }
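/*
 * Editor's note (illustrative, not part of the original source): the
 * suspend_time field above is encoded as (quotient << 22) | (remainder *
 * 1024), where quotient and remainder come from dividing by the beacon
 * interval.  With suspend_time = 100 and a beacon interval of 100 that is
 * (1 << 22) | 0 = 0x00400000; with a beacon interval of 120 it is
 * (0 << 22) | (100 * 1024) = 0x00019000.
 */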
7050
7051 /* We should add the ability for user to lock to PASSIVE ONLY */
7052 if (priv->one_direct_scan) {
7053 IWL_DEBUG_SCAN
7054 ("Kicking off one direct scan for '%s'\n",
bb8c093b 7055 iwl4965_escape_essid(priv->direct_ssid,
b481de9c
ZY
7056 priv->direct_ssid_len));
7057 scan->direct_scan[0].id = WLAN_EID_SSID;
7058 scan->direct_scan[0].len = priv->direct_ssid_len;
7059 memcpy(scan->direct_scan[0].ssid,
7060 priv->direct_ssid, priv->direct_ssid_len);
7061 direct_mask = 1;
bb8c093b 7062 } else if (!iwl4965_is_associated(priv) && priv->essid_len) {
b481de9c
ZY
7063 scan->direct_scan[0].id = WLAN_EID_SSID;
7064 scan->direct_scan[0].len = priv->essid_len;
7065 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
7066 direct_mask = 1;
7067 } else
7068 direct_mask = 0;
7069
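/*
 * Editor's note (illustrative, not part of the original source):
 * direct_scan[0] is laid out like a plain 802.11 SSID information element
 * (element ID, length, then up to IW_ESSID_MAX_SIZE bytes of SSID), which
 * is what lets the uCode splice it into the probe request as described in
 * the comment below.
 */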
7070 /* We don't build a direct scan probe request; the uCode will do
7071 * that based on the direct_mask added to each channel entry */
7072 scan->tx_cmd.len = cpu_to_le16(
bb8c093b 7073 iwl4965_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
b481de9c
ZY
7074 IWL_MAX_SCAN_SIZE - sizeof(scan), 0));
7075 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
7076 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
7077 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
7078
7079 /* flags + rate selection */
7080
7081 scan->tx_cmd.tx_flags |= cpu_to_le32(0x200);
7082
7083 switch (priv->scan_bands) {
7084 case 2:
7085 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
7086 scan->tx_cmd.rate_n_flags =
bb8c093b 7087 iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
b481de9c
ZY
7088 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
7089
7090 scan->good_CRC_th = 0;
7091 phymode = MODE_IEEE80211G;
7092 break;
7093
7094 case 1:
7095 scan->tx_cmd.rate_n_flags =
bb8c093b 7096 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
b481de9c
ZY
7097 RATE_MCS_ANT_B_MSK);
7098 scan->good_CRC_th = IWL_GOOD_CRC_TH;
7099 phymode = MODE_IEEE80211A;
7100 break;
7101
7102 default:
7103 IWL_WARNING("Invalid scan band count\n");
7104 goto done;
7105 }
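/*
 * Editor's note (illustrative, not part of the original source): the two
 * cases above send probe requests at each band's lowest rate (1 Mbps CCK
 * on 2.4 GHz, 6 Mbps OFDM on 5 GHz, where CCK rates do not exist), using
 * antenna B in both cases.
 */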
7106
7107 /* select Rx chains */
7108
7109 /* Force use of chains B and C (0x6) for scan Rx.
7110 * Avoid A (0x1) because of its off-channel reception on A-band.
7111 * MIMO is not used here, but value is required to make uCode happy. */
7112 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
7113 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
7114 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
7115 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
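/*
 * Editor's note (illustrative, not part of the original source): the chain
 * masks are one bit per Rx chain (A = 0x1, B = 0x2, C = 0x4), so 0x7 marks
 * all three chains as valid while the forced-select value of 0x6 limits
 * actual scan Rx to chains B and C, matching the comment above.
 */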
7116
7117 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
7118 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
7119
7120 if (direct_mask)
7121 IWL_DEBUG_SCAN
7122 ("Initiating direct scan for %s.\n",
bb8c093b 7123 iwl4965_escape_essid(priv->essid, priv->essid_len));
b481de9c
ZY
7124 else
7125 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
7126
7127 scan->channel_count =
bb8c093b 7128 iwl4965_get_channels_for_scan(
b481de9c
ZY
7129 priv, phymode, 1, /* active */
7130 direct_mask,
7131 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
7132
7133 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 7134 scan->channel_count * sizeof(struct iwl4965_scan_channel);
b481de9c
ZY
7135 cmd.data = scan;
7136 scan->len = cpu_to_le16(cmd.len);
7137
7138 set_bit(STATUS_SCAN_HW, &priv->status);
bb8c093b 7139 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
7140 if (rc)
7141 goto done;
7142
7143 queue_delayed_work(priv->workqueue, &priv->scan_check,
7144 IWL_SCAN_CHECK_WATCHDOG);
7145
7146 mutex_unlock(&priv->mutex);
7147 return;
7148
7149 done:
01ebd063 7150 /* inform mac80211 scan aborted */
b481de9c
ZY
7151 queue_work(priv->workqueue, &priv->scan_completed);
7152 mutex_unlock(&priv->mutex);
7153}
7154
bb8c093b 7155static void iwl4965_bg_up(struct work_struct *data)
b481de9c 7156{
bb8c093b 7157 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, up);
b481de9c
ZY
7158
7159 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7160 return;
7161
7162 mutex_lock(&priv->mutex);
bb8c093b 7163 __iwl4965_up(priv);
b481de9c
ZY
7164 mutex_unlock(&priv->mutex);
7165}
7166
bb8c093b 7167static void iwl4965_bg_restart(struct work_struct *data)
b481de9c 7168{
bb8c093b 7169 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, restart);
b481de9c
ZY
7170
7171 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7172 return;
7173
bb8c093b 7174 iwl4965_down(priv);
b481de9c
ZY
7175 queue_work(priv->workqueue, &priv->up);
7176}
7177
bb8c093b 7178static void iwl4965_bg_rx_replenish(struct work_struct *data)
b481de9c 7179{
bb8c093b
CH
7180 struct iwl4965_priv *priv =
7181 container_of(data, struct iwl4965_priv, rx_replenish);
b481de9c
ZY
7182
7183 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7184 return;
7185
7186 mutex_lock(&priv->mutex);
bb8c093b 7187 iwl4965_rx_replenish(priv);
b481de9c
ZY
7188 mutex_unlock(&priv->mutex);
7189}
7190
7878a5a4
MA
7191#define IWL_DELAY_NEXT_SCAN (HZ*2)
7192
bb8c093b 7193static void iwl4965_bg_post_associate(struct work_struct *data)
b481de9c 7194{
bb8c093b 7195 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv,
b481de9c
ZY
7196 post_associate.work);
7197
7198 int rc = 0;
7199 struct ieee80211_conf *conf = NULL;
0795af57 7200 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7201
7202 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7203 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
7204 return;
7205 }
7206
0795af57
JP
7207 IWL_DEBUG_ASSOC("Associated as %d to: %s\n",
7208 priv->assoc_id,
7209 print_mac(mac, priv->active_rxon.bssid_addr));
b481de9c
ZY
7210
7211
7212 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7213 return;
7214
7215 mutex_lock(&priv->mutex);
7216
32bfd35d 7217 if (!priv->vif || !priv->is_open) {
948c171c
MA
7218 mutex_unlock(&priv->mutex);
7219 return;
7220 }
bb8c093b 7221 iwl4965_scan_cancel_timeout(priv, 200);
052c4b9f 7222
b481de9c
ZY
7223 conf = ieee80211_get_hw_conf(priv->hw);
7224
7225 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7226 iwl4965_commit_rxon(priv);
b481de9c 7227
bb8c093b
CH
7228 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7229 iwl4965_setup_rxon_timing(priv);
7230 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7231 sizeof(priv->rxon_timing), &priv->rxon_timing);
7232 if (rc)
7233 IWL_WARNING("REPLY_RXON_TIMING failed - "
7234 "Attempting to continue.\n");
7235
7236 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7237
c8b0e6e1 7238#ifdef CONFIG_IWL4965_HT
fd105e79
RR
7239 if (priv->current_ht_config.is_ht)
7240 iwl4965_set_rxon_ht(priv, &priv->current_ht_config);
c8b0e6e1 7241#endif /* CONFIG_IWL4965_HT*/
b481de9c
ZY
7242 iwl4965_set_rxon_chain(priv);
7243 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7244
7245 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
7246 priv->assoc_id, priv->beacon_int);
7247
7248 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7249 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7250 else
7251 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7252
7253 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7254 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
7255 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
7256 else
7257 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7258
7259 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7260 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7261
7262 }
7263
bb8c093b 7264 iwl4965_commit_rxon(priv);
b481de9c
ZY
7265
7266 switch (priv->iw_mode) {
7267 case IEEE80211_IF_TYPE_STA:
bb8c093b 7268 iwl4965_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
7269 break;
7270
7271 case IEEE80211_IF_TYPE_IBSS:
7272
7273 /* clear out the station table */
bb8c093b 7274 iwl4965_clear_stations_table(priv);
b481de9c 7275
bb8c093b
CH
7276 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
7277 iwl4965_rxon_add_station(priv, priv->bssid, 0);
7278 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
7279 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7280
7281 break;
7282
7283 default:
7284 IWL_ERROR("%s Should not be called in %d mode\n",
7285 __FUNCTION__, priv->iw_mode);
7286 break;
7287 }
7288
bb8c093b 7289 iwl4965_sequence_reset(priv);
b481de9c 7290
c8b0e6e1 7291#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c
ZY
7292 /* Enable Rx differential gain and sensitivity calibrations */
7293 iwl4965_chain_noise_reset(priv);
7294 priv->start_calib = 1;
c8b0e6e1 7295#endif /* CONFIG_IWL4965_SENSITIVITY */
b481de9c
ZY
7296
7297 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7298 priv->assoc_station_added = 1;
7299
c8b0e6e1 7300#ifdef CONFIG_IWL4965_QOS
bb8c093b 7301 iwl4965_activate_qos(priv, 0);
c8b0e6e1 7302#endif /* CONFIG_IWL4965_QOS */
7878a5a4
MA
7303 /* we have just associated; don't start a scan too early */
7304 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
b481de9c
ZY
7305 mutex_unlock(&priv->mutex);
7306}
7307
bb8c093b 7308static void iwl4965_bg_abort_scan(struct work_struct *work)
b481de9c 7309{
bb8c093b 7310 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, abort_scan);
b481de9c 7311
bb8c093b 7312 if (!iwl4965_is_ready(priv))
b481de9c
ZY
7313 return;
7314
7315 mutex_lock(&priv->mutex);
7316
7317 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 7318 iwl4965_send_scan_abort(priv);
b481de9c
ZY
7319
7320 mutex_unlock(&priv->mutex);
7321}
7322
76bb77e0
ZY
7323static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
7324
bb8c093b 7325static void iwl4965_bg_scan_completed(struct work_struct *work)
b481de9c 7326{
bb8c093b
CH
7327 struct iwl4965_priv *priv =
7328 container_of(work, struct iwl4965_priv, scan_completed);
b481de9c
ZY
7329
7330 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
7331
7332 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7333 return;
7334
a0646470
ZY
7335 if (test_bit(STATUS_CONF_PENDING, &priv->status))
7336 iwl4965_mac_config(priv->hw, ieee80211_get_hw_conf(priv->hw));
76bb77e0 7337
b481de9c
ZY
7338 ieee80211_scan_completed(priv->hw);
7339
7340 /* Since setting the TXPOWER may have been deferred while
7341 * performing the scan, fire one off */
7342 mutex_lock(&priv->mutex);
bb8c093b 7343 iwl4965_hw_reg_send_txpower(priv);
b481de9c
ZY
7344 mutex_unlock(&priv->mutex);
7345}
7346
7347/*****************************************************************************
7348 *
7349 * mac80211 entry point functions
7350 *
7351 *****************************************************************************/
7352
bb8c093b 7353static int iwl4965_mac_start(struct ieee80211_hw *hw)
b481de9c 7354{
bb8c093b 7355 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7356
7357 IWL_DEBUG_MAC80211("enter\n");
7358
7359 /* we should be verifying the device is ready to be opened */
7360 mutex_lock(&priv->mutex);
7361
7362 priv->is_open = 1;
7363
bb8c093b 7364 if (!iwl4965_is_rfkill(priv))
b481de9c
ZY
7365 ieee80211_start_queues(priv->hw);
7366
7367 mutex_unlock(&priv->mutex);
7368 IWL_DEBUG_MAC80211("leave\n");
7369 return 0;
7370}
7371
bb8c093b 7372static void iwl4965_mac_stop(struct ieee80211_hw *hw)
b481de9c 7373{
bb8c093b 7374 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7375
7376 IWL_DEBUG_MAC80211("enter\n");
948c171c
MA
7377
7378
7379 mutex_lock(&priv->mutex);
7380 /* stop mac, cancel any scan request, and clear
7381 * the RXON_FILTER_ASSOC_MSK bit
7382 */
b481de9c 7383 priv->is_open = 0;
fde3571f
MA
7384 if (!iwl4965_is_ready_rf(priv)) {
7385 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7386 mutex_unlock(&priv->mutex);
7387 return;
7388 }
7389
bb8c093b 7390 iwl4965_scan_cancel_timeout(priv, 100);
948c171c
MA
7391 cancel_delayed_work(&priv->post_associate);
7392 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7393 iwl4965_commit_rxon(priv);
948c171c
MA
7394 mutex_unlock(&priv->mutex);
7395
b481de9c 7396 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7397}
7398
bb8c093b 7399static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
7400 struct ieee80211_tx_control *ctl)
7401{
bb8c093b 7402 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7403
7404 IWL_DEBUG_MAC80211("enter\n");
7405
7406 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
7407 IWL_DEBUG_MAC80211("leave - monitor\n");
7408 return -1;
7409 }
7410
7411 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7412 ctl->tx_rate);
7413
bb8c093b 7414 if (iwl4965_tx_skb(priv, skb, ctl))
b481de9c
ZY
7415 dev_kfree_skb_any(skb);
7416
7417 IWL_DEBUG_MAC80211("leave\n");
7418 return 0;
7419}
7420
bb8c093b 7421static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7422 struct ieee80211_if_init_conf *conf)
7423{
bb8c093b 7424 struct iwl4965_priv *priv = hw->priv;
b481de9c 7425 unsigned long flags;
0795af57 7426 DECLARE_MAC_BUF(mac);
b481de9c 7427
32bfd35d 7428 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
b481de9c 7429
32bfd35d
JB
7430 if (priv->vif) {
7431 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
b481de9c
ZY
7432 return 0;
7433 }
7434
7435 spin_lock_irqsave(&priv->lock, flags);
32bfd35d 7436 priv->vif = conf->vif;
b481de9c
ZY
7437
7438 spin_unlock_irqrestore(&priv->lock, flags);
7439
7440 mutex_lock(&priv->mutex);
864792e3
TW
7441
7442 if (conf->mac_addr) {
7443 IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr));
7444 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
7445 }
bb8c093b 7446 iwl4965_set_mode(priv, conf->type);
b481de9c
ZY
7447
7448 IWL_DEBUG_MAC80211("leave\n");
7449 mutex_unlock(&priv->mutex);
7450
7451 return 0;
7452}
7453
7454/**
bb8c093b 7455 * iwl4965_mac_config - mac80211 config callback
b481de9c
ZY
7456 *
7457 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
7458 * be set inappropriately and the driver currently sets the hardware up to
7459 * use it whenever needed.
7460 */
bb8c093b 7461static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
b481de9c 7462{
bb8c093b
CH
7463 struct iwl4965_priv *priv = hw->priv;
7464 const struct iwl4965_channel_info *ch_info;
b481de9c 7465 unsigned long flags;
76bb77e0 7466 int ret = 0;
b481de9c
ZY
7467
7468 mutex_lock(&priv->mutex);
7469 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);
7470
12342c47
ZY
7471 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
7472
bb8c093b 7473 if (!iwl4965_is_ready(priv)) {
b481de9c 7474 IWL_DEBUG_MAC80211("leave - not ready\n");
76bb77e0
ZY
7475 ret = -EIO;
7476 goto out;
b481de9c
ZY
7477 }
7478
bb8c093b 7479 if (unlikely(!iwl4965_param_disable_hw_scan &&
b481de9c 7480 test_bit(STATUS_SCANNING, &priv->status))) {
a0646470
ZY
7481 IWL_DEBUG_MAC80211("leave - scanning\n");
7482 set_bit(STATUS_CONF_PENDING, &priv->status);
b481de9c 7483 mutex_unlock(&priv->mutex);
a0646470 7484 return 0;
b481de9c
ZY
7485 }
7486
7487 spin_lock_irqsave(&priv->lock, flags);
7488
bb8c093b 7489 ch_info = iwl4965_get_channel_info(priv, conf->phymode, conf->channel);
b481de9c
ZY
7490 if (!is_channel_valid(ch_info)) {
7491 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7492 conf->channel, conf->phymode);
7493 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7494 spin_unlock_irqrestore(&priv->lock, flags);
76bb77e0
ZY
7495 ret = -EINVAL;
7496 goto out;
b481de9c
ZY
7497 }
7498
c8b0e6e1 7499#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
7500 /* if we are switching from ht to 2.4, clear flags
7501 * from any ht-related info, since 2.4 does not
7502 * support ht */
7503 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel)
7504#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7505 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
7506#endif
7507 )
7508 priv->staging_rxon.flags = 0;
c8b0e6e1 7509#endif /* CONFIG_IWL4965_HT */
b481de9c 7510
bb8c093b 7511 iwl4965_set_rxon_channel(priv, conf->phymode, conf->channel);
b481de9c 7512
bb8c093b 7513 iwl4965_set_flags_for_phymode(priv, conf->phymode);
b481de9c
ZY
7514
7515 /* The list of supported rates and rate mask can be different
7516 * for each phymode; since the phymode may have changed, reset
7517 * the rate mask to what mac80211 lists */
bb8c093b 7518 iwl4965_set_rate(priv);
b481de9c
ZY
7519
7520 spin_unlock_irqrestore(&priv->lock, flags);
7521
7522#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7523 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 7524 iwl4965_hw_channel_switch(priv, conf->channel);
76bb77e0 7525 goto out;
b481de9c
ZY
7526 }
7527#endif
7528
bb8c093b 7529 iwl4965_radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
7530
7531 if (!conf->radio_enabled) {
7532 IWL_DEBUG_MAC80211("leave - radio disabled\n");
76bb77e0 7533 goto out;
b481de9c
ZY
7534 }
7535
bb8c093b 7536 if (iwl4965_is_rfkill(priv)) {
b481de9c 7537 IWL_DEBUG_MAC80211("leave - RF kill\n");
76bb77e0
ZY
7538 ret = -EIO;
7539 goto out;
b481de9c
ZY
7540 }
7541
bb8c093b 7542 iwl4965_set_rate(priv);
b481de9c
ZY
7543
7544 if (memcmp(&priv->active_rxon,
7545 &priv->staging_rxon, sizeof(priv->staging_rxon)))
bb8c093b 7546 iwl4965_commit_rxon(priv);
b481de9c
ZY
7547 else
7548 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
7549
7550 IWL_DEBUG_MAC80211("leave\n");
7551
7552 mutex_unlock(&priv->mutex);
a0646470
ZY
7553out:
7554 clear_bit(STATUS_CONF_PENDING, &priv->status);
76bb77e0 7555 return ret;
b481de9c
ZY
7556}
7557
bb8c093b 7558static void iwl4965_config_ap(struct iwl4965_priv *priv)
b481de9c
ZY
7559{
7560 int rc = 0;
7561
7562 if (priv->status & STATUS_EXIT_PENDING)
7563 return;
7564
7565 /* The following should be done only at AP bring up */
7566 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7567
7568 /* RXON - unassoc (to set timing command) */
7569 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7570 iwl4965_commit_rxon(priv);
b481de9c
ZY
7571
7572 /* RXON Timing */
bb8c093b
CH
7573 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7574 iwl4965_setup_rxon_timing(priv);
7575 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7576 sizeof(priv->rxon_timing), &priv->rxon_timing);
7577 if (rc)
7578 IWL_WARNING("REPLY_RXON_TIMING failed - "
7579 "Attempting to continue.\n");
7580
7581 iwl4965_set_rxon_chain(priv);
7582
7583 /* FIXME: what should be the assoc_id for AP? */
7584 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7585 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7586 priv->staging_rxon.flags |=
7587 RXON_FLG_SHORT_PREAMBLE_MSK;
7588 else
7589 priv->staging_rxon.flags &=
7590 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7591
7592 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7593 if (priv->assoc_capability &
7594 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7595 priv->staging_rxon.flags |=
7596 RXON_FLG_SHORT_SLOT_MSK;
7597 else
7598 priv->staging_rxon.flags &=
7599 ~RXON_FLG_SHORT_SLOT_MSK;
7600
7601 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7602 priv->staging_rxon.flags &=
7603 ~RXON_FLG_SHORT_SLOT_MSK;
7604 }
7605 /* restore RXON assoc */
7606 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 7607 iwl4965_commit_rxon(priv);
c8b0e6e1 7608#ifdef CONFIG_IWL4965_QOS
bb8c093b 7609 iwl4965_activate_qos(priv, 1);
b481de9c 7610#endif
bb8c093b 7611 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
e1493deb 7612 }
bb8c093b 7613 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7614
7615 /* FIXME - we need to add code here to detect a totally new
7616 * configuration, reset the AP, unassoc, rxon timing, assoc,
7617 * clear sta table, add BCAST sta... */
7618}
7619
32bfd35d
JB
7620static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7621 struct ieee80211_vif *vif,
b481de9c
ZY
7622 struct ieee80211_if_conf *conf)
7623{
bb8c093b 7624 struct iwl4965_priv *priv = hw->priv;
0795af57 7625 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7626 unsigned long flags;
7627 int rc;
7628
7629 if (conf == NULL)
7630 return -EIO;
7631
7632 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7633 (!conf->beacon || !conf->ssid_len)) {
7634 IWL_DEBUG_MAC80211
7635 ("Leaving in AP mode because HostAPD is not ready.\n");
7636 return 0;
7637 }
7638
7639 mutex_lock(&priv->mutex);
7640
b481de9c 7641 if (conf->bssid)
0795af57
JP
7642 IWL_DEBUG_MAC80211("bssid: %s\n",
7643 print_mac(mac, conf->bssid));
b481de9c 7644
4150c572
JB
7645/*
7646 * very dubious code was here; the probe filtering flag is never set:
7647 *
b481de9c
ZY
7648 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7649 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572
JB
7650 */
7651 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
b481de9c
ZY
7652 IWL_DEBUG_MAC80211("leave - scanning\n");
7653 mutex_unlock(&priv->mutex);
7654 return 0;
7655 }
7656
32bfd35d
JB
7657 if (priv->vif != vif) {
7658 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
b481de9c
ZY
7659 mutex_unlock(&priv->mutex);
7660 return 0;
7661 }
7662
7663 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7664 if (!conf->bssid) {
7665 conf->bssid = priv->mac_addr;
7666 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
0795af57
JP
7667 IWL_DEBUG_MAC80211("bssid was set to: %s\n",
7668 print_mac(mac, conf->bssid));
b481de9c
ZY
7669 }
7670 if (priv->ibss_beacon)
7671 dev_kfree_skb(priv->ibss_beacon);
7672
7673 priv->ibss_beacon = conf->beacon;
7674 }
7675
fde3571f
MA
7676 if (iwl4965_is_rfkill(priv))
7677 goto done;
7678
b481de9c
ZY
7679 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
7680 !is_multicast_ether_addr(conf->bssid)) {
7681 /* If there is currently a HW scan going on in the background
7682 * then we need to cancel it else the RXON below will fail. */
bb8c093b 7683 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
7684 IWL_WARNING("Aborted scan still in progress "
7685 "after 100ms\n");
7686 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
7687 mutex_unlock(&priv->mutex);
7688 return -EAGAIN;
7689 }
7690 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
7691
7692 /* TODO: Audit driver for usage of these members and see
7693 * if mac80211 deprecates them (priv->bssid looks like it
7694 * shouldn't be there, but I haven't scanned the IBSS code
7695 * to verify) - jpk */
7696 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
7697
7698 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b 7699 iwl4965_config_ap(priv);
b481de9c 7700 else {
bb8c093b 7701 rc = iwl4965_commit_rxon(priv);
b481de9c 7702 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
bb8c093b 7703 iwl4965_rxon_add_station(
b481de9c
ZY
7704 priv, priv->active_rxon.bssid_addr, 1);
7705 }
7706
7707 } else {
bb8c093b 7708 iwl4965_scan_cancel_timeout(priv, 100);
b481de9c 7709 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7710 iwl4965_commit_rxon(priv);
b481de9c
ZY
7711 }
7712
fde3571f 7713 done:
b481de9c
ZY
7714 spin_lock_irqsave(&priv->lock, flags);
7715 if (!conf->ssid_len)
7716 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7717 else
7718 memcpy(priv->essid, conf->ssid, conf->ssid_len);
7719
7720 priv->essid_len = conf->ssid_len;
7721 spin_unlock_irqrestore(&priv->lock, flags);
7722
7723 IWL_DEBUG_MAC80211("leave\n");
7724 mutex_unlock(&priv->mutex);
7725
7726 return 0;
7727}
7728
bb8c093b 7729static void iwl4965_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
7730 unsigned int changed_flags,
7731 unsigned int *total_flags,
7732 int mc_count, struct dev_addr_list *mc_list)
7733{
7734 /*
7735 * XXX: dummy
bb8c093b 7736 * see also iwl4965_connection_init_rx_config
4150c572
JB
7737 */
7738 *total_flags = 0;
7739}
7740
bb8c093b 7741static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7742 struct ieee80211_if_init_conf *conf)
7743{
bb8c093b 7744 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7745
7746 IWL_DEBUG_MAC80211("enter\n");
7747
7748 mutex_lock(&priv->mutex);
948c171c 7749
fde3571f
MA
7750 if (iwl4965_is_ready_rf(priv)) {
7751 iwl4965_scan_cancel_timeout(priv, 100);
7752 cancel_delayed_work(&priv->post_associate);
7753 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7754 iwl4965_commit_rxon(priv);
7755 }
32bfd35d
JB
7756 if (priv->vif == conf->vif) {
7757 priv->vif = NULL;
b481de9c
ZY
7758 memset(priv->bssid, 0, ETH_ALEN);
7759 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7760 priv->essid_len = 0;
7761 }
7762 mutex_unlock(&priv->mutex);
7763
7764 IWL_DEBUG_MAC80211("leave\n");
7765
7766}
bb8c093b 7767static void iwl4965_mac_erp_ie_changed(struct ieee80211_hw *hw,
220173b0
TW
7768 u8 changes, int cts_protection, int preamble)
7769{
bb8c093b 7770 struct iwl4965_priv *priv = hw->priv;
220173b0
TW
7771
7772 if (changes & IEEE80211_ERP_CHANGE_PREAMBLE) {
7773 if (preamble == WLAN_ERP_PREAMBLE_SHORT)
7774 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7775 else
7776 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7777 }
7778
7779 if (changes & IEEE80211_ERP_CHANGE_PROTECTION) {
797a54c6 7780 if (cts_protection && (priv->phymode != MODE_IEEE80211A))
220173b0
TW
7781 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
7782 else
7783 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
7784 }
7785
bb8c093b
CH
7786 if (iwl4965_is_associated(priv))
7787 iwl4965_send_rxon_assoc(priv);
220173b0 7788}
b481de9c 7789
bb8c093b 7790static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
7791{
7792 int rc = 0;
7793 unsigned long flags;
bb8c093b 7794 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7795
7796 IWL_DEBUG_MAC80211("enter\n");
7797
052c4b9f 7798 mutex_lock(&priv->mutex);
b481de9c
ZY
7799 spin_lock_irqsave(&priv->lock, flags);
7800
bb8c093b 7801 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7802 rc = -EIO;
7803 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7804 goto out_unlock;
7805 }
7806
7807 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
7808 rc = -EIO;
7809 IWL_ERROR("ERROR: APs don't scan\n");
7810 goto out_unlock;
7811 }
7812
7878a5a4
MA
7813 /* we don't schedule a scan within the next_scan_jiffies period */
7814 if (priv->next_scan_jiffies &&
7815 time_after(priv->next_scan_jiffies, jiffies)) {
7816 rc = -EAGAIN;
7817 goto out_unlock;
7818 }
b481de9c 7819 /* if we just finished a scan, ask for a delay */
7878a5a4
MA
7820 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
7821 IWL_DELAY_NEXT_SCAN, jiffies)) {
b481de9c
ZY
7822 rc = -EAGAIN;
7823 goto out_unlock;
7824 }
7825 if (len) {
7878a5a4 7826 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
bb8c093b 7827 iwl4965_escape_essid(ssid, len), (int)len);
b481de9c
ZY
7828
7829 priv->one_direct_scan = 1;
7830 priv->direct_ssid_len = (u8)
7831 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
7832 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
948c171c
MA
7833 } else
7834 priv->one_direct_scan = 0;
b481de9c 7835
bb8c093b 7836 rc = iwl4965_scan_initiate(priv);
b481de9c
ZY
7837
7838 IWL_DEBUG_MAC80211("leave\n");
7839
7840out_unlock:
7841 spin_unlock_irqrestore(&priv->lock, flags);
052c4b9f 7842 mutex_unlock(&priv->mutex);
b481de9c
ZY
7843
7844 return rc;
7845}
7846
bb8c093b 7847static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
b481de9c
ZY
7848 const u8 *local_addr, const u8 *addr,
7849 struct ieee80211_key_conf *key)
7850{
bb8c093b 7851 struct iwl4965_priv *priv = hw->priv;
0795af57 7852 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7853 int rc = 0;
7854 u8 sta_id;
7855
7856 IWL_DEBUG_MAC80211("enter\n");
7857
bb8c093b 7858 if (!iwl4965_param_hwcrypto) {
b481de9c
ZY
7859 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7860 return -EOPNOTSUPP;
7861 }
7862
7863 if (is_zero_ether_addr(addr))
7864 /* only support pairwise keys */
7865 return -EOPNOTSUPP;
7866
bb8c093b 7867 sta_id = iwl4965_hw_find_station(priv, addr);
b481de9c 7868 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
7869 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7870 print_mac(mac, addr));
b481de9c
ZY
7871 return -EINVAL;
7872 }
7873
7874 mutex_lock(&priv->mutex);
7875
bb8c093b 7876 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 7877
b481de9c
ZY
7878 switch (cmd) {
7879 case SET_KEY:
bb8c093b 7880 rc = iwl4965_update_sta_key_info(priv, key, sta_id);
b481de9c 7881 if (!rc) {
bb8c093b
CH
7882 iwl4965_set_rxon_hwcrypto(priv, 1);
7883 iwl4965_commit_rxon(priv);
b481de9c
ZY
7884 key->hw_key_idx = sta_id;
7885 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7886 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7887 }
7888 break;
7889 case DISABLE_KEY:
bb8c093b 7890 rc = iwl4965_clear_sta_key_info(priv, sta_id);
b481de9c 7891 if (!rc) {
bb8c093b
CH
7892 iwl4965_set_rxon_hwcrypto(priv, 0);
7893 iwl4965_commit_rxon(priv);
b481de9c
ZY
7894 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7895 }
7896 break;
7897 default:
7898 rc = -EINVAL;
7899 }
7900
7901 IWL_DEBUG_MAC80211("leave\n");
7902 mutex_unlock(&priv->mutex);
7903
7904 return rc;
7905}
7906
bb8c093b 7907static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
b481de9c
ZY
7908 const struct ieee80211_tx_queue_params *params)
7909{
bb8c093b 7910 struct iwl4965_priv *priv = hw->priv;
c8b0e6e1 7911#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7912 unsigned long flags;
7913 int q;
0054b34d 7914#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
7915
7916 IWL_DEBUG_MAC80211("enter\n");
7917
bb8c093b 7918 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7919 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7920 return -EIO;
7921 }
7922
7923 if (queue >= AC_NUM) {
7924 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7925 return 0;
7926 }
7927
c8b0e6e1 7928#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7929 if (!priv->qos_data.qos_enable) {
7930 priv->qos_data.qos_active = 0;
7931 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7932 return 0;
7933 }
7934 q = AC_NUM - 1 - queue;
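/*
 * Editor's note (illustrative, not part of the original source): mac80211
 * hands the driver its highest-priority queue as index 0, while the EDCA
 * parameter table filled in below is evidently indexed the other way
 * round, so with AC_NUM == 4 queue 0 maps to ac[3] and queue 3 to ac[0].
 */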
7935
7936 spin_lock_irqsave(&priv->lock, flags);
7937
7938 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
7939 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
7940 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
7941 priv->qos_data.def_qos_parm.ac[q].edca_txop =
7942 cpu_to_le16((params->burst_time * 100));
7943
7944 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
7945 priv->qos_data.qos_active = 1;
7946
7947 spin_unlock_irqrestore(&priv->lock, flags);
7948
7949 mutex_lock(&priv->mutex);
7950 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b
CH
7951 iwl4965_activate_qos(priv, 1);
7952 else if (priv->assoc_id && iwl4965_is_associated(priv))
7953 iwl4965_activate_qos(priv, 0);
b481de9c
ZY
7954
7955 mutex_unlock(&priv->mutex);
7956
c8b0e6e1 7957#endif /*CONFIG_IWL4965_QOS */
b481de9c
ZY
7958
7959 IWL_DEBUG_MAC80211("leave\n");
7960 return 0;
7961}
7962
bb8c093b 7963static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
7964 struct ieee80211_tx_queue_stats *stats)
7965{
bb8c093b 7966 struct iwl4965_priv *priv = hw->priv;
b481de9c 7967 int i, avail;
bb8c093b
CH
7968 struct iwl4965_tx_queue *txq;
7969 struct iwl4965_queue *q;
b481de9c
ZY
7970 unsigned long flags;
7971
7972 IWL_DEBUG_MAC80211("enter\n");
7973
bb8c093b 7974 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7975 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7976 return -EIO;
7977 }
7978
7979 spin_lock_irqsave(&priv->lock, flags);
7980
7981 for (i = 0; i < AC_NUM; i++) {
7982 txq = &priv->txq[i];
7983 q = &txq->q;
bb8c093b 7984 avail = iwl4965_queue_space(q);
b481de9c
ZY
7985
7986 stats->data[i].len = q->n_window - avail;
7987 stats->data[i].limit = q->n_window - q->high_mark;
7988 stats->data[i].count = q->n_window;
7989
7990 }
7991 spin_unlock_irqrestore(&priv->lock, flags);
7992
7993 IWL_DEBUG_MAC80211("leave\n");
7994
7995 return 0;
7996}
7997
bb8c093b 7998static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
b481de9c
ZY
7999 struct ieee80211_low_level_stats *stats)
8000{
8001 IWL_DEBUG_MAC80211("enter\n");
8002 IWL_DEBUG_MAC80211("leave\n");
8003
8004 return 0;
8005}
8006
bb8c093b 8007static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
b481de9c
ZY
8008{
8009 IWL_DEBUG_MAC80211("enter\n");
8010 IWL_DEBUG_MAC80211("leave\n");
8011
8012 return 0;
8013}
8014
bb8c093b 8015static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 8016{
bb8c093b 8017 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8018 unsigned long flags;
8019
8020 mutex_lock(&priv->mutex);
8021 IWL_DEBUG_MAC80211("enter\n");
8022
8023 priv->lq_mngr.lq_ready = 0;
c8b0e6e1 8024#ifdef CONFIG_IWL4965_HT
b481de9c 8025 spin_lock_irqsave(&priv->lock, flags);
fd105e79 8026 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
b481de9c 8027 spin_unlock_irqrestore(&priv->lock, flags);
c8b0e6e1 8028#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
8029/* if (priv->lq_mngr.agg_ctrl.granted_ba)
8030 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/
8031
bb8c093b 8032 memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl4965_agg_control));
b481de9c
ZY
8033 priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10;
8034 priv->lq_mngr.agg_ctrl.ba_timeout = 5000;
8035 priv->lq_mngr.agg_ctrl.auto_agg = 1;
8036
8037 if (priv->lq_mngr.agg_ctrl.auto_agg)
8038 priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED;
c8b0e6e1
CH
8039#endif /*CONFIG_IWL4965_HT_AGG */
8040#endif /* CONFIG_IWL4965_HT */
b481de9c 8041
c8b0e6e1 8042#ifdef CONFIG_IWL4965_QOS
bb8c093b 8043 iwl4965_reset_qos(priv);
b481de9c
ZY
8044#endif
8045
8046 cancel_delayed_work(&priv->post_associate);
8047
8048 spin_lock_irqsave(&priv->lock, flags);
8049 priv->assoc_id = 0;
8050 priv->assoc_capability = 0;
8051 priv->call_post_assoc_from_beacon = 0;
8052 priv->assoc_station_added = 0;
8053
8054 /* new association; get rid of the ibss beacon skb */
8055 if (priv->ibss_beacon)
8056 dev_kfree_skb(priv->ibss_beacon);
8057
8058 priv->ibss_beacon = NULL;
8059
8060 priv->beacon_int = priv->hw->conf.beacon_int;
8061 priv->timestamp1 = 0;
8062 priv->timestamp0 = 0;
8063 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
8064 priv->beacon_int = 0;
8065
8066 spin_unlock_irqrestore(&priv->lock, flags);
8067
fde3571f
MA
8068 if (!iwl4965_is_ready_rf(priv)) {
8069 IWL_DEBUG_MAC80211("leave - not ready\n");
8070 mutex_unlock(&priv->mutex);
8071 return;
8072 }
8073
052c4b9f 8074 /* we are restarting the association process,
8075 * so clear the RXON_FILTER_ASSOC_MSK bit
8076 */
8077 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
bb8c093b 8078 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 8079 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 8080 iwl4965_commit_rxon(priv);
052c4b9f 8081 }
8082
b481de9c
ZY
8083 /* Per mac80211.h: This is only used in IBSS mode... */
8084 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
052c4b9f 8085
b481de9c
ZY
8086 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
8087 mutex_unlock(&priv->mutex);
8088 return;
8089 }
8090
b481de9c
ZY
8091 priv->only_active_channel = 0;
8092
bb8c093b 8093 iwl4965_set_rate(priv);
b481de9c
ZY
8094
8095 mutex_unlock(&priv->mutex);
8096
8097 IWL_DEBUG_MAC80211("leave\n");
8098
8099}
8100
bb8c093b 8101static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
8102 struct ieee80211_tx_control *control)
8103{
bb8c093b 8104 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8105 unsigned long flags;
8106
8107 mutex_lock(&priv->mutex);
8108 IWL_DEBUG_MAC80211("enter\n");
8109
bb8c093b 8110 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8111 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8112 mutex_unlock(&priv->mutex);
8113 return -EIO;
8114 }
8115
8116 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
8117 IWL_DEBUG_MAC80211("leave - not IBSS\n");
8118 mutex_unlock(&priv->mutex);
8119 return -EIO;
8120 }
8121
8122 spin_lock_irqsave(&priv->lock, flags);
8123
8124 if (priv->ibss_beacon)
8125 dev_kfree_skb(priv->ibss_beacon);
8126
8127 priv->ibss_beacon = skb;
8128
8129 priv->assoc_id = 0;
8130
8131 IWL_DEBUG_MAC80211("leave\n");
8132 spin_unlock_irqrestore(&priv->lock, flags);
8133
c8b0e6e1 8134#ifdef CONFIG_IWL4965_QOS
bb8c093b 8135 iwl4965_reset_qos(priv);
b481de9c
ZY
8136#endif
8137
8138 queue_work(priv->workqueue, &priv->post_associate.work);
8139
8140 mutex_unlock(&priv->mutex);
8141
8142 return 0;
8143}
8144
c8b0e6e1 8145#ifdef CONFIG_IWL4965_HT
b481de9c 8146
fd105e79
RR
8147static void iwl4965_ht_info_fill(struct ieee80211_conf *conf,
8148 struct iwl4965_priv *priv)
b481de9c 8149{
fd105e79
RR
8150 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
8151 struct ieee80211_ht_info *ht_conf = &conf->ht_conf;
8152 struct ieee80211_ht_bss_info *ht_bss_conf = &conf->ht_bss_conf;
b481de9c
ZY
8153
8154 IWL_DEBUG_MAC80211("enter: \n");
8155
fd105e79
RR
8156 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) {
8157 iwl_conf->is_ht = 0;
8158 return;
b481de9c
ZY
8159 }
8160
fd105e79
RR
8161 iwl_conf->is_ht = 1;
8162 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8163
8164 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
8165 iwl_conf->sgf |= 0x1;
8166 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
8167 iwl_conf->sgf |= 0x2;
8168
8169 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
8170 iwl_conf->max_amsdu_size =
8171 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
8172 iwl_conf->supported_chan_width =
8173 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
8174 iwl_conf->tx_mimo_ps_mode =
8175 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8176 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
8177
8178 iwl_conf->control_channel = ht_bss_conf->primary_channel;
8179 iwl_conf->extension_chan_offset =
8180 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
8181 iwl_conf->tx_chan_width =
8182 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
8183 iwl_conf->ht_protection =
8184 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
8185 iwl_conf->non_GF_STA_present =
8186 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
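/*
 * Editor's note (illustrative, not part of the original source): the "!!"
 * idiom above collapses each masked capability bit to exactly 0 or 1
 * before it is stored, and "(cap & IEEE80211_HT_CAP_MIMO_PS) >> 2" shifts
 * the MIMO power-save field down to the low bits so it fits in a u8.
 */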
8187
8188 IWL_DEBUG_MAC80211("control channel %d\n",
8189 iwl_conf->control_channel);
b481de9c 8190 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
8191}
8192
bb8c093b 8193static int iwl4965_mac_conf_ht(struct ieee80211_hw *hw,
fd105e79 8194 struct ieee80211_conf *conf)
b481de9c 8195{
bb8c093b 8196 struct iwl4965_priv *priv = hw->priv;
8197
8198 IWL_DEBUG_MAC80211("enter: \n");
8199
fd105e79 8200 iwl4965_ht_info_fill(conf, priv);
8201 iwl4965_set_rxon_chain(priv);
8202
8203 if (priv && priv->assoc_id &&
8204 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
8205 unsigned long flags;
8206
8207 spin_lock_irqsave(&priv->lock, flags);
8208 if (priv->beacon_int)
8209 queue_work(priv->workqueue, &priv->post_associate.work);
8210 else
8211 priv->call_post_assoc_from_beacon = 1;
8212 spin_unlock_irqrestore(&priv->lock, flags);
8213 }
8214
8215 IWL_DEBUG_MAC80211("leave:\n");
8216 return 0;
8217}
8218
bb8c093b 8219static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8220 struct ieee80211_ht_cap *ht_cap,
8221 u8 use_current_config)
b481de9c 8222{
8223 struct ieee80211_conf *conf = &hw->conf;
8224 struct ieee80211_hw_mode *mode = conf->mode;
b481de9c 8225
8226 if (use_current_config) {
8227 ht_cap->cap_info = cpu_to_le16(conf->ht_conf.cap);
8228 memcpy(ht_cap->supp_mcs_set,
8229 conf->ht_conf.supp_mcs_set, 16);
8230 } else {
8231 ht_cap->cap_info = cpu_to_le16(mode->ht_info.cap);
8232 memcpy(ht_cap->supp_mcs_set,
8233 mode->ht_info.supp_mcs_set, 16);
8234 }
8235 ht_cap->ampdu_params_info =
8236 (mode->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) |
8237 ((mode->ht_info.ampdu_density << 2) &
8238 IEEE80211_HT_CAP_AMPDU_DENSITY);
8239}
8240
c8b0e6e1 8241#endif /*CONFIG_IWL4965_HT*/
8242
8243/*****************************************************************************
8244 *
8245 * sysfs attributes
8246 *
8247 *****************************************************************************/
8248
c8b0e6e1 8249#ifdef CONFIG_IWL4965_DEBUG
8250
8251/*
8252 * The following adds a new attribute to the sysfs representation
8253 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
8254 * used for controlling the debug level.
8255 *
8256 * See the level definitions in iwl for details.
8257 */
8258
8259static ssize_t show_debug_level(struct device_driver *d, char *buf)
8260{
bb8c093b 8261 return sprintf(buf, "0x%08X\n", iwl4965_debug_level);
8262}
8263static ssize_t store_debug_level(struct device_driver *d,
8264 const char *buf, size_t count)
8265{
8266 char *p = (char *)buf;
8267 u32 val;
8268
8269 val = simple_strtoul(p, &p, 0);
8270 if (p == buf)
8271 printk(KERN_INFO DRV_NAME
8272 ": %s is not in hex or decimal form.\n", buf);
8273 else
bb8c093b 8274 iwl4965_debug_level = val;
8275
8276 return strnlen(buf, count);
8277}
8278
8279static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
8280 show_debug_level, store_debug_level);
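/*
 * Illustrative usage, added commentary rather than original source: assuming
 * DRV_NAME expands to "iwl4965", this driver attribute appears as
 * /sys/bus/pci/drivers/iwl4965/debug_level.  store_debug_level() parses the
 * value with simple_strtoul(..., 0), so hex and decimal both work:
 *
 *   # echo 0x43fff > /sys/bus/pci/drivers/iwl4965/debug_level
 *   # cat /sys/bus/pci/drivers/iwl4965/debug_level
 */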
8281
c8b0e6e1 8282#endif /* CONFIG_IWL4965_DEBUG */
8283
8284static ssize_t show_rf_kill(struct device *d,
8285 struct device_attribute *attr, char *buf)
8286{
8287 /*
8288 * 0 - RF kill not enabled
8289 * 1 - SW based RF kill active (sysfs)
8290 * 2 - HW based RF kill active
8291 * 3 - Both HW and SW based RF kill active
8292 */
bb8c093b 8293 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8294 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
8295 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
8296
8297 return sprintf(buf, "%i\n", val);
8298}
8299
8300static ssize_t store_rf_kill(struct device *d,
8301 struct device_attribute *attr,
8302 const char *buf, size_t count)
8303{
bb8c093b 8304 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8305
8306 mutex_lock(&priv->mutex);
bb8c093b 8307 iwl4965_radio_kill_sw(priv, buf[0] == '1');
8308 mutex_unlock(&priv->mutex);
8309
8310 return count;
8311}
8312
8313static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
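/*
 * Added note (not original source): reading rf_kill returns the bitmask
 * documented in show_rf_kill() above (bit 0 = SW kill, bit 1 = HW kill);
 * writing '1' asserts the software RF kill via iwl4965_radio_kill_sw(),
 * writing anything else clears it.  A hardware RF kill, if asserted,
 * cannot be cleared from this attribute.
 */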
8314
8315static ssize_t show_temperature(struct device *d,
8316 struct device_attribute *attr, char *buf)
8317{
bb8c093b 8318 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c 8319
bb8c093b 8320 if (!iwl4965_is_alive(priv))
8321 return -EAGAIN;
8322
bb8c093b 8323 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv));
8324}
8325
8326static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
8327
8328static ssize_t show_rs_window(struct device *d,
8329 struct device_attribute *attr,
8330 char *buf)
8331{
8332 struct iwl4965_priv *priv = d->driver_data;
8333 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
8334}
8335static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
8336
8337static ssize_t show_tx_power(struct device *d,
8338 struct device_attribute *attr, char *buf)
8339{
bb8c093b 8340 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8341 return sprintf(buf, "%d\n", priv->user_txpower_limit);
8342}
8343
8344static ssize_t store_tx_power(struct device *d,
8345 struct device_attribute *attr,
8346 const char *buf, size_t count)
8347{
bb8c093b 8348 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8349 char *p = (char *)buf;
8350 u32 val;
8351
8352 val = simple_strtoul(p, &p, 10);
8353 if (p == buf)
8354 printk(KERN_INFO DRV_NAME
8355 ": %s is not in decimal form.\n", buf);
8356 else
bb8c093b 8357 iwl4965_hw_reg_set_txpower(priv, val);
8358
8359 return count;
8360}
8361
8362static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
8363
8364static ssize_t show_flags(struct device *d,
8365 struct device_attribute *attr, char *buf)
8366{
bb8c093b 8367 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8368
8369 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
8370}
8371
8372static ssize_t store_flags(struct device *d,
8373 struct device_attribute *attr,
8374 const char *buf, size_t count)
8375{
bb8c093b 8376 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8377 u32 flags = simple_strtoul(buf, NULL, 0);
8378
8379 mutex_lock(&priv->mutex);
8380 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
8381 /* Cancel any currently running scans... */
bb8c093b 8382 if (iwl4965_scan_cancel_timeout(priv, 100))
8383 IWL_WARNING("Could not cancel scan.\n");
8384 else {
8385 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
8386 flags);
8387 priv->staging_rxon.flags = cpu_to_le32(flags);
bb8c093b 8388 iwl4965_commit_rxon(priv);
8389 }
8390 }
8391 mutex_unlock(&priv->mutex);
8392
8393 return count;
8394}
8395
8396static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
8397
8398static ssize_t show_filter_flags(struct device *d,
8399 struct device_attribute *attr, char *buf)
8400{
bb8c093b 8401 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8402
8403 return sprintf(buf, "0x%04X\n",
8404 le32_to_cpu(priv->active_rxon.filter_flags));
8405}
8406
8407static ssize_t store_filter_flags(struct device *d,
8408 struct device_attribute *attr,
8409 const char *buf, size_t count)
8410{
bb8c093b 8411 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8412 u32 filter_flags = simple_strtoul(buf, NULL, 0);
8413
8414 mutex_lock(&priv->mutex);
8415 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
8416 /* Cancel any currently running scans... */
bb8c093b 8417 if (iwl4965_scan_cancel_timeout(priv, 100))
8418 IWL_WARNING("Could not cancel scan.\n");
8419 else {
8420 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
8421 "0x%04X\n", filter_flags);
8422 priv->staging_rxon.filter_flags =
8423 cpu_to_le32(filter_flags);
bb8c093b 8424 iwl4965_commit_rxon(priv);
8425 }
8426 }
8427 mutex_unlock(&priv->mutex);
8428
8429 return count;
8430}
8431
8432static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
8433 store_filter_flags);
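/*
 * Added note: both the flags and filter_flags attributes take a raw
 * hexadecimal RXON value (e.g. "echo 0x8005 > flags" -- the value shown is
 * illustrative only).  The new value is applied by committing a fresh RXON,
 * and is skipped with a warning if a running scan cannot be cancelled
 * within 100 ms.
 */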
8434
8435static ssize_t show_tune(struct device *d,
8436 struct device_attribute *attr, char *buf)
8437{
bb8c093b 8438 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8439
8440 return sprintf(buf, "0x%04X\n",
8441 (priv->phymode << 8) |
8442 le16_to_cpu(priv->active_rxon.channel));
8443}
8444
bb8c093b 8445static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode);
8446
8447static ssize_t store_tune(struct device *d,
8448 struct device_attribute *attr,
8449 const char *buf, size_t count)
8450{
bb8c093b 8451 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8452 char *p = (char *)buf;
8453 u16 tune = simple_strtoul(p, &p, 0);
8454 u8 phymode = (tune >> 8) & 0xff;
8455 u16 channel = tune & 0xff;
8456
8457 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
8458
8459 mutex_lock(&priv->mutex);
8460 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
8461 (priv->phymode != phymode)) {
bb8c093b 8462 const struct iwl4965_channel_info *ch_info;
b481de9c 8463
bb8c093b 8464 ch_info = iwl4965_get_channel_info(priv, phymode, channel);
8465 if (!ch_info) {
8466 IWL_WARNING("Requested invalid phymode/channel "
8467 "combination: %d %d\n", phymode, channel);
8468 mutex_unlock(&priv->mutex);
8469 return -EINVAL;
8470 }
8471
8472 /* Cancel any currently running scans... */
bb8c093b 8473 if (iwl4965_scan_cancel_timeout(priv, 100))
8474 IWL_WARNING("Could not cancel scan.\n");
8475 else {
8476 IWL_DEBUG_INFO("Committing phymode and "
8477 "rxon.channel = %d %d\n",
8478 phymode, channel);
8479
8480 iwl4965_set_rxon_channel(priv, phymode, channel);
8481 iwl4965_set_flags_for_phymode(priv, phymode);
b481de9c 8482
8483 iwl4965_set_rate(priv);
8484 iwl4965_commit_rxon(priv);
8485 }
8486 }
8487 mutex_unlock(&priv->mutex);
8488
8489 return count;
8490}
8491
8492static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
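/*
 * Added note: the tune attribute packs the phymode into bits 15..8 and the
 * channel number into bits 7..0 of a single value, mirroring what
 * show_tune() prints.  A minimal sketch of composing such a value, using
 * the names from store_tune() above:
 *
 *   u16 tune = (phymode << 8) | (channel & 0xff);
 */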
8493
c8b0e6e1 8494#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
8495
8496static ssize_t show_measurement(struct device *d,
8497 struct device_attribute *attr, char *buf)
8498{
8499 struct iwl4965_priv *priv = dev_get_drvdata(d);
8500 struct iwl4965_spectrum_notification measure_report;
8501 u32 size = sizeof(measure_report), len = 0, ofs = 0;
8502 u8 *data = (u8 *)&measure_report;
8503 unsigned long flags;
8504
8505 spin_lock_irqsave(&priv->lock, flags);
8506 if (!(priv->measurement_status & MEASUREMENT_READY)) {
8507 spin_unlock_irqrestore(&priv->lock, flags);
8508 return 0;
8509 }
8510 memcpy(&measure_report, &priv->measure_report, size);
8511 priv->measurement_status = 0;
8512 spin_unlock_irqrestore(&priv->lock, flags);
8513
8514 while (size && (PAGE_SIZE - len)) {
8515 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8516 PAGE_SIZE - len, 1);
8517 len = strlen(buf);
8518 if (PAGE_SIZE - len)
8519 buf[len++] = '\n';
8520
8521 ofs += 16;
8522 size -= min(size, 16U);
8523 }
8524
8525 return len;
8526}
8527
8528static ssize_t store_measurement(struct device *d,
8529 struct device_attribute *attr,
8530 const char *buf, size_t count)
8531{
bb8c093b 8532 struct iwl4965_priv *priv = dev_get_drvdata(d);
8533 struct ieee80211_measurement_params params = {
8534 .channel = le16_to_cpu(priv->active_rxon.channel),
8535 .start_time = cpu_to_le64(priv->last_tsf),
8536 .duration = cpu_to_le16(1),
8537 };
8538 u8 type = IWL_MEASURE_BASIC;
8539 u8 buffer[32];
8540 u8 channel;
8541
8542 if (count) {
8543 char *p = buffer;
8544 strncpy(buffer, buf, min(sizeof(buffer), count));
8545 channel = simple_strtoul(p, NULL, 0);
8546 if (channel)
8547 params.channel = channel;
8548
8549 p = buffer;
8550 while (*p && *p != ' ')
8551 p++;
8552 if (*p)
8553 type = simple_strtoul(p + 1, NULL, 0);
8554 }
8555
8556 IWL_DEBUG_INFO("Invoking measurement of type %d on "
8557 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 8558 iwl4965_get_measurement(priv, &params, type);
8559
8560 return count;
8561}
8562
8563static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
8564 show_measurement, store_measurement);
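/*
 * Added usage sketch: store_measurement() expects "<channel> [type]",
 * e.g. "echo '6 0' > measurement" to request a basic measurement on
 * channel 6.  Omitting either field falls back to the current RXON
 * channel and IWL_MEASURE_BASIC.  The values are illustrative only.
 */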
c8b0e6e1 8565#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */
8566
8567static ssize_t store_retry_rate(struct device *d,
8568 struct device_attribute *attr,
8569 const char *buf, size_t count)
8570{
bb8c093b 8571 struct iwl4965_priv *priv = dev_get_drvdata(d);
8572
8573 priv->retry_rate = simple_strtoul(buf, NULL, 0);
8574 if (priv->retry_rate <= 0)
8575 priv->retry_rate = 1;
8576
8577 return count;
8578}
8579
8580static ssize_t show_retry_rate(struct device *d,
8581 struct device_attribute *attr, char *buf)
8582{
bb8c093b 8583 struct iwl4965_priv *priv = dev_get_drvdata(d);
8584 return sprintf(buf, "%d", priv->retry_rate);
8585}
8586
8587static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
8588 store_retry_rate);
8589
8590static ssize_t store_power_level(struct device *d,
8591 struct device_attribute *attr,
8592 const char *buf, size_t count)
8593{
bb8c093b 8594 struct iwl4965_priv *priv = dev_get_drvdata(d);
8595 int rc;
8596 int mode;
8597
8598 mode = simple_strtoul(buf, NULL, 0);
8599 mutex_lock(&priv->mutex);
8600
bb8c093b 8601 if (!iwl4965_is_ready(priv)) {
8602 rc = -EAGAIN;
8603 goto out;
8604 }
8605
8606 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
8607 mode = IWL_POWER_AC;
8608 else
8609 mode |= IWL_POWER_ENABLED;
8610
8611 if (mode != priv->power_mode) {
bb8c093b 8612 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
8613 if (rc) {
8614 IWL_DEBUG_MAC80211("failed setting power mode.\n");
8615 goto out;
8616 }
8617 priv->power_mode = mode;
8618 }
8619
8620 rc = count;
8621
8622 out:
8623 mutex_unlock(&priv->mutex);
8624 return rc;
8625}
8626
8627#define MAX_WX_STRING 80
8628
8629/* Values are in microsecond */
8630static const s32 timeout_duration[] = {
8631 350000,
8632 250000,
8633 75000,
8634 37000,
8635 25000,
8636};
8637static const s32 period_duration[] = {
8638 400000,
8639 700000,
8640 1000000,
8641 1000000,
8642 1000000
8643};
8644
8645static ssize_t show_power_level(struct device *d,
8646 struct device_attribute *attr, char *buf)
8647{
bb8c093b 8648 struct iwl4965_priv *priv = dev_get_drvdata(d);
8649 int level = IWL_POWER_LEVEL(priv->power_mode);
8650 char *p = buf;
8651
8652 p += sprintf(p, "%d ", level);
8653 switch (level) {
8654 case IWL_POWER_MODE_CAM:
8655 case IWL_POWER_AC:
8656 p += sprintf(p, "(AC)");
8657 break;
8658 case IWL_POWER_BATTERY:
8659 p += sprintf(p, "(BATTERY)");
8660 break;
8661 default:
8662 p += sprintf(p,
8663 "(Timeout %dms, Period %dms)",
8664 timeout_duration[level - 1] / 1000,
8665 period_duration[level - 1] / 1000);
8666 }
8667
8668 if (!(priv->power_mode & IWL_POWER_ENABLED))
8669 p += sprintf(p, " OFF\n");
8670 else
8671 p += sprintf(p, " \n");
8672
8673 return (p - buf + 1);
8674
8675}
8676
8677static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8678 store_power_level);
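/*
 * Added note: store_power_level() accepts 1..IWL_POWER_LIMIT; out-of-range
 * values (and IWL_POWER_AC itself) select the mains-powered "AC" mode.
 * show_power_level() prints the level followed by (AC), (BATTERY) or the
 * timeout/period pair taken from the tables above, plus " OFF" while power
 * management is disabled.
 */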
8679
8680static ssize_t show_channels(struct device *d,
8681 struct device_attribute *attr, char *buf)
8682{
bb8c093b 8683 struct iwl4965_priv *priv = dev_get_drvdata(d);
8684 int len = 0, i;
8685 struct ieee80211_channel *channels = NULL;
8686 const struct ieee80211_hw_mode *hw_mode = NULL;
8687 int count = 0;
8688
bb8c093b 8689 if (!iwl4965_is_ready(priv))
8690 return -EAGAIN;
8691
bb8c093b 8692 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211G);
b481de9c 8693 if (!hw_mode)
bb8c093b 8694 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211B);
8695 if (hw_mode) {
8696 channels = hw_mode->channels;
8697 count = hw_mode->num_channels;
8698 }
8699
8700 len +=
8701 sprintf(&buf[len],
8702 "Displaying %d channels in 2.4GHz band "
8703 "(802.11bg):\n", count);
8704
8705 for (i = 0; i < count; i++)
8706 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8707 channels[i].chan,
8708 channels[i].power_level,
8709 channels[i].
8710 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8711 " (IEEE 802.11h required)" : "",
8712 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8713 || (channels[i].
8714 flag &
8715 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8716 ", IBSS",
8717 channels[i].
8718 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8719 "active/passive" : "passive only");
8720
bb8c093b 8721 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211A);
8722 if (hw_mode) {
8723 channels = hw_mode->channels;
8724 count = hw_mode->num_channels;
8725 } else {
8726 channels = NULL;
8727 count = 0;
8728 }
8729
8730 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8731 "(802.11a):\n", count);
8732
8733 for (i = 0; i < count; i++)
8734 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8735 channels[i].chan,
8736 channels[i].power_level,
8737 channels[i].
8738 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8739 " (IEEE 802.11h required)" : "",
8740 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8741 || (channels[i].
8742 flag &
8743 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8744 ", IBSS",
8745 channels[i].
8746 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8747 "active/passive" : "passive only");
8748
8749 return len;
8750}
8751
8752static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8753
8754static ssize_t show_statistics(struct device *d,
8755 struct device_attribute *attr, char *buf)
8756{
8757 struct iwl4965_priv *priv = dev_get_drvdata(d);
8758 u32 size = sizeof(struct iwl4965_notif_statistics);
8759 u32 len = 0, ofs = 0;
8760 u8 *data = (u8 *)&priv->statistics;
8761 int rc = 0;
8762
bb8c093b 8763 if (!iwl4965_is_alive(priv))
8764 return -EAGAIN;
8765
8766 mutex_lock(&priv->mutex);
bb8c093b 8767 rc = iwl4965_send_statistics_request(priv);
8768 mutex_unlock(&priv->mutex);
8769
8770 if (rc) {
8771 len = sprintf(buf,
8772 "Error sending statistics request: 0x%08X\n", rc);
8773 return len;
8774 }
8775
8776 while (size && (PAGE_SIZE - len)) {
8777 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8778 PAGE_SIZE - len, 1);
8779 len = strlen(buf);
8780 if (PAGE_SIZE - len)
8781 buf[len++] = '\n';
8782
8783 ofs += 16;
8784 size -= min(size, 16U);
8785 }
8786
8787 return len;
8788}
8789
8790static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8791
8792static ssize_t show_antenna(struct device *d,
8793 struct device_attribute *attr, char *buf)
8794{
bb8c093b 8795 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c 8796
bb8c093b 8797 if (!iwl4965_is_alive(priv))
8798 return -EAGAIN;
8799
8800 return sprintf(buf, "%d\n", priv->antenna);
8801}
8802
8803static ssize_t store_antenna(struct device *d,
8804 struct device_attribute *attr,
8805 const char *buf, size_t count)
8806{
8807 int ant;
bb8c093b 8808 struct iwl4965_priv *priv = dev_get_drvdata(d);
8809
8810 if (count == 0)
8811 return 0;
8812
8813 if (sscanf(buf, "%1i", &ant) != 1) {
8814 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8815 return count;
8816 }
8817
8818 if ((ant >= 0) && (ant <= 2)) {
8819 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 8820 priv->antenna = (enum iwl4965_antenna)ant;
8821 } else
8822 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8823
8824
8825 return count;
8826}
8827
8828static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
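/*
 * Added note: this attribute mirrors the "antenna" module parameter below --
 * 0 selects diversity (both antennas), 1 forces the main antenna and 2 the
 * auxiliary one; any other value is rejected with a debug message.
 */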
8829
8830static ssize_t show_status(struct device *d,
8831 struct device_attribute *attr, char *buf)
8832{
8833 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8834 if (!iwl4965_is_alive(priv))
8835 return -EAGAIN;
8836 return sprintf(buf, "0x%08x\n", (int)priv->status);
8837}
8838
8839static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
8840
8841static ssize_t dump_error_log(struct device *d,
8842 struct device_attribute *attr,
8843 const char *buf, size_t count)
8844{
8845 char *p = (char *)buf;
8846
8847 if (p[0] == '1')
bb8c093b 8848 iwl4965_dump_nic_error_log((struct iwl4965_priv *)d->driver_data);
8849
8850 return strnlen(buf, count);
8851}
8852
8853static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
8854
8855static ssize_t dump_event_log(struct device *d,
8856 struct device_attribute *attr,
8857 const char *buf, size_t count)
8858{
8859 char *p = (char *)buf;
8860
8861 if (p[0] == '1')
bb8c093b 8862 iwl4965_dump_nic_event_log((struct iwl4965_priv *)d->driver_data);
8863
8864 return strnlen(buf, count);
8865}
8866
8867static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
8868
8869/*****************************************************************************
8870 *
8871 * driver setup and teardown
8872 *
8873 *****************************************************************************/
8874
bb8c093b 8875static void iwl4965_setup_deferred_work(struct iwl4965_priv *priv)
8876{
8877 priv->workqueue = create_workqueue(DRV_NAME);
8878
8879 init_waitqueue_head(&priv->wait_command_queue);
8880
8881 INIT_WORK(&priv->up, iwl4965_bg_up);
8882 INIT_WORK(&priv->restart, iwl4965_bg_restart);
8883 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
8884 INIT_WORK(&priv->scan_completed, iwl4965_bg_scan_completed);
8885 INIT_WORK(&priv->request_scan, iwl4965_bg_request_scan);
8886 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
8887 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
8888 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
8889 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
8890 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
8891 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
8892 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
8893
8894 iwl4965_hw_setup_deferred_work(priv);
8895
8896 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 8897 iwl4965_irq_tasklet, (unsigned long)priv);
8898}
8899
bb8c093b 8900static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv)
b481de9c 8901{
bb8c093b 8902 iwl4965_hw_cancel_deferred_work(priv);
b481de9c 8903
3ae6a054 8904 cancel_delayed_work_sync(&priv->init_alive_start);
8905 cancel_delayed_work(&priv->scan_check);
8906 cancel_delayed_work(&priv->alive_start);
8907 cancel_delayed_work(&priv->post_associate);
8908 cancel_work_sync(&priv->beacon_update);
8909}
8910
bb8c093b 8911static struct attribute *iwl4965_sysfs_entries[] = {
8912 &dev_attr_antenna.attr,
8913 &dev_attr_channels.attr,
8914 &dev_attr_dump_errors.attr,
8915 &dev_attr_dump_events.attr,
8916 &dev_attr_flags.attr,
8917 &dev_attr_filter_flags.attr,
c8b0e6e1 8918#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
8919 &dev_attr_measurement.attr,
8920#endif
8921 &dev_attr_power_level.attr,
8922 &dev_attr_retry_rate.attr,
8923 &dev_attr_rf_kill.attr,
8924 &dev_attr_rs_window.attr,
8925 &dev_attr_statistics.attr,
8926 &dev_attr_status.attr,
8927 &dev_attr_temperature.attr,
8928 &dev_attr_tune.attr,
8929 &dev_attr_tx_power.attr,
8930
8931 NULL
8932};
8933
bb8c093b 8934static struct attribute_group iwl4965_attribute_group = {
b481de9c 8935 .name = NULL, /* put in device directory */
bb8c093b 8936 .attrs = iwl4965_sysfs_entries,
8937};
8938
8939static struct ieee80211_ops iwl4965_hw_ops = {
8940 .tx = iwl4965_mac_tx,
8941 .start = iwl4965_mac_start,
8942 .stop = iwl4965_mac_stop,
8943 .add_interface = iwl4965_mac_add_interface,
8944 .remove_interface = iwl4965_mac_remove_interface,
8945 .config = iwl4965_mac_config,
8946 .config_interface = iwl4965_mac_config_interface,
8947 .configure_filter = iwl4965_configure_filter,
8948 .set_key = iwl4965_mac_set_key,
8949 .get_stats = iwl4965_mac_get_stats,
8950 .get_tx_stats = iwl4965_mac_get_tx_stats,
8951 .conf_tx = iwl4965_mac_conf_tx,
8952 .get_tsf = iwl4965_mac_get_tsf,
8953 .reset_tsf = iwl4965_mac_reset_tsf,
8954 .beacon_update = iwl4965_mac_beacon_update,
8955 .erp_ie_changed = iwl4965_mac_erp_ie_changed,
c8b0e6e1 8956#ifdef CONFIG_IWL4965_HT
bb8c093b 8957 .conf_ht = iwl4965_mac_conf_ht,
c8b0e6e1 8958#ifdef CONFIG_IWL4965_HT_AGG
8959 .ht_tx_agg_start = iwl4965_mac_ht_tx_agg_start,
8960 .ht_tx_agg_stop = iwl4965_mac_ht_tx_agg_stop,
8961 .ht_rx_agg_start = iwl4965_mac_ht_rx_agg_start,
8962 .ht_rx_agg_stop = iwl4965_mac_ht_rx_agg_stop,
8963#endif /* CONFIG_IWL4965_HT_AGG */
8964#endif /* CONFIG_IWL4965_HT */
bb8c093b 8965 .hw_scan = iwl4965_mac_hw_scan
8966};
8967
bb8c093b 8968static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8969{
8970 int err = 0;
bb8c093b 8971 struct iwl4965_priv *priv;
8972 struct ieee80211_hw *hw;
8973 int i;
8974
8975 /* Disabling hardware scan means that mac80211 will perform scans
8976 * "the hard way", rather than using device's scan. */
bb8c093b 8977 if (iwl4965_param_disable_hw_scan) {
b481de9c 8978 IWL_DEBUG_INFO("Disabling hw_scan\n");
bb8c093b 8979 iwl4965_hw_ops.hw_scan = NULL;
8980 }
8981
8982 if ((iwl4965_param_queues_num > IWL_MAX_NUM_QUEUES) ||
8983 (iwl4965_param_queues_num < IWL_MIN_NUM_QUEUES)) {
8984 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
8985 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
8986 err = -EINVAL;
8987 goto out;
8988 }
8989
8990 /* mac80211 allocates memory for this device instance, including
8991 * space for this driver's private structure */
bb8c093b 8992 hw = ieee80211_alloc_hw(sizeof(struct iwl4965_priv), &iwl4965_hw_ops);
8993 if (hw == NULL) {
8994 IWL_ERROR("Can not allocate network device\n");
8995 err = -ENOMEM;
8996 goto out;
8997 }
8998 SET_IEEE80211_DEV(hw, &pdev->dev);
8999
9000 hw->rate_control_algorithm = "iwl-4965-rs";
9001
9002 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
9003 priv = hw->priv;
9004 priv->hw = hw;
9005
9006 priv->pci_dev = pdev;
bb8c093b 9007 priv->antenna = (enum iwl4965_antenna)iwl4965_param_antenna;
c8b0e6e1 9008#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9009 iwl4965_debug_level = iwl4965_param_debug;
9010 atomic_set(&priv->restrict_refcnt, 0);
9011#endif
9012 priv->retry_rate = 1;
9013
9014 priv->ibss_beacon = NULL;
9015
9016 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
9017 * the range of signal quality values that we'll provide.
9018 * Negative values for level/noise indicate that we'll provide dBm.
9019 * For WE, at least, non-0 values here *enable* display of values
9020 * in app (iwconfig). */
9021 hw->max_rssi = -20; /* signal level, negative indicates dBm */
9022 hw->max_noise = -20; /* noise level, negative indicates dBm */
9023 hw->max_signal = 100; /* link quality indication (%) */
9024
9025 /* Tell mac80211 our Tx characteristics */
9026 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
9027
6440adb5 9028 /* Default value; 4 EDCA QOS priorities */
b481de9c 9029 hw->queues = 4;
9030#ifdef CONFIG_IWL4965_HT
9031#ifdef CONFIG_IWL4965_HT_AGG
6440adb5 9032 /* Enhanced value; more queues, to support 11n aggregation */
b481de9c 9033 hw->queues = 16;
9034#endif /* CONFIG_IWL4965_HT_AGG */
9035#endif /* CONFIG_IWL4965_HT */
9036
9037 spin_lock_init(&priv->lock);
9038 spin_lock_init(&priv->power_data.lock);
9039 spin_lock_init(&priv->sta_lock);
9040 spin_lock_init(&priv->hcmd_lock);
9041 spin_lock_init(&priv->lq_mngr.lock);
9042
9043 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
9044 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
9045
9046 INIT_LIST_HEAD(&priv->free_frames);
9047
9048 mutex_init(&priv->mutex);
9049 if (pci_enable_device(pdev)) {
9050 err = -ENODEV;
9051 goto out_ieee80211_free_hw;
9052 }
9053
9054 pci_set_master(pdev);
9055
6440adb5 9056 /* Clear the driver's (not device's) station table */
bb8c093b 9057 iwl4965_clear_stations_table(priv);
9058
9059 priv->data_retry_limit = -1;
9060 priv->ieee_channels = NULL;
9061 priv->ieee_rates = NULL;
9062 priv->phymode = -1;
9063
9064 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9065 if (!err)
9066 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
9067 if (err) {
9068 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
9069 goto out_pci_disable_device;
9070 }
9071
9072 pci_set_drvdata(pdev, priv);
9073 err = pci_request_regions(pdev, DRV_NAME);
9074 if (err)
9075 goto out_pci_disable_device;
6440adb5 9076
9077 /* We disable the RETRY_TIMEOUT register (0x41) to keep
9078 * PCI Tx retries from interfering with C3 CPU state */
9079 pci_write_config_byte(pdev, 0x41, 0x00);
6440adb5 9080
9081 priv->hw_base = pci_iomap(pdev, 0, 0);
9082 if (!priv->hw_base) {
9083 err = -ENODEV;
9084 goto out_pci_release_regions;
9085 }
9086
9087 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
9088 (unsigned long long) pci_resource_len(pdev, 0));
9089 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
9090
9091 /* Initialize module parameter values here */
9092
6440adb5 9093 /* Disable radio (SW RF KILL) via parameter when loading driver */
bb8c093b 9094 if (iwl4965_param_disable) {
9095 set_bit(STATUS_RF_KILL_SW, &priv->status);
9096 IWL_DEBUG_INFO("Radio disabled.\n");
9097 }
9098
9099 priv->iw_mode = IEEE80211_IF_TYPE_STA;
9100
9101 priv->ps_mode = 0;
9102 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
9103 priv->valid_antenna = 0x7; /* assume all 3 connected */
9104 priv->ps_mode = IWL_MIMO_PS_NONE;
b481de9c 9105
6440adb5 9106 /* Choose which receivers/antennas to use */
9107 iwl4965_set_rxon_chain(priv);
9108
9109 printk(KERN_INFO DRV_NAME
9110 ": Detected Intel Wireless WiFi Link 4965AGN\n");
9111
9112 /* Device-specific setup */
bb8c093b 9113 if (iwl4965_hw_set_hw_setting(priv)) {
9114 IWL_ERROR("failed to set hw settings\n");
9115 mutex_unlock(&priv->mutex);
9116 goto out_iounmap;
9117 }
9118
c8b0e6e1 9119#ifdef CONFIG_IWL4965_QOS
bb8c093b 9120 if (iwl4965_param_qos_enable)
9121 priv->qos_data.qos_enable = 1;
9122
bb8c093b 9123 iwl4965_reset_qos(priv);
9124
9125 priv->qos_data.qos_active = 0;
9126 priv->qos_data.qos_cap.val = 0;
c8b0e6e1 9127#endif /* CONFIG_IWL4965_QOS */
b481de9c 9128
9129 iwl4965_set_rxon_channel(priv, MODE_IEEE80211G, 6);
9130 iwl4965_setup_deferred_work(priv);
9131 iwl4965_setup_rx_handlers(priv);
9132
9133 priv->rates_mask = IWL_RATES_MASK;
9134 /* If power management is turned on, default to AC mode */
9135 priv->power_mode = IWL_POWER_AC;
9136 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
9137
bb8c093b 9138 iwl4965_disable_interrupts(priv);
49df2b33 9139
9140 pci_enable_msi(pdev);
9141
bb8c093b 9142 err = request_irq(pdev->irq, iwl4965_isr, IRQF_SHARED, DRV_NAME, priv);
9143 if (err) {
9144 IWL_ERROR("Error allocating IRQ %d\n", pdev->irq);
9145 goto out_disable_msi;
9146 }
9147
9148 mutex_lock(&priv->mutex);
9149
bb8c093b 9150 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
9151 if (err) {
9152 IWL_ERROR("failed to create sysfs device attributes\n");
9153 mutex_unlock(&priv->mutex);
9154 goto out_release_irq;
9155 }
9156
9157 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
9158 * ucode filename and max sizes are card-specific. */
bb8c093b 9159 err = iwl4965_read_ucode(priv);
9160 if (err) {
9161 IWL_ERROR("Could not read microcode: %d\n", err);
9162 mutex_unlock(&priv->mutex);
9163 goto out_pci_alloc;
9164 }
9165
9166 mutex_unlock(&priv->mutex);
9167
01ebd063 9168 IWL_DEBUG_INFO("Queueing UP work.\n");
9169
9170 queue_work(priv->workqueue, &priv->up);
9171
9172 return 0;
9173
9174 out_pci_alloc:
bb8c093b 9175 iwl4965_dealloc_ucode_pci(priv);
b481de9c 9176
bb8c093b 9177 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
9178
9179 out_release_irq:
9180 free_irq(pdev->irq, priv);
9181
9182 out_disable_msi:
9183 pci_disable_msi(pdev);
9184 destroy_workqueue(priv->workqueue);
9185 priv->workqueue = NULL;
bb8c093b 9186 iwl4965_unset_hw_setting(priv);
9187
9188 out_iounmap:
9189 pci_iounmap(pdev, priv->hw_base);
9190 out_pci_release_regions:
9191 pci_release_regions(pdev);
9192 out_pci_disable_device:
9193 pci_disable_device(pdev);
9194 pci_set_drvdata(pdev, NULL);
9195 out_ieee80211_free_hw:
9196 ieee80211_free_hw(priv->hw);
9197 out:
9198 return err;
9199}
9200
bb8c093b 9201static void iwl4965_pci_remove(struct pci_dev *pdev)
b481de9c 9202{
bb8c093b 9203 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
9204 struct list_head *p, *q;
9205 int i;
9206
9207 if (!priv)
9208 return;
9209
9210 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
9211
b481de9c 9212 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 9213
bb8c093b 9214 iwl4965_down(priv);
9215
9216 /* Free MAC hash list for ADHOC */
9217 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
9218 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
9219 list_del(p);
bb8c093b 9220 kfree(list_entry(p, struct iwl4965_ibss_seq, list));
9221 }
9222 }
9223
bb8c093b 9224 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c 9225
bb8c093b 9226 iwl4965_dealloc_ucode_pci(priv);
9227
9228 if (priv->rxq.bd)
9229 iwl4965_rx_queue_free(priv, &priv->rxq);
9230 iwl4965_hw_txq_ctx_free(priv);
b481de9c 9231
9232 iwl4965_unset_hw_setting(priv);
9233 iwl4965_clear_stations_table(priv);
9234
9235 if (priv->mac80211_registered) {
9236 ieee80211_unregister_hw(priv->hw);
bb8c093b 9237 iwl4965_rate_control_unregister(priv->hw);
9238 }
9239
9240 /*netif_stop_queue(dev); */
9241 flush_workqueue(priv->workqueue);
9242
bb8c093b 9243 /* ieee80211_unregister_hw calls iwl4965_mac_stop, which flushes
9244 * priv->workqueue... so we can't take down the workqueue
9245 * until now... */
9246 destroy_workqueue(priv->workqueue);
9247 priv->workqueue = NULL;
9248
9249 free_irq(pdev->irq, priv);
9250 pci_disable_msi(pdev);
9251 pci_iounmap(pdev, priv->hw_base);
9252 pci_release_regions(pdev);
9253 pci_disable_device(pdev);
9254 pci_set_drvdata(pdev, NULL);
9255
9256 kfree(priv->channel_info);
9257
9258 kfree(priv->ieee_channels);
9259 kfree(priv->ieee_rates);
9260
9261 if (priv->ibss_beacon)
9262 dev_kfree_skb(priv->ibss_beacon);
9263
9264 ieee80211_free_hw(priv->hw);
9265}
9266
9267#ifdef CONFIG_PM
9268
bb8c093b 9269static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 9270{
bb8c093b 9271 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9272
9273 set_bit(STATUS_IN_SUSPEND, &priv->status);
9274
9275 /* Take down the device; powers it off, etc. */
bb8c093b 9276 iwl4965_down(priv);
9277
9278 if (priv->mac80211_registered)
9279 ieee80211_stop_queues(priv->hw);
9280
9281 pci_save_state(pdev);
9282 pci_disable_device(pdev);
9283 pci_set_power_state(pdev, PCI_D3hot);
9284
9285 return 0;
9286}
9287
bb8c093b 9288static void iwl4965_resume(struct iwl4965_priv *priv)
9289{
9290 unsigned long flags;
9291
9292 /* The following is a temporary workaround due to the
9293 * suspend / resume not fully initializing the NIC correctly.
9294 * Without all of the following, resume will not attempt to take
9295 * down the NIC (it shouldn't really need to) and will just try
9296 * and bring the NIC back up. However that fails during the
9297 * ucode verification process. This then causes iwl4965_down to be
9298 * called *after* iwl4965_hw_nic_init() has succeeded -- which
9299 * then lets the next init sequence succeed. So, we've
9300 * replicated all of that NIC init code here... */
9301
bb8c093b 9302 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 9303
bb8c093b 9304 iwl4965_hw_nic_init(priv);
b481de9c 9305
9306 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9307 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 9308 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
9309 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
9310 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9311 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9312
9313 /* tell the device to stop sending interrupts */
bb8c093b 9314 iwl4965_disable_interrupts(priv);
9315
9316 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 9317 iwl4965_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c 9318
9319 if (!iwl4965_grab_nic_access(priv)) {
9320 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
ac17a947 9321 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 9322 iwl4965_release_nic_access(priv);
9323 }
9324 spin_unlock_irqrestore(&priv->lock, flags);
9325
9326 udelay(5);
9327
bb8c093b 9328 iwl4965_hw_nic_reset(priv);
9329
9330 /* Bring the device back up */
9331 clear_bit(STATUS_IN_SUSPEND, &priv->status);
9332 queue_work(priv->workqueue, &priv->up);
9333}
9334
bb8c093b 9335static int iwl4965_pci_resume(struct pci_dev *pdev)
b481de9c 9336{
bb8c093b 9337 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
9338 int err;
9339
9340 printk(KERN_INFO "Coming out of suspend...\n");
9341
9342 pci_set_power_state(pdev, PCI_D0);
9343 err = pci_enable_device(pdev);
9344 pci_restore_state(pdev);
9345
9346 /*
9347 * Suspend/Resume resets the PCI configuration space, so we have to
9348 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
9349 * from interfering with C3 CPU state. pci_restore_state won't help
9350 * here since it only restores the first 64 bytes pci config header.
9351 */
9352 pci_write_config_byte(pdev, 0x41, 0x00);
9353
bb8c093b 9354 iwl4965_resume(priv);
9355
9356 return 0;
9357}
9358
9359#endif /* CONFIG_PM */
9360
9361/*****************************************************************************
9362 *
9363 * driver and module entry point
9364 *
9365 *****************************************************************************/
9366
bb8c093b 9367static struct pci_driver iwl4965_driver = {
b481de9c 9368 .name = DRV_NAME,
9369 .id_table = iwl4965_hw_card_ids,
9370 .probe = iwl4965_pci_probe,
9371 .remove = __devexit_p(iwl4965_pci_remove),
b481de9c 9372#ifdef CONFIG_PM
9373 .suspend = iwl4965_pci_suspend,
9374 .resume = iwl4965_pci_resume,
9375#endif
9376};
9377
bb8c093b 9378static int __init iwl4965_init(void)
9379{
9380
9381 int ret;
9382 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
9383 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
bb8c093b 9384 ret = pci_register_driver(&iwl4965_driver);
9385 if (ret) {
9386 IWL_ERROR("Unable to initialize PCI module\n");
9387 return ret;
9388 }
c8b0e6e1 9389#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9390 ret = driver_create_file(&iwl4965_driver.driver, &driver_attr_debug_level);
9391 if (ret) {
9392 IWL_ERROR("Unable to create driver sysfs file\n");
bb8c093b 9393 pci_unregister_driver(&iwl4965_driver);
9394 return ret;
9395 }
9396#endif
9397
9398 return ret;
9399}
9400
bb8c093b 9401static void __exit iwl4965_exit(void)
b481de9c 9402{
c8b0e6e1 9403#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9404 driver_remove_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c 9405#endif
bb8c093b 9406 pci_unregister_driver(&iwl4965_driver);
9407}
9408
bb8c093b 9409module_param_named(antenna, iwl4965_param_antenna, int, 0444);
b481de9c 9410MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
bb8c093b 9411module_param_named(disable, iwl4965_param_disable, int, 0444);
b481de9c 9412MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
bb8c093b 9413module_param_named(hwcrypto, iwl4965_param_hwcrypto, int, 0444);
9414MODULE_PARM_DESC(hwcrypto,
9415 "using hardware crypto engine (default 0 [software])\n");
bb8c093b 9416module_param_named(debug, iwl4965_param_debug, int, 0444);
b481de9c 9417MODULE_PARM_DESC(debug, "debug output mask");
bb8c093b 9418module_param_named(disable_hw_scan, iwl4965_param_disable_hw_scan, int, 0444);
9419MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
9420
bb8c093b 9421module_param_named(queues_num, iwl4965_param_queues_num, int, 0444);
9422MODULE_PARM_DESC(queues_num, "number of hw queues.");
9423
9424/* QoS */
bb8c093b 9425module_param_named(qos_enable, iwl4965_param_qos_enable, int, 0444);
b481de9c 9426MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
9427module_param_named(amsdu_size_8K, iwl4965_param_amsdu_size_8K, int, 0444);
9428MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
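/*
 * Illustrative load-time example (added commentary; the module name
 * "iwl4965" is assumed from DRV_NAME, and the parameter values are
 * arbitrary):
 *
 *   modprobe iwl4965 debug=0x43fff disable_hw_scan=1 amsdu_size_8K=1
 */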
b481de9c 9429
9430module_exit(iwl4965_exit);
9431module_init(iwl4965_init);