1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/dma-mapping.h>
36#include <linux/delay.h>
37#include <linux/skbuff.h>
38#include <linux/netdevice.h>
39#include <linux/wireless.h>
40#include <linux/firmware.h>
41#include <linux/etherdevice.h>
42#include <linux/if_arp.h>
43
44#include <net/mac80211.h>
45
46#include <asm/div64.h>
47
48#include "iwl-4965.h"
49#include "iwl-helpers.h"
50
51#ifdef CONFIG_IWL4965_DEBUG
52u32 iwl4965_debug_level;
53#endif
54
55static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
56 struct iwl4965_tx_queue *txq);
57
58/******************************************************************************
59 *
60 * module boiler plate
61 *
62 ******************************************************************************/
63
64/* module parameters */
65static int iwl4965_param_disable_hw_scan; /* def: 0 = use 4965's h/w scan */
66static int iwl4965_param_debug; /* def: 0 = minimal debug log messages */
67static int iwl4965_param_disable; /* def: enable radio */
68static int iwl4965_param_antenna; /* def: 0 = both antennas (use diversity) */
69int iwl4965_param_hwcrypto; /* def: using software encryption */
70static int iwl4965_param_qos_enable = 1; /* def: 1 = use quality of service */
71int iwl4965_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 16 Tx queues */
72int iwl4965_param_amsdu_size_8K; /* def: enable 8K amsdu size */
73
74/*
75 * module name, copyright, version, etc.
76 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
77 */
78
79#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
80
81#ifdef CONFIG_IWL4965_DEBUG
82#define VD "d"
83#else
84#define VD
85#endif
86
87#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
88#define VS "s"
89#else
90#define VS
91#endif
92
93#define IWLWIFI_VERSION "1.2.22k" VD VS
94#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
95#define DRV_VERSION IWLWIFI_VERSION
96
97/* Change firmware file name, using "-" and incrementing number,
98 * *only* when uCode interface or architecture changes so that it
99 * is not compatible with earlier drivers.
100 * This number will also appear in << 8 position of 1st dword of uCode file */
101#define IWL4965_UCODE_API "-1"
102
103MODULE_DESCRIPTION(DRV_DESCRIPTION);
104MODULE_VERSION(DRV_VERSION);
105MODULE_AUTHOR(DRV_COPYRIGHT);
106MODULE_LICENSE("GPL");
107
108__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
109{
110 u16 fc = le16_to_cpu(hdr->frame_control);
111 int hdr_len = ieee80211_get_hdrlen(fc);
112
113 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
114 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
115 return NULL;
116}
117
118static const struct ieee80211_hw_mode *iwl4965_get_hw_mode(
119 struct iwl4965_priv *priv, int mode)
120{
121 int i;
122
123 for (i = 0; i < 3; i++)
124 if (priv->modes[i].mode == mode)
125 return &priv->modes[i];
126
127 return NULL;
128}
129
130static int iwl4965_is_empty_essid(const char *essid, int essid_len)
131{
132 /* Single white space is for Linksys APs */
133 if (essid_len == 1 && essid[0] == ' ')
134 return 1;
135
136 /* Otherwise, if the entire essid is 0, we assume it is hidden */
137 while (essid_len) {
138 essid_len--;
139 if (essid[essid_len] != '\0')
140 return 0;
141 }
142
143 return 1;
144}
145
146static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
147{
148 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
149 const char *s = essid;
150 char *d = escaped;
151
152 if (iwl4965_is_empty_essid(essid, essid_len)) {
153 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
154 return escaped;
155 }
156
157 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
158 while (essid_len--) {
159 if (*s == '\0') {
160 *d++ = '\\';
161 *d++ = '0';
162 s++;
163 } else
164 *d++ = *s++;
165 }
166 *d = '\0';
167 return escaped;
168}
169
170static void iwl4965_print_hex_dump(int level, void *p, u32 len)
171{
172#ifdef CONFIG_IWL4965_DEBUG
173 if (!(iwl4965_debug_level & level))
174 return;
175
176 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
177 p, len, 1);
178#endif
179}
180
181/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
182 * DMA services
183 *
184 * Theory of operation
185 *
186 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
187 * of buffer descriptors, each of which points to one or more data buffers for
188 * the device to read from or fill. Driver and device exchange status of each
189 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
190 * entries in each circular buffer, to protect against confusing empty and full
191 * queue states.
192 *
193 * The device reads or writes the data in the queues via the device's several
194 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
195 *
196 * For Tx queues, there are low mark and high mark limits. If, after queuing
197 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
198 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
199 * > high mark, the Tx queue is resumed.
200 *
201 * The 4965 operates with up to 17 queues: One receive queue, one transmit
202 * queue (#4) for sending commands to the device firmware, and 15 other
203 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
204 *
205 * See more detailed info in iwl-4965-hw.h.
206 ***************************************************/
207
208static int iwl4965_queue_space(const struct iwl4965_queue *q)
209{
210 int s = q->read_ptr - q->write_ptr;
211
212 if (q->read_ptr > q->write_ptr)
213 s -= q->n_bd;
214
215 if (s <= 0)
216 s += q->n_window;
217 /* keep some reserve to not confuse empty and full situations */
218 s -= 2;
219 if (s < 0)
220 s = 0;
221 return s;
222}
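/*
 * Worked example with illustrative (hypothetical) values, assuming
 * n_bd == n_window == 256: read_ptr = 10, write_ptr = 250 gives
 * s = -240; read_ptr is not greater than write_ptr, so n_bd is not
 * subtracted; adding n_window yields 16, and the 2-entry reserve
 * leaves 14 slots reported as free.
 */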
223
224/**
225 * iwl4965_queue_inc_wrap - increment queue index, wrap back to beginning
226 * @index -- current index
227 * @n_bd -- total number of entries in queue (must be power of 2)
228 */
229static inline int iwl4965_queue_inc_wrap(int index, int n_bd)
230{
231 return ++index & (n_bd - 1);
232}
233
234/**
235 * iwl4965_queue_dec_wrap - decrement queue index, wrap back to end
236 * @index -- current index
237 * @n_bd -- total number of entries in queue (must be power of 2)
238 */
239static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
240{
241 return --index & (n_bd - 1);
242}
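/*
 * Both wrap helpers rely on n_bd being a power of 2 (enforced by BUG_ON in
 * iwl4965_queue_init), so masking with (n_bd - 1) acts as a modulo: e.g.
 * with n_bd = 256, incrementing index 255 gives 256 & 255 = 0, and
 * decrementing index 0 gives -1 & 255 = 255.
 */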
243
244static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
245{
246 return q->write_ptr > q->read_ptr ?
247 (i >= q->read_ptr && i < q->write_ptr) :
248 !(i < q->read_ptr && i >= q->write_ptr);
249}
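/*
 * I.e. index i counts as used when it falls in the occupied region between
 * read_ptr (inclusive) and write_ptr (exclusive), taking the circular
 * buffer's wrap-around into account.
 */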
250
251static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
252{
253 /* This is for scan command, the big buffer at end of command array */
254 if (is_huge)
255 return q->n_window; /* must be power of 2 */
256
257 /* Otherwise, use normal size buffers */
258 return index & (q->n_window - 1);
259}
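/*
 * The oversized scan command lives in the extra slot at index n_window,
 * just past the normal command entries; iwl4965_tx_queue_init() allocates
 * the additional IWL_MAX_SCAN_SIZE bytes for it on the command queue.
 */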
260
261/**
262 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
263 */
264static int iwl4965_queue_init(struct iwl4965_priv *priv, struct iwl4965_queue *q,
265 int count, int slots_num, u32 id)
266{
267 q->n_bd = count;
268 q->n_window = slots_num;
269 q->id = id;
270
271 /* count must be power-of-two size, otherwise iwl4965_queue_inc_wrap
272 * and iwl4965_queue_dec_wrap are broken. */
273 BUG_ON(!is_power_of_2(count));
274
275 /* slots_num must be power-of-two size, otherwise
276 * get_cmd_index is broken. */
277 BUG_ON(!is_power_of_2(slots_num));
278
279 q->low_mark = q->n_window / 4;
280 if (q->low_mark < 4)
281 q->low_mark = 4;
282
283 q->high_mark = q->n_window / 8;
284 if (q->high_mark < 2)
285 q->high_mark = 2;
286
287 q->write_ptr = q->read_ptr = 0;
288
289 return 0;
290}
291
292/**
293 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
294 */
295static int iwl4965_tx_queue_alloc(struct iwl4965_priv *priv,
296 struct iwl4965_tx_queue *txq, u32 id)
297{
298 struct pci_dev *dev = priv->pci_dev;
299
300 /* Driver private data, only for Tx (not command) queues,
301 * not shared with device. */
302 if (id != IWL_CMD_QUEUE_NUM) {
303 txq->txb = kmalloc(sizeof(txq->txb[0]) *
304 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
305 if (!txq->txb) {
306 IWL_ERROR("kmalloc for auxiliary BD "
307 "structures failed\n");
308 goto error;
309 }
310 } else
311 txq->txb = NULL;
312
313 /* Circular buffer of transmit frame descriptors (TFDs),
314 * shared with device */
315 txq->bd = pci_alloc_consistent(dev,
316 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
317 &txq->q.dma_addr);
318
319 if (!txq->bd) {
320 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
321 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
322 goto error;
323 }
324 txq->q.id = id;
325
326 return 0;
327
328 error:
329 if (txq->txb) {
330 kfree(txq->txb);
331 txq->txb = NULL;
332 }
333
334 return -ENOMEM;
335}
336
337/**
338 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
339 */
340int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
341 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
342{
343 struct pci_dev *dev = priv->pci_dev;
344 int len;
345 int rc = 0;
346
347 /*
348 * Alloc buffer array for commands (Tx or other types of commands).
349 * For the command queue (#4), allocate command space + one big
350 * command for scan, since scan command is very huge; the system will
351 * not have two scans at the same time, so only one is needed.
352 * For normal Tx queues (all other queues), no super-size command
353 * space is needed.
354 */
355 len = sizeof(struct iwl4965_cmd) * slots_num;
356 if (txq_id == IWL_CMD_QUEUE_NUM)
357 len += IWL_MAX_SCAN_SIZE;
358 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
359 if (!txq->cmd)
360 return -ENOMEM;
361
362 /* Alloc driver data array and TFD circular buffer */
363 rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
364 if (rc) {
365 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
366
367 return -ENOMEM;
368 }
369 txq->need_update = 0;
370
371 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
372 * iwl4965_queue_inc_wrap and iwl4965_queue_dec_wrap are broken. */
373 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
374
375 /* Initialize queue's high/low-water marks, and head/tail indexes */
376 iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
377
378 /* Tell device where to find queue */
379 iwl4965_hw_tx_queue_init(priv, txq);
380
381 return 0;
382}
383
384/**
385 * iwl4965_tx_queue_free - Deallocate DMA queue.
386 * @txq: Transmit queue to deallocate.
387 *
388 * Empty queue by removing and destroying all BD's.
389 * Free all buffers.
390 * 0-fill, but do not free "txq" descriptor structure.
391 */
392void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
393{
394 struct iwl4965_queue *q = &txq->q;
395 struct pci_dev *dev = priv->pci_dev;
396 int len;
397
398 if (q->n_bd == 0)
399 return;
400
401 /* first, empty all BD's */
402 for (; q->write_ptr != q->read_ptr;
403 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd))
404 iwl4965_hw_txq_free_tfd(priv, txq);
405
406 len = sizeof(struct iwl4965_cmd) * q->n_window;
407 if (q->id == IWL_CMD_QUEUE_NUM)
408 len += IWL_MAX_SCAN_SIZE;
409
410 /* De-alloc array of command/tx buffers */
411 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
412
413 /* De-alloc circular buffer of TFDs */
414 if (txq->q.n_bd)
415 pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
416 txq->q.n_bd, txq->bd, txq->q.dma_addr);
417
418 /* De-alloc array of per-TFD driver data */
419 if (txq->txb) {
420 kfree(txq->txb);
421 txq->txb = NULL;
422 }
423
424 /* 0-fill queue descriptor structure */
425 memset(txq, 0, sizeof(*txq));
426}
427
428const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
429
430/*************** STATION TABLE MANAGEMENT ****
431 * mac80211 should be examined to determine if sta_info is duplicating
432 * the functionality provided here
433 */
434
435/**************************************************************/
436
437#if 0 /* temporary disable till we add real remove station */
438/**
439 * iwl4965_remove_station - Remove driver's knowledge of station.
440 *
441 * NOTE: This does not remove station from device's station table.
442 */
443static u8 iwl4965_remove_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
444{
445 int index = IWL_INVALID_STATION;
446 int i;
447 unsigned long flags;
448
449 spin_lock_irqsave(&priv->sta_lock, flags);
450
451 if (is_ap)
452 index = IWL_AP_ID;
453 else if (is_broadcast_ether_addr(addr))
454 index = priv->hw_setting.bcast_sta_id;
455 else
456 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
457 if (priv->stations[i].used &&
458 !compare_ether_addr(priv->stations[i].sta.sta.addr,
459 addr)) {
460 index = i;
461 break;
462 }
463
464 if (unlikely(index == IWL_INVALID_STATION))
465 goto out;
466
467 if (priv->stations[index].used) {
468 priv->stations[index].used = 0;
469 priv->num_stations--;
470 }
471
472 BUG_ON(priv->num_stations < 0);
473
474out:
475 spin_unlock_irqrestore(&priv->sta_lock, flags);
476 return 0;
477}
478#endif
479
480/**
481 * iwl4965_clear_stations_table - Clear the driver's station table
482 *
483 * NOTE: This does not clear or otherwise alter the device's station table.
484 */
485static void iwl4965_clear_stations_table(struct iwl4965_priv *priv)
486{
487 unsigned long flags;
488
489 spin_lock_irqsave(&priv->sta_lock, flags);
490
491 priv->num_stations = 0;
492 memset(priv->stations, 0, sizeof(priv->stations));
493
494 spin_unlock_irqrestore(&priv->sta_lock, flags);
495}
496
497/**
498 * iwl4965_add_station_flags - Add station to tables in driver and device
499 */
500u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr,
501 int is_ap, u8 flags, void *ht_data)
502{
503 int i;
504 int index = IWL_INVALID_STATION;
505 struct iwl4965_station_entry *station;
506 unsigned long flags_spin;
507 DECLARE_MAC_BUF(mac);
508
509 spin_lock_irqsave(&priv->sta_lock, flags_spin);
510 if (is_ap)
511 index = IWL_AP_ID;
512 else if (is_broadcast_ether_addr(addr))
513 index = priv->hw_setting.bcast_sta_id;
514 else
515 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
516 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
517 addr)) {
518 index = i;
519 break;
520 }
521
522 if (!priv->stations[i].used &&
523 index == IWL_INVALID_STATION)
524 index = i;
525 }
526
527
528 /* These two conditions have the same outcome, but keep them separate
529 since they have different meanings */
530 if (unlikely(index == IWL_INVALID_STATION)) {
531 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
532 return index;
533 }
534
535 if (priv->stations[index].used &&
536 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
537 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
538 return index;
539 }
540
541
542 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
543 station = &priv->stations[index];
544 station->used = 1;
545 priv->num_stations++;
546
547 /* Set up the REPLY_ADD_STA command to send to device */
548 memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
549 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
550 station->sta.mode = 0;
551 station->sta.sta.sta_id = index;
552 station->sta.station_flags = 0;
553
554#ifdef CONFIG_IWL4965_HT
555 /* BCAST station and IBSS stations do not work in HT mode */
556 if (index != priv->hw_setting.bcast_sta_id &&
557 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
558 iwl4965_set_ht_add_station(priv, index,
559 (struct ieee80211_ht_info *) ht_data);
560#endif /*CONFIG_IWL4965_HT*/
561
562 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
563
564 /* Add station to device's station table */
565 iwl4965_send_add_station(priv, &station->sta, flags);
566 return index;
567
568}
569
570/*************** DRIVER STATUS FUNCTIONS *****/
571
572static inline int iwl4965_is_ready(struct iwl4965_priv *priv)
573{
574 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
575 * set but EXIT_PENDING is not */
576 return test_bit(STATUS_READY, &priv->status) &&
577 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
578 !test_bit(STATUS_EXIT_PENDING, &priv->status);
579}
580
581static inline int iwl4965_is_alive(struct iwl4965_priv *priv)
582{
583 return test_bit(STATUS_ALIVE, &priv->status);
584}
585
586static inline int iwl4965_is_init(struct iwl4965_priv *priv)
587{
588 return test_bit(STATUS_INIT, &priv->status);
589}
590
591static inline int iwl4965_is_rfkill(struct iwl4965_priv *priv)
592{
593 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
594 test_bit(STATUS_RF_KILL_SW, &priv->status);
595}
596
597static inline int iwl4965_is_ready_rf(struct iwl4965_priv *priv)
598{
599
600 if (iwl4965_is_rfkill(priv))
601 return 0;
602
603 return iwl4965_is_ready(priv);
604}
605
606/*************** HOST COMMAND QUEUE FUNCTIONS *****/
607
608#define IWL_CMD(x) case x : return #x
609
610static const char *get_cmd_string(u8 cmd)
611{
612 switch (cmd) {
613 IWL_CMD(REPLY_ALIVE);
614 IWL_CMD(REPLY_ERROR);
615 IWL_CMD(REPLY_RXON);
616 IWL_CMD(REPLY_RXON_ASSOC);
617 IWL_CMD(REPLY_QOS_PARAM);
618 IWL_CMD(REPLY_RXON_TIMING);
619 IWL_CMD(REPLY_ADD_STA);
620 IWL_CMD(REPLY_REMOVE_STA);
621 IWL_CMD(REPLY_REMOVE_ALL_STA);
622 IWL_CMD(REPLY_TX);
623 IWL_CMD(REPLY_RATE_SCALE);
624 IWL_CMD(REPLY_LEDS_CMD);
625 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
626 IWL_CMD(RADAR_NOTIFICATION);
627 IWL_CMD(REPLY_QUIET_CMD);
628 IWL_CMD(REPLY_CHANNEL_SWITCH);
629 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
630 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
631 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
632 IWL_CMD(POWER_TABLE_CMD);
633 IWL_CMD(PM_SLEEP_NOTIFICATION);
634 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
635 IWL_CMD(REPLY_SCAN_CMD);
636 IWL_CMD(REPLY_SCAN_ABORT_CMD);
637 IWL_CMD(SCAN_START_NOTIFICATION);
638 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
639 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
640 IWL_CMD(BEACON_NOTIFICATION);
641 IWL_CMD(REPLY_TX_BEACON);
642 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
643 IWL_CMD(QUIET_NOTIFICATION);
644 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
645 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
646 IWL_CMD(REPLY_BT_CONFIG);
647 IWL_CMD(REPLY_STATISTICS_CMD);
648 IWL_CMD(STATISTICS_NOTIFICATION);
649 IWL_CMD(REPLY_CARD_STATE_CMD);
650 IWL_CMD(CARD_STATE_NOTIFICATION);
651 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
652 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
653 IWL_CMD(SENSITIVITY_CMD);
654 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
655 IWL_CMD(REPLY_RX_PHY_CMD);
656 IWL_CMD(REPLY_RX_MPDU_CMD);
657 IWL_CMD(REPLY_4965_RX);
658 IWL_CMD(REPLY_COMPRESSED_BA);
659 default:
660 return "UNKNOWN";
661
662 }
663}
664
665#define HOST_COMPLETE_TIMEOUT (HZ / 2)
666
667/**
668 * iwl4965_enqueue_hcmd - enqueue a uCode command
669 * @priv: device private data pointer
670 * @cmd: a pointer to the ucode command structure
671 *
672 * The function returns < 0 values to indicate that the operation
673 * failed. On success, it returns the index (> 0) of the command in the
674 * command queue.
675 */
676static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
677{
678 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
679 struct iwl4965_queue *q = &txq->q;
680 struct iwl4965_tfd_frame *tfd;
681 u32 *control_flags;
682 struct iwl4965_cmd *out_cmd;
683 u32 idx;
684 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
685 dma_addr_t phys_addr;
686 int ret;
687 unsigned long flags;
688
689 /* If any of the command structures end up being larger than
690 * the TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
691 * we will need to increase the size of the TFD entries */
692 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
693 !(cmd->meta.flags & CMD_SIZE_HUGE));
694
695 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
696 IWL_ERROR("No space for Tx\n");
697 return -ENOSPC;
698 }
699
700 spin_lock_irqsave(&priv->hcmd_lock, flags);
701
702 tfd = &txq->bd[q->write_ptr];
703 memset(tfd, 0, sizeof(*tfd));
704
705 control_flags = (u32 *) tfd;
706
707 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
708 out_cmd = &txq->cmd[idx];
709
710 out_cmd->hdr.cmd = cmd->id;
711 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
712 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
713
714 /* At this point, the out_cmd now has all of the incoming cmd
715 * information */
716
717 out_cmd->hdr.flags = 0;
718 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
719 INDEX_TO_SEQ(q->write_ptr));
720 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
721 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
722
723 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
724 offsetof(struct iwl4965_cmd, hdr);
725 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
726
727 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
728 "%d bytes at %d[%d]:%d\n",
729 get_cmd_string(out_cmd->hdr.cmd),
730 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
731 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
732
733 txq->need_update = 1;
734
735 /* Set up entry in queue's byte count circular buffer */
b481de9c 736 ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
737
738 /* Increment and update queue's write index */
739 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
740 iwl4965_tx_queue_update_write_ptr(priv, txq);
741
742 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
743 return ret ? ret : idx;
744}
745
746static int iwl4965_send_cmd_async(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
747{
748 int ret;
749
750 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
751
752 /* An asynchronous command can not expect an SKB to be set. */
753 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
754
755 /* An asynchronous command MUST have a callback. */
756 BUG_ON(!cmd->meta.u.callback);
757
758 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
759 return -EBUSY;
760
761 ret = iwl4965_enqueue_hcmd(priv, cmd);
762 if (ret < 0) {
763 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
764 get_cmd_string(cmd->id), ret);
765 return ret;
766 }
767 return 0;
768}
769
770static int iwl4965_send_cmd_sync(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
771{
772 int cmd_idx;
773 int ret;
774 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
775
776 BUG_ON(cmd->meta.flags & CMD_ASYNC);
777
778 /* A synchronous command can not have a callback set. */
779 BUG_ON(cmd->meta.u.callback != NULL);
780
781 if (atomic_xchg(&entry, 1)) {
782 IWL_ERROR("Error sending %s: Already sending a host command\n",
783 get_cmd_string(cmd->id));
784 return -EBUSY;
785 }
786
787 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
788
789 if (cmd->meta.flags & CMD_WANT_SKB)
790 cmd->meta.source = &cmd->meta;
791
792 cmd_idx = iwl4965_enqueue_hcmd(priv, cmd);
793 if (cmd_idx < 0) {
794 ret = cmd_idx;
795 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
796 get_cmd_string(cmd->id), ret);
797 goto out;
798 }
799
800 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
801 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
802 HOST_COMPLETE_TIMEOUT);
803 if (!ret) {
804 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
805 IWL_ERROR("Error sending %s: time out after %dms.\n",
806 get_cmd_string(cmd->id),
807 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
808
809 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
810 ret = -ETIMEDOUT;
811 goto cancel;
812 }
813 }
814
815 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
816 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
817 get_cmd_string(cmd->id));
818 ret = -ECANCELED;
819 goto fail;
820 }
821 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
822 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
823 get_cmd_string(cmd->id));
824 ret = -EIO;
825 goto fail;
826 }
827 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
828 IWL_ERROR("Error: Response NULL in '%s'\n",
829 get_cmd_string(cmd->id));
830 ret = -EIO;
831 goto out;
832 }
833
834 ret = 0;
835 goto out;
836
837cancel:
838 if (cmd->meta.flags & CMD_WANT_SKB) {
839 struct iwl4965_cmd *qcmd;
840
841 /* Cancel the CMD_WANT_SKB flag for the cmd in the
842 * TX cmd queue. Otherwise in case the cmd comes
843 * in later, it will possibly set an invalid
844 * address (cmd->meta.source). */
845 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
846 qcmd->meta.flags &= ~CMD_WANT_SKB;
847 }
848fail:
849 if (cmd->meta.u.skb) {
850 dev_kfree_skb_any(cmd->meta.u.skb);
851 cmd->meta.u.skb = NULL;
852 }
853out:
854 atomic_set(&entry, 0);
855 return ret;
856}
857
858int iwl4965_send_cmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
859{
860 if (cmd->meta.flags & CMD_ASYNC)
861 return iwl4965_send_cmd_async(priv, cmd);
862
863 return iwl4965_send_cmd_sync(priv, cmd);
864}
865
866int iwl4965_send_cmd_pdu(struct iwl4965_priv *priv, u8 id, u16 len, const void *data)
867{
868 struct iwl4965_host_cmd cmd = {
869 .id = id,
870 .len = len,
871 .data = data,
872 };
873
874 return iwl4965_send_cmd_sync(priv, &cmd);
875}
876
877static int __must_check iwl4965_send_cmd_u32(struct iwl4965_priv *priv, u8 id, u32 val)
878{
879 struct iwl4965_host_cmd cmd = {
880 .id = id,
881 .len = sizeof(val),
882 .data = &val,
883 };
884
885 return iwl4965_send_cmd_sync(priv, &cmd);
886}
887
888int iwl4965_send_statistics_request(struct iwl4965_priv *priv)
889{
890 return iwl4965_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
891}
892
893/**
894 * iwl4965_rxon_add_station - add station into station table.
895 *
896 * there is only one AP station with id= IWL_AP_ID
897 * NOTE: mutex must be held before calling this function
898 */
899static int iwl4965_rxon_add_station(struct iwl4965_priv *priv,
900 const u8 *addr, int is_ap)
901{
902 u8 sta_id;
903
904 /* Add station to device's station table */
905#ifdef CONFIG_IWL4965_HT
906 struct ieee80211_conf *conf = &priv->hw->conf;
907 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
908
909 if ((is_ap) &&
910 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
911 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
912 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
913 0, cur_ht_config);
914 else
915#endif /* CONFIG_IWL4965_HT */
916 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
917 0, NULL);
918
919 /* Set up default rate scaling table in device's station table */
920 iwl4965_add_station(priv, addr, is_ap);
921
922 return sta_id;
923}
924
925/**
926 * iwl4965_set_rxon_channel - Set the phymode and channel values in staging RXON
927 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
928 * @channel: Any channel valid for the requested phymode
929 *
930 * In addition to setting the staging RXON, priv->phymode is also set.
931 *
932 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
933 * in the staging RXON flag structure based on the phymode
934 */
935static int iwl4965_set_rxon_channel(struct iwl4965_priv *priv, u8 phymode,
936 u16 channel)
937{
938 if (!iwl4965_get_channel_info(priv, phymode, channel)) {
939 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
940 channel, phymode);
941 return -EINVAL;
942 }
943
944 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
945 (priv->phymode == phymode))
946 return 0;
947
948 priv->staging_rxon.channel = cpu_to_le16(channel);
949 if (phymode == MODE_IEEE80211A)
950 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
951 else
952 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
953
954 priv->phymode = phymode;
955
956 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
957
958 return 0;
959}
960
961/**
962 * iwl4965_check_rxon_cmd - check that the RXON structure is valid
963 *
964 * NOTE: This is really only useful during development and can eventually
965 * be #ifdef'd out once the driver is stable and folks aren't actively
966 * making changes
967 */
968static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon)
969{
970 int error = 0;
971 int counter = 1;
972
973 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
974 error |= le32_to_cpu(rxon->flags &
975 (RXON_FLG_TGJ_NARROW_BAND_MSK |
976 RXON_FLG_RADAR_DETECT_MSK));
977 if (error)
978 IWL_WARNING("check 24G fields %d | %d\n",
979 counter++, error);
980 } else {
981 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
982 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
983 if (error)
984 IWL_WARNING("check 52 fields %d | %d\n",
985 counter++, error);
986 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
987 if (error)
988 IWL_WARNING("check 52 CCK %d | %d\n",
989 counter++, error);
990 }
991 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
992 if (error)
993 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
994
995 /* make sure basic rates 6Mbps and 1Mbps are supported */
996 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
997 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
998 if (error)
999 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
1000
1001 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
1002 if (error)
1003 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
1004
1005 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
1006 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
1007 if (error)
1008 IWL_WARNING("check CCK and short slot %d | %d\n",
1009 counter++, error);
1010
1011 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
1012 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
1013 if (error)
1014 IWL_WARNING("check CCK & auto detect %d | %d\n",
1015 counter++, error);
1016
1017 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
1018 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
1019 if (error)
1020 IWL_WARNING("check TGG and auto detect %d | %d\n",
1021 counter++, error);
1022
1023 if (error)
1024 IWL_WARNING("Tuning to channel %d\n",
1025 le16_to_cpu(rxon->channel));
1026
1027 if (error) {
1028 IWL_ERROR("Not a valid iwl4965_rxon_assoc_cmd field values\n");
1029 return -1;
1030 }
1031 return 0;
1032}
1033
1034/**
1035 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
1036 * @priv: staging_rxon is compared to active_rxon
1037 *
1038 * If the RXON structure is changing enough to require a new tune,
1039 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
1040 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
1041 */
1042static int iwl4965_full_rxon_required(struct iwl4965_priv *priv)
1043{
1044
1045 /* These items are only settable from the full RXON command */
1046 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
1047 compare_ether_addr(priv->staging_rxon.bssid_addr,
1048 priv->active_rxon.bssid_addr) ||
1049 compare_ether_addr(priv->staging_rxon.node_addr,
1050 priv->active_rxon.node_addr) ||
1051 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
1052 priv->active_rxon.wlap_bssid_addr) ||
1053 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
1054 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
1055 (priv->staging_rxon.air_propagation !=
1056 priv->active_rxon.air_propagation) ||
1057 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
1058 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
1059 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
1060 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
1061 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
1062 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
1063 return 1;
1064
1065 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
1066 * be updated with the RXON_ASSOC command -- however only some
1067 * flag transitions are allowed using RXON_ASSOC */
1068
1069 /* Check if we are not switching bands */
1070 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1071 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1072 return 1;
1073
1074 /* Check if we are switching association toggle */
1075 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1076 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1077 return 1;
1078
1079 return 0;
1080}
1081
1082static int iwl4965_send_rxon_assoc(struct iwl4965_priv *priv)
1083{
1084 int rc = 0;
1085 struct iwl4965_rx_packet *res = NULL;
1086 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1087 struct iwl4965_host_cmd cmd = {
1088 .id = REPLY_RXON_ASSOC,
1089 .len = sizeof(rxon_assoc),
1090 .meta.flags = CMD_WANT_SKB,
1091 .data = &rxon_assoc,
1092 };
1093 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
1094 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
1095
1096 if ((rxon1->flags == rxon2->flags) &&
1097 (rxon1->filter_flags == rxon2->filter_flags) &&
1098 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1099 (rxon1->ofdm_ht_single_stream_basic_rates ==
1100 rxon2->ofdm_ht_single_stream_basic_rates) &&
1101 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1102 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1103 (rxon1->rx_chain == rxon2->rx_chain) &&
1104 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1105 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1106 return 0;
1107 }
1108
1109 rxon_assoc.flags = priv->staging_rxon.flags;
1110 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1111 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1112 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1113 rxon_assoc.reserved = 0;
1114 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1115 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1116 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1117 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1118 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1119
1120 rc = iwl4965_send_cmd_sync(priv, &cmd);
1121 if (rc)
1122 return rc;
1123
1124 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1125 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1126 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1127 rc = -EIO;
1128 }
1129
1130 priv->alloc_rxb_skb--;
1131 dev_kfree_skb_any(cmd.meta.u.skb);
1132
1133 return rc;
1134}
1135
1136/**
1137 * iwl4965_commit_rxon - commit staging_rxon to hardware
1138 *
1139 * The RXON command in staging_rxon is committed to the hardware and
1140 * the active_rxon structure is updated with the new data. This
1141 * function correctly transitions out of the RXON_ASSOC_MSK state if
1142 * a HW tune is required based on the RXON structure changes.
1143 */
1144static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1145{
1146 /* cast away the const for active_rxon in this function */
1147 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1148 DECLARE_MAC_BUF(mac);
1149 int rc = 0;
1150
1151 if (!iwl4965_is_alive(priv))
1152 return -1;
1153
1154 /* always get timestamp with Rx frame */
1155 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1156
1157 rc = iwl4965_check_rxon_cmd(&priv->staging_rxon);
1158 if (rc) {
1159 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
1160 return -EINVAL;
1161 }
1162
1163 /* If we don't need to send a full RXON, we can use
1164 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
1165 * and other flags for the current radio configuration. */
1166 if (!iwl4965_full_rxon_required(priv)) {
1167 rc = iwl4965_send_rxon_assoc(priv);
1168 if (rc) {
1169 IWL_ERROR("Error setting RXON_ASSOC "
1170 "configuration (%d).\n", rc);
1171 return rc;
1172 }
1173
1174 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1175
1176 return 0;
1177 }
1178
1179 /* station table will be cleared */
1180 priv->assoc_station_added = 0;
1181
1182#ifdef CONFIG_IWL4965_SENSITIVITY
1183 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1184 if (!priv->error_recovering)
1185 priv->start_calib = 0;
1186
1187 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1188#endif /* CONFIG_IWL4965_SENSITIVITY */
1189
1190 /* If we are currently associated and the new config requires
1191 * an RXON_ASSOC and the new config wants the associated mask enabled,
1192 * we must clear the associated from the active configuration
1193 * before we apply the new config */
1194 if (iwl4965_is_associated(priv) &&
1195 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1196 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1197 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1198
1199 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1200 sizeof(struct iwl4965_rxon_cmd),
1201 &priv->active_rxon);
1202
1203 /* If the mask clearing failed then we set
1204 * active_rxon back to what it was previously */
1205 if (rc) {
1206 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1207 IWL_ERROR("Error clearing ASSOC_MSK on current "
1208 "configuration (%d).\n", rc);
1209 return rc;
1210 }
1211 }
1212
1213 IWL_DEBUG_INFO("Sending RXON\n"
1214 "* with%s RXON_FILTER_ASSOC_MSK\n"
1215 "* channel = %d\n"
1216 "* bssid = %s\n",
1217 ((priv->staging_rxon.filter_flags &
1218 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1219 le16_to_cpu(priv->staging_rxon.channel),
1220 print_mac(mac, priv->staging_rxon.bssid_addr));
1221
1222 /* Apply the new configuration */
1223 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1224 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon);
1225 if (rc) {
1226 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1227 return rc;
1228 }
1229
1230 iwl4965_clear_stations_table(priv);
1231
1232#ifdef CONFIG_IWL4965_SENSITIVITY
1233 if (!priv->error_recovering)
1234 priv->start_calib = 0;
1235
1236 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1237 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1238#endif /* CONFIG_IWL4965_SENSITIVITY */
1239
1240 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1241
1242 /* If we issue a new RXON command which required a tune then we must
1243 * send a new TXPOWER command or we won't be able to Tx any frames */
1244 rc = iwl4965_hw_reg_send_txpower(priv);
1245 if (rc) {
1246 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1247 return rc;
1248 }
1249
1250 /* Add the broadcast address so we can send broadcast frames */
1251 if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) ==
1252 IWL_INVALID_STATION) {
1253 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1254 return -EIO;
1255 }
1256
1257 /* If we have set the ASSOC_MSK and we are in BSS mode then
1258 * add the IWL_AP_ID to the station rate table */
1259 if (iwl4965_is_associated(priv) &&
1260 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
1261 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1262 == IWL_INVALID_STATION) {
1263 IWL_ERROR("Error adding AP address for transmit.\n");
1264 return -EIO;
1265 }
1266 priv->assoc_station_added = 1;
1267 }
1268
1269 return 0;
1270}
1271
1272static int iwl4965_send_bt_config(struct iwl4965_priv *priv)
1273{
1274 struct iwl4965_bt_cmd bt_cmd = {
1275 .flags = 3,
1276 .lead_time = 0xAA,
1277 .max_kill = 1,
1278 .kill_ack_mask = 0,
1279 .kill_cts_mask = 0,
1280 };
1281
1282 return iwl4965_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1283 sizeof(struct iwl4965_bt_cmd), &bt_cmd);
1284}
1285
1286static int iwl4965_send_scan_abort(struct iwl4965_priv *priv)
1287{
1288 int rc = 0;
1289 struct iwl4965_rx_packet *res;
1290 struct iwl4965_host_cmd cmd = {
1291 .id = REPLY_SCAN_ABORT_CMD,
1292 .meta.flags = CMD_WANT_SKB,
1293 };
1294
1295 /* If there isn't a scan actively going on in the hardware
1296 * then we are in between scan bands and not actually
1297 * actively scanning, so don't send the abort command */
1298 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1299 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1300 return 0;
1301 }
1302
1303 rc = iwl4965_send_cmd_sync(priv, &cmd);
1304 if (rc) {
1305 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1306 return rc;
1307 }
1308
1309 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1310 if (res->u.status != CAN_ABORT_STATUS) {
1311 /* The scan abort will return 1 for success or
1312 * 2 for "failure". A failure condition can be
1313 * due to simply not being in an active scan which
1314 * can occur if we send the scan abort before
1315 * the microcode has notified us that a scan is
1316 * completed. */
1317 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1318 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1319 clear_bit(STATUS_SCAN_HW, &priv->status);
1320 }
1321
1322 dev_kfree_skb_any(cmd.meta.u.skb);
1323
1324 return rc;
1325}
1326
1327static int iwl4965_card_state_sync_callback(struct iwl4965_priv *priv,
1328 struct iwl4965_cmd *cmd,
1329 struct sk_buff *skb)
1330{
1331 return 1;
1332}
1333
1334/*
1335 * CARD_STATE_CMD
1336 *
1337 * Use: Sets the device's internal card state to enable, disable, or halt
1338 *
1339 * When in the 'enable' state the card operates as normal.
1340 * When in the 'disable' state, the card enters into a low power mode.
1341 * When in the 'halt' state, the card is shut down and must be fully
1342 * restarted to come back on.
1343 */
1344static int iwl4965_send_card_state(struct iwl4965_priv *priv, u32 flags, u8 meta_flag)
1345{
1346 struct iwl4965_host_cmd cmd = {
1347 .id = REPLY_CARD_STATE_CMD,
1348 .len = sizeof(u32),
1349 .data = &flags,
1350 .meta.flags = meta_flag,
1351 };
1352
1353 if (meta_flag & CMD_ASYNC)
1354 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
1355
1356 return iwl4965_send_cmd(priv, &cmd);
1357}
1358
1359static int iwl4965_add_sta_sync_callback(struct iwl4965_priv *priv,
1360 struct iwl4965_cmd *cmd, struct sk_buff *skb)
1361{
1362 struct iwl4965_rx_packet *res = NULL;
1363
1364 if (!skb) {
1365 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1366 return 1;
1367 }
1368
1369 res = (struct iwl4965_rx_packet *)skb->data;
1370 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1371 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1372 res->hdr.flags);
1373 return 1;
1374 }
1375
1376 switch (res->u.add_sta.status) {
1377 case ADD_STA_SUCCESS_MSK:
1378 break;
1379 default:
1380 break;
1381 }
1382
1383 /* We didn't cache the SKB; let the caller free it */
1384 return 1;
1385}
1386
1387int iwl4965_send_add_station(struct iwl4965_priv *priv,
1388 struct iwl4965_addsta_cmd *sta, u8 flags)
1389{
1390 struct iwl4965_rx_packet *res = NULL;
1391 int rc = 0;
1392 struct iwl4965_host_cmd cmd = {
1393 .id = REPLY_ADD_STA,
1394 .len = sizeof(struct iwl4965_addsta_cmd),
1395 .meta.flags = flags,
1396 .data = sta,
1397 };
1398
1399 if (flags & CMD_ASYNC)
bb8c093b 1400 cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
b481de9c
ZY
1401 else
1402 cmd.meta.flags |= CMD_WANT_SKB;
1403
1404 rc = iwl4965_send_cmd(priv, &cmd);
1405
1406 if (rc || (flags & CMD_ASYNC))
1407 return rc;
1408
1409 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1410 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1411 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1412 res->hdr.flags);
1413 rc = -EIO;
1414 }
1415
1416 if (rc == 0) {
1417 switch (res->u.add_sta.status) {
1418 case ADD_STA_SUCCESS_MSK:
1419 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1420 break;
1421 default:
1422 rc = -EIO;
1423 IWL_WARNING("REPLY_ADD_STA failed\n");
1424 break;
1425 }
1426 }
1427
1428 priv->alloc_rxb_skb--;
1429 dev_kfree_skb_any(cmd.meta.u.skb);
1430
1431 return rc;
1432}
1433
1434static int iwl4965_update_sta_key_info(struct iwl4965_priv *priv,
1435 struct ieee80211_key_conf *keyconf,
1436 u8 sta_id)
1437{
1438 unsigned long flags;
1439 __le16 key_flags = 0;
1440
1441 switch (keyconf->alg) {
1442 case ALG_CCMP:
1443 key_flags |= STA_KEY_FLG_CCMP;
1444 key_flags |= cpu_to_le16(
1445 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1446 key_flags &= ~STA_KEY_FLG_INVALID;
1447 break;
1448 case ALG_TKIP:
1449 case ALG_WEP:
1450 default:
1451 return -EINVAL;
1452 }
1453 spin_lock_irqsave(&priv->sta_lock, flags);
1454 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1455 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1456 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1457 keyconf->keylen);
1458
1459 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1460 keyconf->keylen);
1461 priv->stations[sta_id].sta.key.key_flags = key_flags;
1462 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1463 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1464
1465 spin_unlock_irqrestore(&priv->sta_lock, flags);
1466
1467 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1468 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1469 return 0;
1470}
1471
1472static int iwl4965_clear_sta_key_info(struct iwl4965_priv *priv, u8 sta_id)
1473{
1474 unsigned long flags;
1475
1476 spin_lock_irqsave(&priv->sta_lock, flags);
1477 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl4965_hw_key));
1478 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo));
1479 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1480 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1481 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1482 spin_unlock_irqrestore(&priv->sta_lock, flags);
1483
1484 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1485 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1486 return 0;
1487}
1488
1489static void iwl4965_clear_free_frames(struct iwl4965_priv *priv)
1490{
1491 struct list_head *element;
1492
1493 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1494 priv->frames_count);
1495
1496 while (!list_empty(&priv->free_frames)) {
1497 element = priv->free_frames.next;
1498 list_del(element);
1499 kfree(list_entry(element, struct iwl4965_frame, list));
1500 priv->frames_count--;
1501 }
1502
1503 if (priv->frames_count) {
1504 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1505 priv->frames_count);
1506 priv->frames_count = 0;
1507 }
1508}
1509
1510static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl4965_priv *priv)
1511{
1512 struct iwl4965_frame *frame;
1513 struct list_head *element;
1514 if (list_empty(&priv->free_frames)) {
1515 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1516 if (!frame) {
1517 IWL_ERROR("Could not allocate frame!\n");
1518 return NULL;
1519 }
1520
1521 priv->frames_count++;
1522 return frame;
1523 }
1524
1525 element = priv->free_frames.next;
1526 list_del(element);
1527 return list_entry(element, struct iwl4965_frame, list);
1528}
1529
1530static void iwl4965_free_frame(struct iwl4965_priv *priv, struct iwl4965_frame *frame)
1531{
1532 memset(frame, 0, sizeof(*frame));
1533 list_add(&frame->list, &priv->free_frames);
1534}
1535
1536unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv,
1537 struct ieee80211_hdr *hdr,
1538 const u8 *dest, int left)
1539{
1540
1541 if (!iwl4965_is_associated(priv) || !priv->ibss_beacon ||
1542 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1543 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1544 return 0;
1545
1546 if (priv->ibss_beacon->len > left)
1547 return 0;
1548
1549 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1550
1551 return priv->ibss_beacon->len;
1552}
1553
1554int iwl4965_rate_index_from_plcp(int plcp)
1555{
1556 int i = 0;
1557
1558 /* 4965 HT rate format */
1559 if (plcp & RATE_MCS_HT_MSK) {
1560 i = (plcp & 0xff);
1561
1562 if (i >= IWL_RATE_MIMO_6M_PLCP)
1563 i = i - IWL_RATE_MIMO_6M_PLCP;
1564
1565 i += IWL_FIRST_OFDM_RATE;
1566 /* skip 9M not supported in ht*/
1567 if (i >= IWL_RATE_9M_INDEX)
1568 i += 1;
1569 if ((i >= IWL_FIRST_OFDM_RATE) &&
1570 (i <= IWL_LAST_OFDM_RATE))
1571 return i;
1572
1573 /* 4965 legacy rate format, search for match in table */
1574 } else {
1575 for (i = 0; i < ARRAY_SIZE(iwl4965_rates); i++)
1576 if (iwl4965_rates[i].plcp == (plcp &0xFF))
1577 return i;
1578 }
1579 return -1;
1580}
1581
1582static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
1583{
1584 u8 i;
1585
1586 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1587 i = iwl4965_rates[i].next_ieee) {
1588 if (rate_mask & (1 << i))
1589 return iwl4965_rates[i].plcp;
1590 }
1591
1592 return IWL_RATE_INVALID;
1593}
1594
1595static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv)
1596{
1597 struct iwl4965_frame *frame;
1598 unsigned int frame_size;
1599 int rc;
1600 u8 rate;
1601
1602 frame = iwl4965_get_free_frame(priv);
1603
1604 if (!frame) {
1605 IWL_ERROR("Could not obtain free frame buffer for beacon "
1606 "command.\n");
1607 return -ENOMEM;
1608 }
1609
1610 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1611 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
1612 0xFF0);
1613 if (rate == IWL_INVALID_RATE)
1614 rate = IWL_RATE_6M_PLCP;
1615 } else {
1616 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1617 if (rate == IWL_INVALID_RATE)
1618 rate = IWL_RATE_1M_PLCP;
1619 }
1620
1621 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
1622
1623 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1624 &frame->u.cmd[0]);
1625
1626 iwl4965_free_frame(priv, frame);
1627
1628 return rc;
1629}
1630
1631/******************************************************************************
1632 *
1633 * EEPROM related functions
1634 *
1635 ******************************************************************************/
1636
1637static void get_eeprom_mac(struct iwl4965_priv *priv, u8 *mac)
1638{
1639 memcpy(mac, priv->eeprom.mac_address, 6);
1640}
1641
1642/**
1643 * iwl4965_eeprom_init - read EEPROM contents
1644 *
1645 * Load the EEPROM contents from adapter into priv->eeprom
1646 *
1647 * NOTE: This routine uses the non-debug IO access functions.
1648 */
1649int iwl4965_eeprom_init(struct iwl4965_priv *priv)
1650{
1651 __le16 *e = (__le16 *)&priv->eeprom;
1652 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
1653 u32 r;
1654 int sz = sizeof(priv->eeprom);
1655 int rc;
1656 int i;
1657 u16 addr;
1658
1659 /* The EEPROM structure has several padding buffers within it
1660 * and when adding new EEPROM maps is subject to programmer errors
1661 * which may be very difficult to identify without explicitly
1662 * checking the resulting size of the eeprom map. */
1663 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1664
1665 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1666 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1667 return -ENOENT;
1668 }
1669
1670 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1671 rc = iwl4965_eeprom_acquire_semaphore(priv);
1672 if (rc < 0) {
1673 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1674 return -ENOENT;
1675 }
1676
1677 /* eeprom is an array of 16bit values */
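	/* Each pass reads one 16-bit word: the offset (shifted into the
	 * register's address field) is written to CSR_EEPROM_REG, the
	 * command bit is cleared to start the read, and the loop below
	 * polls for READ_VALID; the data arrives in the upper 16 bits. */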
1678 for (addr = 0; addr < sz; addr += sizeof(u16)) {
bb8c093b
CH
1679 _iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
1680 _iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
b481de9c
ZY
1681
1682 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1683 i += IWL_EEPROM_ACCESS_DELAY) {
bb8c093b 1684 r = _iwl4965_read_direct32(priv, CSR_EEPROM_REG);
b481de9c
ZY
1685 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1686 break;
1687 udelay(IWL_EEPROM_ACCESS_DELAY);
1688 }
1689
1690 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1691 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1692 rc = -ETIMEDOUT;
1693 goto done;
1694 }
0e5ce1f3 1695 e[addr / 2] = cpu_to_le16(r >> 16);
b481de9c
ZY
1696 }
1697 rc = 0;
1698
1699done:
bb8c093b 1700 iwl4965_eeprom_release_semaphore(priv);
b481de9c
ZY
1701 return rc;
1702}
1703
1704/******************************************************************************
1705 *
1706 * Misc. internal state and helper functions
1707 *
1708 ******************************************************************************/
c8b0e6e1 1709#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
1710
1711/**
bb8c093b 1712 * iwl4965_report_frame - dump frame to syslog during debug sessions
b481de9c 1713 *
9fbab516 1714 * You may hack this function to show different aspects of received frames,
b481de9c
ZY
1715 * including selective frame dumps.
1716 * group100 parameter selects whether to show 1 out of 100 good frames.
1717 *
9fbab516
BC
1718 * TODO: This was originally written for 3945, need to audit for
1719 * proper operation with 4965.
b481de9c 1720 */
bb8c093b
CH
1721void iwl4965_report_frame(struct iwl4965_priv *priv,
1722 struct iwl4965_rx_packet *pkt,
b481de9c
ZY
1723 struct ieee80211_hdr *header, int group100)
1724{
1725 u32 to_us;
1726 u32 print_summary = 0;
1727 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1728 u32 hundred = 0;
1729 u32 dataframe = 0;
1730 u16 fc;
1731 u16 seq_ctl;
1732 u16 channel;
1733 u16 phy_flags;
1734 int rate_sym;
1735 u16 length;
1736 u16 status;
1737 u16 bcn_tmr;
1738 u32 tsf_low;
1739 u64 tsf;
1740 u8 rssi;
1741 u8 agc;
1742 u16 sig_avg;
1743 u16 noise_diff;
bb8c093b
CH
1744 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1745 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1746 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
b481de9c
ZY
1747 u8 *data = IWL_RX_DATA(pkt);
1748
1749 /* MAC header */
1750 fc = le16_to_cpu(header->frame_control);
1751 seq_ctl = le16_to_cpu(header->seq_ctrl);
1752
1753 /* metadata */
1754 channel = le16_to_cpu(rx_hdr->channel);
1755 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1756 rate_sym = rx_hdr->rate;
1757 length = le16_to_cpu(rx_hdr->len);
1758
1759 /* end-of-frame status and timestamp */
1760 status = le32_to_cpu(rx_end->status);
1761 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1762 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1763 tsf = le64_to_cpu(rx_end->timestamp);
1764
1765 /* signal statistics */
1766 rssi = rx_stats->rssi;
1767 agc = rx_stats->agc;
1768 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1769 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1770
1771 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1772
1773 /* if data frame is to us and all is good,
1774 * (optionally) print summary for only 1 out of every 100 */
1775 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1776 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1777 dataframe = 1;
1778 if (!group100)
1779 print_summary = 1; /* print each frame */
1780 else if (priv->framecnt_to_us < 100) {
1781 priv->framecnt_to_us++;
1782 print_summary = 0;
1783 } else {
1784 priv->framecnt_to_us = 0;
1785 print_summary = 1;
1786 hundred = 1;
1787 }
1788 } else {
1789 /* print summary for all other frames */
1790 print_summary = 1;
1791 }
1792
1793 if (print_summary) {
1794 char *title;
1795 u32 rate;
1796
1797 if (hundred)
1798 title = "100Frames";
1799 else if (fc & IEEE80211_FCTL_RETRY)
1800 title = "Retry";
1801 else if (ieee80211_is_assoc_response(fc))
1802 title = "AscRsp";
1803 else if (ieee80211_is_reassoc_response(fc))
1804 title = "RasRsp";
1805 else if (ieee80211_is_probe_response(fc)) {
1806 title = "PrbRsp";
1807 print_dump = 1; /* dump frame contents */
1808 } else if (ieee80211_is_beacon(fc)) {
1809 title = "Beacon";
1810 print_dump = 1; /* dump frame contents */
1811 } else if (ieee80211_is_atim(fc))
1812 title = "ATIM";
1813 else if (ieee80211_is_auth(fc))
1814 title = "Auth";
1815 else if (ieee80211_is_deauth(fc))
1816 title = "DeAuth";
1817 else if (ieee80211_is_disassoc(fc))
1818 title = "DisAssoc";
1819 else
1820 title = "Frame";
1821
bb8c093b 1822 rate = iwl4965_rate_index_from_plcp(rate_sym);
b481de9c
ZY
1823 if (rate == -1)
1824 rate = 0;
1825 else
bb8c093b 1826 rate = iwl4965_rates[rate].ieee / 2;
b481de9c
ZY
1827
1828 /* print frame summary.
1829 * MAC addresses show just the last byte (for brevity),
1830 * but you can hack it to show more, if you'd like to. */
1831 if (dataframe)
1832 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1833 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1834 title, fc, header->addr1[5],
1835 length, rssi, channel, rate);
1836 else {
1837 /* src/dst addresses assume managed mode */
1838 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1839 "src=0x%02x, rssi=%u, tim=%lu usec, "
1840 "phy=0x%02x, chnl=%d\n",
1841 title, fc, header->addr1[5],
1842 header->addr3[5], rssi,
1843 tsf_low - priv->scan_start_tsf,
1844 phy_flags, channel);
1845 }
1846 }
1847 if (print_dump)
bb8c093b 1848 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
b481de9c
ZY
1849}
1850#endif
1851
bb8c093b 1852static void iwl4965_unset_hw_setting(struct iwl4965_priv *priv)
b481de9c
ZY
1853{
1854 if (priv->hw_setting.shared_virt)
1855 pci_free_consistent(priv->pci_dev,
bb8c093b 1856 sizeof(struct iwl4965_shared),
b481de9c
ZY
1857 priv->hw_setting.shared_virt,
1858 priv->hw_setting.shared_phys);
1859}
1860
1861/**
bb8c093b 1862 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
b481de9c
ZY
1863 *
 1864 * Return: bitmask of the supported rates that were inserted into the IE
1865 */
bb8c093b 1866static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
c7c46676 1867 u16 basic_rate, int *left)
b481de9c
ZY
1868{
1869 u16 ret_rates = 0, bit;
1870 int i;
c7c46676
TW
1871 u8 *cnt = ie;
1872 u8 *rates = ie + 1;
b481de9c
ZY
1873
1874 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1875 if (bit & supported_rate) {
1876 ret_rates |= bit;
bb8c093b 1877 rates[*cnt] = iwl4965_rates[i].ieee |
c7c46676
TW
1878 ((bit & basic_rate) ? 0x80 : 0x00);
1879 (*cnt)++;
1880 (*left)--;
1881 if ((*left <= 0) ||
1882 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
b481de9c
ZY
1883 break;
1884 }
1885 }
1886
1887 return ret_rates;
1888}
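/*
 * The IE built above follows the 802.11 Supported Rates encoding:
 * ie[0] holds the rate count and each following byte is a rate in
 * 500 kbps units, with bit 7 set for rates that are also basic rates.
 * For example, a basic 1 Mbps rate is emitted as 0x82 (2 * 500 kbps,
 * basic), while a non-basic 54 Mbps rate is emitted as 0x6c (108).
 */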
1889
c8b0e6e1 1890#ifdef CONFIG_IWL4965_HT
bb8c093b 1891static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8fb88032
RR
1892 struct ieee80211_ht_cap *ht_cap,
1893 u8 use_current_config);
b481de9c
ZY
1894#endif
1895
1896/**
bb8c093b 1897 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
b481de9c 1898 */
bb8c093b 1899static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv,
b481de9c
ZY
1900 struct ieee80211_mgmt *frame,
1901 int left, int is_direct)
1902{
1903 int len = 0;
1904 u8 *pos = NULL;
bee488db 1905 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
8fb88032
RR
1906#ifdef CONFIG_IWL4965_HT
1907 struct ieee80211_hw_mode *mode;
1908#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
1909
1910 /* Make sure there is enough space for the probe request,
1911 * two mandatory IEs and the data */
1912 left -= 24;
1913 if (left < 0)
1914 return 0;
1915 len += 24;
1916
1917 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
bb8c093b 1918 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c 1919 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
bb8c093b 1920 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c
ZY
1921 frame->seq_ctrl = 0;
1922
1923 /* fill in our indirect SSID IE */
1924 /* ...next IE... */
1925
1926 left -= 2;
1927 if (left < 0)
1928 return 0;
1929 len += 2;
1930 pos = &(frame->u.probe_req.variable[0]);
1931 *pos++ = WLAN_EID_SSID;
1932 *pos++ = 0;
1933
1934 /* fill in our direct SSID IE... */
1935 if (is_direct) {
1936 /* ...next IE... */
1937 left -= 2 + priv->essid_len;
1938 if (left < 0)
1939 return 0;
1940 /* ... fill it in... */
1941 *pos++ = WLAN_EID_SSID;
1942 *pos++ = priv->essid_len;
1943 memcpy(pos, priv->essid, priv->essid_len);
1944 pos += priv->essid_len;
1945 len += 2 + priv->essid_len;
1946 }
1947
1948 /* fill in supported rate */
1949 /* ...next IE... */
1950 left -= 2;
1951 if (left < 0)
1952 return 0;
c7c46676 1953
b481de9c
ZY
1954 /* ... fill it in... */
1955 *pos++ = WLAN_EID_SUPP_RATES;
1956 *pos = 0;
c7c46676 1957
bee488db 1958 /* exclude 60M rate */
1959 active_rates = priv->rates_mask;
1960 active_rates &= ~IWL_RATE_60M_MASK;
1961
1962 active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;
b481de9c 1963
c7c46676 1964 cck_rates = IWL_CCK_RATES_MASK & active_rates;
bb8c093b 1965 ret_rates = iwl4965_supported_rate_to_ie(pos, cck_rates,
bee488db 1966 active_rate_basic, &left);
c7c46676
TW
1967 active_rates &= ~ret_rates;
1968
bb8c093b 1969 ret_rates = iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1970 active_rate_basic, &left);
c7c46676
TW
1971 active_rates &= ~ret_rates;
1972
b481de9c
ZY
1973 len += 2 + *pos;
1974 pos += (*pos) + 1;
c7c46676 1975 if (active_rates == 0)
b481de9c
ZY
1976 goto fill_end;
1977
1978 /* fill in supported extended rate */
1979 /* ...next IE... */
1980 left -= 2;
1981 if (left < 0)
1982 return 0;
1983 /* ... fill it in... */
1984 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1985 *pos = 0;
bb8c093b 1986 iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1987 active_rate_basic, &left);
b481de9c
ZY
1988 if (*pos > 0)
1989 len += 2 + *pos;
1990
c8b0e6e1 1991#ifdef CONFIG_IWL4965_HT
8fb88032
RR
1992 mode = priv->hw->conf.mode;
1993 if (mode->ht_info.ht_supported) {
b481de9c
ZY
1994 pos += (*pos) + 1;
1995 *pos++ = WLAN_EID_HT_CAPABILITY;
8fb88032
RR
1996 *pos++ = sizeof(struct ieee80211_ht_cap);
1997 iwl4965_set_ht_capab(priv->hw,
1998 (struct ieee80211_ht_cap *)pos, 0);
1999 len += 2 + sizeof(struct ieee80211_ht_cap);
b481de9c 2000 }
c8b0e6e1 2001#endif /*CONFIG_IWL4965_HT */
b481de9c
ZY
2002
2003 fill_end:
2004 return (u16)len;
2005}
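/*
 * The resulting probe request is laid out as: a 24-byte management
 * header, a zero-length (broadcast) SSID IE, optionally a directed
 * SSID IE, the Supported Rates IE (CCK rates first), an Extended
 * Supported Rates IE for any rates that did not fit in the first one,
 * and, when HT is compiled in and the current mode supports it, an HT
 * Capabilities IE.  "left" is decremented as each part is added so the
 * frame never overruns the caller's buffer.
 */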
2006
2007/*
2008 * QoS support
 2009 */
c8b0e6e1 2010#ifdef CONFIG_IWL4965_QOS
bb8c093b
CH
2011static int iwl4965_send_qos_params_command(struct iwl4965_priv *priv,
2012 struct iwl4965_qosparam_cmd *qos)
b481de9c
ZY
2013{
2014
bb8c093b
CH
2015 return iwl4965_send_cmd_pdu(priv, REPLY_QOS_PARAM,
2016 sizeof(struct iwl4965_qosparam_cmd), qos);
b481de9c
ZY
2017}
2018
bb8c093b 2019static void iwl4965_reset_qos(struct iwl4965_priv *priv)
b481de9c
ZY
2020{
2021 u16 cw_min = 15;
2022 u16 cw_max = 1023;
2023 u8 aifs = 2;
2024 u8 is_legacy = 0;
2025 unsigned long flags;
2026 int i;
2027
2028 spin_lock_irqsave(&priv->lock, flags);
2029 priv->qos_data.qos_active = 0;
2030
2031 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
2032 if (priv->qos_data.qos_enable)
2033 priv->qos_data.qos_active = 1;
2034 if (!(priv->active_rate & 0xfff0)) {
2035 cw_min = 31;
2036 is_legacy = 1;
2037 }
2038 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2039 if (priv->qos_data.qos_enable)
2040 priv->qos_data.qos_active = 1;
2041 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
2042 cw_min = 31;
2043 is_legacy = 1;
2044 }
2045
2046 if (priv->qos_data.qos_active)
2047 aifs = 3;
2048
2049 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
2050 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
2051 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
2052 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
2053 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
2054
2055 if (priv->qos_data.qos_active) {
2056 i = 1;
2057 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
2058 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
2059 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
2060 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2061 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2062
2063 i = 2;
2064 priv->qos_data.def_qos_parm.ac[i].cw_min =
2065 cpu_to_le16((cw_min + 1) / 2 - 1);
2066 priv->qos_data.def_qos_parm.ac[i].cw_max =
2067 cpu_to_le16(cw_max);
2068 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2069 if (is_legacy)
2070 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2071 cpu_to_le16(6016);
2072 else
2073 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2074 cpu_to_le16(3008);
2075 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2076
2077 i = 3;
2078 priv->qos_data.def_qos_parm.ac[i].cw_min =
2079 cpu_to_le16((cw_min + 1) / 4 - 1);
2080 priv->qos_data.def_qos_parm.ac[i].cw_max =
2081 cpu_to_le16((cw_max + 1) / 2 - 1);
2082 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2083 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2084 if (is_legacy)
2085 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2086 cpu_to_le16(3264);
2087 else
2088 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2089 cpu_to_le16(1504);
2090 } else {
2091 for (i = 1; i < 4; i++) {
2092 priv->qos_data.def_qos_parm.ac[i].cw_min =
2093 cpu_to_le16(cw_min);
2094 priv->qos_data.def_qos_parm.ac[i].cw_max =
2095 cpu_to_le16(cw_max);
2096 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2097 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2098 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2099 }
2100 }
 2101 IWL_DEBUG_QOS("set QoS to default\n");
2102
2103 spin_unlock_irqrestore(&priv->lock, flags);
2104}
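/*
 * With QoS active, the code above produces the usual EDCA ladder
 * (before the legacy/11b adjustments): AC0 and AC1 keep cw_min 15 and
 * cw_max 1023 with AIFSN 3 and 7, AC2 drops to cw_min 7 with AIFSN 2
 * and a 3008 usec TXOP, and AC3 uses cw_min 3, cw_max 511, AIFSN 2 and
 * a 1504 usec TXOP.  Without QoS, all four ACs share the AC0 settings.
 */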
2105
bb8c093b 2106static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
b481de9c
ZY
2107{
2108 unsigned long flags;
2109
b481de9c
ZY
2110 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2111 return;
2112
2113 if (!priv->qos_data.qos_enable)
2114 return;
2115
2116 spin_lock_irqsave(&priv->lock, flags);
2117 priv->qos_data.def_qos_parm.qos_flags = 0;
2118
2119 if (priv->qos_data.qos_cap.q_AP.queue_request &&
2120 !priv->qos_data.qos_cap.q_AP.txop_request)
2121 priv->qos_data.def_qos_parm.qos_flags |=
2122 QOS_PARAM_FLG_TXOP_TYPE_MSK;
b481de9c
ZY
2123 if (priv->qos_data.qos_active)
2124 priv->qos_data.def_qos_parm.qos_flags |=
2125 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2126
c8b0e6e1 2127#ifdef CONFIG_IWL4965_HT
fd105e79 2128 if (priv->current_ht_config.is_ht)
f1f1f5c7 2129 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
c8b0e6e1 2130#endif /* CONFIG_IWL4965_HT */
f1f1f5c7 2131
b481de9c
ZY
2132 spin_unlock_irqrestore(&priv->lock, flags);
2133
bb8c093b 2134 if (force || iwl4965_is_associated(priv)) {
f1f1f5c7
TW
2135 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2136 priv->qos_data.qos_active,
2137 priv->qos_data.def_qos_parm.qos_flags);
b481de9c 2138
bb8c093b 2139 iwl4965_send_qos_params_command(priv,
b481de9c
ZY
2140 &(priv->qos_data.def_qos_parm));
2141 }
2142}
2143
c8b0e6e1 2144#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
2145/*
2146 * Power management (not Tx power!) functions
2147 */
2148#define MSEC_TO_USEC 1024
2149
2150#define NOSLP __constant_cpu_to_le16(0), 0, 0
2151#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2152#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2153#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2154 __constant_cpu_to_le32(X1), \
2155 __constant_cpu_to_le32(X2), \
2156 __constant_cpu_to_le32(X3), \
2157 __constant_cpu_to_le32(X4)}
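/*
 * Each table entry below pairs a power-table command with a no_dtim
 * flag.  The command carries the sleep flags, the Tx/Rx data timeouts
 * (SLP_TIMEOUT scales milliseconds by the 1024 factor defined above)
 * and a five-entry vector of sleep intervals measured in beacon
 * periods; entry 0 of either table is the "never sleep" (CAM) setting.
 */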
2158
2159
2160/* default power management (not Tx power) table values */
2161/* for tim 0-10 */
bb8c093b 2162static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
b481de9c
ZY
2163 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2164 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2165 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2166 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2167 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2168 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2169};
2170
2171/* for tim > 10 */
bb8c093b 2172static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
b481de9c
ZY
2173 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2174 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2175 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2176 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2177 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2178 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2179 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2180 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2181 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2182 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2183};
2184
bb8c093b 2185int iwl4965_power_init_handle(struct iwl4965_priv *priv)
b481de9c
ZY
2186{
2187 int rc = 0, i;
bb8c093b
CH
2188 struct iwl4965_power_mgr *pow_data;
2189 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
b481de9c
ZY
2190 u16 pci_pm;
2191
2192 IWL_DEBUG_POWER("Initialize power \n");
2193
2194 pow_data = &(priv->power_data);
2195
2196 memset(pow_data, 0, sizeof(*pow_data));
2197
2198 pow_data->active_index = IWL_POWER_RANGE_0;
2199 pow_data->dtim_val = 0xffff;
2200
2201 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2202 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2203
2204 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2205 if (rc != 0)
2206 return 0;
2207 else {
bb8c093b 2208 struct iwl4965_powertable_cmd *cmd;
b481de9c
ZY
2209
2210 IWL_DEBUG_POWER("adjust power command flags\n");
2211
2212 for (i = 0; i < IWL_POWER_AC; i++) {
2213 cmd = &pow_data->pwr_range_0[i].cmd;
2214
2215 if (pci_pm & 0x1)
2216 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2217 else
2218 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2219 }
2220 }
2221 return rc;
2222}
2223
bb8c093b
CH
2224static int iwl4965_update_power_cmd(struct iwl4965_priv *priv,
2225 struct iwl4965_powertable_cmd *cmd, u32 mode)
b481de9c
ZY
2226{
2227 int rc = 0, i;
2228 u8 skip;
2229 u32 max_sleep = 0;
bb8c093b 2230 struct iwl4965_power_vec_entry *range;
b481de9c 2231 u8 period = 0;
bb8c093b 2232 struct iwl4965_power_mgr *pow_data;
b481de9c
ZY
2233
2234 if (mode > IWL_POWER_INDEX_5) {
2235 IWL_DEBUG_POWER("Error invalid power mode \n");
2236 return -1;
2237 }
2238 pow_data = &(priv->power_data);
2239
2240 if (pow_data->active_index == IWL_POWER_RANGE_0)
2241 range = &pow_data->pwr_range_0[0];
2242 else
2243 range = &pow_data->pwr_range_1[1];
2244
bb8c093b 2245 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
b481de9c
ZY
2246
2247#ifdef IWL_MAC80211_DISABLE
2248 if (priv->assoc_network != NULL) {
2249 unsigned long flags;
2250
2251 period = priv->assoc_network->tim.tim_period;
2252 }
2253#endif /*IWL_MAC80211_DISABLE */
2254 skip = range[mode].no_dtim;
2255
2256 if (period == 0) {
2257 period = 1;
2258 skip = 0;
2259 }
2260
2261 if (skip == 0) {
2262 max_sleep = period;
2263 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2264 } else {
2265 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2266 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2267 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2268 }
2269
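	/* Worked example: with a DTIM period of 3 and a largest table
	 * interval of 10 beacons, max_sleep becomes (10 / 3) * 3 = 9, so
	 * any longer interval in the vector is capped at 9 beacons, a
	 * whole number of DTIM periods. */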
2270 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2271 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2272 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2273 }
2274
2275 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2276 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2277 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2278 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2279 le32_to_cpu(cmd->sleep_interval[0]),
2280 le32_to_cpu(cmd->sleep_interval[1]),
2281 le32_to_cpu(cmd->sleep_interval[2]),
2282 le32_to_cpu(cmd->sleep_interval[3]),
2283 le32_to_cpu(cmd->sleep_interval[4]));
2284
2285 return rc;
2286}
2287
bb8c093b 2288static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode)
b481de9c 2289{
9a62f73b 2290 u32 uninitialized_var(final_mode);
b481de9c 2291 int rc;
bb8c093b 2292 struct iwl4965_powertable_cmd cmd;
b481de9c
ZY
2293
2294 /* If on battery, set to 3,
01ebd063 2295 * if plugged into AC power, set to CAM ("continuously aware mode"),
b481de9c
ZY
2296 * else user level */
2297 switch (mode) {
2298 case IWL_POWER_BATTERY:
2299 final_mode = IWL_POWER_INDEX_3;
2300 break;
2301 case IWL_POWER_AC:
2302 final_mode = IWL_POWER_MODE_CAM;
2303 break;
2304 default:
2305 final_mode = mode;
2306 break;
2307 }
2308
2309 cmd.keep_alive_beacons = 0;
2310
bb8c093b 2311 iwl4965_update_power_cmd(priv, &cmd, final_mode);
b481de9c 2312
bb8c093b 2313 rc = iwl4965_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
b481de9c
ZY
2314
2315 if (final_mode == IWL_POWER_MODE_CAM)
2316 clear_bit(STATUS_POWER_PMI, &priv->status);
2317 else
2318 set_bit(STATUS_POWER_PMI, &priv->status);
2319
2320 return rc;
2321}
2322
bb8c093b 2323int iwl4965_is_network_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
2324{
2325 /* Filter incoming packets to determine if they are targeted toward
2326 * this network, discarding packets coming from ourselves */
2327 switch (priv->iw_mode) {
2328 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2329 /* packets from our adapter are dropped (echo) */
2330 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2331 return 0;
2332 /* {broad,multi}cast packets to our IBSS go through */
2333 if (is_multicast_ether_addr(header->addr1))
2334 return !compare_ether_addr(header->addr3, priv->bssid);
2335 /* packets to our adapter go through */
2336 return !compare_ether_addr(header->addr1, priv->mac_addr);
2337 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2338 /* packets from our adapter are dropped (echo) */
2339 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2340 return 0;
2341 /* {broad,multi}cast packets to our BSS go through */
2342 if (is_multicast_ether_addr(header->addr1))
2343 return !compare_ether_addr(header->addr2, priv->bssid);
2344 /* packets to our adapter go through */
2345 return !compare_ether_addr(header->addr1, priv->mac_addr);
2346 }
2347
2348 return 1;
2349}
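/*
 * Address usage above follows the 802.11 ToDS/FromDS conventions: in
 * IBSS frames addr1 is the destination, addr2 the source and addr3 the
 * BSSID, while frames from an AP to a station carry the destination in
 * addr1, the BSSID in addr2 and the original source in addr3, which is
 * why the echo check looks at addr2 in IBSS mode but addr3 in STA mode.
 */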
2350
2351#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2352
bb8c093b 2353static const char *iwl4965_get_tx_fail_reason(u32 status)
b481de9c
ZY
2354{
2355 switch (status & TX_STATUS_MSK) {
2356 case TX_STATUS_SUCCESS:
2357 return "SUCCESS";
2358 TX_STATUS_ENTRY(SHORT_LIMIT);
2359 TX_STATUS_ENTRY(LONG_LIMIT);
2360 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2361 TX_STATUS_ENTRY(MGMNT_ABORT);
2362 TX_STATUS_ENTRY(NEXT_FRAG);
2363 TX_STATUS_ENTRY(LIFE_EXPIRE);
2364 TX_STATUS_ENTRY(DEST_PS);
2365 TX_STATUS_ENTRY(ABORTED);
2366 TX_STATUS_ENTRY(BT_RETRY);
2367 TX_STATUS_ENTRY(STA_INVALID);
2368 TX_STATUS_ENTRY(FRAG_DROPPED);
2369 TX_STATUS_ENTRY(TID_DISABLE);
2370 TX_STATUS_ENTRY(FRAME_FLUSHED);
2371 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2372 TX_STATUS_ENTRY(TX_LOCKED);
2373 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2374 }
2375
2376 return "UNKNOWN";
2377}
2378
2379/**
bb8c093b 2380 * iwl4965_scan_cancel - Cancel any currently executing HW scan
b481de9c
ZY
2381 *
2382 * NOTE: priv->mutex is not required before calling this function
2383 */
bb8c093b 2384static int iwl4965_scan_cancel(struct iwl4965_priv *priv)
b481de9c
ZY
2385{
2386 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2387 clear_bit(STATUS_SCANNING, &priv->status);
2388 return 0;
2389 }
2390
2391 if (test_bit(STATUS_SCANNING, &priv->status)) {
2392 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2393 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2394 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2395 queue_work(priv->workqueue, &priv->abort_scan);
2396
2397 } else
2398 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2399
2400 return test_bit(STATUS_SCANNING, &priv->status);
2401 }
2402
2403 return 0;
2404}
2405
2406/**
bb8c093b 2407 * iwl4965_scan_cancel_timeout - Cancel any currently executing HW scan
b481de9c
ZY
2408 * @ms: amount of time to wait (in milliseconds) for scan to abort
2409 *
2410 * NOTE: priv->mutex must be held before calling this function
2411 */
bb8c093b 2412static int iwl4965_scan_cancel_timeout(struct iwl4965_priv *priv, unsigned long ms)
b481de9c
ZY
2413{
2414 unsigned long now = jiffies;
2415 int ret;
2416
bb8c093b 2417 ret = iwl4965_scan_cancel(priv);
b481de9c
ZY
2418 if (ret && ms) {
2419 mutex_unlock(&priv->mutex);
2420 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2421 test_bit(STATUS_SCANNING, &priv->status))
2422 msleep(1);
2423 mutex_lock(&priv->mutex);
2424
2425 return test_bit(STATUS_SCANNING, &priv->status);
2426 }
2427
2428 return ret;
2429}
2430
bb8c093b 2431static void iwl4965_sequence_reset(struct iwl4965_priv *priv)
b481de9c
ZY
2432{
2433 /* Reset ieee stats */
2434
2435 /* We don't reset the net_device_stats (ieee->stats) on
2436 * re-association */
2437
2438 priv->last_seq_num = -1;
2439 priv->last_frag_num = -1;
2440 priv->last_packet_time = 0;
2441
bb8c093b 2442 iwl4965_scan_cancel(priv);
b481de9c
ZY
2443}
2444
2445#define MAX_UCODE_BEACON_INTERVAL 4096
2446#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2447
bb8c093b 2448static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
b481de9c
ZY
2449{
2450 u16 new_val = 0;
2451 u16 beacon_factor = 0;
2452
2453 beacon_factor =
2454 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2455 / MAX_UCODE_BEACON_INTERVAL;
2456 new_val = beacon_val / beacon_factor;
2457
2458 return cpu_to_le16(new_val);
2459}
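/*
 * Worked example: a typical 100 TU beacon interval gives
 * beacon_factor = (100 + 4096) / 4096 = 1 and is passed through
 * unchanged, while an oversized 8192 TU interval gives a factor of 3
 * and is scaled down to 2730 TU so it fits the uCode's 4096 TU limit.
 */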
2460
bb8c093b 2461static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv)
b481de9c
ZY
2462{
2463 u64 interval_tm_unit;
2464 u64 tsf, result;
2465 unsigned long flags;
2466 struct ieee80211_conf *conf = NULL;
2467 u16 beacon_int = 0;
2468
2469 conf = ieee80211_get_hw_conf(priv->hw);
2470
2471 spin_lock_irqsave(&priv->lock, flags);
2472 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2473 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2474
2475 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2476
2477 tsf = priv->timestamp1;
2478 tsf = ((tsf << 32) | priv->timestamp0);
2479
2480 beacon_int = priv->beacon_int;
2481 spin_unlock_irqrestore(&priv->lock, flags);
2482
2483 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2484 if (beacon_int == 0) {
2485 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2486 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2487 } else {
2488 priv->rxon_timing.beacon_interval =
2489 cpu_to_le16(beacon_int);
2490 priv->rxon_timing.beacon_interval =
bb8c093b 2491 iwl4965_adjust_beacon_interval(
b481de9c
ZY
2492 le16_to_cpu(priv->rxon_timing.beacon_interval));
2493 }
2494
2495 priv->rxon_timing.atim_window = 0;
2496 } else {
2497 priv->rxon_timing.beacon_interval =
bb8c093b 2498 iwl4965_adjust_beacon_interval(conf->beacon_int);
b481de9c
ZY
2499 /* TODO: we need to get atim_window from upper stack
 2500 * for now we set it to 0 */
2501 priv->rxon_timing.atim_window = 0;
2502 }
2503
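	/* beacon_init_val is the time remaining until the next expected
	 * beacon: the beacon interval in usec (TU * 1024) minus
	 * (TSF modulo that interval). */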
2504 interval_tm_unit =
2505 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2506 result = do_div(tsf, interval_tm_unit);
2507 priv->rxon_timing.beacon_init_val =
2508 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2509
2510 IWL_DEBUG_ASSOC
2511 ("beacon interval %d beacon timer %d beacon tim %d\n",
2512 le16_to_cpu(priv->rxon_timing.beacon_interval),
2513 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2514 le16_to_cpu(priv->rxon_timing.atim_window));
2515}
2516
bb8c093b 2517static int iwl4965_scan_initiate(struct iwl4965_priv *priv)
b481de9c
ZY
2518{
2519 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2520 IWL_ERROR("APs don't scan.\n");
2521 return 0;
2522 }
2523
bb8c093b 2524 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
2525 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2526 return -EIO;
2527 }
2528
2529 if (test_bit(STATUS_SCANNING, &priv->status)) {
2530 IWL_DEBUG_SCAN("Scan already in progress.\n");
2531 return -EAGAIN;
2532 }
2533
2534 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2535 IWL_DEBUG_SCAN("Scan request while abort pending. "
2536 "Queuing.\n");
2537 return -EAGAIN;
2538 }
2539
2540 IWL_DEBUG_INFO("Starting scan...\n");
2541 priv->scan_bands = 2;
2542 set_bit(STATUS_SCANNING, &priv->status);
2543 priv->scan_start = jiffies;
2544 priv->scan_pass_start = priv->scan_start;
2545
2546 queue_work(priv->workqueue, &priv->request_scan);
2547
2548 return 0;
2549}
2550
bb8c093b 2551static int iwl4965_set_rxon_hwcrypto(struct iwl4965_priv *priv, int hw_decrypt)
b481de9c 2552{
bb8c093b 2553 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
b481de9c
ZY
2554
2555 if (hw_decrypt)
2556 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2557 else
2558 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2559
2560 return 0;
2561}
2562
bb8c093b 2563static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode)
b481de9c
ZY
2564{
2565 if (phymode == MODE_IEEE80211A) {
2566 priv->staging_rxon.flags &=
2567 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2568 | RXON_FLG_CCK_MSK);
2569 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2570 } else {
bb8c093b 2571 /* Copied from iwl4965_bg_post_associate() */
b481de9c
ZY
2572 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2573 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2574 else
2575 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2576
2577 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2578 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2579
2580 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2581 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2582 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2583 }
2584}
2585
2586/*
01ebd063 2587 * initialize rxon structure with default values from eeprom
b481de9c 2588 */
bb8c093b 2589static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
b481de9c 2590{
bb8c093b 2591 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
2592
2593 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2594
2595 switch (priv->iw_mode) {
2596 case IEEE80211_IF_TYPE_AP:
2597 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2598 break;
2599
2600 case IEEE80211_IF_TYPE_STA:
2601 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2602 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2603 break;
2604
2605 case IEEE80211_IF_TYPE_IBSS:
2606 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2607 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2608 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2609 RXON_FILTER_ACCEPT_GRP_MSK;
2610 break;
2611
2612 case IEEE80211_IF_TYPE_MNTR:
2613 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2614 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2615 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2616 break;
2617 }
2618
2619#if 0
2620 /* TODO: Figure out when short_preamble would be set and cache from
2621 * that */
2622 if (!hw_to_local(priv->hw)->short_preamble)
2623 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2624 else
2625 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2626#endif
2627
bb8c093b 2628 ch_info = iwl4965_get_channel_info(priv, priv->phymode,
b481de9c
ZY
2629 le16_to_cpu(priv->staging_rxon.channel));
2630
2631 if (!ch_info)
2632 ch_info = &priv->channel_info[0];
2633
2634 /*
 2635 * In some cases none of the A-band channels is IBSS-capable;
 2636 * in that case force a B/G channel instead.
2637 */
2638 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2639 !(is_channel_ibss(ch_info)))
2640 ch_info = &priv->channel_info[0];
2641
2642 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2643 if (is_channel_a_band(ch_info))
2644 priv->phymode = MODE_IEEE80211A;
2645 else
2646 priv->phymode = MODE_IEEE80211G;
2647
bb8c093b 2648 iwl4965_set_flags_for_phymode(priv, priv->phymode);
b481de9c
ZY
2649
2650 priv->staging_rxon.ofdm_basic_rates =
2651 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2652 priv->staging_rxon.cck_basic_rates =
2653 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2654
2655 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2656 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2657 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2658 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2659 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2660 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2661 iwl4965_set_rxon_chain(priv);
2662}
2663
bb8c093b 2664static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode)
b481de9c 2665{
b481de9c 2666 if (mode == IEEE80211_IF_TYPE_IBSS) {
bb8c093b 2667 const struct iwl4965_channel_info *ch_info;
b481de9c 2668
bb8c093b 2669 ch_info = iwl4965_get_channel_info(priv,
b481de9c
ZY
2670 priv->phymode,
2671 le16_to_cpu(priv->staging_rxon.channel));
2672
2673 if (!ch_info || !is_channel_ibss(ch_info)) {
2674 IWL_ERROR("channel %d not IBSS channel\n",
2675 le16_to_cpu(priv->staging_rxon.channel));
2676 return -EINVAL;
2677 }
2678 }
2679
b481de9c
ZY
2680 priv->iw_mode = mode;
2681
bb8c093b 2682 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
2683 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2684
bb8c093b 2685 iwl4965_clear_stations_table(priv);
b481de9c 2686
fde3571f
MA
 2687 /* don't commit RXON if RF-kill is on */
2688 if (!iwl4965_is_ready_rf(priv))
2689 return -EAGAIN;
2690
2691 cancel_delayed_work(&priv->scan_check);
2692 if (iwl4965_scan_cancel_timeout(priv, 100)) {
2693 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2694 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2695 return -EAGAIN;
2696 }
2697
bb8c093b 2698 iwl4965_commit_rxon(priv);
b481de9c
ZY
2699
2700 return 0;
2701}
2702
bb8c093b 2703static void iwl4965_build_tx_cmd_hwcrypto(struct iwl4965_priv *priv,
b481de9c 2704 struct ieee80211_tx_control *ctl,
bb8c093b 2705 struct iwl4965_cmd *cmd,
b481de9c
ZY
2706 struct sk_buff *skb_frag,
2707 int last_frag)
2708{
bb8c093b 2709 struct iwl4965_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
b481de9c
ZY
2710
2711 switch (keyinfo->alg) {
2712 case ALG_CCMP:
2713 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2714 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2715 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2716 break;
2717
2718 case ALG_TKIP:
2719#if 0
2720 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2721
2722 if (last_frag)
2723 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2724 8);
2725 else
2726 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2727#endif
2728 break;
2729
2730 case ALG_WEP:
2731 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2732 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2733
2734 if (keyinfo->keylen == 13)
2735 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2736
2737 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2738
2739 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2740 "with key %d\n", ctl->key_idx);
2741 break;
2742
b481de9c
ZY
2743 default:
2744 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2745 break;
2746 }
2747}
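/*
 * For WEP the uCode distinguishes only two key sizes: a 13-byte key
 * selects TX_CMD_SEC_KEY128 (WEP-104), otherwise the default,
 * presumably 40-bit, size is used; the key index is packed into
 * sec_ctl alongside the cipher type.
 */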
2748
2749/*
 2750 * Build the common (non-rate, non-security) part of a REPLY_TX command.
2751 */
bb8c093b
CH
2752static void iwl4965_build_tx_cmd_basic(struct iwl4965_priv *priv,
2753 struct iwl4965_cmd *cmd,
b481de9c
ZY
2754 struct ieee80211_tx_control *ctrl,
2755 struct ieee80211_hdr *hdr,
2756 int is_unicast, u8 std_id)
2757{
2758 __le16 *qc;
2759 u16 fc = le16_to_cpu(hdr->frame_control);
2760 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2761
2762 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2763 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2764 tx_flags |= TX_CMD_FLG_ACK_MSK;
2765 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2766 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2767 if (ieee80211_is_probe_response(fc) &&
2768 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2769 tx_flags |= TX_CMD_FLG_TSF_MSK;
2770 } else {
2771 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2772 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2773 }
2774
87e4f7df
TW
2775 if (ieee80211_is_back_request(fc))
2776 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
2777
2778
b481de9c
ZY
2779 cmd->cmd.tx.sta_id = std_id;
2780 if (ieee80211_get_morefrag(hdr))
2781 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2782
2783 qc = ieee80211_get_qos_ctrl(hdr);
2784 if (qc) {
2785 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2786 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2787 } else
2788 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2789
2790 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2791 tx_flags |= TX_CMD_FLG_RTS_MSK;
2792 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2793 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2794 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2795 tx_flags |= TX_CMD_FLG_CTS_MSK;
2796 }
2797
2798 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2799 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2800
2801 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2802 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2803 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2804 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
bc434dd2 2805 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 2806 else
bc434dd2 2807 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
b481de9c
ZY
2808 } else
2809 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2810
2811 cmd->cmd.tx.driver_txop = 0;
2812 cmd->cmd.tx.tx_flags = tx_flags;
2813 cmd->cmd.tx.next_frame_len = 0;
2814}
2815
6440adb5
BC
2816/**
2817 * iwl4965_get_sta_id - Find station's index within station table
2818 *
2819 * If new IBSS station, create new entry in station table
2820 */
9fbab516
BC
2821static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2822 struct ieee80211_hdr *hdr)
b481de9c
ZY
2823{
2824 int sta_id;
2825 u16 fc = le16_to_cpu(hdr->frame_control);
0795af57 2826 DECLARE_MAC_BUF(mac);
b481de9c 2827
6440adb5 2828 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
2829 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2830 is_multicast_ether_addr(hdr->addr1))
2831 return priv->hw_setting.bcast_sta_id;
2832
2833 switch (priv->iw_mode) {
2834
6440adb5
BC
2835 /* If we are a client station in a BSS network, use the special
2836 * AP station entry (that's the only station we communicate with) */
b481de9c
ZY
2837 case IEEE80211_IF_TYPE_STA:
2838 return IWL_AP_ID;
2839
2840 /* If we are an AP, then find the station, or use BCAST */
2841 case IEEE80211_IF_TYPE_AP:
bb8c093b 2842 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2843 if (sta_id != IWL_INVALID_STATION)
2844 return sta_id;
2845 return priv->hw_setting.bcast_sta_id;
2846
6440adb5
BC
2847 /* If this frame is going out to an IBSS network, find the station,
2848 * or create a new station table entry */
b481de9c 2849 case IEEE80211_IF_TYPE_IBSS:
bb8c093b 2850 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2851 if (sta_id != IWL_INVALID_STATION)
2852 return sta_id;
2853
6440adb5 2854 /* Create new station table entry */
67d62035
RR
2855 sta_id = iwl4965_add_station_flags(priv, hdr->addr1,
2856 0, CMD_ASYNC, NULL);
b481de9c
ZY
2857
2858 if (sta_id != IWL_INVALID_STATION)
2859 return sta_id;
2860
0795af57 2861 IWL_DEBUG_DROP("Station %s not in station map. "
b481de9c 2862 "Defaulting to broadcast...\n",
0795af57 2863 print_mac(mac, hdr->addr1));
bb8c093b 2864 iwl4965_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
b481de9c
ZY
2865 return priv->hw_setting.bcast_sta_id;
2866
2867 default:
01ebd063 2868 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
b481de9c
ZY
2869 return priv->hw_setting.bcast_sta_id;
2870 }
2871}
2872
2873/*
2874 * start REPLY_TX command process
2875 */
bb8c093b 2876static int iwl4965_tx_skb(struct iwl4965_priv *priv,
b481de9c
ZY
2877 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2878{
2879 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bb8c093b 2880 struct iwl4965_tfd_frame *tfd;
b481de9c
ZY
2881 u32 *control_flags;
2882 int txq_id = ctl->queue;
bb8c093b
CH
2883 struct iwl4965_tx_queue *txq = NULL;
2884 struct iwl4965_queue *q = NULL;
b481de9c
ZY
2885 dma_addr_t phys_addr;
2886 dma_addr_t txcmd_phys;
87e4f7df 2887 dma_addr_t scratch_phys;
bb8c093b 2888 struct iwl4965_cmd *out_cmd = NULL;
b481de9c
ZY
2889 u16 len, idx, len_org;
2890 u8 id, hdr_len, unicast;
2891 u8 sta_id;
2892 u16 seq_number = 0;
2893 u16 fc;
2894 __le16 *qc;
2895 u8 wait_write_ptr = 0;
2896 unsigned long flags;
2897 int rc;
2898
2899 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 2900 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
2901 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2902 goto drop_unlock;
2903 }
2904
32bfd35d
JB
2905 if (!priv->vif) {
2906 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
b481de9c
ZY
2907 goto drop_unlock;
2908 }
2909
2910 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2911 IWL_ERROR("ERROR: No TX rate available.\n");
2912 goto drop_unlock;
2913 }
2914
2915 unicast = !is_multicast_ether_addr(hdr->addr1);
2916 id = 0;
2917
2918 fc = le16_to_cpu(hdr->frame_control);
2919
c8b0e6e1 2920#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
2921 if (ieee80211_is_auth(fc))
2922 IWL_DEBUG_TX("Sending AUTH frame\n");
2923 else if (ieee80211_is_assoc_request(fc))
2924 IWL_DEBUG_TX("Sending ASSOC frame\n");
2925 else if (ieee80211_is_reassoc_request(fc))
2926 IWL_DEBUG_TX("Sending REASSOC frame\n");
2927#endif
2928
7878a5a4
MA
2929 /* drop all data frame if we are not associated */
2930 if (!iwl4965_is_associated(priv) && !priv->assoc_id &&
b481de9c 2931 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
bb8c093b 2932 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n");
b481de9c
ZY
2933 goto drop_unlock;
2934 }
2935
2936 spin_unlock_irqrestore(&priv->lock, flags);
2937
2938 hdr_len = ieee80211_get_hdrlen(fc);
6440adb5
BC
2939
2940 /* Find (or create) index into station table for destination station */
bb8c093b 2941 sta_id = iwl4965_get_sta_id(priv, hdr);
b481de9c 2942 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
2943 DECLARE_MAC_BUF(mac);
2944
2945 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2946 print_mac(mac, hdr->addr1));
b481de9c
ZY
2947 goto drop;
2948 }
2949
2950 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2951
2952 qc = ieee80211_get_qos_ctrl(hdr);
2953 if (qc) {
2954 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2955 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2956 IEEE80211_SCTL_SEQ;
2957 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2958 (hdr->seq_ctrl &
2959 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2960 seq_number += 0x10;
c8b0e6e1
CH
2961#ifdef CONFIG_IWL4965_HT
2962#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
2963 /* aggregation is on for this <sta,tid> */
2964 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2965 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
c8b0e6e1
CH
2966#endif /* CONFIG_IWL4965_HT_AGG */
2967#endif /* CONFIG_IWL4965_HT */
b481de9c 2968 }
6440adb5
BC
2969
2970 /* Descriptor for chosen Tx queue */
b481de9c
ZY
2971 txq = &priv->txq[txq_id];
2972 q = &txq->q;
2973
2974 spin_lock_irqsave(&priv->lock, flags);
2975
6440adb5 2976 /* Set up first empty TFD within this queue's circular TFD buffer */
fc4b6853 2977 tfd = &txq->bd[q->write_ptr];
b481de9c
ZY
2978 memset(tfd, 0, sizeof(*tfd));
2979 control_flags = (u32 *) tfd;
fc4b6853 2980 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 2981
6440adb5 2982 /* Set up driver data for this TFD */
bb8c093b 2983 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
fc4b6853
TW
2984 txq->txb[q->write_ptr].skb[0] = skb;
2985 memcpy(&(txq->txb[q->write_ptr].status.control),
b481de9c 2986 ctl, sizeof(struct ieee80211_tx_control));
6440adb5
BC
2987
2988 /* Set up first empty entry in queue's array of Tx/cmd buffers */
b481de9c
ZY
2989 out_cmd = &txq->cmd[idx];
2990 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2991 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
6440adb5
BC
2992
2993 /*
2994 * Set up the Tx-command (not MAC!) header.
2995 * Store the chosen Tx queue and TFD index within the sequence field;
2996 * after Tx, uCode's Tx response will return this value so driver can
2997 * locate the frame within the tx queue and do post-tx processing.
2998 */
b481de9c
ZY
2999 out_cmd->hdr.cmd = REPLY_TX;
3000 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 3001 INDEX_TO_SEQ(q->write_ptr)));
6440adb5
BC
3002
3003 /* Copy MAC header from skb into command buffer */
b481de9c
ZY
3004 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
3005
6440adb5
BC
3006 /*
3007 * Use the first empty entry in this queue's command buffer array
3008 * to contain the Tx command and MAC header concatenated together
3009 * (payload data will be in another buffer).
3010 * Size of this varies, due to varying MAC header length.
3011 * If end is not dword aligned, we'll have 2 extra bytes at the end
3012 * of the MAC header (device reads on dword boundaries).
3013 * We'll tell device about this padding later.
3014 */
b481de9c 3015 len = priv->hw_setting.tx_cmd_len +
bb8c093b 3016 sizeof(struct iwl4965_cmd_header) + hdr_len;
b481de9c
ZY
3017
3018 len_org = len;
3019 len = (len + 3) & ~3;
3020
3021 if (len_org != len)
3022 len_org = 1;
3023 else
3024 len_org = 0;
3025
6440adb5
BC
3026 /* Physical address of this Tx command's header (not MAC header!),
3027 * within command buffer array. */
bb8c093b
CH
3028 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl4965_cmd) * idx +
3029 offsetof(struct iwl4965_cmd, hdr);
b481de9c 3030
6440adb5
BC
3031 /* Add buffer containing Tx command and MAC(!) header to TFD's
3032 * first entry */
bb8c093b 3033 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
b481de9c
ZY
3034
3035 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
bb8c093b 3036 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
b481de9c 3037
6440adb5
BC
3038 /* Set up TFD's 2nd entry to point directly to remainder of skb,
3039 * if any (802.11 null frames have no payload). */
b481de9c
ZY
3040 len = skb->len - hdr_len;
3041 if (len) {
3042 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
3043 len, PCI_DMA_TODEVICE);
bb8c093b 3044 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
b481de9c
ZY
3045 }
3046
6440adb5 3047 /* Tell 4965 about any 2-byte padding after MAC header */
b481de9c
ZY
3048 if (len_org)
3049 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
3050
6440adb5 3051 /* Total # bytes to be transmitted */
b481de9c
ZY
3052 len = (u16)skb->len;
3053 out_cmd->cmd.tx.len = cpu_to_le16(len);
3054
3055 /* TODO need this for burst mode later on */
bb8c093b 3056 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
b481de9c
ZY
3057
3058 /* set is_hcca to 0; it probably will never be implemented */
bb8c093b 3059 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
b481de9c 3060
87e4f7df
TW
3061 scratch_phys = txcmd_phys + sizeof(struct iwl4965_cmd_header) +
3062 offsetof(struct iwl4965_tx_cmd, scratch);
3063 out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys);
3064 out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
3065
3066#ifdef CONFIG_IWL4965_HT_AGG
3067#ifdef CONFIG_IWL4965_HT
3068 /* TODO: move this functionality to rate scaling */
3069 iwl4965_tl_get_stats(priv, hdr);
3070#endif /* CONFIG_IWL4965_HT_AGG */
3071#endif /*CONFIG_IWL4965_HT */
3072
b481de9c
ZY
3073
3074 if (!ieee80211_get_morefrag(hdr)) {
3075 txq->need_update = 1;
3076 if (qc) {
3077 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
3078 priv->stations[sta_id].tid[tid].seq_number = seq_number;
3079 }
3080 } else {
3081 wait_write_ptr = 1;
3082 txq->need_update = 0;
3083 }
3084
bb8c093b 3085 iwl4965_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
b481de9c
ZY
3086 sizeof(out_cmd->cmd.tx));
3087
bb8c093b 3088 iwl4965_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
b481de9c
ZY
3089 ieee80211_get_hdrlen(fc));
3090
6440adb5 3091 /* Set up entry for this TFD in Tx byte-count array */
b481de9c
ZY
3092 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
3093
6440adb5 3094 /* Tell device the write index *just past* this latest filled TFD */
bb8c093b
CH
3095 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
3096 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3097 spin_unlock_irqrestore(&priv->lock, flags);
3098
3099 if (rc)
3100 return rc;
3101
bb8c093b 3102 if ((iwl4965_queue_space(q) < q->high_mark)
b481de9c
ZY
3103 && priv->mac80211_registered) {
3104 if (wait_write_ptr) {
3105 spin_lock_irqsave(&priv->lock, flags);
3106 txq->need_update = 1;
bb8c093b 3107 iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3108 spin_unlock_irqrestore(&priv->lock, flags);
3109 }
3110
3111 ieee80211_stop_queue(priv->hw, ctl->queue);
3112 }
3113
3114 return 0;
3115
3116drop_unlock:
3117 spin_unlock_irqrestore(&priv->lock, flags);
3118drop:
3119 return -1;
3120}
3121
bb8c093b 3122static void iwl4965_set_rate(struct iwl4965_priv *priv)
b481de9c
ZY
3123{
3124 const struct ieee80211_hw_mode *hw = NULL;
3125 struct ieee80211_rate *rate;
3126 int i;
3127
bb8c093b 3128 hw = iwl4965_get_hw_mode(priv, priv->phymode);
c4ba9621
SA
3129 if (!hw) {
3130 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
3131 return;
3132 }
b481de9c
ZY
3133
3134 priv->active_rate = 0;
3135 priv->active_rate_basic = 0;
3136
3137 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3138 hw->mode == MODE_IEEE80211A ?
3139 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3140
3141 for (i = 0; i < hw->num_rates; i++) {
3142 rate = &(hw->rates[i]);
3143 if ((rate->val < IWL_RATE_COUNT) &&
3144 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3145 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
bb8c093b 3146 rate->val, iwl4965_rates[rate->val].plcp,
b481de9c
ZY
3147 (rate->flags & IEEE80211_RATE_BASIC) ?
3148 "*" : "");
3149 priv->active_rate |= (1 << rate->val);
3150 if (rate->flags & IEEE80211_RATE_BASIC)
3151 priv->active_rate_basic |= (1 << rate->val);
3152 } else
3153 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
bb8c093b 3154 rate->val, iwl4965_rates[rate->val].plcp);
b481de9c
ZY
3155 }
3156
3157 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3158 priv->active_rate, priv->active_rate_basic);
3159
3160 /*
 3161 * If basic rates are configured, use them; otherwise fall back to
 3162 * the defaults of all CCK rates, plus 6, 12 and 24 Mbps for OFDM.
3164 */
3165 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3166 priv->staging_rxon.cck_basic_rates =
3167 ((priv->active_rate_basic &
3168 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3169 else
3170 priv->staging_rxon.cck_basic_rates =
3171 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3172
3173 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3174 priv->staging_rxon.ofdm_basic_rates =
3175 ((priv->active_rate_basic &
3176 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3177 IWL_FIRST_OFDM_RATE) & 0xFF;
3178 else
3179 priv->staging_rxon.ofdm_basic_rates =
3180 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3181}
3182
bb8c093b 3183static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
b481de9c
ZY
3184{
3185 unsigned long flags;
3186
3187 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3188 return;
3189
3190 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3191 disable_radio ? "OFF" : "ON");
3192
3193 if (disable_radio) {
bb8c093b 3194 iwl4965_scan_cancel(priv);
b481de9c
ZY
3195 /* FIXME: This is a workaround for AP */
3196 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3197 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3198 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
3199 CSR_UCODE_SW_BIT_RFKILL);
3200 spin_unlock_irqrestore(&priv->lock, flags);
bb8c093b 3201 iwl4965_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
b481de9c
ZY
3202 set_bit(STATUS_RF_KILL_SW, &priv->status);
3203 }
3204 return;
3205 }
3206
3207 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3208 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
3209
3210 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3211 spin_unlock_irqrestore(&priv->lock, flags);
3212
3213 /* wake up ucode */
3214 msleep(10);
3215
3216 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
3217 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
3218 if (!iwl4965_grab_nic_access(priv))
3219 iwl4965_release_nic_access(priv);
b481de9c
ZY
3220 spin_unlock_irqrestore(&priv->lock, flags);
3221
3222 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3223 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3224 "disabled by HW switch\n");
3225 return;
3226 }
3227
3228 queue_work(priv->workqueue, &priv->restart);
3229 return;
3230}
3231
bb8c093b 3232void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
b481de9c
ZY
3233 u32 decrypt_res, struct ieee80211_rx_status *stats)
3234{
3235 u16 fc =
3236 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3237
3238 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3239 return;
3240
3241 if (!(fc & IEEE80211_FCTL_PROTECTED))
3242 return;
3243
3244 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3245 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3246 case RX_RES_STATUS_SEC_TYPE_TKIP:
3247 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3248 RX_RES_STATUS_BAD_ICV_MIC)
3249 stats->flag |= RX_FLAG_MMIC_ERROR;
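		/* fall through: a TKIP frame also carries the generic
		 * decrypt status checked below */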
3250 case RX_RES_STATUS_SEC_TYPE_WEP:
3251 case RX_RES_STATUS_SEC_TYPE_CCMP:
3252 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3253 RX_RES_STATUS_DECRYPT_OK) {
3254 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
3255 stats->flag |= RX_FLAG_DECRYPTED;
3256 }
3257 break;
3258
3259 default:
3260 break;
3261 }
3262}
3263
b481de9c
ZY
3264
3265#define IWL_PACKET_RETRY_TIME HZ
3266
bb8c093b 3267int iwl4965_is_duplicate_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
3268{
3269 u16 sc = le16_to_cpu(header->seq_ctrl);
3270 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3271 u16 frag = sc & IEEE80211_SCTL_FRAG;
3272 u16 *last_seq, *last_frag;
3273 unsigned long *last_time;
3274
3275 switch (priv->iw_mode) {
3276 case IEEE80211_IF_TYPE_IBSS:{
3277 struct list_head *p;
bb8c093b 3278 struct iwl4965_ibss_seq *entry = NULL;
b481de9c
ZY
3279 u8 *mac = header->addr2;
3280 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3281
3282 __list_for_each(p, &priv->ibss_mac_hash[index]) {
bb8c093b 3283 entry = list_entry(p, struct iwl4965_ibss_seq, list);
b481de9c
ZY
3284 if (!compare_ether_addr(entry->mac, mac))
3285 break;
3286 }
3287 if (p == &priv->ibss_mac_hash[index]) {
3288 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3289 if (!entry) {
bc434dd2 3290 IWL_ERROR("Cannot malloc new mac entry\n");
b481de9c
ZY
3291 return 0;
3292 }
3293 memcpy(entry->mac, mac, ETH_ALEN);
3294 entry->seq_num = seq;
3295 entry->frag_num = frag;
3296 entry->packet_time = jiffies;
bc434dd2 3297 list_add(&entry->list, &priv->ibss_mac_hash[index]);
b481de9c
ZY
3298 return 0;
3299 }
3300 last_seq = &entry->seq_num;
3301 last_frag = &entry->frag_num;
3302 last_time = &entry->packet_time;
3303 break;
3304 }
3305 case IEEE80211_IF_TYPE_STA:
3306 last_seq = &priv->last_seq_num;
3307 last_frag = &priv->last_frag_num;
3308 last_time = &priv->last_packet_time;
3309 break;
3310 default:
3311 return 0;
3312 }
3313 if ((*last_seq == seq) &&
3314 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3315 if (*last_frag == frag)
3316 goto drop;
3317 if (*last_frag + 1 != frag)
3318 /* out-of-order fragment */
3319 goto drop;
3320 } else
3321 *last_seq = seq;
3322
3323 *last_frag = frag;
3324 *last_time = jiffies;
3325 return 0;
3326
3327 drop:
3328 return 1;
3329}
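/*
 * The duplicate check above keys on the 802.11 sequence/fragment
 * numbers of the previous frame from the same transmitter within an
 * IWL_PACKET_RETRY_TIME (one second) window: in IBSS mode a small
 * per-peer hash (indexed by the last byte of the source MAC) keeps one
 * entry per neighbour, while in STA mode a single cached entry for the
 * AP is enough.
 */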
3330
c8b0e6e1 3331#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
3332
3333#include "iwl-spectrum.h"
3334
3335#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3336#define BEACON_TIME_MASK_HIGH 0xFF000000
3337#define TIME_UNIT 1024
3338
3339/*
3340 * extended beacon time format
3341 * time in usec will be changed into a 32-bit value in 8:24 format
3342 * the high 1 byte is the beacon counts
3343 * the lower 3 bytes is the time in usec within one beacon interval
3344 */
3345
bb8c093b 3346static u32 iwl4965_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
3347{
3348 u32 quot;
3349 u32 rem;
3350 u32 interval = beacon_interval * 1024;
3351
3352 if (!interval || !usec)
3353 return 0;
3354
3355 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3356 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3357
3358 return (quot << 24) + rem;
3359}
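/*
 * Worked example of the 8:24 encoding: with a 100 TU beacon interval
 * (102400 usec) an offset of 250000 usec becomes quot = 2 beacons and
 * rem = 45200 usec, i.e. the packed value 0x0200b090.
 */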
3360
3361/* base is usually what we get from ucode with each received frame,
3362 * the same as HW timer counter counting down
3363 */
3364
bb8c093b 3365static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
3366{
3367 u32 base_low = base & BEACON_TIME_MASK_LOW;
3368 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3369 u32 interval = beacon_interval * TIME_UNIT;
3370 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3371 (addon & BEACON_TIME_MASK_HIGH);
3372
3373 if (base_low > addon_low)
3374 res += base_low - addon_low;
3375 else if (base_low < addon_low) {
3376 res += interval + base_low - addon_low;
3377 res += (1 << 24);
3378 } else
3379 res += (1 << 24);
3380
3381 return cpu_to_le32(res);
3382}
3383
bb8c093b 3384static int iwl4965_get_measurement(struct iwl4965_priv *priv,
b481de9c
ZY
3385 struct ieee80211_measurement_params *params,
3386 u8 type)
3387{
bb8c093b
CH
3388 struct iwl4965_spectrum_cmd spectrum;
3389 struct iwl4965_rx_packet *res;
3390 struct iwl4965_host_cmd cmd = {
b481de9c
ZY
3391 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3392 .data = (void *)&spectrum,
3393 .meta.flags = CMD_WANT_SKB,
3394 };
3395 u32 add_time = le64_to_cpu(params->start_time);
3396 int rc;
3397 int spectrum_resp_status;
3398 int duration = le16_to_cpu(params->duration);
3399
bb8c093b 3400 if (iwl4965_is_associated(priv))
b481de9c 3401 add_time =
bb8c093b 3402 iwl4965_usecs_to_beacons(
b481de9c
ZY
3403 le64_to_cpu(params->start_time) - priv->last_tsf,
3404 le16_to_cpu(priv->rxon_timing.beacon_interval));
3405
3406 memset(&spectrum, 0, sizeof(spectrum));
3407
3408 spectrum.channel_count = cpu_to_le16(1);
3409 spectrum.flags =
3410 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3411 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3412 cmd.len = sizeof(spectrum);
3413 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3414
bb8c093b 3415 if (iwl4965_is_associated(priv))
b481de9c 3416 spectrum.start_time =
bb8c093b 3417 iwl4965_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
3418 add_time,
3419 le16_to_cpu(priv->rxon_timing.beacon_interval));
3420 else
3421 spectrum.start_time = 0;
3422
3423 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3424 spectrum.channels[0].channel = params->channel;
3425 spectrum.channels[0].type = type;
3426 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3427 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3428 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3429
bb8c093b 3430 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
3431 if (rc)
3432 return rc;
3433
bb8c093b 3434 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
b481de9c
ZY
3435 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3436 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
3437 rc = -EIO;
3438 }
3439
3440 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3441 switch (spectrum_resp_status) {
3442 case 0: /* Command will be handled */
3443 if (res->u.spectrum.id != 0xff) {
3444 IWL_DEBUG_INFO
3445 ("Replaced existing measurement: %d\n",
3446 res->u.spectrum.id);
3447 priv->measurement_status &= ~MEASUREMENT_READY;
3448 }
3449 priv->measurement_status |= MEASUREMENT_ACTIVE;
3450 rc = 0;
3451 break;
3452
3453 case 1: /* Command will not be handled */
3454 rc = -EAGAIN;
3455 break;
3456 }
3457
3458 dev_kfree_skb_any(cmd.meta.u.skb);
3459
3460 return rc;
3461}
3462#endif
3463
bb8c093b
CH
3464static void iwl4965_txstatus_to_ieee(struct iwl4965_priv *priv,
3465 struct iwl4965_tx_info *tx_sta)
b481de9c
ZY
3466{
3467
3468 tx_sta->status.ack_signal = 0;
3469 tx_sta->status.excessive_retries = 0;
3470 tx_sta->status.queue_length = 0;
3471 tx_sta->status.queue_number = 0;
3472
3473 if (in_interrupt())
3474 ieee80211_tx_status_irqsafe(priv->hw,
3475 tx_sta->skb[0], &(tx_sta->status));
3476 else
3477 ieee80211_tx_status(priv->hw,
3478 tx_sta->skb[0], &(tx_sta->status));
3479
3480 tx_sta->skb[0] = NULL;
3481}
3482
3483/**
6440adb5 3484 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
b481de9c 3485 *
6440adb5
BC
3486 * When FW advances 'R' index, all entries between old and new 'R' index
3487 * need to be reclaimed. As a result, some free space forms. If there is
3488 * enough free space (> low mark), wake the stack that feeds us.
b481de9c 3489 */
bb8c093b 3490int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
b481de9c 3491{
bb8c093b
CH
3492 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
3493 struct iwl4965_queue *q = &txq->q;
b481de9c
ZY
3494 int nfreed = 0;
3495
3496 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3497 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3498 "is out of range [0-%d] %d %d.\n", txq_id,
fc4b6853 3499 index, q->n_bd, q->write_ptr, q->read_ptr);
b481de9c
ZY
3500 return 0;
3501 }
3502
bb8c093b 3503 for (index = iwl4965_queue_inc_wrap(index, q->n_bd);
fc4b6853 3504 q->read_ptr != index;
bb8c093b 3505 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd)) {
b481de9c 3506 if (txq_id != IWL_CMD_QUEUE_NUM) {
bb8c093b 3507 iwl4965_txstatus_to_ieee(priv,
fc4b6853 3508 &(txq->txb[txq->q.read_ptr]));
bb8c093b 3509 iwl4965_hw_txq_free_tfd(priv, txq);
b481de9c
ZY
3510 } else if (nfreed > 1) {
3511 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
fc4b6853 3512 q->write_ptr, q->read_ptr);
b481de9c
ZY
3513 queue_work(priv->workqueue, &priv->restart);
3514 }
3515 nfreed++;
3516 }
3517
bb8c093b 3518 if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
b481de9c
ZY
3519 (txq_id != IWL_CMD_QUEUE_NUM) &&
3520 priv->mac80211_registered)
3521 ieee80211_wake_queue(priv->hw, txq_id);
3522
3523
3524 return nfreed;
3525}
3526
bb8c093b 3527static int iwl4965_is_tx_success(u32 status)
b481de9c
ZY
3528{
3529 status &= TX_STATUS_MSK;
3530 return (status == TX_STATUS_SUCCESS)
3531 || (status == TX_STATUS_DIRECT_DONE);
3532}
3533
3534/******************************************************************************
3535 *
3536 * Generic RX handler implementations
3537 *
3538 ******************************************************************************/
c8b0e6e1
CH
3539#ifdef CONFIG_IWL4965_HT
3540#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3541
bb8c093b 3542static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv,
b481de9c
ZY
3543 struct ieee80211_hdr *hdr)
3544{
3545 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
3546 return IWL_AP_ID;
3547 else {
3548 u8 *da = ieee80211_get_DA(hdr);
bb8c093b 3549 return iwl4965_hw_find_station(priv, da);
b481de9c
ZY
3550 }
3551}
3552
bb8c093b
CH
3553static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
3554 struct iwl4965_priv *priv, int txq_id, int idx)
b481de9c
ZY
3555{
3556 if (priv->txq[txq_id].txb[idx].skb[0])
3557 return (struct ieee80211_hdr *)priv->txq[txq_id].
3558 txb[idx].skb[0]->data;
3559 return NULL;
3560}
3561
bb8c093b 3562static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
b481de9c
ZY
3563{
3564 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
3565 tx_resp->frame_count);
3566 return le32_to_cpu(*scd_ssn) & MAX_SN;
3567
3568}
6440adb5
BC
3569
3570/**
3571 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
3572 */
bb8c093b
CH
3573static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3574 struct iwl4965_ht_agg *agg,
3575 struct iwl4965_tx_resp *tx_resp,
b481de9c
ZY
3576 u16 start_idx)
3577{
3578 u32 status;
3579 __le32 *frame_status = &tx_resp->status;
3580 struct ieee80211_tx_status *tx_status = NULL;
3581 struct ieee80211_hdr *hdr = NULL;
3582 int i, sh;
3583 int txq_id, idx;
3584 u16 seq;
3585
3586 if (agg->wait_for_ba)
6440adb5 3587 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
b481de9c
ZY
3588
3589 agg->frame_count = tx_resp->frame_count;
3590 agg->start_idx = start_idx;
3591 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3592 agg->bitmap0 = agg->bitmap1 = 0;
3593
6440adb5 3594 /* # frames attempted by Tx command */
b481de9c 3595 if (agg->frame_count == 1) {
6440adb5 3596 /* Only one frame was attempted; no block-ack will arrive */
bb8c093b 3597 struct iwl4965_tx_queue *txq ;
b481de9c
ZY
3598 status = le32_to_cpu(frame_status[0]);
3599
3600 txq_id = agg->txq_id;
3601 txq = &priv->txq[txq_id];
3602 /* FIXME: code repetition */
3603 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
3604 agg->frame_count, agg->start_idx);
3605
fc4b6853 3606 tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status);
b481de9c
ZY
3607 tx_status->retry_count = tx_resp->failure_frame;
3608 tx_status->queue_number = status & 0xff;
3609 tx_status->queue_length = tx_resp->bt_kill_count;
3610 tx_status->queue_length |= tx_resp->failure_rts;
3611
bb8c093b 3612 tx_status->flags = iwl4965_is_tx_success(status)?
b481de9c
ZY
3613 IEEE80211_TX_STATUS_ACK : 0;
3614 tx_status->control.tx_rate =
bb8c093b 3615 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3616 /* FIXME: code repetition end */
3617
3618 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3619 status & 0xff, tx_resp->failure_frame);
3620 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
bb8c093b 3621 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
b481de9c
ZY
3622
3623 agg->wait_for_ba = 0;
3624 } else {
6440adb5 3625 /* Two or more frames were attempted; expect block-ack */
b481de9c
ZY
3626 u64 bitmap = 0;
3627 int start = agg->start_idx;
3628
6440adb5 3629 /* Construct bit-map of pending frames within Tx window */
b481de9c
ZY
3630 for (i = 0; i < agg->frame_count; i++) {
3631 u16 sc;
3632 status = le32_to_cpu(frame_status[i]);
3633 seq = status >> 16;
3634 idx = SEQ_TO_INDEX(seq);
3635 txq_id = SEQ_TO_QUEUE(seq);
3636
3637 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3638 AGG_TX_STATE_ABORT_MSK))
3639 continue;
3640
3641 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3642 agg->frame_count, txq_id, idx);
3643
bb8c093b 3644 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
b481de9c
ZY
3645
3646 sc = le16_to_cpu(hdr->seq_ctrl);
3647 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3648 IWL_ERROR("BUG_ON idx doesn't match seq control"
3649 " idx=%d, seq_idx=%d, seq=%d\n",
3650 idx, SEQ_TO_SN(sc),
3651 hdr->seq_ctrl);
3652 return -1;
3653 }
3654
3655 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3656 i, idx, SEQ_TO_SN(sc));
3657
3658 sh = idx - start;
3659 if (sh > 64) {
3660 sh = (start - idx) + 0xff;
3661 bitmap = bitmap << sh;
3662 sh = 0;
3663 start = idx;
3664 } else if (sh < -64)
3665 sh = 0xff - (start - idx);
3666 else if (sh < 0) {
3667 sh = start - idx;
3668 start = idx;
3669 bitmap = bitmap << sh;
3670 sh = 0;
3671 }
3672 bitmap |= (1 << sh);
3673 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3674 start, (u32)(bitmap & 0xFFFFFFFF));
3675 }
3676
3677 agg->bitmap0 = bitmap & 0xFFFFFFFF;
3678 agg->bitmap1 = bitmap >> 32;
3679 agg->start_idx = start;
3680 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3681 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
3682 agg->frame_count, agg->start_idx,
3683 agg->bitmap0);
3684
3685 if (bitmap)
3686 agg->wait_for_ba = 1;
3687 }
3688 return 0;
3689}
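/*
 * Illustrative note (not from the original source): with start_idx = 5 and
 * in-window frames reported at indexes 5, 6 and 8, the loop above computes
 * shifts of 0, 1 and 3, so the pending-frame bitmap becomes 0xb
 * (binary 1011) relative to start_idx.
 */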
3690#endif
3691#endif
3692
6440adb5
BC
3693/**
3694 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3695 */
bb8c093b
CH
3696static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3697 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3698{
bb8c093b 3699 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3700 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3701 int txq_id = SEQ_TO_QUEUE(sequence);
3702 int index = SEQ_TO_INDEX(sequence);
bb8c093b 3703 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
b481de9c 3704 struct ieee80211_tx_status *tx_status;
bb8c093b 3705 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
b481de9c 3706 u32 status = le32_to_cpu(tx_resp->status);
c8b0e6e1
CH
3707#ifdef CONFIG_IWL4965_HT
3708#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
3709 int tid, sta_id;
3710#endif
3711#endif
3712
3713 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3714 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3715 "is out of range [0-%d] %d %d\n", txq_id,
fc4b6853
TW
3716 index, txq->q.n_bd, txq->q.write_ptr,
3717 txq->q.read_ptr);
b481de9c
ZY
3718 return;
3719 }
3720
c8b0e6e1
CH
3721#ifdef CONFIG_IWL4965_HT
3722#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3723 if (txq->sched_retry) {
bb8c093b 3724 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
b481de9c 3725 struct ieee80211_hdr *hdr =
bb8c093b
CH
3726 iwl4965_tx_queue_get_hdr(priv, txq_id, index);
3727 struct iwl4965_ht_agg *agg = NULL;
b481de9c
ZY
3728 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3729
3730 if (qc == NULL) {
3731 IWL_ERROR("BUG_ON qc is null!!!!\n");
3732 return;
3733 }
3734
3735 tid = le16_to_cpu(*qc) & 0xf;
3736
bb8c093b 3737 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
b481de9c
ZY
3738 if (unlikely(sta_id == IWL_INVALID_STATION)) {
3739 IWL_ERROR("Station not known for\n");
3740 return;
3741 }
3742
3743 agg = &priv->stations[sta_id].tid[tid].agg;
3744
3745 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
3746
3747 if ((tx_resp->frame_count == 1) &&
bb8c093b 3748 !iwl4965_is_tx_success(status)) {
b481de9c
ZY
3749 /* TODO: send BAR */
3750 }
3751
fc4b6853 3752 if ((txq->q.read_ptr != (scd_ssn & 0xff))) {
bb8c093b 3753 index = iwl4965_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
b481de9c
ZY
3754 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3755 "%d index %d\n", scd_ssn , index);
bb8c093b 3756 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
3757 }
3758 } else {
c8b0e6e1
CH
3759#endif /* CONFIG_IWL4965_HT_AGG */
3760#endif /* CONFIG_IWL4965_HT */
fc4b6853 3761 tx_status = &(txq->txb[txq->q.read_ptr].status);
b481de9c
ZY
3762
3763 tx_status->retry_count = tx_resp->failure_frame;
3764 tx_status->queue_number = status;
3765 tx_status->queue_length = tx_resp->bt_kill_count;
3766 tx_status->queue_length |= tx_resp->failure_rts;
3767
3768 tx_status->flags =
bb8c093b 3769 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
b481de9c
ZY
3770
3771 tx_status->control.tx_rate =
bb8c093b 3772 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3773
3774 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
bb8c093b 3775 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
b481de9c
ZY
3776 status, le32_to_cpu(tx_resp->rate_n_flags),
3777 tx_resp->failure_frame);
3778
3779 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3780 if (index != -1)
bb8c093b 3781 iwl4965_tx_queue_reclaim(priv, txq_id, index);
c8b0e6e1
CH
3782#ifdef CONFIG_IWL4965_HT
3783#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3784 }
c8b0e6e1
CH
3785#endif /* CONFIG_IWL4965_HT_AGG */
3786#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
3787
3788 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3789 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3790}
3791
3792
bb8c093b
CH
3793static void iwl4965_rx_reply_alive(struct iwl4965_priv *priv,
3794 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3795{
bb8c093b
CH
3796 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3797 struct iwl4965_alive_resp *palive;
b481de9c
ZY
3798 struct delayed_work *pwork;
3799
3800 palive = &pkt->u.alive_frame;
3801
3802 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3803 "0x%01X 0x%01X\n",
3804 palive->is_valid, palive->ver_type,
3805 palive->ver_subtype);
3806
3807 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3808 IWL_DEBUG_INFO("Initialization Alive received.\n");
3809 memcpy(&priv->card_alive_init,
3810 &pkt->u.alive_frame,
bb8c093b 3811 sizeof(struct iwl4965_init_alive_resp));
b481de9c
ZY
3812 pwork = &priv->init_alive_start;
3813 } else {
3814 IWL_DEBUG_INFO("Runtime Alive received.\n");
3815 memcpy(&priv->card_alive, &pkt->u.alive_frame,
bb8c093b 3816 sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
3817 pwork = &priv->alive_start;
3818 }
3819
3820 /* We delay the ALIVE response by 5ms to
3821 * give the HW RF Kill time to activate... */
3822 if (palive->is_valid == UCODE_VALID_OK)
3823 queue_delayed_work(priv->workqueue, pwork,
3824 msecs_to_jiffies(5));
3825 else
3826 IWL_WARNING("uCode did not respond OK.\n");
3827}
3828
bb8c093b
CH
3829static void iwl4965_rx_reply_add_sta(struct iwl4965_priv *priv,
3830 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3831{
bb8c093b 3832 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3833
3834 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3835 return;
3836}
3837
bb8c093b
CH
3838static void iwl4965_rx_reply_error(struct iwl4965_priv *priv,
3839 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3840{
bb8c093b 3841 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3842
3843 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3844 "seq 0x%04X ser 0x%08X\n",
3845 le32_to_cpu(pkt->u.err_resp.error_type),
3846 get_cmd_string(pkt->u.err_resp.cmd_id),
3847 pkt->u.err_resp.cmd_id,
3848 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3849 le32_to_cpu(pkt->u.err_resp.error_info));
3850}
3851
3852#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3853
bb8c093b 3854static void iwl4965_rx_csa(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3855{
bb8c093b
CH
3856 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3857 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
3858 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
3859 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3860 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3861 rxon->channel = csa->channel;
3862 priv->staging_rxon.channel = csa->channel;
3863}
3864
bb8c093b
CH
3865static void iwl4965_rx_spectrum_measure_notif(struct iwl4965_priv *priv,
3866 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3867{
c8b0e6e1 3868#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
bb8c093b
CH
3869 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3870 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
3871
3872 if (!report->state) {
3873 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3874 "Spectrum Measure Notification: Start\n");
3875 return;
3876 }
3877
3878 memcpy(&priv->measure_report, report, sizeof(*report));
3879 priv->measurement_status |= MEASUREMENT_READY;
3880#endif
3881}
3882
bb8c093b
CH
3883static void iwl4965_rx_pm_sleep_notif(struct iwl4965_priv *priv,
3884 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3885{
c8b0e6e1 3886#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3887 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3888 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
3889 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3890 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3891#endif
3892}
3893
bb8c093b
CH
3894static void iwl4965_rx_pm_debug_statistics_notif(struct iwl4965_priv *priv,
3895 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3896{
bb8c093b 3897 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3898 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3899 "notification for %s:\n",
3900 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
bb8c093b 3901 iwl4965_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
b481de9c
ZY
3902}
3903
bb8c093b 3904static void iwl4965_bg_beacon_update(struct work_struct *work)
b481de9c 3905{
bb8c093b
CH
3906 struct iwl4965_priv *priv =
3907 container_of(work, struct iwl4965_priv, beacon_update);
b481de9c
ZY
3908 struct sk_buff *beacon;
3909
3910 	/* Pull updated AP beacon from mac80211; will fail if not in AP mode */
32bfd35d 3911 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL);
b481de9c
ZY
3912
3913 if (!beacon) {
3914 IWL_ERROR("update beacon failed\n");
3915 return;
3916 }
3917
3918 mutex_lock(&priv->mutex);
3919 /* new beacon skb is allocated every time; dispose previous.*/
3920 if (priv->ibss_beacon)
3921 dev_kfree_skb(priv->ibss_beacon);
3922
3923 priv->ibss_beacon = beacon;
3924 mutex_unlock(&priv->mutex);
3925
bb8c093b 3926 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
3927}
3928
bb8c093b
CH
3929static void iwl4965_rx_beacon_notif(struct iwl4965_priv *priv,
3930 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3931{
c8b0e6e1 3932#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3933 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3934 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3935 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
b481de9c
ZY
3936
3937 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3938 "tsf %d %d rate %d\n",
3939 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3940 beacon->beacon_notify_hdr.failure_frame,
3941 le32_to_cpu(beacon->ibss_mgr_status),
3942 le32_to_cpu(beacon->high_tsf),
3943 le32_to_cpu(beacon->low_tsf), rate);
3944#endif
3945
3946 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3947 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3948 queue_work(priv->workqueue, &priv->beacon_update);
3949}
3950
3951/* Service response to REPLY_SCAN_CMD (0x80) */
bb8c093b
CH
3952static void iwl4965_rx_reply_scan(struct iwl4965_priv *priv,
3953 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3954{
c8b0e6e1 3955#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3956 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3957 struct iwl4965_scanreq_notification *notif =
3958 (struct iwl4965_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
3959
3960 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3961#endif
3962}
3963
3964/* Service SCAN_START_NOTIFICATION (0x82) */
bb8c093b
CH
3965static void iwl4965_rx_scan_start_notif(struct iwl4965_priv *priv,
3966 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3967{
bb8c093b
CH
3968 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3969 struct iwl4965_scanstart_notification *notif =
3970 (struct iwl4965_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
3971 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3972 IWL_DEBUG_SCAN("Scan start: "
3973 "%d [802.11%s] "
3974 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3975 notif->channel,
3976 notif->band ? "bg" : "a",
3977 notif->tsf_high,
3978 notif->tsf_low, notif->status, notif->beacon_timer);
3979}
3980
3981/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
bb8c093b
CH
3982static void iwl4965_rx_scan_results_notif(struct iwl4965_priv *priv,
3983 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3984{
bb8c093b
CH
3985 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3986 struct iwl4965_scanresults_notification *notif =
3987 (struct iwl4965_scanresults_notification *)pkt->u.raw;
b481de9c
ZY
3988
3989 IWL_DEBUG_SCAN("Scan ch.res: "
3990 "%d [802.11%s] "
3991 "(TSF: 0x%08X:%08X) - %d "
3992 "elapsed=%lu usec (%dms since last)\n",
3993 notif->channel,
3994 notif->band ? "bg" : "a",
3995 le32_to_cpu(notif->tsf_high),
3996 le32_to_cpu(notif->tsf_low),
3997 le32_to_cpu(notif->statistics[0]),
3998 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3999 jiffies_to_msecs(elapsed_jiffies
4000 (priv->last_scan_jiffies, jiffies)));
4001
4002 priv->last_scan_jiffies = jiffies;
7878a5a4 4003 priv->next_scan_jiffies = 0;
b481de9c
ZY
4004}
4005
4006/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
bb8c093b
CH
4007static void iwl4965_rx_scan_complete_notif(struct iwl4965_priv *priv,
4008 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4009{
bb8c093b
CH
4010 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4011 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
b481de9c
ZY
4012
4013 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
4014 scan_notif->scanned_channels,
4015 scan_notif->tsf_low,
4016 scan_notif->tsf_high, scan_notif->status);
4017
4018 /* The HW is no longer scanning */
4019 clear_bit(STATUS_SCAN_HW, &priv->status);
4020
4021 /* The scan completion notification came in, so kill that timer... */
4022 cancel_delayed_work(&priv->scan_check);
4023
4024 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
4025 (priv->scan_bands == 2) ? "2.4" : "5.2",
4026 jiffies_to_msecs(elapsed_jiffies
4027 (priv->scan_pass_start, jiffies)));
4028
4029 /* Remove this scanned band from the list
4030 * of pending bands to scan */
4031 priv->scan_bands--;
4032
4033 /* If a request to abort was given, or the scan did not succeed
4034 * then we reset the scan state machine and terminate,
4035 * re-queuing another scan if one has been requested */
4036 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4037 IWL_DEBUG_INFO("Aborted scan completed.\n");
4038 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
4039 } else {
4040 /* If there are more bands on this scan pass reschedule */
4041 if (priv->scan_bands > 0)
4042 goto reschedule;
4043 }
4044
4045 priv->last_scan_jiffies = jiffies;
7878a5a4 4046 priv->next_scan_jiffies = 0;
b481de9c
ZY
4047 IWL_DEBUG_INFO("Setting scan to off\n");
4048
4049 clear_bit(STATUS_SCANNING, &priv->status);
4050
4051 IWL_DEBUG_INFO("Scan took %dms\n",
4052 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
4053
4054 queue_work(priv->workqueue, &priv->scan_completed);
4055
4056 return;
4057
4058reschedule:
4059 priv->scan_pass_start = jiffies;
4060 queue_work(priv->workqueue, &priv->request_scan);
4061}
4062
4063/* Handle notification from uCode that card's power state is changing
4064 * due to software, hardware, or critical temperature RFKILL */
bb8c093b
CH
4065static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv,
4066 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4067{
bb8c093b 4068 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
4069 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4070 unsigned long status = priv->status;
4071
4072 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
4073 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4074 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
4075
4076 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4077 RF_CARD_DISABLED)) {
4078
bb8c093b 4079 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
4080 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4081
bb8c093b
CH
4082 if (!iwl4965_grab_nic_access(priv)) {
4083 iwl4965_write_direct32(
b481de9c
ZY
4084 priv, HBUS_TARG_MBX_C,
4085 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4086
bb8c093b 4087 iwl4965_release_nic_access(priv);
b481de9c
ZY
4088 }
4089
4090 if (!(flags & RXON_CARD_DISABLED)) {
bb8c093b 4091 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 4092 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
bb8c093b
CH
4093 if (!iwl4965_grab_nic_access(priv)) {
4094 iwl4965_write_direct32(
b481de9c
ZY
4095 priv, HBUS_TARG_MBX_C,
4096 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4097
bb8c093b 4098 iwl4965_release_nic_access(priv);
b481de9c
ZY
4099 }
4100 }
4101
4102 if (flags & RF_CARD_DISABLED) {
bb8c093b 4103 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c 4104 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
bb8c093b
CH
4105 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
4106 if (!iwl4965_grab_nic_access(priv))
4107 iwl4965_release_nic_access(priv);
b481de9c
ZY
4108 }
4109 }
4110
4111 if (flags & HW_CARD_DISABLED)
4112 set_bit(STATUS_RF_KILL_HW, &priv->status);
4113 else
4114 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4115
4116
4117 if (flags & SW_CARD_DISABLED)
4118 set_bit(STATUS_RF_KILL_SW, &priv->status);
4119 else
4120 clear_bit(STATUS_RF_KILL_SW, &priv->status);
4121
4122 if (!(flags & RXON_CARD_DISABLED))
bb8c093b 4123 iwl4965_scan_cancel(priv);
b481de9c
ZY
4124
4125 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
4126 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
4127 (test_bit(STATUS_RF_KILL_SW, &status) !=
4128 test_bit(STATUS_RF_KILL_SW, &priv->status)))
4129 queue_work(priv->workqueue, &priv->rf_kill);
4130 else
4131 wake_up_interruptible(&priv->wait_command_queue);
4132}
4133
4134/**
bb8c093b 4135 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
4136 *
4137 * Setup the RX handlers for each of the reply types sent from the uCode
4138 * to the host.
4139 *
4140 * This function chains into the hardware specific files for them to setup
4141 * any hardware specific handlers as well.
4142 */
bb8c093b 4143static void iwl4965_setup_rx_handlers(struct iwl4965_priv *priv)
b481de9c 4144{
bb8c093b
CH
4145 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
4146 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
4147 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
4148 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
b481de9c 4149 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
4150 iwl4965_rx_spectrum_measure_notif;
4151 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl4965_rx_pm_sleep_notif;
b481de9c 4152 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
4153 iwl4965_rx_pm_debug_statistics_notif;
4154 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
b481de9c 4155
9fbab516
BC
4156 /*
4157 * The same handler is used for both the REPLY to a discrete
4158 * statistics request from the host as well as for the periodic
4159 * statistics notifications (after received beacons) from the uCode.
b481de9c 4160 */
bb8c093b
CH
4161 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
4162 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
b481de9c 4163
bb8c093b
CH
4164 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
4165 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
b481de9c 4166 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 4167 iwl4965_rx_scan_results_notif;
b481de9c 4168 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
4169 iwl4965_rx_scan_complete_notif;
4170 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
4171 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
b481de9c 4172
9fbab516 4173 /* Set up hardware specific Rx handlers */
bb8c093b 4174 iwl4965_hw_rx_handler_setup(priv);
b481de9c
ZY
4175}
4176
4177/**
bb8c093b 4178 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
4179 * @rxb: Rx buffer to reclaim
4180 *
4181 * If an Rx buffer has an async callback associated with it the callback
4182 * will be executed. The attached skb (if present) will only be freed
4183 * if the callback returns 1
4184 */
bb8c093b
CH
4185static void iwl4965_tx_cmd_complete(struct iwl4965_priv *priv,
4186 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4187{
bb8c093b 4188 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4189 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4190 int txq_id = SEQ_TO_QUEUE(sequence);
4191 int index = SEQ_TO_INDEX(sequence);
4192 int huge = sequence & SEQ_HUGE_FRAME;
4193 int cmd_index;
bb8c093b 4194 struct iwl4965_cmd *cmd;
b481de9c
ZY
4195
4196 /* If a Tx command is being handled and it isn't in the actual
4197 	 * command queue then a command routing bug has been introduced
4198 * in the queue management code. */
4199 if (txq_id != IWL_CMD_QUEUE_NUM)
4200 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
4201 txq_id, pkt->hdr.cmd);
4202 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
4203
4204 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
4205 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
4206
4207 /* Input error checking is done when commands are added to queue. */
4208 if (cmd->meta.flags & CMD_WANT_SKB) {
4209 cmd->meta.source->u.skb = rxb->skb;
4210 rxb->skb = NULL;
4211 } else if (cmd->meta.u.callback &&
4212 !cmd->meta.u.callback(priv, cmd, rxb->skb))
4213 rxb->skb = NULL;
4214
bb8c093b 4215 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
4216
4217 if (!(cmd->meta.flags & CMD_ASYNC)) {
4218 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4219 wake_up_interruptible(&priv->wait_command_queue);
4220 }
4221}
4222
4223/************************** RX-FUNCTIONS ****************************/
4224/*
4225 * Rx theory of operation
4226 *
9fbab516
BC
4227 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
4228 * each of which point to Receive Buffers to be filled by 4965. These get
4229 * used not only for Rx frames, but for any command response or notification
4230 * from the 4965. The driver and 4965 manage the Rx buffers by means
4231 * of indexes into the circular buffer.
b481de9c
ZY
4232 *
4233 * Rx Queue Indexes
4234 * The host/firmware share two index registers for managing the Rx buffers.
4235 *
4236 * The READ index maps to the first position that the firmware may be writing
4237 * to -- the driver can read up to (but not including) this position and get
4238 * good data.
4239 * The READ index is managed by the firmware once the card is enabled.
4240 *
4241 * The WRITE index maps to the last position the driver has read from -- the
4242 * position preceding WRITE is the last slot the firmware can place a packet.
4243 *
4244 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4245 * WRITE = READ.
4246 *
9fbab516 4247 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
4248 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4249 *
9fbab516 4250 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
4251 * and fire the RX interrupt. The driver can then query the READ index and
4252 * process as many packets as possible, moving the WRITE index forward as it
4253 * resets the Rx queue buffers with new memory.
4254 *
4255 * The management in the driver is as follows:
4256 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
4257 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 4258 * to replenish the iwl->rxq->rx_free.
bb8c093b 4259 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
4260 * iwl->rxq is replenished and the READ INDEX is updated (updating the
4261 * 'processed' and 'read' driver indexes as well)
4262 * + A received packet is processed and handed to the kernel network stack,
4263 * detached from the iwl->rxq. The driver 'processed' index is updated.
4264 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
4265 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
4266 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
4267 * were enough free buffers and RX_STALLED is set it is cleared.
4268 *
4269 *
4270 * Driver sequence:
4271 *
9fbab516
BC
4272 * iwl4965_rx_queue_alloc() Allocates rx_free
4273 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 4274 * iwl4965_rx_queue_restock
9fbab516 4275 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
4276 * queue, updates firmware pointers, and updates
4277 * the WRITE index. If insufficient rx_free buffers
bb8c093b 4278 * are available, schedules iwl4965_rx_replenish
b481de9c
ZY
4279 *
4280 * -- enable interrupts --
9fbab516 4281 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
b481de9c
ZY
4282 * READ INDEX, detaching the SKB from the pool.
4283 * Moves the packet buffer from queue to rx_used.
bb8c093b 4284 * Calls iwl4965_rx_queue_restock to refill any empty
b481de9c
ZY
4285 * slots.
4286 * ...
4287 *
4288 */
4289
4290/**
bb8c093b 4291 * iwl4965_rx_queue_space - Return number of free slots available in queue.
b481de9c 4292 */
bb8c093b 4293static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
b481de9c
ZY
4294{
4295 int s = q->read - q->write;
4296 if (s <= 0)
4297 s += RX_QUEUE_SIZE;
4298 /* keep some buffer to not confuse full and empty queue */
4299 s -= 2;
4300 if (s < 0)
4301 s = 0;
4302 return s;
4303}
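/*
 * Illustrative example (assuming the driver's usual RX_QUEUE_SIZE of 256):
 * with q->read = 8 and q->write = 250, s = 8 - 250 = -242, which wraps to 14
 * and, after reserving 2 slots to keep "full" distinguishable from "empty",
 * reports 12 free slots.
 */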
4304
4305/**
bb8c093b 4306 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
b481de9c 4307 */
bb8c093b 4308int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_rx_queue *q)
b481de9c
ZY
4309{
4310 u32 reg = 0;
4311 int rc = 0;
4312 unsigned long flags;
4313
4314 spin_lock_irqsave(&q->lock, flags);
4315
4316 if (q->need_update == 0)
4317 goto exit_unlock;
4318
6440adb5 4319 /* If power-saving is in use, make sure device is awake */
b481de9c 4320 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
bb8c093b 4321 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4322
4323 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
bb8c093b 4324 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4325 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4326 goto exit_unlock;
4327 }
4328
bb8c093b 4329 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4330 if (rc)
4331 goto exit_unlock;
4332
6440adb5 4333 /* Device expects a multiple of 8 */
bb8c093b 4334 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
b481de9c 4335 q->write & ~0x7);
bb8c093b 4336 iwl4965_release_nic_access(priv);
6440adb5
BC
4337
4338 /* Else device is assumed to be awake */
b481de9c 4339 } else
6440adb5 4340 /* Device expects a multiple of 8 */
bb8c093b 4341 iwl4965_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
b481de9c
ZY
4342
4343
4344 q->need_update = 0;
4345
4346 exit_unlock:
4347 spin_unlock_irqrestore(&q->lock, flags);
4348 return rc;
4349}
4350
4351/**
9fbab516 4352 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 4353 */
bb8c093b 4354static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl4965_priv *priv,
b481de9c
ZY
4355 dma_addr_t dma_addr)
4356{
4357 return cpu_to_le32((u32)(dma_addr >> 8));
4358}
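/*
 * Example (illustrative): the device stores receive-buffer addresses in
 * 256-byte units, so a DMA address of 0x12345600 is written to the RBD as
 * cpu_to_le32(0x00123456) by the helper above.
 */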
4359
4360
4361/**
bb8c093b 4362 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 4363 *
9fbab516 4364 * If there are slots in the RX queue that need to be restocked,
b481de9c 4365 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 4366 * as we can, pulling from rx_free.
b481de9c
ZY
4367 *
4368 * This moves the 'write' index forward to catch up with 'processed', and
4369 * also updates the memory address in the firmware to reference the new
4370 * target buffer.
4371 */
bb8c093b 4372static int iwl4965_rx_queue_restock(struct iwl4965_priv *priv)
b481de9c 4373{
bb8c093b 4374 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4375 struct list_head *element;
bb8c093b 4376 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4377 unsigned long flags;
4378 int write, rc;
4379
4380 spin_lock_irqsave(&rxq->lock, flags);
4381 write = rxq->write & ~0x7;
bb8c093b 4382 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 4383 /* Get next free Rx buffer, remove from free list */
b481de9c 4384 element = rxq->rx_free.next;
bb8c093b 4385 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
b481de9c 4386 list_del(element);
6440adb5
BC
4387
4388 /* Point to Rx buffer via next RBD in circular buffer */
bb8c093b 4389 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
b481de9c
ZY
4390 rxq->queue[rxq->write] = rxb;
4391 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4392 rxq->free_count--;
4393 }
4394 spin_unlock_irqrestore(&rxq->lock, flags);
4395 /* If the pre-allocated buffer pool is dropping low, schedule to
4396 * refill it */
4397 if (rxq->free_count <= RX_LOW_WATERMARK)
4398 queue_work(priv->workqueue, &priv->rx_replenish);
4399
4400
6440adb5
BC
4401 /* If we've added more space for the firmware to place data, tell it.
4402 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
4403 if ((write != (rxq->write & ~0x7))
4404 || (abs(rxq->write - rxq->read) > 7)) {
4405 spin_lock_irqsave(&rxq->lock, flags);
4406 rxq->need_update = 1;
4407 spin_unlock_irqrestore(&rxq->lock, flags);
bb8c093b 4408 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
4409 if (rc)
4410 return rc;
4411 }
4412
4413 return 0;
4414}
4415
4416/**
bb8c093b 4417 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
4418 *
4419 * When moving to rx_free an SKB is allocated for the slot.
4420 *
bb8c093b 4421 * Also restock the Rx queue via iwl4965_rx_queue_restock.
01ebd063 4422 * This is called as a scheduled work item (except during initialization)
b481de9c 4423 */
5c0eef96 4424static void iwl4965_rx_allocate(struct iwl4965_priv *priv)
b481de9c 4425{
bb8c093b 4426 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4427 struct list_head *element;
bb8c093b 4428 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4429 unsigned long flags;
4430 spin_lock_irqsave(&rxq->lock, flags);
4431 while (!list_empty(&rxq->rx_used)) {
4432 element = rxq->rx_used.next;
bb8c093b 4433 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
6440adb5
BC
4434
4435 /* Alloc a new receive buffer */
b481de9c 4436 rxb->skb =
9ee1ba47
RR
4437 alloc_skb(priv->hw_setting.rx_buf_size,
4438 __GFP_NOWARN | GFP_ATOMIC);
b481de9c
ZY
4439 if (!rxb->skb) {
4440 if (net_ratelimit())
4441 printk(KERN_CRIT DRV_NAME
4442 ": Can not allocate SKB buffers\n");
4443 /* We don't reschedule replenish work here -- we will
4444 * call the restock method and if it still needs
4445 * more buffers it will schedule replenish */
4446 break;
4447 }
4448 priv->alloc_rxb_skb++;
4449 list_del(element);
6440adb5
BC
4450
4451 /* Get physical address of RB/SKB */
b481de9c
ZY
4452 rxb->dma_addr =
4453 pci_map_single(priv->pci_dev, rxb->skb->data,
9ee1ba47 4454 priv->hw_setting.rx_buf_size, PCI_DMA_FROMDEVICE);
b481de9c
ZY
4455 list_add_tail(&rxb->list, &rxq->rx_free);
4456 rxq->free_count++;
4457 }
4458 spin_unlock_irqrestore(&rxq->lock, flags);
5c0eef96
MA
4459}
4460
4461/*
4462 * this should be called while priv->lock is locked
4463*/
4fd1f841 4464static void __iwl4965_rx_replenish(void *data)
5c0eef96
MA
4465{
4466 struct iwl4965_priv *priv = data;
4467
4468 iwl4965_rx_allocate(priv);
4469 iwl4965_rx_queue_restock(priv);
4470}
4471
4472
4473void iwl4965_rx_replenish(void *data)
4474{
4475 struct iwl4965_priv *priv = data;
4476 unsigned long flags;
4477
4478 iwl4965_rx_allocate(priv);
b481de9c
ZY
4479
4480 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 4481 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4482 spin_unlock_irqrestore(&priv->lock, flags);
4483}
4484
4485/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
9fbab516 4486 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
b481de9c
ZY
4487 * This free routine walks the list of POOL entries and if SKB is set to
4488 * non-NULL it is unmapped and freed
4489 */
bb8c093b 4490static void iwl4965_rx_queue_free(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4491{
4492 int i;
4493 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4494 if (rxq->pool[i].skb != NULL) {
4495 pci_unmap_single(priv->pci_dev,
4496 rxq->pool[i].dma_addr,
9ee1ba47
RR
4497 priv->hw_setting.rx_buf_size,
4498 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4499 dev_kfree_skb(rxq->pool[i].skb);
4500 }
4501 }
4502
4503 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4504 rxq->dma_addr);
4505 rxq->bd = NULL;
4506}
4507
bb8c093b 4508int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv)
b481de9c 4509{
bb8c093b 4510 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4511 struct pci_dev *dev = priv->pci_dev;
4512 int i;
4513
4514 spin_lock_init(&rxq->lock);
4515 INIT_LIST_HEAD(&rxq->rx_free);
4516 INIT_LIST_HEAD(&rxq->rx_used);
6440adb5
BC
4517
4518 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
b481de9c
ZY
4519 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4520 if (!rxq->bd)
4521 return -ENOMEM;
6440adb5 4522
b481de9c
ZY
4523 /* Fill the rx_used queue with _all_ of the Rx buffers */
4524 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4525 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
6440adb5 4526
b481de9c
ZY
4527 /* Set us so that we have processed and used all buffers, but have
4528 * not restocked the Rx queue with fresh buffers */
4529 rxq->read = rxq->write = 0;
4530 rxq->free_count = 0;
4531 rxq->need_update = 0;
4532 return 0;
4533}
4534
bb8c093b 4535void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4536{
4537 unsigned long flags;
4538 int i;
4539 spin_lock_irqsave(&rxq->lock, flags);
4540 INIT_LIST_HEAD(&rxq->rx_free);
4541 INIT_LIST_HEAD(&rxq->rx_used);
4542 /* Fill the rx_used queue with _all_ of the Rx buffers */
4543 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4544 /* In the reset function, these buffers may have been allocated
4545 * to an SKB, so we need to unmap and free potential storage */
4546 if (rxq->pool[i].skb != NULL) {
4547 pci_unmap_single(priv->pci_dev,
4548 rxq->pool[i].dma_addr,
9ee1ba47
RR
4549 priv->hw_setting.rx_buf_size,
4550 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4551 priv->alloc_rxb_skb--;
4552 dev_kfree_skb(rxq->pool[i].skb);
4553 rxq->pool[i].skb = NULL;
4554 }
4555 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4556 }
4557
4558 /* Set us so that we have processed and used all buffers, but have
4559 * not restocked the Rx queue with fresh buffers */
4560 rxq->read = rxq->write = 0;
4561 rxq->free_count = 0;
4562 spin_unlock_irqrestore(&rxq->lock, flags);
4563}
4564
4565/* Convert linear signal-to-noise ratio into dB */
4566static u8 ratio2dB[100] = {
4567/* 0 1 2 3 4 5 6 7 8 9 */
4568 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4569 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4570 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4571 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4572 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4573 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4574 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4575 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4576 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4577 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4578};
4579
4580/* Calculates a relative dB value from a ratio of linear
4581 * (i.e. not dB) signal levels.
4582 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 4583int iwl4965_calc_db_from_ratio(int sig_ratio)
b481de9c 4584{
c899a575
AB
4585 /* 1000:1 or higher just report as 60 dB */
4586 if (sig_ratio >= 1000)
b481de9c
ZY
4587 return 60;
4588
c899a575 4589 /* 100:1 or higher, divide by 10 and use table,
b481de9c 4590 * add 20 dB to make up for divide by 10 */
c899a575 4591 if (sig_ratio >= 100)
b481de9c
ZY
4592 return (20 + (int)ratio2dB[sig_ratio/10]);
4593
4594 /* We shouldn't see this */
4595 if (sig_ratio < 1)
4596 return 0;
4597
4598 /* Use table for ratios 1:1 - 99:1 */
4599 return (int)ratio2dB[sig_ratio];
4600}
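/*
 * Worked example (illustrative): a linear signal-to-noise ratio of 250:1
 * takes the "divide by 10" branch above: 20 + ratio2dB[25] = 20 + 28 = 48 dB.
 * Ratios of 1000:1 or more saturate at the 60 dB cap.
 */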
4601
4602#define PERFECT_RSSI (-20) /* dBm */
4603#define WORST_RSSI (-95) /* dBm */
4604#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4605
4606/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4607 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4608 * about formulas used below. */
bb8c093b 4609int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
4610{
4611 int sig_qual;
4612 int degradation = PERFECT_RSSI - rssi_dbm;
4613
4614 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4615 * as indicator; formula is (signal dbm - noise dbm).
4616 * SNR at or above 40 is a great signal (100%).
4617 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4618 * Weakest usable signal is usually 10 - 15 dB SNR. */
4619 if (noise_dbm) {
4620 if (rssi_dbm - noise_dbm >= 40)
4621 return 100;
4622 else if (rssi_dbm < noise_dbm)
4623 return 0;
4624 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4625
4626 /* Else use just the signal level.
4627 * This formula is a least squares fit of data points collected and
4628 * compared with a reference system that had a percentage (%) display
4629 * for signal quality. */
4630 } else
4631 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4632 (15 * RSSI_RANGE + 62 * degradation)) /
4633 (RSSI_RANGE * RSSI_RANGE);
4634
4635 if (sig_qual > 100)
4636 sig_qual = 100;
4637 else if (sig_qual < 1)
4638 sig_qual = 0;
4639
4640 return sig_qual;
4641}
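/*
 * Worked examples (illustrative): with a noise measurement, rssi_dbm = -60
 * and noise_dbm = -90 give an SNR of 30 dB, so sig_qual = (30 * 5) / 2 = 75%.
 * Without a noise measurement, rssi_dbm = -60 gives degradation = 40 and the
 * least-squares formula above yields roughly 74%.
 */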
4642
4643/**
9fbab516 4644 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
4645 *
4646 * Uses the priv->rx_handlers callback function array to invoke
4647 * the appropriate handlers, including command responses,
4648 * frame-received notifications, and other notifications.
4649 */
bb8c093b 4650static void iwl4965_rx_handle(struct iwl4965_priv *priv)
b481de9c 4651{
bb8c093b
CH
4652 struct iwl4965_rx_mem_buffer *rxb;
4653 struct iwl4965_rx_packet *pkt;
4654 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4655 u32 r, i;
4656 int reclaim;
4657 unsigned long flags;
5c0eef96
MA
4658 u8 fill_rx = 0;
4659 u32 count = 0;
b481de9c 4660
6440adb5
BC
4661 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4662 * buffer that the driver may process (last buffer filled by ucode). */
bb8c093b 4663 r = iwl4965_hw_get_rx_read(priv);
b481de9c
ZY
4664 i = rxq->read;
4665
4666 /* Rx interrupt, but nothing sent from uCode */
4667 if (i == r)
4668 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4669
5c0eef96
MA
4670 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
4671 fill_rx = 1;
4672
b481de9c
ZY
4673 while (i != r) {
4674 rxb = rxq->queue[i];
4675
9fbab516 4676 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
4677 * then a bug has been introduced in the queue refilling
4678 * routines -- catch it here */
4679 BUG_ON(rxb == NULL);
4680
4681 rxq->queue[i] = NULL;
4682
4683 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
9ee1ba47 4684 priv->hw_setting.rx_buf_size,
b481de9c 4685 PCI_DMA_FROMDEVICE);
bb8c093b 4686 pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4687
4688 /* Reclaim a command buffer only if this packet is a response
4689 * to a (driver-originated) command.
4690 * If the packet (e.g. Rx frame) originated from uCode,
4691 * there is no command buffer to reclaim.
4692 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4693 * but apparently a few don't get set; catch them here. */
4694 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4695 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4696 (pkt->hdr.cmd != REPLY_4965_RX) &&
cfe01709 4697 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
b481de9c
ZY
4698 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4699 (pkt->hdr.cmd != REPLY_TX);
4700
4701 /* Based on type of command response or notification,
4702 * handle those that need handling via function in
bb8c093b 4703 * rx_handlers table. See iwl4965_setup_rx_handlers() */
b481de9c
ZY
4704 if (priv->rx_handlers[pkt->hdr.cmd]) {
4705 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4706 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4707 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4708 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4709 } else {
4710 /* No handling needed */
4711 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4712 "r %d i %d No handler needed for %s, 0x%02x\n",
4713 r, i, get_cmd_string(pkt->hdr.cmd),
4714 pkt->hdr.cmd);
4715 }
4716
4717 if (reclaim) {
9fbab516
BC
4718 /* Invoke any callbacks, transfer the skb to caller, and
4719 * fire off the (possibly) blocking iwl4965_send_cmd()
b481de9c
ZY
4720 * as we reclaim the driver command queue */
4721 if (rxb && rxb->skb)
bb8c093b 4722 iwl4965_tx_cmd_complete(priv, rxb);
b481de9c
ZY
4723 else
4724 IWL_WARNING("Claim null rxb?\n");
4725 }
4726
4727 /* For now we just don't re-use anything. We can tweak this
4728 * later to try and re-use notification packets and SKBs that
4729 * fail to Rx correctly */
4730 if (rxb->skb != NULL) {
4731 priv->alloc_rxb_skb--;
4732 dev_kfree_skb_any(rxb->skb);
4733 rxb->skb = NULL;
4734 }
4735
4736 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
9ee1ba47
RR
4737 priv->hw_setting.rx_buf_size,
4738 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4739 spin_lock_irqsave(&rxq->lock, flags);
4740 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4741 spin_unlock_irqrestore(&rxq->lock, flags);
4742 i = (i + 1) & RX_QUEUE_MASK;
5c0eef96
MA
4743 /* If there are a lot of unused frames,
4744 		 * restock the Rx queue so uCode won't assert. */
4745 if (fill_rx) {
4746 count++;
4747 if (count >= 8) {
4748 priv->rxq.read = i;
4749 __iwl4965_rx_replenish(priv);
4750 count = 0;
4751 }
4752 }
b481de9c
ZY
4753 }
4754
4755 /* Backtrack one entry */
4756 priv->rxq.read = i;
bb8c093b 4757 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4758}
4759
6440adb5
BC
4760/**
4761 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4762 */
bb8c093b
CH
4763static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
4764 struct iwl4965_tx_queue *txq)
b481de9c
ZY
4765{
4766 u32 reg = 0;
4767 int rc = 0;
4768 int txq_id = txq->q.id;
4769
4770 if (txq->need_update == 0)
4771 return rc;
4772
4773 /* if we're trying to save power */
4774 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4775 /* wake up nic if it's powered down ...
4776 * uCode will wake up, and interrupt us again, so next
4777 * time we'll skip this part. */
bb8c093b 4778 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4779
4780 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4781 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
bb8c093b 4782 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4783 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4784 return rc;
4785 }
4786
4787 /* restore this queue's parameters in nic hardware. */
bb8c093b 4788 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4789 if (rc)
4790 return rc;
bb8c093b 4791 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
fc4b6853 4792 txq->q.write_ptr | (txq_id << 8));
bb8c093b 4793 iwl4965_release_nic_access(priv);
b481de9c
ZY
4794
4795 /* else not in power-save mode, uCode will never sleep when we're
4796 * trying to tx (during RFKILL, we're not trying to tx). */
4797 } else
bb8c093b 4798 iwl4965_write32(priv, HBUS_TARG_WRPTR,
fc4b6853 4799 txq->q.write_ptr | (txq_id << 8));
b481de9c
ZY
4800
4801 txq->need_update = 0;
4802
4803 return rc;
4804}
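/*
 * Illustrative example (not from the original source): the value written to
 * HBUS_TARG_WRPTR packs the queue id into bits 8 and above and the write
 * index into the low bits, so txq_id = 4 with write_ptr = 0x2a is written
 * as 0x42a.
 */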
4805
c8b0e6e1 4806#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 4807static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
b481de9c 4808{
0795af57
JP
4809 DECLARE_MAC_BUF(mac);
4810
b481de9c 4811 IWL_DEBUG_RADIO("RX CONFIG:\n");
bb8c093b 4812 iwl4965_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
4813 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4814 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4815 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4816 le32_to_cpu(rxon->filter_flags));
4817 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4818 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4819 rxon->ofdm_basic_rates);
4820 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
0795af57
JP
4821 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
4822 print_mac(mac, rxon->node_addr));
4823 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4824 print_mac(mac, rxon->bssid_addr));
b481de9c
ZY
4825 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4826}
4827#endif
4828
bb8c093b 4829static void iwl4965_enable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4830{
4831 IWL_DEBUG_ISR("Enabling interrupts\n");
4832 set_bit(STATUS_INT_ENABLED, &priv->status);
bb8c093b 4833 iwl4965_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
4834}
4835
bb8c093b 4836static inline void iwl4965_disable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4837{
4838 clear_bit(STATUS_INT_ENABLED, &priv->status);
4839
4840 /* disable interrupts from uCode/NIC to host */
bb8c093b 4841 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
4842
4843 /* acknowledge/clear/reset any interrupts still pending
4844 * from uCode or flow handler (Rx/Tx DMA) */
bb8c093b
CH
4845 iwl4965_write32(priv, CSR_INT, 0xffffffff);
4846 iwl4965_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
4847 IWL_DEBUG_ISR("Disabled interrupts\n");
4848}
4849
4850static const char *desc_lookup(int i)
4851{
4852 switch (i) {
4853 case 1:
4854 return "FAIL";
4855 case 2:
4856 return "BAD_PARAM";
4857 case 3:
4858 return "BAD_CHECKSUM";
4859 case 4:
4860 return "NMI_INTERRUPT";
4861 case 5:
4862 return "SYSASSERT";
4863 case 6:
4864 return "FATAL_ERROR";
4865 }
4866
4867 return "UNKNOWN";
4868}
4869
4870#define ERROR_START_OFFSET (1 * sizeof(u32))
4871#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4872
bb8c093b 4873static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
b481de9c
ZY
4874{
4875 u32 data2, line;
4876 u32 desc, time, count, base, data1;
4877 u32 blink1, blink2, ilink1, ilink2;
4878 int rc;
4879
4880 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4881
bb8c093b 4882 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4883 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4884 return;
4885 }
4886
bb8c093b 4887 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4888 if (rc) {
4889 IWL_WARNING("Can not read from adapter at this time.\n");
4890 return;
4891 }
4892
bb8c093b 4893 count = iwl4965_read_targ_mem(priv, base);
b481de9c
ZY
4894
4895 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4896 IWL_ERROR("Start IWL Error Log Dump:\n");
4897 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4898 priv->status, priv->config, count);
4899 }
4900
bb8c093b
CH
4901 desc = iwl4965_read_targ_mem(priv, base + 1 * sizeof(u32));
4902 blink1 = iwl4965_read_targ_mem(priv, base + 3 * sizeof(u32));
4903 blink2 = iwl4965_read_targ_mem(priv, base + 4 * sizeof(u32));
4904 ilink1 = iwl4965_read_targ_mem(priv, base + 5 * sizeof(u32));
4905 ilink2 = iwl4965_read_targ_mem(priv, base + 6 * sizeof(u32));
4906 data1 = iwl4965_read_targ_mem(priv, base + 7 * sizeof(u32));
4907 data2 = iwl4965_read_targ_mem(priv, base + 8 * sizeof(u32));
4908 line = iwl4965_read_targ_mem(priv, base + 9 * sizeof(u32));
4909 time = iwl4965_read_targ_mem(priv, base + 11 * sizeof(u32));
b481de9c
ZY
4910
4911 IWL_ERROR("Desc Time "
4912 "data1 data2 line\n");
4913 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4914 desc_lookup(desc), desc, time, data1, data2, line);
4915 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4916 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4917 ilink1, ilink2);
4918
bb8c093b 4919 iwl4965_release_nic_access(priv);
b481de9c
ZY
4920}
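/*
 * For reference, the word offsets read from the error log above, relative
 * to the 'base' SRAM address: 0 = entry count, 1 = desc, 3 = blink1,
 * 4 = blink2, 5 = ilink1, 6 = ilink2, 7 = data1, 8 = data2, 9 = line,
 * 11 = time.  Words 2 and 10 are not printed by this dump.
 */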
4921
4922#define EVENT_START_OFFSET (4 * sizeof(u32))
4923
4924/**
bb8c093b 4925 * iwl4965_print_event_log - Dump error event log to syslog
b481de9c 4926 *
bb8c093b 4927 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained!
b481de9c 4928 */
bb8c093b 4929static void iwl4965_print_event_log(struct iwl4965_priv *priv, u32 start_idx,
b481de9c
ZY
4930 u32 num_events, u32 mode)
4931{
4932 u32 i;
4933 u32 base; /* SRAM byte address of event log header */
4934 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4935 u32 ptr; /* SRAM byte address of log data */
4936 u32 ev, time, data; /* event log data */
4937
4938 if (num_events == 0)
4939 return;
4940
4941 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4942
4943 if (mode == 0)
4944 event_size = 2 * sizeof(u32);
4945 else
4946 event_size = 3 * sizeof(u32);
4947
4948 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4949
4950 /* "time" is actually "data" for mode 0 (no timestamp).
4951 * place event id # at far right for easier visual parsing. */
4952 for (i = 0; i < num_events; i++) {
bb8c093b 4953 ev = iwl4965_read_targ_mem(priv, ptr);
b481de9c 4954 ptr += sizeof(u32);
bb8c093b 4955 time = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4956 ptr += sizeof(u32);
4957 if (mode == 0)
4958 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4959 else {
bb8c093b 4960 data = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4961 ptr += sizeof(u32);
4962 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4963 }
4964 }
4965}
4966
bb8c093b 4967static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv)
b481de9c
ZY
4968{
4969 int rc;
4970 u32 base; /* SRAM byte address of event log header */
4971 u32 capacity; /* event log capacity in # entries */
4972 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4973 u32 num_wraps; /* # times uCode wrapped to top of log */
4974 u32 next_entry; /* index of next entry to be written by uCode */
4975 u32 size; /* # entries that we'll print */
4976
4977 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 4978 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4979 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4980 return;
4981 }
4982
bb8c093b 4983 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4984 if (rc) {
4985 IWL_WARNING("Can not read from adapter at this time.\n");
4986 return;
4987 }
4988
4989 /* event log header */
bb8c093b
CH
4990 capacity = iwl4965_read_targ_mem(priv, base);
4991 mode = iwl4965_read_targ_mem(priv, base + (1 * sizeof(u32)));
4992 num_wraps = iwl4965_read_targ_mem(priv, base + (2 * sizeof(u32)));
4993 next_entry = iwl4965_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
4994
4995 size = num_wraps ? capacity : next_entry;
4996
4997 /* bail out if nothing in log */
4998 if (size == 0) {
583fab37 4999 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
bb8c093b 5000 iwl4965_release_nic_access(priv);
b481de9c
ZY
5001 return;
5002 }
5003
583fab37 5004 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
5005 size, num_wraps);
5006
5007 /* if uCode has wrapped back to top of log, start at the oldest entry,
 5008 * i.e. the next one that uCode would fill. */
5009 if (num_wraps)
bb8c093b 5010 iwl4965_print_event_log(priv, next_entry,
b481de9c
ZY
5011 capacity - next_entry, mode);
5012
5013 /* (then/else) start at top of log */
bb8c093b 5014 iwl4965_print_event_log(priv, 0, next_entry, mode);
b481de9c 5015
bb8c093b 5016 iwl4965_release_nic_access(priv);
b481de9c
ZY
5017}
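/*
 * Illustrative sketch (not part of this driver): the wrap handling above
 * treats the event log as a circular buffer.  The same walk over a plain
 * in-memory array would look like the helper below; the demo_* names are
 * hypothetical, and the sketch folds the mode-dependent 2- or 3-word
 * entries of the real log into a single struct for clarity.
 */
#if 0
struct demo_log_entry {
	u32 ev;		/* event id */
	u32 data;	/* event payload (and/or timestamp) */
};

static void demo_dump_log(const struct demo_log_entry *log, u32 capacity,
			  u32 next_entry, u32 num_wraps,
			  void (*print_entry)(const struct demo_log_entry *))
{
	u32 i;

	/* If the writer has wrapped, the oldest entry is the one it would
	 * overwrite next; dump from there to the end of the buffer first. */
	if (num_wraps)
		for (i = next_entry; i < capacity; i++)
			print_entry(&log[i]);

	/* Then dump from the top of the buffer up to the newest entry. */
	for (i = 0; i < next_entry; i++)
		print_entry(&log[i]);
}
#endif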
5018
5019/**
bb8c093b 5020 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 5021 */
bb8c093b 5022static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
b481de9c 5023{
bb8c093b 5024 /* Set the FW error flag -- cleared on iwl4965_down */
b481de9c
ZY
5025 set_bit(STATUS_FW_ERROR, &priv->status);
5026
5027 /* Cancel currently queued command. */
5028 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
5029
c8b0e6e1 5030#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5031 if (iwl4965_debug_level & IWL_DL_FW_ERRORS) {
5032 iwl4965_dump_nic_error_log(priv);
5033 iwl4965_dump_nic_event_log(priv);
5034 iwl4965_print_rx_config_cmd(&priv->staging_rxon);
b481de9c
ZY
5035 }
5036#endif
5037
5038 wake_up_interruptible(&priv->wait_command_queue);
5039
5040 /* Keep the restart process from trying to send host
 5041 * commands by clearing the READY status bit */
5042 clear_bit(STATUS_READY, &priv->status);
5043
5044 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5045 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
5046 "Restarting adapter due to uCode error.\n");
5047
bb8c093b 5048 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5049 memcpy(&priv->recovery_rxon, &priv->active_rxon,
5050 sizeof(priv->recovery_rxon));
5051 priv->error_recovering = 1;
5052 }
5053 queue_work(priv->workqueue, &priv->restart);
5054 }
5055}
5056
bb8c093b 5057static void iwl4965_error_recovery(struct iwl4965_priv *priv)
b481de9c
ZY
5058{
5059 unsigned long flags;
5060
5061 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
5062 sizeof(priv->staging_rxon));
5063 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5064 iwl4965_commit_rxon(priv);
b481de9c 5065
bb8c093b 5066 iwl4965_rxon_add_station(priv, priv->bssid, 1);
b481de9c
ZY
5067
5068 spin_lock_irqsave(&priv->lock, flags);
5069 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
5070 priv->error_recovering = 0;
5071 spin_unlock_irqrestore(&priv->lock, flags);
5072}
5073
bb8c093b 5074static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
b481de9c
ZY
5075{
5076 u32 inta, handled = 0;
5077 u32 inta_fh;
5078 unsigned long flags;
c8b0e6e1 5079#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
5080 u32 inta_mask;
5081#endif
5082
5083 spin_lock_irqsave(&priv->lock, flags);
5084
5085 /* Ack/clear/reset pending uCode interrupts.
5086 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
5087 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
bb8c093b
CH
5088 inta = iwl4965_read32(priv, CSR_INT);
5089 iwl4965_write32(priv, CSR_INT, inta);
b481de9c
ZY
5090
5091 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
5092 * Any new interrupts that happen after this, either while we're
5093 * in this tasklet, or later, will show up in next ISR/tasklet. */
bb8c093b
CH
5094 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
5095 iwl4965_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 5096
c8b0e6e1 5097#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5098 if (iwl4965_debug_level & IWL_DL_ISR) {
9fbab516
BC
5099 /* just for debug */
5100 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
b481de9c
ZY
5101 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5102 inta, inta_mask, inta_fh);
5103 }
5104#endif
5105
5106 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
5107 * atomic, make sure that inta covers all the interrupts that
5108 * we've discovered, even if FH interrupt came in just after
5109 * reading CSR_INT. */
5110 if (inta_fh & CSR_FH_INT_RX_MASK)
5111 inta |= CSR_INT_BIT_FH_RX;
5112 if (inta_fh & CSR_FH_INT_TX_MASK)
5113 inta |= CSR_INT_BIT_FH_TX;
5114
5115 /* Now service all interrupt bits discovered above. */
5116 if (inta & CSR_INT_BIT_HW_ERR) {
5117 IWL_ERROR("Microcode HW error detected. Restarting.\n");
5118
5119 /* Tell the device to stop sending interrupts */
bb8c093b 5120 iwl4965_disable_interrupts(priv);
b481de9c 5121
bb8c093b 5122 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5123
5124 handled |= CSR_INT_BIT_HW_ERR;
5125
5126 spin_unlock_irqrestore(&priv->lock, flags);
5127
5128 return;
5129 }
5130
c8b0e6e1 5131#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5132 if (iwl4965_debug_level & (IWL_DL_ISR)) {
b481de9c
ZY
5133 /* NIC fires this, but we don't use it, redundant with WAKEUP */
5134 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
5135 IWL_DEBUG_ISR("Microcode started or stopped.\n");
5136
5137 /* Alive notification via Rx interrupt will do the real work */
5138 if (inta & CSR_INT_BIT_ALIVE)
5139 IWL_DEBUG_ISR("Alive interrupt\n");
5140 }
5141#endif
5142 /* Safely ignore these bits for debug checks below */
5143 inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
5144
9fbab516 5145 /* HW RF KILL switch toggled */
b481de9c
ZY
5146 if (inta & CSR_INT_BIT_RF_KILL) {
5147 int hw_rf_kill = 0;
bb8c093b 5148 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
5149 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5150 hw_rf_kill = 1;
5151
5152 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
5153 "RF_KILL bit toggled to %s.\n",
5154 hw_rf_kill ? "disable radio":"enable radio");
5155
5156 /* Queue restart only if RF_KILL switch was set to "kill"
5157 * when we loaded driver, and is now set to "enable".
5158 * After we're Alive, RF_KILL gets handled by
5159 * iwl_rx_card_state_notif() */
53e49093
ZY
5160 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
5161 clear_bit(STATUS_RF_KILL_HW, &priv->status);
b481de9c 5162 queue_work(priv->workqueue, &priv->restart);
53e49093 5163 }
b481de9c
ZY
5164
5165 handled |= CSR_INT_BIT_RF_KILL;
5166 }
5167
9fbab516 5168 /* Chip got too hot and stopped itself */
b481de9c
ZY
5169 if (inta & CSR_INT_BIT_CT_KILL) {
5170 IWL_ERROR("Microcode CT kill error detected.\n");
5171 handled |= CSR_INT_BIT_CT_KILL;
5172 }
5173
5174 /* Error detected by uCode */
5175 if (inta & CSR_INT_BIT_SW_ERR) {
5176 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
5177 inta);
bb8c093b 5178 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5179 handled |= CSR_INT_BIT_SW_ERR;
5180 }
5181
5182 /* uCode wakes up after power-down sleep */
5183 if (inta & CSR_INT_BIT_WAKEUP) {
5184 IWL_DEBUG_ISR("Wakeup interrupt\n");
bb8c093b
CH
5185 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq);
5186 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]);
5187 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]);
5188 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]);
5189 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]);
5190 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]);
5191 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
5192
5193 handled |= CSR_INT_BIT_WAKEUP;
5194 }
5195
5196 /* All uCode command responses, including Tx command responses,
5197 * Rx "responses" (frame-received notification), and other
 5198 * notifications from uCode come through here */
5199 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 5200 iwl4965_rx_handle(priv);
b481de9c
ZY
5201 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
5202 }
5203
5204 if (inta & CSR_INT_BIT_FH_TX) {
5205 IWL_DEBUG_ISR("Tx interrupt\n");
5206 handled |= CSR_INT_BIT_FH_TX;
5207 }
5208
5209 if (inta & ~handled)
5210 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
5211
5212 if (inta & ~CSR_INI_SET_MASK) {
5213 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
5214 inta & ~CSR_INI_SET_MASK);
5215 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
5216 }
5217
5218 /* Re-enable all interrupts */
bb8c093b 5219 iwl4965_enable_interrupts(priv);
b481de9c 5220
c8b0e6e1 5221#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5222 if (iwl4965_debug_level & (IWL_DL_ISR)) {
5223 inta = iwl4965_read32(priv, CSR_INT);
5224 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
5225 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5226 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5227 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5228 }
5229#endif
5230 spin_unlock_irqrestore(&priv->lock, flags);
5231}
5232
bb8c093b 5233static irqreturn_t iwl4965_isr(int irq, void *data)
b481de9c 5234{
bb8c093b 5235 struct iwl4965_priv *priv = data;
b481de9c
ZY
5236 u32 inta, inta_mask;
5237 u32 inta_fh;
5238 if (!priv)
5239 return IRQ_NONE;
5240
5241 spin_lock(&priv->lock);
5242
5243 /* Disable (but don't clear!) interrupts here to avoid
5244 * back-to-back ISRs and sporadic interrupts from our NIC.
5245 * If we have something to service, the tasklet will re-enable ints.
5246 * If we *don't* have something, we'll re-enable before leaving here. */
bb8c093b
CH
5247 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); /* just for debug */
5248 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
5249
5250 /* Discover which interrupts are active/pending */
bb8c093b
CH
5251 inta = iwl4965_read32(priv, CSR_INT);
5252 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5253
5254 /* Ignore interrupt if there's nothing in NIC to service.
5255 * This may be due to IRQ shared with another device,
5256 * or due to sporadic interrupts thrown from our NIC. */
5257 if (!inta && !inta_fh) {
5258 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5259 goto none;
5260 }
5261
5262 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
66fbb541
ON
5263 /* Hardware disappeared. It might have already raised
5264 * an interrupt */
b481de9c 5265 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
66fbb541 5266 goto unplugged;
b481de9c
ZY
5267 }
5268
5269 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5270 inta, inta_mask, inta_fh);
5271
bb8c093b 5272 /* iwl4965_irq_tasklet() will service interrupts and re-enable them */
b481de9c 5273 tasklet_schedule(&priv->irq_tasklet);
b481de9c 5274
66fbb541
ON
5275 unplugged:
5276 spin_unlock(&priv->lock);
b481de9c
ZY
5277 return IRQ_HANDLED;
5278
5279 none:
5280 /* re-enable interrupts here since we don't have anything to service. */
bb8c093b 5281 iwl4965_enable_interrupts(priv);
b481de9c
ZY
5282 spin_unlock(&priv->lock);
5283 return IRQ_NONE;
5284}
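/*
 * To summarize the interrupt handshake implemented above: the hard IRQ
 * handler only masks CSR_INT_MASK and schedules the tasklet (or, when
 * there is nothing to service, re-enables interrupts itself and returns
 * IRQ_NONE); the tasklet then acks CSR_INT and CSR_FH_INT_STATUS,
 * services each discovered bit, and re-enables interrupts via
 * iwl4965_enable_interrupts() on its way out.
 */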
5285
5286/************************** EEPROM BANDS ****************************
5287 *
bb8c093b 5288 * The iwl4965_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
5289 * EEPROM contents to the specific channel number supported for each
5290 * band.
5291 *
bb8c093b 5292 * For example, iwl4965_priv->eeprom.band_3_channels[4] from the band_3
b481de9c
ZY
5293 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5294 * The specific geography and calibration information for that channel
5295 * is contained in the eeprom map itself.
5296 *
5297 * During init, we copy the eeprom information and channel map
5298 * information into priv->channel_info_24/52 and priv->channel_map_24/52
5299 *
5300 * channel_map_24/52 provides the index in the channel_info array for a
5301 * given channel. We have to have two separate maps as there is channel
5302 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5303 * band_2
5304 *
5305 * A value of 0xff stored in the channel_map indicates that the channel
5306 * is not supported by the hardware at all.
5307 *
5308 * A value of 0xfe in the channel_map indicates that the channel is not
5309 * valid for Tx with the current hardware. This means that
5310 * while the system can tune and receive on a given channel, it may not
5311 * be able to associate or transmit any frames on that
5312 * channel. There is no corresponding channel information for that
5313 * entry.
5314 *
5315 *********************************************************************/
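/*
 * Illustrative sketch (not part of this driver): given the 0xff/0xfe
 * conventions described above, a transmit-eligibility check against such
 * a channel map could be written as below.  The demo_* names are
 * hypothetical; the real driver resolves channels through
 * iwl4965_get_channel_info() rather than an explicit map walk.
 */
#if 0
#define DEMO_CH_UNSUPPORTED	0xff	/* not supported by the hardware */
#define DEMO_CH_RX_ONLY		0xfe	/* can tune and receive, but not Tx */

static int demo_channel_usable_for_tx(const u8 *channel_map, u8 channel)
{
	u8 idx = channel_map[channel];

	if (idx == DEMO_CH_UNSUPPORTED || idx == DEMO_CH_RX_ONLY)
		return 0;

	/* Any other value is an index into the channel_info array. */
	return 1;
}
#endif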
5316
5317/* 2.4 GHz */
bb8c093b 5318static const u8 iwl4965_eeprom_band_1[14] = {
b481de9c
ZY
5319 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5320};
5321
5322/* 5.2 GHz bands */
9fbab516 5323static const u8 iwl4965_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
5324 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5325};
5326
9fbab516 5327static const u8 iwl4965_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
5328 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5329};
5330
bb8c093b 5331static const u8 iwl4965_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
5332 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5333};
5334
bb8c093b 5335static const u8 iwl4965_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
5336 145, 149, 153, 157, 161, 165
5337};
5338
bb8c093b 5339static u8 iwl4965_eeprom_band_6[] = { /* 2.4 FAT channel */
b481de9c
ZY
5340 1, 2, 3, 4, 5, 6, 7
5341};
5342
bb8c093b 5343static u8 iwl4965_eeprom_band_7[] = { /* 5.2 FAT channel */
b481de9c
ZY
5344 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5345};
5346
9fbab516
BC
5347static void iwl4965_init_band_reference(const struct iwl4965_priv *priv,
5348 int band,
b481de9c 5349 int *eeprom_ch_count,
bb8c093b 5350 const struct iwl4965_eeprom_channel
b481de9c
ZY
5351 **eeprom_ch_info,
5352 const u8 **eeprom_ch_index)
5353{
5354 switch (band) {
5355 case 1: /* 2.4GHz band */
bb8c093b 5356 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_1);
b481de9c 5357 *eeprom_ch_info = priv->eeprom.band_1_channels;
bb8c093b 5358 *eeprom_ch_index = iwl4965_eeprom_band_1;
b481de9c 5359 break;
9fbab516 5360 case 2: /* 4.9GHz band */
bb8c093b 5361 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_2);
b481de9c 5362 *eeprom_ch_info = priv->eeprom.band_2_channels;
bb8c093b 5363 *eeprom_ch_index = iwl4965_eeprom_band_2;
b481de9c
ZY
5364 break;
5365 case 3: /* 5.2GHz band */
bb8c093b 5366 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_3);
b481de9c 5367 *eeprom_ch_info = priv->eeprom.band_3_channels;
bb8c093b 5368 *eeprom_ch_index = iwl4965_eeprom_band_3;
b481de9c 5369 break;
9fbab516 5370 case 4: /* 5.5GHz band */
bb8c093b 5371 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_4);
b481de9c 5372 *eeprom_ch_info = priv->eeprom.band_4_channels;
bb8c093b 5373 *eeprom_ch_index = iwl4965_eeprom_band_4;
b481de9c 5374 break;
9fbab516 5375 case 5: /* 5.7GHz band */
bb8c093b 5376 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c 5377 *eeprom_ch_info = priv->eeprom.band_5_channels;
bb8c093b 5378 *eeprom_ch_index = iwl4965_eeprom_band_5;
b481de9c 5379 break;
9fbab516 5380 case 6: /* 2.4GHz FAT channels */
bb8c093b 5381 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_6);
b481de9c 5382 *eeprom_ch_info = priv->eeprom.band_24_channels;
bb8c093b 5383 *eeprom_ch_index = iwl4965_eeprom_band_6;
b481de9c 5384 break;
9fbab516 5385 case 7: /* 5 GHz FAT channels */
bb8c093b 5386 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_7);
b481de9c 5387 *eeprom_ch_info = priv->eeprom.band_52_channels;
bb8c093b 5388 *eeprom_ch_index = iwl4965_eeprom_band_7;
b481de9c
ZY
5389 break;
5390 default:
5391 BUG();
5392 return;
5393 }
5394}
5395
6440adb5
BC
5396/**
5397 * iwl4965_get_channel_info - Find driver's private channel info
5398 *
5399 * Based on band and channel number.
5400 */
bb8c093b 5401const struct iwl4965_channel_info *iwl4965_get_channel_info(const struct iwl4965_priv *priv,
b481de9c
ZY
5402 int phymode, u16 channel)
5403{
5404 int i;
5405
5406 switch (phymode) {
5407 case MODE_IEEE80211A:
5408 for (i = 14; i < priv->channel_count; i++) {
5409 if (priv->channel_info[i].channel == channel)
5410 return &priv->channel_info[i];
5411 }
5412 break;
5413
5414 case MODE_IEEE80211B:
5415 case MODE_IEEE80211G:
5416 if (channel >= 1 && channel <= 14)
5417 return &priv->channel_info[channel - 1];
5418 break;
5419
5420 }
5421
5422 return NULL;
5423}
5424
5425#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5426 ? # x " " : "")
5427
6440adb5
BC
5428/**
5429 * iwl4965_init_channel_map - Set up driver's info for all possible channels
5430 */
bb8c093b 5431static int iwl4965_init_channel_map(struct iwl4965_priv *priv)
b481de9c
ZY
5432{
5433 int eeprom_ch_count = 0;
5434 const u8 *eeprom_ch_index = NULL;
bb8c093b 5435 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 5436 int band, ch;
bb8c093b 5437 struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5438
5439 if (priv->channel_count) {
5440 IWL_DEBUG_INFO("Channel map already initialized.\n");
5441 return 0;
5442 }
5443
5444 if (priv->eeprom.version < 0x2f) {
5445 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5446 priv->eeprom.version);
5447 return -EINVAL;
5448 }
5449
5450 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5451
5452 priv->channel_count =
bb8c093b
CH
5453 ARRAY_SIZE(iwl4965_eeprom_band_1) +
5454 ARRAY_SIZE(iwl4965_eeprom_band_2) +
5455 ARRAY_SIZE(iwl4965_eeprom_band_3) +
5456 ARRAY_SIZE(iwl4965_eeprom_band_4) +
5457 ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c
ZY
5458
5459 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5460
bb8c093b 5461 priv->channel_info = kzalloc(sizeof(struct iwl4965_channel_info) *
b481de9c
ZY
5462 priv->channel_count, GFP_KERNEL);
5463 if (!priv->channel_info) {
5464 IWL_ERROR("Could not allocate channel_info\n");
5465 priv->channel_count = 0;
5466 return -ENOMEM;
5467 }
5468
5469 ch_info = priv->channel_info;
5470
 5471 /* Loop through the 5 EEPROM bands, adding them in order to the
 5472 * channel map we maintain (which contains more information than
 5473 * what is in the EEPROM alone) */
5474 for (band = 1; band <= 5; band++) {
5475
bb8c093b 5476 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5477 &eeprom_ch_info, &eeprom_ch_index);
5478
5479 /* Loop through each band adding each of the channels */
5480 for (ch = 0; ch < eeprom_ch_count; ch++) {
5481 ch_info->channel = eeprom_ch_index[ch];
5482 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5483 MODE_IEEE80211A;
5484
5485 /* permanently store EEPROM's channel regulatory flags
5486 * and max power in channel info database. */
5487 ch_info->eeprom = eeprom_ch_info[ch];
5488
5489 /* Copy the run-time flags so they are there even on
5490 * invalid channels */
5491 ch_info->flags = eeprom_ch_info[ch].flags;
5492
5493 if (!(is_channel_valid(ch_info))) {
5494 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5495 "No traffic\n",
5496 ch_info->channel,
5497 ch_info->flags,
5498 is_channel_a_band(ch_info) ?
5499 "5.2" : "2.4");
5500 ch_info++;
5501 continue;
5502 }
5503
5504 /* Initialize regulatory-based run-time data */
5505 ch_info->max_power_avg = ch_info->curr_txpow =
5506 eeprom_ch_info[ch].max_power_avg;
5507 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5508 ch_info->min_power = 0;
5509
5510 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5511 " %ddBm): Ad-Hoc %ssupported\n",
5512 ch_info->channel,
5513 is_channel_a_band(ch_info) ?
5514 "5.2" : "2.4",
5515 CHECK_AND_PRINT(IBSS),
5516 CHECK_AND_PRINT(ACTIVE),
5517 CHECK_AND_PRINT(RADAR),
5518 CHECK_AND_PRINT(WIDE),
5519 CHECK_AND_PRINT(NARROW),
5520 CHECK_AND_PRINT(DFS),
5521 eeprom_ch_info[ch].flags,
5522 eeprom_ch_info[ch].max_power_avg,
5523 ((eeprom_ch_info[ch].
5524 flags & EEPROM_CHANNEL_IBSS)
5525 && !(eeprom_ch_info[ch].
5526 flags & EEPROM_CHANNEL_RADAR))
5527 ? "" : "not ");
5528
5529 /* Set the user_txpower_limit to the highest power
5530 * supported by any channel */
5531 if (eeprom_ch_info[ch].max_power_avg >
5532 priv->user_txpower_limit)
5533 priv->user_txpower_limit =
5534 eeprom_ch_info[ch].max_power_avg;
5535
5536 ch_info++;
5537 }
5538 }
5539
6440adb5 5540 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
b481de9c
ZY
5541 for (band = 6; band <= 7; band++) {
5542 int phymode;
5543 u8 fat_extension_chan;
5544
bb8c093b 5545 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5546 &eeprom_ch_info, &eeprom_ch_index);
5547
6440adb5 5548 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
b481de9c 5549 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
6440adb5 5550
b481de9c
ZY
5551 /* Loop through each band adding each of the channels */
5552 for (ch = 0; ch < eeprom_ch_count; ch++) {
5553
5554 if ((band == 6) &&
5555 ((eeprom_ch_index[ch] == 5) ||
5556 (eeprom_ch_index[ch] == 6) ||
5557 (eeprom_ch_index[ch] == 7)))
5558 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5559 else
5560 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5561
6440adb5 5562 /* Set up driver's info for lower half */
b481de9c
ZY
5563 iwl4965_set_fat_chan_info(priv, phymode,
5564 eeprom_ch_index[ch],
5565 &(eeprom_ch_info[ch]),
5566 fat_extension_chan);
5567
6440adb5 5568 /* Set up driver's info for upper half */
b481de9c
ZY
5569 iwl4965_set_fat_chan_info(priv, phymode,
5570 (eeprom_ch_index[ch] + 4),
5571 &(eeprom_ch_info[ch]),
5572 HT_IE_EXT_CHANNEL_BELOW);
5573 }
5574 }
5575
5576 return 0;
5577}
5578
5579/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5580 * sending probe req. This should be set long enough to hear probe responses
5581 * from more than one AP. */
5582#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5583#define IWL_ACTIVE_DWELL_TIME_52 (10)
5584
5585/* For faster active scanning, scan will move to the next channel if fewer than
5586 * PLCP_QUIET_THRESH packets are heard on this channel within
5587 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5588 * time if it's a quiet channel (nothing responded to our probe, and there's
5589 * no other traffic).
5590 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5591#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5592#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5593
5594/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5595 * Must be set longer than active dwell time.
5596 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5597#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5598#define IWL_PASSIVE_DWELL_TIME_52 (10)
5599#define IWL_PASSIVE_DWELL_BASE (100)
5600#define IWL_CHANNEL_TUNE_TIME 5
5601
bb8c093b 5602static inline u16 iwl4965_get_active_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c
ZY
5603{
5604 if (phymode == MODE_IEEE80211A)
5605 return IWL_ACTIVE_DWELL_TIME_52;
5606 else
5607 return IWL_ACTIVE_DWELL_TIME_24;
5608}
5609
bb8c093b 5610static u16 iwl4965_get_passive_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c 5611{
bb8c093b 5612 u16 active = iwl4965_get_active_dwell_time(priv, phymode);
b481de9c
ZY
5613 u16 passive = (phymode != MODE_IEEE80211A) ?
5614 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5615 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5616
bb8c093b 5617 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5618 /* If we're associated, we clamp the maximum passive
5619 * dwell time to be 98% of the beacon interval (minus
5620 * 2 * channel tune time) */
5621 passive = priv->beacon_int;
5622 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5623 passive = IWL_PASSIVE_DWELL_BASE;
5624 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5625 }
5626
5627 if (passive <= active)
5628 passive = active + 1;
5629
5630 return passive;
5631}
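/*
 * Worked example for the dwell-time logic above (units follow whatever
 * priv->beacon_int is expressed in).  Unassociated on 2.4 GHz:
 * passive = IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24
 *         = 100 + 20 = 120.
 * Associated with beacon_int = 100: the interval is not above
 * IWL_PASSIVE_DWELL_BASE, so passive starts at 100, then
 * 100 * 98 / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 98 - 10 = 88,
 * which is still longer than the 2.4 GHz active dwell of 20, so 88 is used.
 */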
5632
bb8c093b 5633static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
b481de9c 5634 u8 is_active, u8 direct_mask,
bb8c093b 5635 struct iwl4965_scan_channel *scan_ch)
b481de9c
ZY
5636{
5637 const struct ieee80211_channel *channels = NULL;
5638 const struct ieee80211_hw_mode *hw_mode;
bb8c093b 5639 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5640 u16 passive_dwell = 0;
5641 u16 active_dwell = 0;
5642 int added, i;
5643
bb8c093b 5644 hw_mode = iwl4965_get_hw_mode(priv, phymode);
b481de9c
ZY
5645 if (!hw_mode)
5646 return 0;
5647
5648 channels = hw_mode->channels;
5649
bb8c093b
CH
5650 active_dwell = iwl4965_get_active_dwell_time(priv, phymode);
5651 passive_dwell = iwl4965_get_passive_dwell_time(priv, phymode);
b481de9c
ZY
5652
5653 for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5654 if (channels[i].chan ==
5655 le16_to_cpu(priv->active_rxon.channel)) {
bb8c093b 5656 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5657 IWL_DEBUG_SCAN
5658 ("Skipping current channel %d\n",
5659 le16_to_cpu(priv->active_rxon.channel));
5660 continue;
5661 }
5662 } else if (priv->only_active_channel)
5663 continue;
5664
5665 scan_ch->channel = channels[i].chan;
5666
9fbab516
BC
5667 ch_info = iwl4965_get_channel_info(priv, phymode,
5668 scan_ch->channel);
b481de9c
ZY
5669 if (!is_channel_valid(ch_info)) {
5670 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5671 scan_ch->channel);
5672 continue;
5673 }
5674
5675 if (!is_active || is_channel_passive(ch_info) ||
5676 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5677 scan_ch->type = 0; /* passive */
5678 else
5679 scan_ch->type = 1; /* active */
5680
5681 if (scan_ch->type & 1)
5682 scan_ch->type |= (direct_mask << 1);
5683
5684 if (is_channel_narrow(ch_info))
5685 scan_ch->type |= (1 << 7);
5686
5687 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5688 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5689
9fbab516 5690 /* Set txpower levels to defaults */
b481de9c
ZY
5691 scan_ch->tpc.dsp_atten = 110;
5692 /* scan_pwr_info->tpc.dsp_atten; */
5693
5694 /*scan_pwr_info->tpc.tx_gain; */
5695 if (phymode == MODE_IEEE80211A)
5696 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5697 else {
5698 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5699 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516
BC
5700 * power level:
5701 * scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
b481de9c
ZY
5702 */
5703 }
5704
5705 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5706 scan_ch->channel,
5707 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5708 (scan_ch->type & 1) ?
5709 active_dwell : passive_dwell);
5710
5711 scan_ch++;
5712 added++;
5713 }
5714
 5715 IWL_DEBUG_SCAN("total channels to scan %d\n", added);
5716 return added;
5717}
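/*
 * Illustrative sketch (not part of this driver): how the scan_ch->type
 * field is assembled by the loop above.  demo_scan_type() is a
 * hypothetical helper that mirrors the same bit layout: bit 0 selects
 * active vs. passive, the direct-probe mask occupies the bits above it,
 * and bit 7 flags a narrow channel.
 */
#if 0
static u8 demo_scan_type(int active, u8 direct_mask, int narrow)
{
	u8 type = active ? 1 : 0;		/* bit 0: active scan */

	if (type & 1)
		type |= (direct_mask << 1);	/* direct-probe SSID bits */

	if (narrow)
		type |= (1 << 7);		/* bit 7: narrow channel */

	return type;
}
#endif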
5718
bb8c093b 5719static void iwl4965_reset_channel_flag(struct iwl4965_priv *priv)
b481de9c
ZY
5720{
5721 int i, j;
5722 for (i = 0; i < 3; i++) {
5723 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5724 for (j = 0; j < hw_mode->num_channels; j++)
5725 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5726 }
5727}
5728
bb8c093b 5729static void iwl4965_init_hw_rates(struct iwl4965_priv *priv,
b481de9c
ZY
5730 struct ieee80211_rate *rates)
5731{
5732 int i;
5733
5734 for (i = 0; i < IWL_RATE_COUNT; i++) {
bb8c093b 5735 rates[i].rate = iwl4965_rates[i].ieee * 5;
b481de9c
ZY
5736 rates[i].val = i; /* Rate scaling will work on indexes */
5737 rates[i].val2 = i;
5738 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5739 /* Only OFDM have the bits-per-symbol set */
5740 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5741 rates[i].flags |= IEEE80211_RATE_OFDM;
5742 else {
5743 /*
5744 * If CCK 1M then set rate flag to CCK else CCK_2
5745 * which is CCK | PREAMBLE2
5746 */
bb8c093b 5747 rates[i].flags |= (iwl4965_rates[i].plcp == 10) ?
b481de9c
ZY
5748 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5749 }
5750
5751 /* Set up which ones are basic rates... */
5752 if (IWL_BASIC_RATES_MASK & (1 << i))
5753 rates[i].flags |= IEEE80211_RATE_BASIC;
5754 }
b481de9c
ZY
5755}
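/*
 * Worked example for the "* 5" conversion above, assuming the .ieee field
 * of iwl4965_rates holds the rate in IEEE 500 kbps units and mac80211
 * expects ieee80211_rate.rate in 100 kbps units: an .ieee value of 2
 * (1 Mbps) becomes 10, 22 (11 Mbps) becomes 110, and 108 (54 Mbps)
 * becomes 540.
 */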
5756
5757/**
bb8c093b 5758 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
b481de9c 5759 */
bb8c093b 5760static int iwl4965_init_geos(struct iwl4965_priv *priv)
b481de9c 5761{
bb8c093b 5762 struct iwl4965_channel_info *ch;
b481de9c
ZY
5763 struct ieee80211_hw_mode *modes;
5764 struct ieee80211_channel *channels;
5765 struct ieee80211_channel *geo_ch;
5766 struct ieee80211_rate *rates;
5767 int i = 0;
5768 enum {
5769 A = 0,
5770 B = 1,
5771 G = 2,
b481de9c 5772 };
326eeee8 5773 int mode_count = 3;
b481de9c
ZY
5774
5775 if (priv->modes) {
5776 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5777 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5778 return 0;
5779 }
5780
5781 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5782 GFP_KERNEL);
5783 if (!modes)
5784 return -ENOMEM;
5785
5786 channels = kzalloc(sizeof(struct ieee80211_channel) *
5787 priv->channel_count, GFP_KERNEL);
5788 if (!channels) {
5789 kfree(modes);
5790 return -ENOMEM;
5791 }
5792
5793 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5794 GFP_KERNEL);
5795 if (!rates) {
5796 kfree(modes);
5797 kfree(channels);
5798 return -ENOMEM;
5799 }
5800
5801 /* 0 = 802.11a
5802 * 1 = 802.11b
5803 * 2 = 802.11g
5804 */
5805
5806 /* 5.2GHz channels start after the 2.4GHz channels */
5807 modes[A].mode = MODE_IEEE80211A;
bb8c093b 5808 modes[A].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)];
b481de9c
ZY
5809 modes[A].rates = rates;
5810 modes[A].num_rates = 8; /* just OFDM */
5811 modes[A].rates = &rates[4];
5812 modes[A].num_channels = 0;
326eeee8
RR
5813#ifdef CONFIG_IWL4965_HT
5814 iwl4965_init_ht_hw_capab(&modes[A].ht_info, MODE_IEEE80211A);
5815#endif
b481de9c
ZY
5816
5817 modes[B].mode = MODE_IEEE80211B;
5818 modes[B].channels = channels;
5819 modes[B].rates = rates;
5820 modes[B].num_rates = 4; /* just CCK */
5821 modes[B].num_channels = 0;
5822
5823 modes[G].mode = MODE_IEEE80211G;
5824 modes[G].channels = channels;
5825 modes[G].rates = rates;
5826 modes[G].num_rates = 12; /* OFDM & CCK */
5827 modes[G].num_channels = 0;
326eeee8
RR
5828#ifdef CONFIG_IWL4965_HT
5829 iwl4965_init_ht_hw_capab(&modes[G].ht_info, MODE_IEEE80211G);
5830#endif
b481de9c
ZY
5831
5832 priv->ieee_channels = channels;
5833 priv->ieee_rates = rates;
5834
bb8c093b 5835 iwl4965_init_hw_rates(priv, rates);
b481de9c
ZY
5836
5837 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5838 ch = &priv->channel_info[i];
5839
5840 if (!is_channel_valid(ch)) {
5841 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5842 "skipping.\n",
5843 ch->channel, is_channel_a_band(ch) ?
5844 "5.2" : "2.4");
5845 continue;
5846 }
5847
5848 if (is_channel_a_band(ch)) {
5849 geo_ch = &modes[A].channels[modes[A].num_channels++];
b481de9c
ZY
5850 } else {
5851 geo_ch = &modes[B].channels[modes[B].num_channels++];
5852 modes[G].num_channels++;
b481de9c
ZY
5853 }
5854
5855 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5856 geo_ch->chan = ch->channel;
5857 geo_ch->power_level = ch->max_power_avg;
5858 geo_ch->antenna_max = 0xff;
5859
5860 if (is_channel_valid(ch)) {
5861 geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5862 if (ch->flags & EEPROM_CHANNEL_IBSS)
5863 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5864
5865 if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5866 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5867
5868 if (ch->flags & EEPROM_CHANNEL_RADAR)
5869 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5870
5871 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5872 priv->max_channel_txpower_limit =
5873 ch->max_power_avg;
5874 }
5875
5876 geo_ch->val = geo_ch->flag;
5877 }
5878
5879 if ((modes[A].num_channels == 0) && priv->is_abg) {
5880 printk(KERN_INFO DRV_NAME
5881 ": Incorrectly detected BG card as ABG. Please send "
5882 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5883 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5884 priv->is_abg = 0;
5885 }
5886
5887 printk(KERN_INFO DRV_NAME
5888 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5889 modes[G].num_channels, modes[A].num_channels);
5890
5891 /*
 5892 * NOTE: We register these in order of preference -- the
 5893 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
 5894 * a phymode based on rates or AP capabilities, but seems to
 5895 * configure it purely on whether the channel being configured
 5896 * is supported by a mode -- and the first match is taken
5897 */
5898
5899 if (modes[G].num_channels)
5900 ieee80211_register_hwmode(priv->hw, &modes[G]);
5901 if (modes[B].num_channels)
5902 ieee80211_register_hwmode(priv->hw, &modes[B]);
5903 if (modes[A].num_channels)
5904 ieee80211_register_hwmode(priv->hw, &modes[A]);
5905
5906 priv->modes = modes;
5907 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5908
5909 return 0;
5910}
5911
5912/******************************************************************************
5913 *
5914 * uCode download functions
5915 *
5916 ******************************************************************************/
5917
bb8c093b 5918static void iwl4965_dealloc_ucode_pci(struct iwl4965_priv *priv)
b481de9c
ZY
5919{
5920 if (priv->ucode_code.v_addr != NULL) {
5921 pci_free_consistent(priv->pci_dev,
5922 priv->ucode_code.len,
5923 priv->ucode_code.v_addr,
5924 priv->ucode_code.p_addr);
5925 priv->ucode_code.v_addr = NULL;
5926 }
5927 if (priv->ucode_data.v_addr != NULL) {
5928 pci_free_consistent(priv->pci_dev,
5929 priv->ucode_data.len,
5930 priv->ucode_data.v_addr,
5931 priv->ucode_data.p_addr);
5932 priv->ucode_data.v_addr = NULL;
5933 }
5934 if (priv->ucode_data_backup.v_addr != NULL) {
5935 pci_free_consistent(priv->pci_dev,
5936 priv->ucode_data_backup.len,
5937 priv->ucode_data_backup.v_addr,
5938 priv->ucode_data_backup.p_addr);
5939 priv->ucode_data_backup.v_addr = NULL;
5940 }
5941 if (priv->ucode_init.v_addr != NULL) {
5942 pci_free_consistent(priv->pci_dev,
5943 priv->ucode_init.len,
5944 priv->ucode_init.v_addr,
5945 priv->ucode_init.p_addr);
5946 priv->ucode_init.v_addr = NULL;
5947 }
5948 if (priv->ucode_init_data.v_addr != NULL) {
5949 pci_free_consistent(priv->pci_dev,
5950 priv->ucode_init_data.len,
5951 priv->ucode_init_data.v_addr,
5952 priv->ucode_init_data.p_addr);
5953 priv->ucode_init_data.v_addr = NULL;
5954 }
5955 if (priv->ucode_boot.v_addr != NULL) {
5956 pci_free_consistent(priv->pci_dev,
5957 priv->ucode_boot.len,
5958 priv->ucode_boot.v_addr,
5959 priv->ucode_boot.p_addr);
5960 priv->ucode_boot.v_addr = NULL;
5961 }
5962}
5963
5964/**
bb8c093b 5965 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
5966 * looking at all data.
5967 */
4fd1f841 5968static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 *image,
9fbab516 5969 u32 len)
b481de9c
ZY
5970{
5971 u32 val;
5972 u32 save_len = len;
5973 int rc = 0;
5974 u32 errcnt;
5975
5976 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5977
bb8c093b 5978 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
5979 if (rc)
5980 return rc;
5981
bb8c093b 5982 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
b481de9c
ZY
5983
5984 errcnt = 0;
5985 for (; len > 0; len -= sizeof(u32), image++) {
5986 /* read data comes through single port, auto-incr addr */
5987 /* NOTE: Use the debugless read so we don't flood kernel log
5988 * if IWL_DL_IO is set */
bb8c093b 5989 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
5990 if (val != le32_to_cpu(*image)) {
5991 IWL_ERROR("uCode INST section is invalid at "
5992 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5993 save_len - len, val, le32_to_cpu(*image));
5994 rc = -EIO;
5995 errcnt++;
5996 if (errcnt >= 20)
5997 break;
5998 }
5999 }
6000
bb8c093b 6001 iwl4965_release_nic_access(priv);
b481de9c
ZY
6002
6003 if (!errcnt)
6004 IWL_DEBUG_INFO
6005 ("ucode image in INSTRUCTION memory is good\n");
6006
6007 return rc;
6008}
6009
6010
6011/**
bb8c093b 6012 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
6013 * using sample data 100 bytes apart. If these sample points are good,
6014 * it's a pretty good bet that everything between them is good, too.
6015 */
bb8c093b 6016static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
6017{
6018 u32 val;
6019 int rc = 0;
6020 u32 errcnt = 0;
6021 u32 i;
6022
6023 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
6024
bb8c093b 6025 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6026 if (rc)
6027 return rc;
6028
6029 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
6030 /* read data comes through single port, auto-incr addr */
6031 /* NOTE: Use the debugless read so we don't flood kernel log
6032 * if IWL_DL_IO is set */
bb8c093b 6033 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR,
b481de9c 6034 i + RTC_INST_LOWER_BOUND);
bb8c093b 6035 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
6036 if (val != le32_to_cpu(*image)) {
6037#if 0 /* Enable this if you want to see details */
6038 IWL_ERROR("uCode INST section is invalid at "
6039 "offset 0x%x, is 0x%x, s/b 0x%x\n",
6040 i, val, *image);
6041#endif
6042 rc = -EIO;
6043 errcnt++;
6044 if (errcnt >= 3)
6045 break;
6046 }
6047 }
6048
bb8c093b 6049 iwl4965_release_nic_access(priv);
b481de9c
ZY
6050
6051 return rc;
6052}
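/*
 * Illustrative sketch (not part of this driver): the sparse check above
 * compares one 32-bit word every 100 bytes instead of the whole image.
 * demo_sparse_mismatches() is a hypothetical host-side equivalent that
 * counts mismatches between two in-memory copies using the same stride.
 */
#if 0
static u32 demo_sparse_mismatches(const __le32 *host, const __le32 *card,
				  u32 len)
{
	u32 i, errcnt = 0;

	/* Step 100 bytes (25 words) at a time, like the loop above. */
	for (i = 0; i < len; i += 100)
		if (host[i / sizeof(u32)] != card[i / sizeof(u32)])
			errcnt++;

	return errcnt;
}
#endif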
6053
6054
6055/**
bb8c093b 6056 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
6057 * and verify its contents
6058 */
bb8c093b 6059static int iwl4965_verify_ucode(struct iwl4965_priv *priv)
b481de9c
ZY
6060{
6061 __le32 *image;
6062 u32 len;
6063 int rc = 0;
6064
6065 /* Try bootstrap */
6066 image = (__le32 *)priv->ucode_boot.v_addr;
6067 len = priv->ucode_boot.len;
bb8c093b 6068 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6069 if (rc == 0) {
6070 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
6071 return 0;
6072 }
6073
6074 /* Try initialize */
6075 image = (__le32 *)priv->ucode_init.v_addr;
6076 len = priv->ucode_init.len;
bb8c093b 6077 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6078 if (rc == 0) {
6079 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
6080 return 0;
6081 }
6082
6083 /* Try runtime/protocol */
6084 image = (__le32 *)priv->ucode_code.v_addr;
6085 len = priv->ucode_code.len;
bb8c093b 6086 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6087 if (rc == 0) {
6088 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
6089 return 0;
6090 }
6091
6092 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
6093
9fbab516
BC
6094 /* Since nothing seems to match, show first several data entries in
6095 * instruction SRAM, so maybe visual inspection will give a clue.
6096 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
6097 image = (__le32 *)priv->ucode_boot.v_addr;
6098 len = priv->ucode_boot.len;
bb8c093b 6099 rc = iwl4965_verify_inst_full(priv, image, len);
b481de9c
ZY
6100
6101 return rc;
6102}
6103
6104
6105/* check contents of special bootstrap uCode SRAM */
bb8c093b 6106static int iwl4965_verify_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6107{
6108 __le32 *image = priv->ucode_boot.v_addr;
6109 u32 len = priv->ucode_boot.len;
6110 u32 reg;
6111 u32 val;
6112
6113 IWL_DEBUG_INFO("Begin verify bsm\n");
6114
6115 /* verify BSM SRAM contents */
bb8c093b 6116 val = iwl4965_read_prph(priv, BSM_WR_DWCOUNT_REG);
b481de9c
ZY
6117 for (reg = BSM_SRAM_LOWER_BOUND;
6118 reg < BSM_SRAM_LOWER_BOUND + len;
6119 reg += sizeof(u32), image ++) {
bb8c093b 6120 val = iwl4965_read_prph(priv, reg);
b481de9c
ZY
6121 if (val != le32_to_cpu(*image)) {
6122 IWL_ERROR("BSM uCode verification failed at "
6123 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
6124 BSM_SRAM_LOWER_BOUND,
6125 reg - BSM_SRAM_LOWER_BOUND, len,
6126 val, le32_to_cpu(*image));
6127 return -EIO;
6128 }
6129 }
6130
6131 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
6132
6133 return 0;
6134}
6135
6136/**
bb8c093b 6137 * iwl4965_load_bsm - Load bootstrap instructions
b481de9c
ZY
6138 *
6139 * BSM operation:
6140 *
6141 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
6142 * in special SRAM that does not power down during RFKILL. When powering back
6143 * up after power-saving sleeps (or during initial uCode load), the BSM loads
6144 * the bootstrap program into the on-board processor, and starts it.
6145 *
6146 * The bootstrap program loads (via DMA) instructions and data for a new
6147 * program from host DRAM locations indicated by the host driver in the
6148 * BSM_DRAM_* registers. Once the new program is loaded, it starts
6149 * automatically.
6150 *
6151 * When initializing the NIC, the host driver points the BSM to the
6152 * "initialize" uCode image. This uCode sets up some internal data, then
6153 * notifies host via "initialize alive" that it is complete.
6154 *
6155 * The host then replaces the BSM_DRAM_* pointer values to point to the
6156 * normal runtime uCode instructions and a backup uCode data cache buffer
6157 * (filled initially with starting data values for the on-board processor),
6158 * then triggers the "initialize" uCode to load and launch the runtime uCode,
6159 * which begins normal operation.
6160 *
6161 * When doing a power-save shutdown, runtime uCode saves data SRAM into
6162 * the backup data cache in DRAM before SRAM is powered down.
6163 *
6164 * When powering back up, the BSM loads the bootstrap program. This reloads
6165 * the runtime uCode instructions and the backup data cache into SRAM,
6166 * and re-launches the runtime uCode from where it left off.
6167 */
bb8c093b 6168static int iwl4965_load_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6169{
6170 __le32 *image = priv->ucode_boot.v_addr;
6171 u32 len = priv->ucode_boot.len;
6172 dma_addr_t pinst;
6173 dma_addr_t pdata;
6174 u32 inst_len;
6175 u32 data_len;
6176 int rc;
6177 int i;
6178 u32 done;
6179 u32 reg_offset;
6180
6181 IWL_DEBUG_INFO("Begin load bsm\n");
6182
6183 /* make sure bootstrap program is no larger than BSM's SRAM size */
6184 if (len > IWL_MAX_BSM_SIZE)
6185 return -EINVAL;
6186
6187 /* Tell bootstrap uCode where to find the "Initialize" uCode
9fbab516 6188 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
bb8c093b 6189 * NOTE: iwl4965_initialize_alive_start() will replace these values,
b481de9c
ZY
6190 * after the "initialize" uCode has run, to point to
6191 * runtime/protocol instructions and backup data cache. */
6192 pinst = priv->ucode_init.p_addr >> 4;
6193 pdata = priv->ucode_init_data.p_addr >> 4;
6194 inst_len = priv->ucode_init.len;
6195 data_len = priv->ucode_init_data.len;
6196
bb8c093b 6197 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6198 if (rc)
6199 return rc;
6200
bb8c093b
CH
6201 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6202 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6203 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
6204 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
b481de9c
ZY
6205
6206 /* Fill BSM memory with bootstrap instructions */
6207 for (reg_offset = BSM_SRAM_LOWER_BOUND;
6208 reg_offset < BSM_SRAM_LOWER_BOUND + len;
6209 reg_offset += sizeof(u32), image++)
bb8c093b 6210 _iwl4965_write_prph(priv, reg_offset,
b481de9c
ZY
6211 le32_to_cpu(*image));
6212
bb8c093b 6213 rc = iwl4965_verify_bsm(priv);
b481de9c 6214 if (rc) {
bb8c093b 6215 iwl4965_release_nic_access(priv);
b481de9c
ZY
6216 return rc;
6217 }
6218
6219 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
bb8c093b
CH
6220 iwl4965_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
6221 iwl4965_write_prph(priv, BSM_WR_MEM_DST_REG,
b481de9c 6222 RTC_INST_LOWER_BOUND);
bb8c093b 6223 iwl4965_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
b481de9c
ZY
6224
6225 /* Load bootstrap code into instruction SRAM now,
6226 * to prepare to load "initialize" uCode */
bb8c093b 6227 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6228 BSM_WR_CTRL_REG_BIT_START);
6229
6230 /* Wait for load of bootstrap uCode to finish */
6231 for (i = 0; i < 100; i++) {
bb8c093b 6232 done = iwl4965_read_prph(priv, BSM_WR_CTRL_REG);
b481de9c
ZY
6233 if (!(done & BSM_WR_CTRL_REG_BIT_START))
6234 break;
6235 udelay(10);
6236 }
6237 if (i < 100)
6238 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
6239 else {
6240 IWL_ERROR("BSM write did not complete!\n");
6241 return -EIO;
6242 }
6243
6244 /* Enable future boot loads whenever power management unit triggers it
6245 * (e.g. when powering back up after power-save shutdown) */
bb8c093b 6246 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6247 BSM_WR_CTRL_REG_BIT_START_EN);
6248
bb8c093b 6249 iwl4965_release_nic_access(priv);
b481de9c
ZY
6250
6251 return 0;
6252}
6253
bb8c093b 6254static void iwl4965_nic_start(struct iwl4965_priv *priv)
b481de9c
ZY
6255{
6256 /* Remove all resets to allow NIC to operate */
bb8c093b 6257 iwl4965_write32(priv, CSR_RESET, 0);
b481de9c
ZY
6258}
6259
90e759d1
TW
6260static int iwl4965_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
6261{
6262 desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
6263 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
6264}
6265
b481de9c 6266/**
bb8c093b 6267 * iwl4965_read_ucode - Read uCode images from disk file.
b481de9c
ZY
6268 *
6269 * Copy into buffers for card to fetch via bus-mastering
6270 */
bb8c093b 6271static int iwl4965_read_ucode(struct iwl4965_priv *priv)
b481de9c 6272{
bb8c093b 6273 struct iwl4965_ucode *ucode;
90e759d1 6274 int ret;
b481de9c
ZY
6275 const struct firmware *ucode_raw;
6276 const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode";
6277 u8 *src;
6278 size_t len;
6279 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
6280
6281 /* Ask kernel firmware_class module to get the boot firmware off disk.
6282 * request_firmware() is synchronous, file is in memory on return. */
90e759d1
TW
6283 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
6284 if (ret < 0) {
6285 IWL_ERROR("%s firmware file req failed: Reason %d\n",
6286 name, ret);
b481de9c
ZY
6287 goto error;
6288 }
6289
6290 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
6291 name, ucode_raw->size);
6292
6293 /* Make sure that we got at least our header! */
6294 if (ucode_raw->size < sizeof(*ucode)) {
6295 IWL_ERROR("File size way too small!\n");
90e759d1 6296 ret = -EINVAL;
b481de9c
ZY
6297 goto err_release;
6298 }
6299
6300 /* Data from ucode file: header followed by uCode images */
6301 ucode = (void *)ucode_raw->data;
6302
6303 ver = le32_to_cpu(ucode->ver);
6304 inst_size = le32_to_cpu(ucode->inst_size);
6305 data_size = le32_to_cpu(ucode->data_size);
6306 init_size = le32_to_cpu(ucode->init_size);
6307 init_data_size = le32_to_cpu(ucode->init_data_size);
6308 boot_size = le32_to_cpu(ucode->boot_size);
6309
6310 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
6311 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
6312 inst_size);
6313 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
6314 data_size);
6315 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
6316 init_size);
6317 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
6318 init_data_size);
6319 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
6320 boot_size);
6321
6322 /* Verify size of file vs. image size info in file's header */
6323 if (ucode_raw->size < sizeof(*ucode) +
6324 inst_size + data_size + init_size +
6325 init_data_size + boot_size) {
6326
6327 IWL_DEBUG_INFO("uCode file size %d too small\n",
6328 (int)ucode_raw->size);
90e759d1 6329 ret = -EINVAL;
b481de9c
ZY
6330 goto err_release;
6331 }
6332
6333 /* Verify that uCode images will fit in card's SRAM */
6334 if (inst_size > IWL_MAX_INST_SIZE) {
90e759d1
TW
6335 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
6336 inst_size);
6337 ret = -EINVAL;
b481de9c
ZY
6338 goto err_release;
6339 }
6340
6341 if (data_size > IWL_MAX_DATA_SIZE) {
90e759d1
TW
6342 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
6343 data_size);
6344 ret = -EINVAL;
b481de9c
ZY
6345 goto err_release;
6346 }
6347 if (init_size > IWL_MAX_INST_SIZE) {
6348 IWL_DEBUG_INFO
90e759d1
TW
6349 ("uCode init instr len %d too large to fit in\n",
6350 init_size);
6351 ret = -EINVAL;
b481de9c
ZY
6352 goto err_release;
6353 }
6354 if (init_data_size > IWL_MAX_DATA_SIZE) {
6355 IWL_DEBUG_INFO
90e759d1
TW
6356 ("uCode init data len %d too large to fit in\n",
6357 init_data_size);
6358 ret = -EINVAL;
b481de9c
ZY
6359 goto err_release;
6360 }
6361 if (boot_size > IWL_MAX_BSM_SIZE) {
6362 IWL_DEBUG_INFO
90e759d1
TW
6363 ("uCode boot instr len %d too large to fit in\n",
6364 boot_size);
6365 ret = -EINVAL;
b481de9c
ZY
6366 goto err_release;
6367 }
6368
6369 /* Allocate ucode buffers for card's bus-master loading ... */
6370
6371 /* Runtime instructions and 2 copies of data:
6372 * 1) unmodified from disk
6373 * 2) backup cache for save/restore during power-downs */
6374 priv->ucode_code.len = inst_size;
90e759d1 6375 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
6376
6377 priv->ucode_data.len = data_size;
90e759d1 6378 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
6379
6380 priv->ucode_data_backup.len = data_size;
90e759d1 6381 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c
ZY
6382
6383 /* Initialization instructions and data */
90e759d1
TW
6384 if (init_size && init_data_size) {
6385 priv->ucode_init.len = init_size;
6386 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
6387
6388 priv->ucode_init_data.len = init_data_size;
6389 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
6390
6391 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
6392 goto err_pci_alloc;
6393 }
b481de9c
ZY
6394
6395 /* Bootstrap (instructions only, no data) */
90e759d1
TW
6396 if (boot_size) {
6397 priv->ucode_boot.len = boot_size;
6398 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 6399
90e759d1
TW
6400 if (!priv->ucode_boot.v_addr)
6401 goto err_pci_alloc;
6402 }
b481de9c
ZY
6403
6404 /* Copy images into buffers for card's bus-master reads ... */
6405
6406 /* Runtime instructions (first block of data in file) */
6407 src = &ucode->data[0];
6408 len = priv->ucode_code.len;
90e759d1 6409 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
6410 memcpy(priv->ucode_code.v_addr, src, len);
6411 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
6412 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
6413
6414 /* Runtime data (2nd block)
bb8c093b 6415 * NOTE: Copy into backup buffer will be done in iwl4965_up() */
b481de9c
ZY
6416 src = &ucode->data[inst_size];
6417 len = priv->ucode_data.len;
90e759d1 6418 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
6419 memcpy(priv->ucode_data.v_addr, src, len);
6420 memcpy(priv->ucode_data_backup.v_addr, src, len);
6421
6422 /* Initialization instructions (3rd block) */
6423 if (init_size) {
6424 src = &ucode->data[inst_size + data_size];
6425 len = priv->ucode_init.len;
90e759d1
TW
6426 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
6427 len);
b481de9c
ZY
6428 memcpy(priv->ucode_init.v_addr, src, len);
6429 }
6430
6431 /* Initialization data (4th block) */
6432 if (init_data_size) {
6433 src = &ucode->data[inst_size + data_size + init_size];
6434 len = priv->ucode_init_data.len;
90e759d1
TW
6435 IWL_DEBUG_INFO("Copying (but not loading) init data len %Zd\n",
6436 len);
b481de9c
ZY
6437 memcpy(priv->ucode_init_data.v_addr, src, len);
6438 }
6439
6440 /* Bootstrap instructions (5th block) */
6441 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
6442 len = priv->ucode_boot.len;
90e759d1 6443 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %Zd\n", len);
b481de9c
ZY
6444 memcpy(priv->ucode_boot.v_addr, src, len);
6445
 6446 /* We have our copies now, allow the OS to release its copies */
6447 release_firmware(ucode_raw);
6448 return 0;
6449
6450 err_pci_alloc:
6451 IWL_ERROR("failed to allocate pci memory\n");
90e759d1 6452 ret = -ENOMEM;
bb8c093b 6453 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
6454
6455 err_release:
6456 release_firmware(ucode_raw);
6457
6458 error:
90e759d1 6459 return ret;
b481de9c
ZY
6460}
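/*
 * Illustrative sketch (not part of this driver): the on-disk layout parsed
 * above is a fixed header followed by five images packed back to back, so
 * each image's offset into ucode->data[] is simply the sum of the sizes
 * that precede it.  demo_ucode_offsets() is a hypothetical helper spelling
 * out those offsets.
 */
#if 0
static void demo_ucode_offsets(u32 inst_size, u32 data_size, u32 init_size,
			       u32 init_data_size,
			       u32 *inst_off, u32 *data_off, u32 *init_off,
			       u32 *init_data_off, u32 *boot_off)
{
	*inst_off = 0;				/* runtime instructions */
	*data_off = inst_size;			/* runtime data */
	*init_off = inst_size + data_size;	/* "initialize" instructions */
	*init_data_off = inst_size + data_size + init_size;
	*boot_off = inst_size + data_size + init_size + init_data_size;
}
#endif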
6461
6462
6463/**
bb8c093b 6464 * iwl4965_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
6465 *
6466 * Tell initialization uCode where to find runtime uCode.
6467 *
6468 * BSM registers initially contain pointers to initialization uCode.
6469 * We need to replace them to load runtime uCode inst and data,
6470 * and to save runtime data when powering down.
6471 */
bb8c093b 6472static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv)
b481de9c
ZY
6473{
6474 dma_addr_t pinst;
6475 dma_addr_t pdata;
6476 int rc = 0;
6477 unsigned long flags;
6478
6479 /* bits 35:4 for 4965 */
6480 pinst = priv->ucode_code.p_addr >> 4;
6481 pdata = priv->ucode_data_backup.p_addr >> 4;
6482
6483 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 6484 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6485 if (rc) {
6486 spin_unlock_irqrestore(&priv->lock, flags);
6487 return rc;
6488 }
6489
6490 /* Tell bootstrap uCode where to find image to load */
bb8c093b
CH
6491 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6492 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6493 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
6494 priv->ucode_data.len);
6495
6496 /* Inst bytecount must be set up last; bit 31 signals uCode
6497 * that all new ptr/size info is in place */
bb8c093b 6498 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
6499 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6500
bb8c093b 6501 iwl4965_release_nic_access(priv);
b481de9c
ZY
6502
6503 spin_unlock_irqrestore(&priv->lock, flags);
6504
6505 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6506
6507 return rc;
6508}
6509
6510/**
bb8c093b 6511 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
6512 *
6513 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6514 *
6515 * The 4965 "initialize" ALIVE reply contains calibration data for:
6516 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6517 * (3945 does not contain this data).
6518 *
6519 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6520*/
bb8c093b 6521static void iwl4965_init_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6522{
6523 /* Check alive response for "valid" sign from uCode */
6524 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6525 /* We had an error bringing up the hardware, so take it
6526 * all the way back down so we can try again */
6527 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6528 goto restart;
6529 }
6530
6531 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6532 * This is a paranoid check, because we would not have gotten the
6533 * "initialize" alive if code weren't properly loaded. */
bb8c093b 6534 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6535 /* Runtime instruction load was bad;
6536 * take it all the way back down so we can try again */
6537 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6538 goto restart;
6539 }
6540
6541 /* Calculate temperature */
6542 priv->temperature = iwl4965_get_temperature(priv);
6543
6544 /* Send pointers to protocol/runtime uCode image ... init code will
6545 * load and launch runtime uCode, which will send us another "Alive"
6546 * notification. */
6547 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 6548 if (iwl4965_set_ucode_ptrs(priv)) {
b481de9c
ZY
6549 /* Runtime instruction load won't happen;
6550 * take it all the way back down so we can try again */
6551 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6552 goto restart;
6553 }
6554 return;
6555
6556 restart:
6557 queue_work(priv->workqueue, &priv->restart);
6558}
6559
6560
6561/**
bb8c093b 6562 * iwl4965_alive_start - called after REPLY_ALIVE notification received
b481de9c 6563 * from protocol/runtime uCode (initialization uCode's
bb8c093b 6564 * Alive gets handled by iwl4965_init_alive_start()).
b481de9c 6565 */
bb8c093b 6566static void iwl4965_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6567{
6568 int rc = 0;
6569
6570 IWL_DEBUG_INFO("Runtime Alive received.\n");
6571
6572 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
6573 /* We had an error bringing up the hardware, so take it
6574 * all the way back down so we can try again */
6575 IWL_DEBUG_INFO("Alive failed.\n");
6576 goto restart;
6577 }
6578
6579 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
6580 * This is a paranoid check, because we would not have gotten the
6581 * "runtime" alive if code weren't properly loaded. */
bb8c093b 6582 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6583 /* Runtime instruction load was bad;
6584 * take it all the way back down so we can try again */
6585 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
6586 goto restart;
6587 }
6588
bb8c093b 6589 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6590
6591 rc = iwl4965_alive_notify(priv);
6592 if (rc) {
6593 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
6594 rc);
6595 goto restart;
6596 }
6597
9fbab516 6598 /* After the ALIVE response, we can send host commands to 4965 uCode */
b481de9c
ZY
6599 set_bit(STATUS_ALIVE, &priv->status);
6600
6601 /* Clear out the uCode error bit if it is set */
6602 clear_bit(STATUS_FW_ERROR, &priv->status);
6603
bb8c093b 6604 rc = iwl4965_init_channel_map(priv);
b481de9c
ZY
6605 if (rc) {
6606 IWL_ERROR("initializing regulatory failed: %d\n", rc);
6607 return;
6608 }
6609
bb8c093b 6610 iwl4965_init_geos(priv);
5a66926a 6611 iwl4965_reset_channel_flag(priv);
b481de9c 6612
bb8c093b 6613 if (iwl4965_is_rfkill(priv))
b481de9c
ZY
6614 return;
6615
5a66926a 6616 ieee80211_start_queues(priv->hw);
b481de9c
ZY
6617
6618 priv->active_rate = priv->rates_mask;
6619 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
6620
bb8c093b 6621 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 6622
bb8c093b
CH
6623 if (iwl4965_is_associated(priv)) {
6624 struct iwl4965_rxon_cmd *active_rxon =
6625 (struct iwl4965_rxon_cmd *)(&priv->active_rxon);
b481de9c
ZY
6626
6627 memcpy(&priv->staging_rxon, &priv->active_rxon,
6628 sizeof(priv->staging_rxon));
6629 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6630 } else {
6631 /* Initialize our rx_config data */
bb8c093b 6632 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
6633 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
6634 }
6635
9fbab516 6636 /* Configure Bluetooth device coexistence support */
bb8c093b 6637 iwl4965_send_bt_config(priv);
b481de9c
ZY
6638
6639 /* Configure the adapter for unassociated operation */
bb8c093b 6640 iwl4965_commit_rxon(priv);
b481de9c
ZY
6641
6642 /* At this point, the NIC is initialized and operational */
6643 priv->notif_missed_beacons = 0;
6644 set_bit(STATUS_READY, &priv->status);
6645
6646 iwl4965_rf_kill_ct_config(priv);
5a66926a 6647
b481de9c 6648 IWL_DEBUG_INFO("ALIVE processing complete.\n");
5a66926a 6649 wake_up_interruptible(&priv->wait_command_queue);
b481de9c
ZY
6650
6651 if (priv->error_recovering)
bb8c093b 6652 iwl4965_error_recovery(priv);
b481de9c
ZY
6653
6654 return;
6655
6656 restart:
6657 queue_work(priv->workqueue, &priv->restart);
6658}
6659
bb8c093b 6660static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv);
b481de9c 6661
bb8c093b 6662static void __iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6663{
6664 unsigned long flags;
6665 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
6666 struct ieee80211_conf *conf = NULL;
6667
6668 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
6669
6670 conf = ieee80211_get_hw_conf(priv->hw);
6671
6672 if (!exit_pending)
6673 set_bit(STATUS_EXIT_PENDING, &priv->status);
6674
bb8c093b 6675 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6676
6677 /* Unblock any waiting calls */
6678 wake_up_interruptible_all(&priv->wait_command_queue);
6679
b481de9c
ZY
6680 /* Wipe out the EXIT_PENDING status bit if we are not actually
6681 * exiting the module */
6682 if (!exit_pending)
6683 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6684
6685 /* stop and reset the on-board processor */
bb8c093b 6686 iwl4965_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
6687
6688 /* tell the device to stop sending interrupts */
bb8c093b 6689 iwl4965_disable_interrupts(priv);
b481de9c
ZY
6690
6691 if (priv->mac80211_registered)
6692 ieee80211_stop_queues(priv->hw);
6693
bb8c093b 6694 /* If we have not previously called iwl4965_init() then
b481de9c 6695 * clear all bits but the RF Kill and SUSPEND bits and return */
bb8c093b 6696 if (!iwl4965_is_init(priv)) {
b481de9c
ZY
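 /* Rebuild priv->status from scratch, preserving only the RF-kill and
  * suspend bits: each test_bit() result (0 or 1) is shifted back to
  * its bit position and OR'd together. */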
6697 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6698 STATUS_RF_KILL_HW |
6699 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6700 STATUS_RF_KILL_SW |
6701 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6702 STATUS_IN_SUSPEND;
6703 goto exit;
6704 }
6705
6706 /* ...otherwise clear out all the status bits but the RF Kill and
6707 * SUSPEND bits and continue taking the NIC down. */
6708 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6709 STATUS_RF_KILL_HW |
6710 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6711 STATUS_RF_KILL_SW |
6712 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6713 STATUS_IN_SUSPEND |
6714 test_bit(STATUS_FW_ERROR, &priv->status) <<
6715 STATUS_FW_ERROR;
6716
6717 spin_lock_irqsave(&priv->lock, flags);
9fbab516
BC
6718 iwl4965_clear_bit(priv, CSR_GP_CNTRL,
6719 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
6720 spin_unlock_irqrestore(&priv->lock, flags);
6721
bb8c093b
CH
6722 iwl4965_hw_txq_ctx_stop(priv);
6723 iwl4965_hw_rxq_stop(priv);
b481de9c
ZY
6724
6725 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
6726 if (!iwl4965_grab_nic_access(priv)) {
6727 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 6728 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 6729 iwl4965_release_nic_access(priv);
b481de9c
ZY
6730 }
6731 spin_unlock_irqrestore(&priv->lock, flags);
6732
6733 udelay(5);
6734
bb8c093b
CH
6735 iwl4965_hw_nic_stop_master(priv);
6736 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6737 iwl4965_hw_nic_reset(priv);
b481de9c
ZY
6738
6739 exit:
bb8c093b 6740 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
6741
6742 if (priv->ibss_beacon)
6743 dev_kfree_skb(priv->ibss_beacon);
6744 priv->ibss_beacon = NULL;
6745
6746 /* clear out any free frames */
bb8c093b 6747 iwl4965_clear_free_frames(priv);
b481de9c
ZY
6748}
6749
bb8c093b 6750static void iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6751{
6752 mutex_lock(&priv->mutex);
bb8c093b 6753 __iwl4965_down(priv);
b481de9c 6754 mutex_unlock(&priv->mutex);
b24d22b1 6755
bb8c093b 6756 iwl4965_cancel_deferred_work(priv);
b481de9c
ZY
6757}
6758
6759#define MAX_HW_RESTARTS 5
6760
bb8c093b 6761static int __iwl4965_up(struct iwl4965_priv *priv)
b481de9c
ZY
6762{
6763 int rc, i;
b481de9c
ZY
6764
6765 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6766 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6767 return -EIO;
6768 }
6769
6770 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6771 IWL_WARNING("Radio disabled by SW RF kill (module "
6772 "parameter)\n");
e655b9f0
ZY
6773 return -ENODEV;
6774 }
6775
6776 /* If platform's RF_KILL switch is NOT set to KILL */
6777 if (iwl4965_read32(priv, CSR_GP_CNTRL) &
6778 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6779 clear_bit(STATUS_RF_KILL_HW, &priv->status);
6780 else {
6781 set_bit(STATUS_RF_KILL_HW, &priv->status);
6782 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
6783 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
6784 return -ENODEV;
6785 }
b481de9c
ZY
6786 }
6787
a781cf94
RC
6788 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
6789 IWL_ERROR("ucode not available for device bringup\n");
6790 return -EIO;
6791 }
6792
bb8c093b 6793 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 6794
bb8c093b 6795 rc = iwl4965_hw_nic_init(priv);
b481de9c
ZY
6796 if (rc) {
6797 IWL_ERROR("Unable to init nic\n");
6798 return rc;
6799 }
6800
6801 /* make sure rfkill handshake bits are cleared */
bb8c093b
CH
6802 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6803 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
6804 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6805
6806 /* clear (again), then enable host interrupts */
bb8c093b
CH
6807 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
6808 iwl4965_enable_interrupts(priv);
b481de9c
ZY
6809
6810 /* really make sure rfkill handshake bits are cleared */
bb8c093b
CH
6811 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6812 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
6813
6814 /* Copy original ucode data image from disk into backup cache.
6815 * This will be used to initialize the on-board processor's
6816 * data SRAM for a clean start when the runtime program first loads. */
6817 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
5a66926a 6818 priv->ucode_data.len);
b481de9c 6819
e655b9f0
ZY
6820 /* We return success when we resume from suspend and rf_kill is on. */
6821 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
b481de9c 6822 return 0;
b481de9c
ZY
6823
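 /* Try up to MAX_HW_RESTARTS times to load the bootstrap program and
  * start the NIC; each attempt clears the station table and reloads
  * the BSM before kicking off the "initialize" uCode. */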
6824 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6825
bb8c093b 6826 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6827
6828 /* load bootstrap state machine,
6829 * load bootstrap program into processor's memory,
6830 * prepare to load the "initialize" uCode */
bb8c093b 6831 rc = iwl4965_load_bsm(priv);
b481de9c
ZY
6832
6833 if (rc) {
6834 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6835 continue;
6836 }
6837
6838 /* start card; "initialize" will load runtime ucode */
bb8c093b 6839 iwl4965_nic_start(priv);
b481de9c 6840
b481de9c
ZY
6841 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6842
6843 return 0;
6844 }
6845
6846 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 6847 __iwl4965_down(priv);
b481de9c
ZY
6848
6849 /* tried to restart and configure the device for as long as our
6850 * patience could withstand */
6851 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6852 return -EIO;
6853}
6854
6855
6856/*****************************************************************************
6857 *
6858 * Workqueue callbacks
6859 *
6860 *****************************************************************************/
6861
bb8c093b 6862static void iwl4965_bg_init_alive_start(struct work_struct *data)
b481de9c 6863{
bb8c093b
CH
6864 struct iwl4965_priv *priv =
6865 container_of(data, struct iwl4965_priv, init_alive_start.work);
b481de9c
ZY
6866
6867 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6868 return;
6869
6870 mutex_lock(&priv->mutex);
bb8c093b 6871 iwl4965_init_alive_start(priv);
b481de9c
ZY
6872 mutex_unlock(&priv->mutex);
6873}
6874
bb8c093b 6875static void iwl4965_bg_alive_start(struct work_struct *data)
b481de9c 6876{
bb8c093b
CH
6877 struct iwl4965_priv *priv =
6878 container_of(data, struct iwl4965_priv, alive_start.work);
b481de9c
ZY
6879
6880 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6881 return;
6882
6883 mutex_lock(&priv->mutex);
bb8c093b 6884 iwl4965_alive_start(priv);
b481de9c
ZY
6885 mutex_unlock(&priv->mutex);
6886}
6887
bb8c093b 6888static void iwl4965_bg_rf_kill(struct work_struct *work)
b481de9c 6889{
bb8c093b 6890 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, rf_kill);
b481de9c
ZY
6891
6892 wake_up_interruptible(&priv->wait_command_queue);
6893
6894 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6895 return;
6896
6897 mutex_lock(&priv->mutex);
6898
bb8c093b 6899 if (!iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6900 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6901 "HW and/or SW RF Kill no longer active, restarting "
6902 "device\n");
6903 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6904 queue_work(priv->workqueue, &priv->restart);
6905 } else {
6906
6907 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6908 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6909 "disabled by SW switch\n");
6910 else
6911 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6912 "Kill switch must be turned off for "
6913 "wireless networking to work.\n");
6914 }
6915 mutex_unlock(&priv->mutex);
6916}
6917
6918#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6919
bb8c093b 6920static void iwl4965_bg_scan_check(struct work_struct *data)
b481de9c 6921{
bb8c093b
CH
6922 struct iwl4965_priv *priv =
6923 container_of(data, struct iwl4965_priv, scan_check.work);
b481de9c
ZY
6924
6925 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6926 return;
6927
6928 mutex_lock(&priv->mutex);
6929 if (test_bit(STATUS_SCANNING, &priv->status) ||
6930 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6931 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6932 "Scan completion watchdog resetting adapter (%dms)\n",
6933 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
052c4b9f 6934
b481de9c 6935 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 6936 iwl4965_send_scan_abort(priv);
b481de9c
ZY
6937 }
6938 mutex_unlock(&priv->mutex);
6939}
6940
bb8c093b 6941static void iwl4965_bg_request_scan(struct work_struct *data)
b481de9c 6942{
bb8c093b
CH
6943 struct iwl4965_priv *priv =
6944 container_of(data, struct iwl4965_priv, request_scan);
6945 struct iwl4965_host_cmd cmd = {
b481de9c 6946 .id = REPLY_SCAN_CMD,
bb8c093b 6947 .len = sizeof(struct iwl4965_scan_cmd),
b481de9c
ZY
6948 .meta.flags = CMD_SIZE_HUGE,
6949 };
6950 int rc = 0;
bb8c093b 6951 struct iwl4965_scan_cmd *scan;
b481de9c
ZY
6952 struct ieee80211_conf *conf = NULL;
6953 u8 direct_mask;
6954 int phymode;
6955
6956 conf = ieee80211_get_hw_conf(priv->hw);
6957
6958 mutex_lock(&priv->mutex);
6959
bb8c093b 6960 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
6961 IWL_WARNING("request scan called when driver not ready.\n");
6962 goto done;
6963 }
6964
6965 /* Make sure the scan wasn't cancelled before this queued work
6966 * was given the chance to run... */
6967 if (!test_bit(STATUS_SCANNING, &priv->status))
6968 goto done;
6969
6970 /* This should never be called or scheduled if there is currently
6971 * a scan active in the hardware. */
6972 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6973 IWL_DEBUG_INFO("Multiple concurrent scan requests. "
6974 "Ignoring second request.\n");
6975 rc = -EIO;
6976 goto done;
6977 }
6978
6979 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6980 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6981 goto done;
6982 }
6983
6984 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6985 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6986 goto done;
6987 }
6988
bb8c093b 6989 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6990 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6991 goto done;
6992 }
6993
6994 if (!test_bit(STATUS_READY, &priv->status)) {
6995 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
6996 goto done;
6997 }
6998
6999 if (!priv->scan_bands) {
7000 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
7001 goto done;
7002 }
7003
7004 if (!priv->scan) {
bb8c093b 7005 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
b481de9c
ZY
7006 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
7007 if (!priv->scan) {
7008 rc = -ENOMEM;
7009 goto done;
7010 }
7011 }
7012 scan = priv->scan;
bb8c093b 7013 memset(scan, 0, sizeof(struct iwl4965_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
7014
7015 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
7016 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
7017
bb8c093b 7018 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
7019 u16 interval = 0;
7020 u32 extra;
7021 u32 suspend_time = 100;
7022 u32 scan_suspend_time = 100;
7023 unsigned long flags;
7024
7025 IWL_DEBUG_INFO("Scanning while associated...\n");
7026
7027 spin_lock_irqsave(&priv->lock, flags);
7028 interval = priv->beacon_int;
7029 spin_unlock_irqrestore(&priv->lock, flags);
7030
7031 scan->suspend_time = 0;
052c4b9f 7032 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
7033 if (!interval)
7034 interval = suspend_time;
7035
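 /* Pack the pause between scan channels into the format the uCode
  * apparently expects: whole beacon intervals in the upper bits
  * (quotient << 22) and the remainder, scaled by 1024 (usec per TU),
  * in the lower bits. */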
7036 extra = (suspend_time / interval) << 22;
7037 scan_suspend_time = (extra |
7038 ((suspend_time % interval) * 1024));
7039 scan->suspend_time = cpu_to_le32(scan_suspend_time);
7040 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
7041 scan_suspend_time, interval);
7042 }
7043
7044 /* We should add the ability for the user to lock to PASSIVE ONLY */
7045 if (priv->one_direct_scan) {
7046 IWL_DEBUG_SCAN
7047 ("Kicking off one direct scan for '%s'\n",
bb8c093b 7048 iwl4965_escape_essid(priv->direct_ssid,
b481de9c
ZY
7049 priv->direct_ssid_len));
7050 scan->direct_scan[0].id = WLAN_EID_SSID;
7051 scan->direct_scan[0].len = priv->direct_ssid_len;
7052 memcpy(scan->direct_scan[0].ssid,
7053 priv->direct_ssid, priv->direct_ssid_len);
7054 direct_mask = 1;
bb8c093b 7055 } else if (!iwl4965_is_associated(priv) && priv->essid_len) {
b481de9c
ZY
7056 scan->direct_scan[0].id = WLAN_EID_SSID;
7057 scan->direct_scan[0].len = priv->essid_len;
7058 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
7059 direct_mask = 1;
7060 } else
7061 direct_mask = 0;
7062
7063 /* We don't build a direct scan probe request; the uCode will do
7064 * that based on the direct_mask added to each channel entry */
7065 scan->tx_cmd.len = cpu_to_le16(
bb8c093b 7066 iwl4965_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
b481de9c
ZY
7067 IWL_MAX_SCAN_SIZE - sizeof(scan), 0));
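 /* Note: sizeof(scan) evaluates to the size of the pointer, not the
  * size of the scan command struct (sizeof(*scan)), so the subtraction
  * trims only a handful of bytes from IWL_MAX_SCAN_SIZE; sizeof(*scan)
  * may have been the intent. */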
7068 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
7069 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
7070 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
7071
7072 /* flags + rate selection */
7073
7074 scan->tx_cmd.tx_flags |= cpu_to_le32(0x200);
7075
7076 switch (priv->scan_bands) {
7077 case 2:
7078 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
7079 scan->tx_cmd.rate_n_flags =
bb8c093b 7080 iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
b481de9c
ZY
7081 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
7082
7083 scan->good_CRC_th = 0;
7084 phymode = MODE_IEEE80211G;
7085 break;
7086
7087 case 1:
7088 scan->tx_cmd.rate_n_flags =
bb8c093b 7089 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
b481de9c
ZY
7090 RATE_MCS_ANT_B_MSK);
7091 scan->good_CRC_th = IWL_GOOD_CRC_TH;
7092 phymode = MODE_IEEE80211A;
7093 break;
7094
7095 default:
7096 IWL_WARNING("Invalid scan band count\n");
7097 goto done;
7098 }
7099
7100 /* select Rx chains */
7101
7102 /* Force use of chains B and C (0x6) for scan Rx.
7103 * Avoid A (0x1) because of its off-channel reception on A-band.
7104 * MIMO is not used here, but value is required to make uCode happy. */
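 /* Chain bits below: A = 0x1, B = 0x2, C = 0x4, so "valid" = 0x7 (all
  * three) while only B and C (0x6) are forced for actual Rx. */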
7105 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
7106 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
7107 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
7108 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
7109
7110 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
7111 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
7112
7113 if (direct_mask)
7114 IWL_DEBUG_SCAN
7115 ("Initiating direct scan for %s.\n",
bb8c093b 7116 iwl4965_escape_essid(priv->essid, priv->essid_len));
b481de9c
ZY
7117 else
7118 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
7119
7120 scan->channel_count =
bb8c093b 7121 iwl4965_get_channels_for_scan(
b481de9c
ZY
7122 priv, phymode, 1, /* active */
7123 direct_mask,
7124 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
7125
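 /* cmd.len already holds sizeof(struct iwl4965_scan_cmd) from the
  * initializer above; add the probe request and one entry per scanned
  * channel to get the full command length. */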
7126 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 7127 scan->channel_count * sizeof(struct iwl4965_scan_channel);
b481de9c
ZY
7128 cmd.data = scan;
7129 scan->len = cpu_to_le16(cmd.len);
7130
7131 set_bit(STATUS_SCAN_HW, &priv->status);
bb8c093b 7132 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
7133 if (rc)
7134 goto done;
7135
7136 queue_delayed_work(priv->workqueue, &priv->scan_check,
7137 IWL_SCAN_CHECK_WATCHDOG);
7138
7139 mutex_unlock(&priv->mutex);
7140 return;
7141
7142 done:
01ebd063 7143 /* inform mac80211 scan aborted */
b481de9c
ZY
7144 queue_work(priv->workqueue, &priv->scan_completed);
7145 mutex_unlock(&priv->mutex);
7146}
7147
bb8c093b 7148static void iwl4965_bg_up(struct work_struct *data)
b481de9c 7149{
bb8c093b 7150 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, up);
b481de9c
ZY
7151
7152 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7153 return;
7154
7155 mutex_lock(&priv->mutex);
bb8c093b 7156 __iwl4965_up(priv);
b481de9c
ZY
7157 mutex_unlock(&priv->mutex);
7158}
7159
bb8c093b 7160static void iwl4965_bg_restart(struct work_struct *data)
b481de9c 7161{
bb8c093b 7162 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, restart);
b481de9c
ZY
7163
7164 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7165 return;
7166
bb8c093b 7167 iwl4965_down(priv);
b481de9c
ZY
7168 queue_work(priv->workqueue, &priv->up);
7169}
7170
bb8c093b 7171static void iwl4965_bg_rx_replenish(struct work_struct *data)
b481de9c 7172{
bb8c093b
CH
7173 struct iwl4965_priv *priv =
7174 container_of(data, struct iwl4965_priv, rx_replenish);
b481de9c
ZY
7175
7176 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7177 return;
7178
7179 mutex_lock(&priv->mutex);
bb8c093b 7180 iwl4965_rx_replenish(priv);
b481de9c
ZY
7181 mutex_unlock(&priv->mutex);
7182}
7183
7878a5a4
MA
7184#define IWL_DELAY_NEXT_SCAN (HZ*2)
7185
bb8c093b 7186static void iwl4965_bg_post_associate(struct work_struct *data)
b481de9c 7187{
bb8c093b 7188 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv,
b481de9c
ZY
7189 post_associate.work);
7190
7191 int rc = 0;
7192 struct ieee80211_conf *conf = NULL;
0795af57 7193 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7194
7195 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7196 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
7197 return;
7198 }
7199
0795af57
JP
7200 IWL_DEBUG_ASSOC("Associated as %d to: %s\n",
7201 priv->assoc_id,
7202 print_mac(mac, priv->active_rxon.bssid_addr));
b481de9c
ZY
7203
7204
7205 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7206 return;
7207
7208 mutex_lock(&priv->mutex);
7209
32bfd35d 7210 if (!priv->vif || !priv->is_open) {
948c171c
MA
7211 mutex_unlock(&priv->mutex);
7212 return;
7213 }
bb8c093b 7214 iwl4965_scan_cancel_timeout(priv, 200);
052c4b9f 7215
b481de9c
ZY
7216 conf = ieee80211_get_hw_conf(priv->hw);
7217
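 /* Re-associate sequence: first commit RXON with the ASSOC flag
  * cleared, send the timing command, then set the ASSOC flag (plus HT
  * and chain settings) and commit again below. */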
7218 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7219 iwl4965_commit_rxon(priv);
b481de9c 7220
bb8c093b
CH
7221 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7222 iwl4965_setup_rxon_timing(priv);
7223 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7224 sizeof(priv->rxon_timing), &priv->rxon_timing);
7225 if (rc)
7226 IWL_WARNING("REPLY_RXON_TIMING failed - "
7227 "Attempting to continue.\n");
7228
7229 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7230
c8b0e6e1 7231#ifdef CONFIG_IWL4965_HT
fd105e79
RR
7232 if (priv->current_ht_config.is_ht)
7233 iwl4965_set_rxon_ht(priv, &priv->current_ht_config);
c8b0e6e1 7234#endif /* CONFIG_IWL4965_HT*/
b481de9c
ZY
7235 iwl4965_set_rxon_chain(priv);
7236 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7237
7238 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
7239 priv->assoc_id, priv->beacon_int);
7240
7241 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7242 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7243 else
7244 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7245
7246 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7247 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
7248 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
7249 else
7250 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7251
7252 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7253 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7254
7255 }
7256
bb8c093b 7257 iwl4965_commit_rxon(priv);
b481de9c
ZY
7258
7259 switch (priv->iw_mode) {
7260 case IEEE80211_IF_TYPE_STA:
bb8c093b 7261 iwl4965_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
7262 break;
7263
7264 case IEEE80211_IF_TYPE_IBSS:
7265
7266 /* clear out the station table */
bb8c093b 7267 iwl4965_clear_stations_table(priv);
b481de9c 7268
bb8c093b
CH
7269 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
7270 iwl4965_rxon_add_station(priv, priv->bssid, 0);
7271 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
7272 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7273
7274 break;
7275
7276 default:
7277 IWL_ERROR("%s Should not be called in %d mode\n",
7278 __FUNCTION__, priv->iw_mode);
7279 break;
7280 }
7281
bb8c093b 7282 iwl4965_sequence_reset(priv);
b481de9c 7283
c8b0e6e1 7284#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c
ZY
7285 /* Enable Rx differential gain and sensitivity calibrations */
7286 iwl4965_chain_noise_reset(priv);
7287 priv->start_calib = 1;
c8b0e6e1 7288#endif /* CONFIG_IWL4965_SENSITIVITY */
b481de9c
ZY
7289
7290 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7291 priv->assoc_station_added = 1;
7292
c8b0e6e1 7293#ifdef CONFIG_IWL4965_QOS
bb8c093b 7294 iwl4965_activate_qos(priv, 0);
c8b0e6e1 7295#endif /* CONFIG_IWL4965_QOS */
7878a5a4
MA
7296 /* we have just associated, don't start a scan too early */
7297 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
b481de9c
ZY
7298 mutex_unlock(&priv->mutex);
7299}
7300
bb8c093b 7301static void iwl4965_bg_abort_scan(struct work_struct *work)
b481de9c 7302{
bb8c093b 7303 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, abort_scan);
b481de9c 7304
bb8c093b 7305 if (!iwl4965_is_ready(priv))
b481de9c
ZY
7306 return;
7307
7308 mutex_lock(&priv->mutex);
7309
7310 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 7311 iwl4965_send_scan_abort(priv);
b481de9c
ZY
7312
7313 mutex_unlock(&priv->mutex);
7314}
7315
76bb77e0
ZY
7316static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
7317
bb8c093b 7318static void iwl4965_bg_scan_completed(struct work_struct *work)
b481de9c 7319{
bb8c093b
CH
7320 struct iwl4965_priv *priv =
7321 container_of(work, struct iwl4965_priv, scan_completed);
b481de9c
ZY
7322
7323 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
7324
7325 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7326 return;
7327
a0646470
ZY
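 /* A channel/mode change requested during the scan was deferred by
  * iwl4965_mac_config(); re-apply it now that the scan is done. */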
7328 if (test_bit(STATUS_CONF_PENDING, &priv->status))
7329 iwl4965_mac_config(priv->hw, ieee80211_get_hw_conf(priv->hw));
76bb77e0 7330
b481de9c
ZY
7331 ieee80211_scan_completed(priv->hw);
7332
7333 /* Since setting the TXPOWER may have been deferred while
7334 * performing the scan, fire one off */
7335 mutex_lock(&priv->mutex);
bb8c093b 7336 iwl4965_hw_reg_send_txpower(priv);
b481de9c
ZY
7337 mutex_unlock(&priv->mutex);
7338}
7339
7340/*****************************************************************************
7341 *
7342 * mac80211 entry point functions
7343 *
7344 *****************************************************************************/
7345
5a66926a
ZY
7346#define UCODE_READY_TIMEOUT (2 * HZ)
7347
bb8c093b 7348static int iwl4965_mac_start(struct ieee80211_hw *hw)
b481de9c 7349{
bb8c093b 7350 struct iwl4965_priv *priv = hw->priv;
5a66926a 7351 int ret;
b481de9c
ZY
7352
7353 IWL_DEBUG_MAC80211("enter\n");
7354
5a66926a
ZY
7355 if (pci_enable_device(priv->pci_dev)) {
7356 IWL_ERROR("Failed to pci_enable_device\n");
7357 return -ENODEV;
7358 }
7359 pci_restore_state(priv->pci_dev);
7360 pci_enable_msi(priv->pci_dev);
7361
7362 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED,
7363 DRV_NAME, priv);
7364 if (ret) {
7365 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
7366 goto out_disable_msi;
7367 }
7368
b481de9c
ZY
7369 /* we should be verifying the device is ready to be opened */
7370 mutex_lock(&priv->mutex);
7371
5a66926a
ZY
7372 memset(&priv->staging_rxon, 0, sizeof(struct iwl4965_rxon_cmd));
7373 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
7374 * ucode filename and max sizes are card-specific. */
b481de9c 7375
5a66926a
ZY
7376 if (!priv->ucode_code.len) {
7377 ret = iwl4965_read_ucode(priv);
7378 if (ret) {
7379 IWL_ERROR("Could not read microcode: %d\n", ret);
7380 mutex_unlock(&priv->mutex);
7381 goto out_release_irq;
7382 }
7383 }
b481de9c 7384
e655b9f0 7385 ret = __iwl4965_up(priv);
5a66926a 7386
b481de9c 7387 mutex_unlock(&priv->mutex);
5a66926a 7388
e655b9f0
ZY
7389 if (ret)
7390 goto out_release_irq;
7391
7392 IWL_DEBUG_INFO("Start UP work done.\n");
7393
7394 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
7395 return 0;
7396
5a66926a
ZY
7397 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
7398 * mac80211 will not be run successfully. */
7399 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
7400 test_bit(STATUS_READY, &priv->status),
7401 UCODE_READY_TIMEOUT);
7402 if (!ret) {
7403 if (!test_bit(STATUS_READY, &priv->status)) {
7404 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n",
7405 jiffies_to_msecs(UCODE_READY_TIMEOUT));
7406 ret = -ETIMEDOUT;
7407 goto out_release_irq;
7408 }
7409 }
7410
e655b9f0 7411 priv->is_open = 1;
b481de9c
ZY
7412 IWL_DEBUG_MAC80211("leave\n");
7413 return 0;
5a66926a
ZY
7414
7415out_release_irq:
7416 free_irq(priv->pci_dev->irq, priv);
7417out_disable_msi:
7418 pci_disable_msi(priv->pci_dev);
e655b9f0
ZY
7419 pci_disable_device(priv->pci_dev);
7420 priv->is_open = 0;
7421 IWL_DEBUG_MAC80211("leave - failed\n");
5a66926a 7422 return ret;
b481de9c
ZY
7423}
7424
bb8c093b 7425static void iwl4965_mac_stop(struct ieee80211_hw *hw)
b481de9c 7426{
bb8c093b 7427 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7428
7429 IWL_DEBUG_MAC80211("enter\n");
948c171c 7430
e655b9f0
ZY
7431 if (!priv->is_open) {
7432 IWL_DEBUG_MAC80211("leave - skip\n");
7433 return;
7434 }
7435
b481de9c 7436 priv->is_open = 0;
5a66926a
ZY
7437
7438 if (iwl4965_is_ready_rf(priv)) {
e655b9f0
ZY
7439 /* stop mac, cancel any scan request and clear
7440 * the RXON_FILTER_ASSOC_MSK bit
7441 */
5a66926a
ZY
7442 mutex_lock(&priv->mutex);
7443 iwl4965_scan_cancel_timeout(priv, 100);
7444 cancel_delayed_work(&priv->post_associate);
fde3571f 7445 mutex_unlock(&priv->mutex);
fde3571f
MA
7446 }
7447
5a66926a
ZY
7448 iwl4965_down(priv);
7449
7450 flush_workqueue(priv->workqueue);
7451 free_irq(priv->pci_dev->irq, priv);
7452 pci_disable_msi(priv->pci_dev);
7453 pci_save_state(priv->pci_dev);
7454 pci_disable_device(priv->pci_dev);
948c171c 7455
b481de9c 7456 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7457}
7458
bb8c093b 7459static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
7460 struct ieee80211_tx_control *ctl)
7461{
bb8c093b 7462 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7463
7464 IWL_DEBUG_MAC80211("enter\n");
7465
7466 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
7467 IWL_DEBUG_MAC80211("leave - monitor\n");
7468 return -1;
7469 }
7470
7471 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7472 ctl->tx_rate);
7473
bb8c093b 7474 if (iwl4965_tx_skb(priv, skb, ctl))
b481de9c
ZY
7475 dev_kfree_skb_any(skb);
7476
7477 IWL_DEBUG_MAC80211("leave\n");
7478 return 0;
7479}
7480
bb8c093b 7481static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7482 struct ieee80211_if_init_conf *conf)
7483{
bb8c093b 7484 struct iwl4965_priv *priv = hw->priv;
b481de9c 7485 unsigned long flags;
0795af57 7486 DECLARE_MAC_BUF(mac);
b481de9c 7487
32bfd35d 7488 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
b481de9c 7489
32bfd35d
JB
7490 if (priv->vif) {
7491 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
b481de9c
ZY
7492 return 0;
7493 }
7494
7495 spin_lock_irqsave(&priv->lock, flags);
32bfd35d 7496 priv->vif = conf->vif;
b481de9c
ZY
7497
7498 spin_unlock_irqrestore(&priv->lock, flags);
7499
7500 mutex_lock(&priv->mutex);
864792e3
TW
7501
7502 if (conf->mac_addr) {
7503 IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr));
7504 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
7505 }
b481de9c 7506
5a66926a
ZY
7507 if (iwl4965_is_ready(priv))
7508 iwl4965_set_mode(priv, conf->type);
7509
b481de9c
ZY
7510 mutex_unlock(&priv->mutex);
7511
5a66926a 7512 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7513 return 0;
7514}
7515
7516/**
bb8c093b 7517 * iwl4965_mac_config - mac80211 config callback
b481de9c
ZY
7518 *
7519 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
7520 * be set inappropriately and the driver currently sets the hardware up to
7521 * use it whenever needed.
7522 */
bb8c093b 7523static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
b481de9c 7524{
bb8c093b
CH
7525 struct iwl4965_priv *priv = hw->priv;
7526 const struct iwl4965_channel_info *ch_info;
b481de9c 7527 unsigned long flags;
76bb77e0 7528 int ret = 0;
b481de9c
ZY
7529
7530 mutex_lock(&priv->mutex);
7531 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);
7532
12342c47
ZY
7533 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
7534
bb8c093b 7535 if (!iwl4965_is_ready(priv)) {
b481de9c 7536 IWL_DEBUG_MAC80211("leave - not ready\n");
76bb77e0
ZY
7537 ret = -EIO;
7538 goto out;
b481de9c
ZY
7539 }
7540
bb8c093b 7541 if (unlikely(!iwl4965_param_disable_hw_scan &&
b481de9c 7542 test_bit(STATUS_SCANNING, &priv->status))) {
a0646470
ZY
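 /* Don't touch RXON while a HW scan is in flight; remember that a
  * config is pending and let iwl4965_bg_scan_completed() re-apply it. */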
7543 IWL_DEBUG_MAC80211("leave - scanning\n");
7544 set_bit(STATUS_CONF_PENDING, &priv->status);
b481de9c 7545 mutex_unlock(&priv->mutex);
a0646470 7546 return 0;
b481de9c
ZY
7547 }
7548
7549 spin_lock_irqsave(&priv->lock, flags);
7550
bb8c093b 7551 ch_info = iwl4965_get_channel_info(priv, conf->phymode, conf->channel);
b481de9c
ZY
7552 if (!is_channel_valid(ch_info)) {
7553 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7554 conf->channel, conf->phymode);
7555 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7556 spin_unlock_irqrestore(&priv->lock, flags);
76bb77e0
ZY
7557 ret = -EINVAL;
7558 goto out;
b481de9c
ZY
7559 }
7560
c8b0e6e1 7561#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
7562 /* if we are switching from ht to 2.4, clear flags
7563 * from any ht-related info, since 2.4 does not
7564 * support ht */
7565 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel)
7566#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7567 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
7568#endif
7569 )
7570 priv->staging_rxon.flags = 0;
c8b0e6e1 7571#endif /* CONFIG_IWL4965_HT */
b481de9c 7572
bb8c093b 7573 iwl4965_set_rxon_channel(priv, conf->phymode, conf->channel);
b481de9c 7574
bb8c093b 7575 iwl4965_set_flags_for_phymode(priv, conf->phymode);
b481de9c
ZY
7576
7577 /* The list of supported rates and rate mask can be different
7578 * for each phymode; since the phymode may have changed, reset
7579 * the rate mask to what mac80211 lists */
bb8c093b 7580 iwl4965_set_rate(priv);
b481de9c
ZY
7581
7582 spin_unlock_irqrestore(&priv->lock, flags);
7583
7584#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7585 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 7586 iwl4965_hw_channel_switch(priv, conf->channel);
76bb77e0 7587 goto out;
b481de9c
ZY
7588 }
7589#endif
7590
bb8c093b 7591 iwl4965_radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
7592
7593 if (!conf->radio_enabled) {
7594 IWL_DEBUG_MAC80211("leave - radio disabled\n");
76bb77e0 7595 goto out;
b481de9c
ZY
7596 }
7597
bb8c093b 7598 if (iwl4965_is_rfkill(priv)) {
b481de9c 7599 IWL_DEBUG_MAC80211("leave - RF kill\n");
76bb77e0
ZY
7600 ret = -EIO;
7601 goto out;
b481de9c
ZY
7602 }
7603
bb8c093b 7604 iwl4965_set_rate(priv);
b481de9c
ZY
7605
7606 if (memcmp(&priv->active_rxon,
7607 &priv->staging_rxon, sizeof(priv->staging_rxon)))
bb8c093b 7608 iwl4965_commit_rxon(priv);
b481de9c
ZY
7609 else
7610 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
7611
7612 IWL_DEBUG_MAC80211("leave\n");
7613
a0646470
ZY
7614out:
7615 clear_bit(STATUS_CONF_PENDING, &priv->status);
5a66926a 7616 mutex_unlock(&priv->mutex);
76bb77e0 7617 return ret;
b481de9c
ZY
7618}
7619
bb8c093b 7620static void iwl4965_config_ap(struct iwl4965_priv *priv)
b481de9c
ZY
7621{
7622 int rc = 0;
7623
7624 if (priv->status & STATUS_EXIT_PENDING)
7625 return;
7626
7627 /* The following should be done only at AP bring up */
7628 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7629
7630 /* RXON - unassoc (to set timing command) */
7631 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7632 iwl4965_commit_rxon(priv);
b481de9c
ZY
7633
7634 /* RXON Timing */
bb8c093b
CH
7635 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7636 iwl4965_setup_rxon_timing(priv);
7637 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7638 sizeof(priv->rxon_timing), &priv->rxon_timing);
7639 if (rc)
7640 IWL_WARNING("REPLY_RXON_TIMING failed - "
7641 "Attempting to continue.\n");
7642
7643 iwl4965_set_rxon_chain(priv);
7644
7645 /* FIXME: what should be the assoc_id for AP? */
7646 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7647 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7648 priv->staging_rxon.flags |=
7649 RXON_FLG_SHORT_PREAMBLE_MSK;
7650 else
7651 priv->staging_rxon.flags &=
7652 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7653
7654 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7655 if (priv->assoc_capability &
7656 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7657 priv->staging_rxon.flags |=
7658 RXON_FLG_SHORT_SLOT_MSK;
7659 else
7660 priv->staging_rxon.flags &=
7661 ~RXON_FLG_SHORT_SLOT_MSK;
7662
7663 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7664 priv->staging_rxon.flags &=
7665 ~RXON_FLG_SHORT_SLOT_MSK;
7666 }
7667 /* restore RXON assoc */
7668 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 7669 iwl4965_commit_rxon(priv);
c8b0e6e1 7670#ifdef CONFIG_IWL4965_QOS
bb8c093b 7671 iwl4965_activate_qos(priv, 1);
b481de9c 7672#endif
bb8c093b 7673 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
e1493deb 7674 }
bb8c093b 7675 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7676
7677 /* FIXME - we need to add code here to detect a totally new
7678 * configuration, reset the AP, unassoc, rxon timing, assoc,
7679 * clear sta table, add BCAST sta... */
7680}
7681
32bfd35d
JB
7682static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7683 struct ieee80211_vif *vif,
b481de9c
ZY
7684 struct ieee80211_if_conf *conf)
7685{
bb8c093b 7686 struct iwl4965_priv *priv = hw->priv;
0795af57 7687 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7688 unsigned long flags;
7689 int rc;
7690
7691 if (conf == NULL)
7692 return -EIO;
7693
7694 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7695 (!conf->beacon || !conf->ssid_len)) {
7696 IWL_DEBUG_MAC80211
7697 ("Leaving in AP mode because HostAPD is not ready.\n");
7698 return 0;
7699 }
7700
5a66926a
ZY
7701 if (!iwl4965_is_alive(priv))
7702 return -EAGAIN;
7703
b481de9c
ZY
7704 mutex_lock(&priv->mutex);
7705
b481de9c 7706 if (conf->bssid)
0795af57
JP
7707 IWL_DEBUG_MAC80211("bssid: %s\n",
7708 print_mac(mac, conf->bssid));
b481de9c 7709
4150c572
JB
7710/*
7711 * very dubious code was here; the probe filtering flag is never set:
7712 *
b481de9c
ZY
7713 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7714 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572
JB
7715 */
7716 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
b481de9c
ZY
7717 IWL_DEBUG_MAC80211("leave - scanning\n");
7718 mutex_unlock(&priv->mutex);
7719 return 0;
7720 }
7721
32bfd35d
JB
7722 if (priv->vif != vif) {
7723 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
b481de9c
ZY
7724 mutex_unlock(&priv->mutex);
7725 return 0;
7726 }
7727
7728 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7729 if (!conf->bssid) {
7730 conf->bssid = priv->mac_addr;
7731 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
0795af57
JP
7732 IWL_DEBUG_MAC80211("bssid was set to: %s\n",
7733 print_mac(mac, conf->bssid));
b481de9c
ZY
7734 }
7735 if (priv->ibss_beacon)
7736 dev_kfree_skb(priv->ibss_beacon);
7737
7738 priv->ibss_beacon = conf->beacon;
7739 }
7740
fde3571f
MA
7741 if (iwl4965_is_rfkill(priv))
7742 goto done;
7743
b481de9c
ZY
7744 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
7745 !is_multicast_ether_addr(conf->bssid)) {
7746 /* If there is currently a HW scan going on in the background
7747 * then we need to cancel it else the RXON below will fail. */
bb8c093b 7748 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
7749 IWL_WARNING("Aborted scan still in progress "
7750 "after 100ms\n");
7751 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
7752 mutex_unlock(&priv->mutex);
7753 return -EAGAIN;
7754 }
7755 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
7756
7757 /* TODO: Audit driver for usage of these members and see
7758 * if mac80211 deprecates them (priv->bssid looks like it
7759 * shouldn't be there, but I haven't scanned the IBSS code
7760 * to verify) - jpk */
7761 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
7762
7763 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b 7764 iwl4965_config_ap(priv);
b481de9c 7765 else {
bb8c093b 7766 rc = iwl4965_commit_rxon(priv);
b481de9c 7767 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
bb8c093b 7768 iwl4965_rxon_add_station(
b481de9c
ZY
7769 priv, priv->active_rxon.bssid_addr, 1);
7770 }
7771
7772 } else {
bb8c093b 7773 iwl4965_scan_cancel_timeout(priv, 100);
b481de9c 7774 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7775 iwl4965_commit_rxon(priv);
b481de9c
ZY
7776 }
7777
fde3571f 7778 done:
b481de9c
ZY
7779 spin_lock_irqsave(&priv->lock, flags);
7780 if (!conf->ssid_len)
7781 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7782 else
7783 memcpy(priv->essid, conf->ssid, conf->ssid_len);
7784
7785 priv->essid_len = conf->ssid_len;
7786 spin_unlock_irqrestore(&priv->lock, flags);
7787
7788 IWL_DEBUG_MAC80211("leave\n");
7789 mutex_unlock(&priv->mutex);
7790
7791 return 0;
7792}
7793
bb8c093b 7794static void iwl4965_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
7795 unsigned int changed_flags,
7796 unsigned int *total_flags,
7797 int mc_count, struct dev_addr_list *mc_list)
7798{
7799 /*
7800 * XXX: dummy
bb8c093b 7801 * see also iwl4965_connection_init_rx_config
4150c572
JB
7802 */
7803 *total_flags = 0;
7804}
7805
bb8c093b 7806static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7807 struct ieee80211_if_init_conf *conf)
7808{
bb8c093b 7809 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7810
7811 IWL_DEBUG_MAC80211("enter\n");
7812
7813 mutex_lock(&priv->mutex);
948c171c 7814
fde3571f
MA
7815 if (iwl4965_is_ready_rf(priv)) {
7816 iwl4965_scan_cancel_timeout(priv, 100);
7817 cancel_delayed_work(&priv->post_associate);
7818 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7819 iwl4965_commit_rxon(priv);
7820 }
32bfd35d
JB
7821 if (priv->vif == conf->vif) {
7822 priv->vif = NULL;
b481de9c
ZY
7823 memset(priv->bssid, 0, ETH_ALEN);
7824 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7825 priv->essid_len = 0;
7826 }
7827 mutex_unlock(&priv->mutex);
7828
7829 IWL_DEBUG_MAC80211("leave\n");
7830
7831}
471b3efd
JB
7832
7833static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
7834 struct ieee80211_vif *vif,
7835 struct ieee80211_bss_conf *bss_conf,
7836 u32 changes)
220173b0 7837{
bb8c093b 7838 struct iwl4965_priv *priv = hw->priv;
220173b0 7839
471b3efd
JB
7840 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
7841 if (bss_conf->use_short_preamble)
220173b0
TW
7842 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7843 else
7844 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7845 }
7846
471b3efd
JB
7847 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
7848 if (bss_conf->use_cts_prot && (priv->phymode != MODE_IEEE80211A))
220173b0
TW
7849 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
7850 else
7851 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
7852 }
7853
471b3efd
JB
7854 if (changes & BSS_CHANGED_ASSOC) {
7855 /*
7856 * TODO:
7857 * do stuff instead of sniffing assoc resp
7858 */
7859 }
7860
bb8c093b
CH
7861 if (iwl4965_is_associated(priv))
7862 iwl4965_send_rxon_assoc(priv);
220173b0 7863}
b481de9c 7864
bb8c093b 7865static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
7866{
7867 int rc = 0;
7868 unsigned long flags;
bb8c093b 7869 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7870
7871 IWL_DEBUG_MAC80211("enter\n");
7872
052c4b9f 7873 mutex_lock(&priv->mutex);
b481de9c
ZY
7874 spin_lock_irqsave(&priv->lock, flags);
7875
bb8c093b 7876 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7877 rc = -EIO;
7878 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7879 goto out_unlock;
7880 }
7881
7882 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
7883 rc = -EIO;
7884 IWL_ERROR("ERROR: APs don't scan\n");
7885 goto out_unlock;
7886 }
7887
7878a5a4
MA
7888 /* we don't schedule a scan within the next_scan_jiffies period */
7889 if (priv->next_scan_jiffies &&
7890 time_after(priv->next_scan_jiffies, jiffies)) {
7891 rc = -EAGAIN;
7892 goto out_unlock;
7893 }
b481de9c 7894 /* if we just finished a scan, ask for a delay */
7878a5a4
MA
7895 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
7896 IWL_DELAY_NEXT_SCAN, jiffies)) {
b481de9c
ZY
7897 rc = -EAGAIN;
7898 goto out_unlock;
7899 }
7900 if (len) {
7878a5a4 7901 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
bb8c093b 7902 iwl4965_escape_essid(ssid, len), (int)len);
b481de9c
ZY
7903
7904 priv->one_direct_scan = 1;
7905 priv->direct_ssid_len = (u8)
7906 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
7907 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
948c171c
MA
7908 } else
7909 priv->one_direct_scan = 0;
b481de9c 7910
bb8c093b 7911 rc = iwl4965_scan_initiate(priv);
b481de9c
ZY
7912
7913 IWL_DEBUG_MAC80211("leave\n");
7914
7915out_unlock:
7916 spin_unlock_irqrestore(&priv->lock, flags);
052c4b9f 7917 mutex_unlock(&priv->mutex);
b481de9c
ZY
7918
7919 return rc;
7920}
7921
bb8c093b 7922static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
b481de9c
ZY
7923 const u8 *local_addr, const u8 *addr,
7924 struct ieee80211_key_conf *key)
7925{
bb8c093b 7926 struct iwl4965_priv *priv = hw->priv;
0795af57 7927 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7928 int rc = 0;
7929 u8 sta_id;
7930
7931 IWL_DEBUG_MAC80211("enter\n");
7932
bb8c093b 7933 if (!iwl4965_param_hwcrypto) {
b481de9c
ZY
7934 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7935 return -EOPNOTSUPP;
7936 }
7937
7938 if (is_zero_ether_addr(addr))
7939 /* only support pairwise keys */
7940 return -EOPNOTSUPP;
7941
bb8c093b 7942 sta_id = iwl4965_hw_find_station(priv, addr);
b481de9c 7943 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
7944 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7945 print_mac(mac, addr));
b481de9c
ZY
7946 return -EINVAL;
7947 }
7948
7949 mutex_lock(&priv->mutex);
7950
bb8c093b 7951 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 7952
b481de9c
ZY
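 /* SET_KEY programs the key into the station table and turns on
  * hardware encryption in RXON; DISABLE_KEY clears it and drops back
  * to software crypto. */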
7953 switch (cmd) {
7954 case SET_KEY:
bb8c093b 7955 rc = iwl4965_update_sta_key_info(priv, key, sta_id);
b481de9c 7956 if (!rc) {
bb8c093b
CH
7957 iwl4965_set_rxon_hwcrypto(priv, 1);
7958 iwl4965_commit_rxon(priv);
b481de9c
ZY
7959 key->hw_key_idx = sta_id;
7960 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7961 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7962 }
7963 break;
7964 case DISABLE_KEY:
bb8c093b 7965 rc = iwl4965_clear_sta_key_info(priv, sta_id);
b481de9c 7966 if (!rc) {
bb8c093b
CH
7967 iwl4965_set_rxon_hwcrypto(priv, 0);
7968 iwl4965_commit_rxon(priv);
b481de9c
ZY
7969 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7970 }
7971 break;
7972 default:
7973 rc = -EINVAL;
7974 }
7975
7976 IWL_DEBUG_MAC80211("leave\n");
7977 mutex_unlock(&priv->mutex);
7978
7979 return rc;
7980}
7981
bb8c093b 7982static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
b481de9c
ZY
7983 const struct ieee80211_tx_queue_params *params)
7984{
bb8c093b 7985 struct iwl4965_priv *priv = hw->priv;
c8b0e6e1 7986#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7987 unsigned long flags;
7988 int q;
0054b34d 7989#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
7990
7991 IWL_DEBUG_MAC80211("enter\n");
7992
bb8c093b 7993 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7994 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7995 return -EIO;
7996 }
7997
7998 if (queue >= AC_NUM) {
7999 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
8000 return 0;
8001 }
8002
c8b0e6e1 8003#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
8004 if (!priv->qos_data.qos_enable) {
8005 priv->qos_data.qos_active = 0;
8006 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
8007 return 0;
8008 }
8009 q = AC_NUM - 1 - queue;
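 /* mac80211 numbers its queues from highest priority (VO) downward,
  * while the EDCA parameter table is apparently indexed the other way
  * round, hence the reversed index. */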
8010
8011 spin_lock_irqsave(&priv->lock, flags);
8012
8013 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
8014 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
8015 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
8016 priv->qos_data.def_qos_parm.ac[q].edca_txop =
8017 cpu_to_le16((params->burst_time * 100));
8018
8019 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
8020 priv->qos_data.qos_active = 1;
8021
8022 spin_unlock_irqrestore(&priv->lock, flags);
8023
8024 mutex_lock(&priv->mutex);
8025 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b
CH
8026 iwl4965_activate_qos(priv, 1);
8027 else if (priv->assoc_id && iwl4965_is_associated(priv))
8028 iwl4965_activate_qos(priv, 0);
b481de9c
ZY
8029
8030 mutex_unlock(&priv->mutex);
8031
c8b0e6e1 8032#endif /*CONFIG_IWL4965_QOS */
b481de9c
ZY
8033
8034 IWL_DEBUG_MAC80211("leave\n");
8035 return 0;
8036}
8037
bb8c093b 8038static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
8039 struct ieee80211_tx_queue_stats *stats)
8040{
bb8c093b 8041 struct iwl4965_priv *priv = hw->priv;
b481de9c 8042 int i, avail;
bb8c093b
CH
8043 struct iwl4965_tx_queue *txq;
8044 struct iwl4965_queue *q;
b481de9c
ZY
8045 unsigned long flags;
8046
8047 IWL_DEBUG_MAC80211("enter\n");
8048
bb8c093b 8049 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8050 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8051 return -EIO;
8052 }
8053
8054 spin_lock_irqsave(&priv->lock, flags);
8055
8056 for (i = 0; i < AC_NUM; i++) {
8057 txq = &priv->txq[i];
8058 q = &txq->q;
bb8c093b 8059 avail = iwl4965_queue_space(q);
b481de9c
ZY
8060
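 /* Report per-queue occupancy: len is the number of used slots (window
  * minus free space), limit is how many slots may be filled before the
  * high-water mark, and count is the full window size. */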
8061 stats->data[i].len = q->n_window - avail;
8062 stats->data[i].limit = q->n_window - q->high_mark;
8063 stats->data[i].count = q->n_window;
8064
8065 }
8066 spin_unlock_irqrestore(&priv->lock, flags);
8067
8068 IWL_DEBUG_MAC80211("leave\n");
8069
8070 return 0;
8071}
8072
bb8c093b 8073static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
b481de9c
ZY
8074 struct ieee80211_low_level_stats *stats)
8075{
8076 IWL_DEBUG_MAC80211("enter\n");
8077 IWL_DEBUG_MAC80211("leave\n");
8078
8079 return 0;
8080}
8081
bb8c093b 8082static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
b481de9c
ZY
8083{
8084 IWL_DEBUG_MAC80211("enter\n");
8085 IWL_DEBUG_MAC80211("leave\n");
8086
8087 return 0;
8088}
8089
bb8c093b 8090static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 8091{
bb8c093b 8092 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8093 unsigned long flags;
8094
8095 mutex_lock(&priv->mutex);
8096 IWL_DEBUG_MAC80211("enter\n");
8097
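 /* Despite the callback's name, this does much more than reset the
  * TSF: it clears the rate-scale/HT/QoS state, drops any IBSS beacon,
  * and (unless in AP mode) knocks the RXON back to unassociated. */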
8098 priv->lq_mngr.lq_ready = 0;
c8b0e6e1 8099#ifdef CONFIG_IWL4965_HT
b481de9c 8100 spin_lock_irqsave(&priv->lock, flags);
fd105e79 8101 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
b481de9c 8102 spin_unlock_irqrestore(&priv->lock, flags);
c8b0e6e1 8103#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
8104/* if (priv->lq_mngr.agg_ctrl.granted_ba)
8105 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/
8106
bb8c093b 8107 memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl4965_agg_control));
b481de9c
ZY
8108 priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10;
8109 priv->lq_mngr.agg_ctrl.ba_timeout = 5000;
8110 priv->lq_mngr.agg_ctrl.auto_agg = 1;
8111
8112 if (priv->lq_mngr.agg_ctrl.auto_agg)
8113 priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED;
c8b0e6e1
CH
8114#endif /*CONFIG_IWL4965_HT_AGG */
8115#endif /* CONFIG_IWL4965_HT */
b481de9c 8116
c8b0e6e1 8117#ifdef CONFIG_IWL4965_QOS
bb8c093b 8118 iwl4965_reset_qos(priv);
b481de9c
ZY
8119#endif
8120
8121 cancel_delayed_work(&priv->post_associate);
8122
8123 spin_lock_irqsave(&priv->lock, flags);
8124 priv->assoc_id = 0;
8125 priv->assoc_capability = 0;
8126 priv->call_post_assoc_from_beacon = 0;
8127 priv->assoc_station_added = 0;
8128
8129 /* new association get rid of ibss beacon skb */
8130 if (priv->ibss_beacon)
8131 dev_kfree_skb(priv->ibss_beacon);
8132
8133 priv->ibss_beacon = NULL;
8134
8135 priv->beacon_int = priv->hw->conf.beacon_int;
8136 priv->timestamp1 = 0;
8137 priv->timestamp0 = 0;
8138 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
8139 priv->beacon_int = 0;
8140
8141 spin_unlock_irqrestore(&priv->lock, flags);
8142
fde3571f
MA
8143 if (!iwl4965_is_ready_rf(priv)) {
8144 IWL_DEBUG_MAC80211("leave - not ready\n");
8145 mutex_unlock(&priv->mutex);
8146 return;
8147 }
8148
052c4b9f 8149 /* we are restarting association process
8150 * clear RXON_FILTER_ASSOC_MSK bit
8151 */
8152 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
bb8c093b 8153 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 8154 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 8155 iwl4965_commit_rxon(priv);
052c4b9f 8156 }
8157
b481de9c
ZY
8158 /* Per mac80211.h: This is only used in IBSS mode... */
8159 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
052c4b9f 8160
b481de9c
ZY
8161 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
8162 mutex_unlock(&priv->mutex);
8163 return;
8164 }
8165
b481de9c
ZY
8166 priv->only_active_channel = 0;
8167
bb8c093b 8168 iwl4965_set_rate(priv);
b481de9c
ZY
8169
8170 mutex_unlock(&priv->mutex);
8171
8172 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
8173}
8174
bb8c093b 8175static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
8176 struct ieee80211_tx_control *control)
8177{
bb8c093b 8178 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8179 unsigned long flags;
8180
8181 mutex_lock(&priv->mutex);
8182 IWL_DEBUG_MAC80211("enter\n");
8183
bb8c093b 8184 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8185 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8186 mutex_unlock(&priv->mutex);
8187 return -EIO;
8188 }
8189
8190 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
8191 IWL_DEBUG_MAC80211("leave - not IBSS\n");
8192 mutex_unlock(&priv->mutex);
8193 return -EIO;
8194 }
8195
8196 spin_lock_irqsave(&priv->lock, flags);
8197
8198 if (priv->ibss_beacon)
8199 dev_kfree_skb(priv->ibss_beacon);
8200
8201 priv->ibss_beacon = skb;
8202
8203 priv->assoc_id = 0;
8204
8205 IWL_DEBUG_MAC80211("leave\n");
8206 spin_unlock_irqrestore(&priv->lock, flags);
8207
c8b0e6e1 8208#ifdef CONFIG_IWL4965_QOS
bb8c093b 8209 iwl4965_reset_qos(priv);
b481de9c
ZY
8210#endif
8211
8212 queue_work(priv->workqueue, &priv->post_associate.work);
8213
8214 mutex_unlock(&priv->mutex);
8215
8216 return 0;
8217}
8218
c8b0e6e1 8219#ifdef CONFIG_IWL4965_HT
b481de9c 8220
fd105e79
RR
8221static void iwl4965_ht_info_fill(struct ieee80211_conf *conf,
8222 struct iwl4965_priv *priv)
b481de9c 8223{
fd105e79
RR
8224 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
8225 struct ieee80211_ht_info *ht_conf = &conf->ht_conf;
8226 struct ieee80211_ht_bss_info *ht_bss_conf = &conf->ht_bss_conf;
b481de9c
ZY
8227
 8228 IWL_DEBUG_MAC80211("enter\n");
8229
fd105e79
RR
8230 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) {
8231 iwl_conf->is_ht = 0;
8232 return;
b481de9c
ZY
8233 }
8234
fd105e79
RR
8235 iwl_conf->is_ht = 1;
8236 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8237
8238 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
8239 iwl_conf->sgf |= 0x1;
8240 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
8241 iwl_conf->sgf |= 0x2;
8242
8243 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
8244 iwl_conf->max_amsdu_size =
8245 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
8246 iwl_conf->supported_chan_width =
8247 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
8248 iwl_conf->tx_mimo_ps_mode =
8249 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8250 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
8251
8252 iwl_conf->control_channel = ht_bss_conf->primary_channel;
8253 iwl_conf->extension_chan_offset =
8254 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
8255 iwl_conf->tx_chan_width =
8256 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
8257 iwl_conf->ht_protection =
8258 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
8259 iwl_conf->non_GF_STA_present =
8260 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
8261
8262 IWL_DEBUG_MAC80211("control channel %d\n",
8263 iwl_conf->control_channel);
b481de9c 8264 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
8265}
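
/*
 * Note on the ">> 2" above: in the HT capability info field the SM (MIMO)
 * power-save subfield occupies bits 2..3, so the value masked with
 * IEEE80211_HT_CAP_MIMO_PS is shifted down by two before being stored in
 * ps_mode and tx_mimo_ps_mode.
 */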
8266
bb8c093b 8267static int iwl4965_mac_conf_ht(struct ieee80211_hw *hw,
fd105e79 8268 struct ieee80211_conf *conf)
b481de9c 8269{
bb8c093b 8270 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8271
 8272 IWL_DEBUG_MAC80211("enter\n");
8273
fd105e79 8274 iwl4965_ht_info_fill(conf, priv);
b481de9c
ZY
8275 iwl4965_set_rxon_chain(priv);
8276
8277 if (priv && priv->assoc_id &&
8278 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
8279 unsigned long flags;
8280
8281 spin_lock_irqsave(&priv->lock, flags);
8282 if (priv->beacon_int)
8283 queue_work(priv->workqueue, &priv->post_associate.work);
8284 else
8285 priv->call_post_assoc_from_beacon = 1;
8286 spin_unlock_irqrestore(&priv->lock, flags);
8287 }
8288
fd105e79
RR
 8289 IWL_DEBUG_MAC80211("leave\n");
8290 return 0;
b481de9c
ZY
8291}
8292
bb8c093b 8293static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8fb88032
RR
8294 struct ieee80211_ht_cap *ht_cap,
8295 u8 use_current_config)
b481de9c 8296{
8fb88032
RR
8297 struct ieee80211_conf *conf = &hw->conf;
8298 struct ieee80211_hw_mode *mode = conf->mode;
b481de9c 8299
8fb88032
RR
8300 if (use_current_config) {
8301 ht_cap->cap_info = cpu_to_le16(conf->ht_conf.cap);
8302 memcpy(ht_cap->supp_mcs_set,
8303 conf->ht_conf.supp_mcs_set, 16);
8304 } else {
8305 ht_cap->cap_info = cpu_to_le16(mode->ht_info.cap);
8306 memcpy(ht_cap->supp_mcs_set,
8307 mode->ht_info.supp_mcs_set, 16);
8308 }
8309 ht_cap->ampdu_params_info =
8310 (mode->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) |
8311 ((mode->ht_info.ampdu_density << 2) &
8312 IEEE80211_HT_CAP_AMPDU_DENSITY);
b481de9c
ZY
8313}
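
/*
 * Note: ampdu_params_info built above follows the A-MPDU parameters field
 * of the 802.11n HT capabilities element: the maximum A-MPDU length
 * exponent ("ampdu_factor") sits in the low bits and the minimum MPDU
 * start spacing ("ampdu_density") is shifted into bits 2..4.
 */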
8314
c8b0e6e1 8315#endif /*CONFIG_IWL4965_HT*/
b481de9c
ZY
8316
8317/*****************************************************************************
8318 *
8319 * sysfs attributes
8320 *
8321 *****************************************************************************/
8322
c8b0e6e1 8323#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
8324
8325/*
8326 * The following adds a new attribute to the sysfs representation
8327 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
8328 * used for controlling the debug level.
8329 *
8330 * See the level definitions in iwl for details.
8331 */
8332
8333static ssize_t show_debug_level(struct device_driver *d, char *buf)
8334{
bb8c093b 8335 return sprintf(buf, "0x%08X\n", iwl4965_debug_level);
b481de9c
ZY
8336}
8337static ssize_t store_debug_level(struct device_driver *d,
8338 const char *buf, size_t count)
8339{
8340 char *p = (char *)buf;
8341 u32 val;
8342
8343 val = simple_strtoul(p, &p, 0);
8344 if (p == buf)
8345 printk(KERN_INFO DRV_NAME
8346 ": %s is not in hex or decimal form.\n", buf);
8347 else
bb8c093b 8348 iwl4965_debug_level = val;
b481de9c
ZY
8349
8350 return strnlen(buf, count);
8351}
8352
8353static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
8354 show_debug_level, store_debug_level);
8355
c8b0e6e1 8356#endif /* CONFIG_IWL4965_DEBUG */
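
/*
 * Illustrative sketch only, never compiled (kept under "#if 0"): a minimal
 * user-space snippet showing how the debug_level attribute above might be
 * driven.  The sysfs path and the mask value are assumptions made for this
 * example; check the actual driver directory on a running system.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/drivers/iwl4965/debug_level", "w");

	if (!f)
		return 1;
	/* Arbitrary example mask; store_debug_level() parses hex or decimal. */
	fputs("0x1\n", f);
	fclose(f);
	return 0;
}
#endif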
b481de9c
ZY
8357
8358static ssize_t show_rf_kill(struct device *d,
8359 struct device_attribute *attr, char *buf)
8360{
8361 /*
8362 * 0 - RF kill not enabled
8363 * 1 - SW based RF kill active (sysfs)
8364 * 2 - HW based RF kill active
8365 * 3 - Both HW and SW based RF kill active
8366 */
bb8c093b 8367 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8368 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
8369 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
8370
8371 return sprintf(buf, "%i\n", val);
8372}
8373
8374static ssize_t store_rf_kill(struct device *d,
8375 struct device_attribute *attr,
8376 const char *buf, size_t count)
8377{
bb8c093b 8378 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8379
8380 mutex_lock(&priv->mutex);
bb8c093b 8381 iwl4965_radio_kill_sw(priv, buf[0] == '1');
b481de9c
ZY
8382 mutex_unlock(&priv->mutex);
8383
8384 return count;
8385}
8386
8387static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
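
/*
 * Note: store_rf_kill() above looks only at the first character written;
 * "1" asserts the software RF kill, anything else releases it.  Reading
 * the attribute returns the bitmask documented in show_rf_kill()
 * (bit 0 = SW kill, bit 1 = HW kill).
 */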
8388
8389static ssize_t show_temperature(struct device *d,
8390 struct device_attribute *attr, char *buf)
8391{
bb8c093b 8392 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c 8393
bb8c093b 8394 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8395 return -EAGAIN;
8396
bb8c093b 8397 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv));
b481de9c
ZY
8398}
8399
8400static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
8401
8402static ssize_t show_rs_window(struct device *d,
8403 struct device_attribute *attr,
8404 char *buf)
8405{
bb8c093b
CH
8406 struct iwl4965_priv *priv = d->driver_data;
8407 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
b481de9c
ZY
8408}
8409static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
8410
8411static ssize_t show_tx_power(struct device *d,
8412 struct device_attribute *attr, char *buf)
8413{
bb8c093b 8414 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8415 return sprintf(buf, "%d\n", priv->user_txpower_limit);
8416}
8417
8418static ssize_t store_tx_power(struct device *d,
8419 struct device_attribute *attr,
8420 const char *buf, size_t count)
8421{
bb8c093b 8422 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8423 char *p = (char *)buf;
8424 u32 val;
8425
8426 val = simple_strtoul(p, &p, 10);
8427 if (p == buf)
8428 printk(KERN_INFO DRV_NAME
8429 ": %s is not in decimal form.\n", buf);
8430 else
bb8c093b 8431 iwl4965_hw_reg_set_txpower(priv, val);
b481de9c
ZY
8432
8433 return count;
8434}
8435
8436static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
8437
8438static ssize_t show_flags(struct device *d,
8439 struct device_attribute *attr, char *buf)
8440{
bb8c093b 8441 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8442
8443 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
8444}
8445
8446static ssize_t store_flags(struct device *d,
8447 struct device_attribute *attr,
8448 const char *buf, size_t count)
8449{
bb8c093b 8450 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8451 u32 flags = simple_strtoul(buf, NULL, 0);
8452
8453 mutex_lock(&priv->mutex);
8454 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
8455 /* Cancel any currently running scans... */
bb8c093b 8456 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8457 IWL_WARNING("Could not cancel scan.\n");
8458 else {
8459 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
8460 flags);
8461 priv->staging_rxon.flags = cpu_to_le32(flags);
bb8c093b 8462 iwl4965_commit_rxon(priv);
b481de9c
ZY
8463 }
8464 }
8465 mutex_unlock(&priv->mutex);
8466
8467 return count;
8468}
8469
8470static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
8471
8472static ssize_t show_filter_flags(struct device *d,
8473 struct device_attribute *attr, char *buf)
8474{
bb8c093b 8475 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8476
8477 return sprintf(buf, "0x%04X\n",
8478 le32_to_cpu(priv->active_rxon.filter_flags));
8479}
8480
8481static ssize_t store_filter_flags(struct device *d,
8482 struct device_attribute *attr,
8483 const char *buf, size_t count)
8484{
bb8c093b 8485 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8486 u32 filter_flags = simple_strtoul(buf, NULL, 0);
8487
8488 mutex_lock(&priv->mutex);
8489 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
8490 /* Cancel any currently running scans... */
bb8c093b 8491 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8492 IWL_WARNING("Could not cancel scan.\n");
8493 else {
8494 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
8495 "0x%04X\n", filter_flags);
8496 priv->staging_rxon.filter_flags =
8497 cpu_to_le32(filter_flags);
bb8c093b 8498 iwl4965_commit_rxon(priv);
b481de9c
ZY
8499 }
8500 }
8501 mutex_unlock(&priv->mutex);
8502
8503 return count;
8504}
8505
8506static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
8507 store_filter_flags);
8508
8509static ssize_t show_tune(struct device *d,
8510 struct device_attribute *attr, char *buf)
8511{
bb8c093b 8512 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8513
8514 return sprintf(buf, "0x%04X\n",
8515 (priv->phymode << 8) |
8516 le16_to_cpu(priv->active_rxon.channel));
8517}
8518
bb8c093b 8519static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode);
b481de9c
ZY
8520
8521static ssize_t store_tune(struct device *d,
8522 struct device_attribute *attr,
8523 const char *buf, size_t count)
8524{
bb8c093b 8525 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8526 char *p = (char *)buf;
8527 u16 tune = simple_strtoul(p, &p, 0);
8528 u8 phymode = (tune >> 8) & 0xff;
8529 u16 channel = tune & 0xff;
8530
8531 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
8532
8533 mutex_lock(&priv->mutex);
8534 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
8535 (priv->phymode != phymode)) {
bb8c093b 8536 const struct iwl4965_channel_info *ch_info;
b481de9c 8537
bb8c093b 8538 ch_info = iwl4965_get_channel_info(priv, phymode, channel);
b481de9c
ZY
8539 if (!ch_info) {
8540 IWL_WARNING("Requested invalid phymode/channel "
8541 "combination: %d %d\n", phymode, channel);
8542 mutex_unlock(&priv->mutex);
8543 return -EINVAL;
8544 }
8545
8546 /* Cancel any currently running scans... */
bb8c093b 8547 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8548 IWL_WARNING("Could not cancel scan.\n");
8549 else {
8550 IWL_DEBUG_INFO("Committing phymode and "
8551 "rxon.channel = %d %d\n",
8552 phymode, channel);
8553
bb8c093b
CH
8554 iwl4965_set_rxon_channel(priv, phymode, channel);
8555 iwl4965_set_flags_for_phymode(priv, phymode);
b481de9c 8556
bb8c093b
CH
8557 iwl4965_set_rate(priv);
8558 iwl4965_commit_rxon(priv);
b481de9c
ZY
8559 }
8560 }
8561 mutex_unlock(&priv->mutex);
8562
8563 return count;
8564}
8565
8566static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
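
/*
 * Illustrative sketch only, never compiled (kept under "#if 0"): the value
 * accepted by store_tune() above is simply the phymode in the upper byte
 * and the channel number in the lower byte.  The helper name below is
 * hypothetical.
 */
#if 0
static u16 example_pack_tune(u8 phymode, u8 channel)
{
	/* e.g. phymode MODE_IEEE80211G with channel 6 */
	return ((u16)phymode << 8) | channel;
}
#endif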
8567
c8b0e6e1 8568#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
8569
8570static ssize_t show_measurement(struct device *d,
8571 struct device_attribute *attr, char *buf)
8572{
bb8c093b
CH
8573 struct iwl4965_priv *priv = dev_get_drvdata(d);
8574 struct iwl4965_spectrum_notification measure_report;
b481de9c
ZY
8575 u32 size = sizeof(measure_report), len = 0, ofs = 0;
 8576 u8 *data = (u8 *)&measure_report;
8577 unsigned long flags;
8578
8579 spin_lock_irqsave(&priv->lock, flags);
8580 if (!(priv->measurement_status & MEASUREMENT_READY)) {
8581 spin_unlock_irqrestore(&priv->lock, flags);
8582 return 0;
8583 }
8584 memcpy(&measure_report, &priv->measure_report, size);
8585 priv->measurement_status = 0;
8586 spin_unlock_irqrestore(&priv->lock, flags);
8587
8588 while (size && (PAGE_SIZE - len)) {
8589 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8590 PAGE_SIZE - len, 1);
8591 len = strlen(buf);
8592 if (PAGE_SIZE - len)
8593 buf[len++] = '\n';
8594
8595 ofs += 16;
8596 size -= min(size, 16U);
8597 }
8598
8599 return len;
8600}
8601
8602static ssize_t store_measurement(struct device *d,
8603 struct device_attribute *attr,
8604 const char *buf, size_t count)
8605{
bb8c093b 8606 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8607 struct ieee80211_measurement_params params = {
8608 .channel = le16_to_cpu(priv->active_rxon.channel),
8609 .start_time = cpu_to_le64(priv->last_tsf),
8610 .duration = cpu_to_le16(1),
8611 };
8612 u8 type = IWL_MEASURE_BASIC;
8613 u8 buffer[32];
8614 u8 channel;
8615
8616 if (count) {
8617 char *p = buffer;
 8618 strncpy(buffer, buf, min(sizeof(buffer) - 1, count));
 buffer[min(sizeof(buffer) - 1, count)] = '\0'; /* keep it NUL-terminated */
8619 channel = simple_strtoul(p, NULL, 0);
8620 if (channel)
8621 params.channel = channel;
8622
8623 p = buffer;
8624 while (*p && *p != ' ')
8625 p++;
8626 if (*p)
8627 type = simple_strtoul(p + 1, NULL, 0);
8628 }
8629
8630 IWL_DEBUG_INFO("Invoking measurement of type %d on "
8631 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 8632 iwl4965_get_measurement(priv, &params, type);
b481de9c
ZY
8633
8634 return count;
8635}
8636
8637static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
8638 show_measurement, store_measurement);
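
/*
 * Note: store_measurement() above accepts input of the form
 * "<channel> <type>"; both fields are optional and default to the current
 * RXON channel and IWL_MEASURE_BASIC respectively.
 */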
c8b0e6e1 8639#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */
b481de9c
ZY
8640
8641static ssize_t store_retry_rate(struct device *d,
8642 struct device_attribute *attr,
8643 const char *buf, size_t count)
8644{
bb8c093b 8645 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8646
8647 priv->retry_rate = simple_strtoul(buf, NULL, 0);
8648 if (priv->retry_rate <= 0)
8649 priv->retry_rate = 1;
8650
8651 return count;
8652}
8653
8654static ssize_t show_retry_rate(struct device *d,
8655 struct device_attribute *attr, char *buf)
8656{
bb8c093b 8657 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
 8658 return sprintf(buf, "%d\n", priv->retry_rate);
8659}
8660
8661static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
8662 store_retry_rate);
8663
8664static ssize_t store_power_level(struct device *d,
8665 struct device_attribute *attr,
8666 const char *buf, size_t count)
8667{
bb8c093b 8668 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8669 int rc;
8670 int mode;
8671
8672 mode = simple_strtoul(buf, NULL, 0);
8673 mutex_lock(&priv->mutex);
8674
bb8c093b 8675 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
8676 rc = -EAGAIN;
8677 goto out;
8678 }
8679
8680 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
8681 mode = IWL_POWER_AC;
8682 else
8683 mode |= IWL_POWER_ENABLED;
8684
8685 if (mode != priv->power_mode) {
bb8c093b 8686 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
b481de9c
ZY
8687 if (rc) {
8688 IWL_DEBUG_MAC80211("failed setting power mode.\n");
8689 goto out;
8690 }
8691 priv->power_mode = mode;
8692 }
8693
8694 rc = count;
8695
8696 out:
8697 mutex_unlock(&priv->mutex);
8698 return rc;
8699}
8700
8701#define MAX_WX_STRING 80
8702
 8703/* Values are in microseconds */
8704static const s32 timeout_duration[] = {
8705 350000,
8706 250000,
8707 75000,
8708 37000,
8709 25000,
8710};
8711static const s32 period_duration[] = {
8712 400000,
8713 700000,
8714 1000000,
8715 1000000,
8716 1000000
8717};
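
/* Both tables above are indexed by (power level - 1); see the default
 * case in show_power_level() below. */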
8718
8719static ssize_t show_power_level(struct device *d,
8720 struct device_attribute *attr, char *buf)
8721{
bb8c093b 8722 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8723 int level = IWL_POWER_LEVEL(priv->power_mode);
8724 char *p = buf;
8725
8726 p += sprintf(p, "%d ", level);
8727 switch (level) {
8728 case IWL_POWER_MODE_CAM:
8729 case IWL_POWER_AC:
8730 p += sprintf(p, "(AC)");
8731 break;
8732 case IWL_POWER_BATTERY:
8733 p += sprintf(p, "(BATTERY)");
8734 break;
8735 default:
8736 p += sprintf(p,
8737 "(Timeout %dms, Period %dms)",
8738 timeout_duration[level - 1] / 1000,
8739 period_duration[level - 1] / 1000);
8740 }
8741
8742 if (!(priv->power_mode & IWL_POWER_ENABLED))
8743 p += sprintf(p, " OFF\n");
8744 else
8745 p += sprintf(p, " \n");
8746
 8747 return p - buf;
8748
8749}
8750
8751static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8752 store_power_level);
8753
8754static ssize_t show_channels(struct device *d,
8755 struct device_attribute *attr, char *buf)
8756{
bb8c093b 8757 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8758 int len = 0, i;
8759 struct ieee80211_channel *channels = NULL;
8760 const struct ieee80211_hw_mode *hw_mode = NULL;
8761 int count = 0;
8762
bb8c093b 8763 if (!iwl4965_is_ready(priv))
b481de9c
ZY
8764 return -EAGAIN;
8765
bb8c093b 8766 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211G);
b481de9c 8767 if (!hw_mode)
bb8c093b 8768 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211B);
b481de9c
ZY
8769 if (hw_mode) {
8770 channels = hw_mode->channels;
8771 count = hw_mode->num_channels;
8772 }
8773
8774 len +=
8775 sprintf(&buf[len],
8776 "Displaying %d channels in 2.4GHz band "
8777 "(802.11bg):\n", count);
8778
8779 for (i = 0; i < count; i++)
8780 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8781 channels[i].chan,
8782 channels[i].power_level,
8783 channels[i].
8784 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8785 " (IEEE 802.11h required)" : "",
8786 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8787 || (channels[i].
8788 flag &
8789 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8790 ", IBSS",
8791 channels[i].
8792 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8793 "active/passive" : "passive only");
8794
bb8c093b 8795 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211A);
b481de9c
ZY
8796 if (hw_mode) {
8797 channels = hw_mode->channels;
8798 count = hw_mode->num_channels;
8799 } else {
8800 channels = NULL;
8801 count = 0;
8802 }
8803
8804 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8805 "(802.11a):\n", count);
8806
8807 for (i = 0; i < count; i++)
8808 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8809 channels[i].chan,
8810 channels[i].power_level,
8811 channels[i].
8812 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8813 " (IEEE 802.11h required)" : "",
8814 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8815 || (channels[i].
8816 flag &
8817 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8818 ", IBSS",
8819 channels[i].
8820 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8821 "active/passive" : "passive only");
8822
8823 return len;
8824}
8825
8826static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8827
8828static ssize_t show_statistics(struct device *d,
8829 struct device_attribute *attr, char *buf)
8830{
bb8c093b
CH
8831 struct iwl4965_priv *priv = dev_get_drvdata(d);
8832 u32 size = sizeof(struct iwl4965_notif_statistics);
b481de9c
ZY
8833 u32 len = 0, ofs = 0;
 8834 u8 *data = (u8 *)&priv->statistics;
8835 int rc = 0;
8836
bb8c093b 8837 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8838 return -EAGAIN;
8839
8840 mutex_lock(&priv->mutex);
bb8c093b 8841 rc = iwl4965_send_statistics_request(priv);
b481de9c
ZY
8842 mutex_unlock(&priv->mutex);
8843
8844 if (rc) {
8845 len = sprintf(buf,
8846 "Error sending statistics request: 0x%08X\n", rc);
8847 return len;
8848 }
8849
8850 while (size && (PAGE_SIZE - len)) {
8851 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8852 PAGE_SIZE - len, 1);
8853 len = strlen(buf);
8854 if (PAGE_SIZE - len)
8855 buf[len++] = '\n';
8856
8857 ofs += 16;
8858 size -= min(size, 16U);
8859 }
8860
8861 return len;
8862}
8863
8864static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8865
8866static ssize_t show_antenna(struct device *d,
8867 struct device_attribute *attr, char *buf)
8868{
bb8c093b 8869 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c 8870
bb8c093b 8871 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8872 return -EAGAIN;
8873
8874 return sprintf(buf, "%d\n", priv->antenna);
8875}
8876
8877static ssize_t store_antenna(struct device *d,
8878 struct device_attribute *attr,
8879 const char *buf, size_t count)
8880{
8881 int ant;
bb8c093b 8882 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8883
8884 if (count == 0)
8885 return 0;
8886
8887 if (sscanf(buf, "%1i", &ant) != 1) {
8888 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8889 return count;
8890 }
8891
8892 if ((ant >= 0) && (ant <= 2)) {
8893 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 8894 priv->antenna = (enum iwl4965_antenna)ant;
b481de9c
ZY
8895 } else
8896 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8897
8898
8899 return count;
8900}
8901
8902static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
8903
8904static ssize_t show_status(struct device *d,
8905 struct device_attribute *attr, char *buf)
8906{
bb8c093b
CH
8907 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8908 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8909 return -EAGAIN;
8910 return sprintf(buf, "0x%08x\n", (int)priv->status);
8911}
8912
8913static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
8914
8915static ssize_t dump_error_log(struct device *d,
8916 struct device_attribute *attr,
8917 const char *buf, size_t count)
8918{
8919 char *p = (char *)buf;
8920
8921 if (p[0] == '1')
bb8c093b 8922 iwl4965_dump_nic_error_log((struct iwl4965_priv *)d->driver_data);
b481de9c
ZY
8923
8924 return strnlen(buf, count);
8925}
8926
8927static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
8928
8929static ssize_t dump_event_log(struct device *d,
8930 struct device_attribute *attr,
8931 const char *buf, size_t count)
8932{
8933 char *p = (char *)buf;
8934
8935 if (p[0] == '1')
bb8c093b 8936 iwl4965_dump_nic_event_log((struct iwl4965_priv *)d->driver_data);
b481de9c
ZY
8937
8938 return strnlen(buf, count);
8939}
8940
8941static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
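
/*
 * Note: writing "1" to dump_errors or dump_events above triggers a dump of
 * the NIC error log or event log respectively (see the
 * iwl4965_dump_nic_*_log() helpers); any other input is ignored.
 */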
8942
8943/*****************************************************************************
8944 *
8945 * driver setup and teardown
8946 *
8947 *****************************************************************************/
8948
bb8c093b 8949static void iwl4965_setup_deferred_work(struct iwl4965_priv *priv)
b481de9c
ZY
8950{
8951 priv->workqueue = create_workqueue(DRV_NAME);
8952
8953 init_waitqueue_head(&priv->wait_command_queue);
8954
bb8c093b
CH
8955 INIT_WORK(&priv->up, iwl4965_bg_up);
8956 INIT_WORK(&priv->restart, iwl4965_bg_restart);
8957 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
8958 INIT_WORK(&priv->scan_completed, iwl4965_bg_scan_completed);
8959 INIT_WORK(&priv->request_scan, iwl4965_bg_request_scan);
8960 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
8961 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
8962 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
8963 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
8964 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
8965 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
8966 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
8967
8968 iwl4965_hw_setup_deferred_work(priv);
b481de9c
ZY
8969
8970 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 8971 iwl4965_irq_tasklet, (unsigned long)priv);
b481de9c
ZY
8972}
8973
bb8c093b 8974static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv)
b481de9c 8975{
bb8c093b 8976 iwl4965_hw_cancel_deferred_work(priv);
b481de9c 8977
3ae6a054 8978 cancel_delayed_work_sync(&priv->init_alive_start);
b481de9c
ZY
8979 cancel_delayed_work(&priv->scan_check);
8980 cancel_delayed_work(&priv->alive_start);
8981 cancel_delayed_work(&priv->post_associate);
8982 cancel_work_sync(&priv->beacon_update);
8983}
8984
bb8c093b 8985static struct attribute *iwl4965_sysfs_entries[] = {
b481de9c
ZY
8986 &dev_attr_antenna.attr,
8987 &dev_attr_channels.attr,
8988 &dev_attr_dump_errors.attr,
8989 &dev_attr_dump_events.attr,
8990 &dev_attr_flags.attr,
8991 &dev_attr_filter_flags.attr,
c8b0e6e1 8992#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
8993 &dev_attr_measurement.attr,
8994#endif
8995 &dev_attr_power_level.attr,
8996 &dev_attr_retry_rate.attr,
8997 &dev_attr_rf_kill.attr,
8998 &dev_attr_rs_window.attr,
8999 &dev_attr_statistics.attr,
9000 &dev_attr_status.attr,
9001 &dev_attr_temperature.attr,
9002 &dev_attr_tune.attr,
9003 &dev_attr_tx_power.attr,
9004
9005 NULL
9006};
9007
bb8c093b 9008static struct attribute_group iwl4965_attribute_group = {
b481de9c 9009 .name = NULL, /* put in device directory */
bb8c093b 9010 .attrs = iwl4965_sysfs_entries,
b481de9c
ZY
9011};
9012
bb8c093b
CH
9013static struct ieee80211_ops iwl4965_hw_ops = {
9014 .tx = iwl4965_mac_tx,
9015 .start = iwl4965_mac_start,
9016 .stop = iwl4965_mac_stop,
9017 .add_interface = iwl4965_mac_add_interface,
9018 .remove_interface = iwl4965_mac_remove_interface,
9019 .config = iwl4965_mac_config,
9020 .config_interface = iwl4965_mac_config_interface,
9021 .configure_filter = iwl4965_configure_filter,
9022 .set_key = iwl4965_mac_set_key,
9023 .get_stats = iwl4965_mac_get_stats,
9024 .get_tx_stats = iwl4965_mac_get_tx_stats,
9025 .conf_tx = iwl4965_mac_conf_tx,
9026 .get_tsf = iwl4965_mac_get_tsf,
9027 .reset_tsf = iwl4965_mac_reset_tsf,
9028 .beacon_update = iwl4965_mac_beacon_update,
471b3efd 9029 .bss_info_changed = iwl4965_bss_info_changed,
c8b0e6e1 9030#ifdef CONFIG_IWL4965_HT
bb8c093b 9031 .conf_ht = iwl4965_mac_conf_ht,
9ab46173 9032 .ampdu_action = iwl4965_mac_ampdu_action,
c8b0e6e1 9033#ifdef CONFIG_IWL4965_HT_AGG
bb8c093b
CH
9034 .ht_tx_agg_start = iwl4965_mac_ht_tx_agg_start,
9035 .ht_tx_agg_stop = iwl4965_mac_ht_tx_agg_stop,
c8b0e6e1
CH
9036#endif /* CONFIG_IWL4965_HT_AGG */
9037#endif /* CONFIG_IWL4965_HT */
bb8c093b 9038 .hw_scan = iwl4965_mac_hw_scan
b481de9c
ZY
9039};
9040
bb8c093b 9041static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
b481de9c
ZY
9042{
9043 int err = 0;
bb8c093b 9044 struct iwl4965_priv *priv;
b481de9c
ZY
9045 struct ieee80211_hw *hw;
9046 int i;
5a66926a 9047 DECLARE_MAC_BUF(mac);
b481de9c 9048
6440adb5
BC
9049 /* Disabling hardware scan means that mac80211 will perform scans
 9050 * "the hard way", rather than using the device's scan. */
bb8c093b 9051 if (iwl4965_param_disable_hw_scan) {
b481de9c 9052 IWL_DEBUG_INFO("Disabling hw_scan\n");
bb8c093b 9053 iwl4965_hw_ops.hw_scan = NULL;
b481de9c
ZY
9054 }
9055
bb8c093b
CH
9056 if ((iwl4965_param_queues_num > IWL_MAX_NUM_QUEUES) ||
9057 (iwl4965_param_queues_num < IWL_MIN_NUM_QUEUES)) {
b481de9c
ZY
9058 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
9059 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
9060 err = -EINVAL;
9061 goto out;
9062 }
9063
9064 /* mac80211 allocates memory for this device instance, including
9065 * space for this driver's private structure */
bb8c093b 9066 hw = ieee80211_alloc_hw(sizeof(struct iwl4965_priv), &iwl4965_hw_ops);
b481de9c
ZY
9067 if (hw == NULL) {
9068 IWL_ERROR("Can not allocate network device\n");
9069 err = -ENOMEM;
9070 goto out;
9071 }
9072 SET_IEEE80211_DEV(hw, &pdev->dev);
9073
f51359a8
JB
9074 hw->rate_control_algorithm = "iwl-4965-rs";
9075
b481de9c
ZY
9076 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
9077 priv = hw->priv;
9078 priv->hw = hw;
9079
9080 priv->pci_dev = pdev;
bb8c093b 9081 priv->antenna = (enum iwl4965_antenna)iwl4965_param_antenna;
c8b0e6e1 9082#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9083 iwl4965_debug_level = iwl4965_param_debug;
b481de9c
ZY
9084 atomic_set(&priv->restrict_refcnt, 0);
9085#endif
9086 priv->retry_rate = 1;
9087
9088 priv->ibss_beacon = NULL;
9089
9090 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
9091 * the range of signal quality values that we'll provide.
9092 * Negative values for level/noise indicate that we'll provide dBm.
9093 * For WE, at least, non-0 values here *enable* display of values
9094 * in app (iwconfig). */
9095 hw->max_rssi = -20; /* signal level, negative indicates dBm */
9096 hw->max_noise = -20; /* noise level, negative indicates dBm */
9097 hw->max_signal = 100; /* link quality indication (%) */
9098
9099 /* Tell mac80211 our Tx characteristics */
9100 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
9101
6440adb5 9102 /* Default value; 4 EDCA QOS priorities */
b481de9c 9103 hw->queues = 4;
c8b0e6e1
CH
9104#ifdef CONFIG_IWL4965_HT
9105#ifdef CONFIG_IWL4965_HT_AGG
6440adb5 9106 /* Enhanced value; more queues, to support 11n aggregation */
b481de9c 9107 hw->queues = 16;
c8b0e6e1
CH
9108#endif /* CONFIG_IWL4965_HT_AGG */
9109#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
9110
9111 spin_lock_init(&priv->lock);
9112 spin_lock_init(&priv->power_data.lock);
9113 spin_lock_init(&priv->sta_lock);
9114 spin_lock_init(&priv->hcmd_lock);
9115 spin_lock_init(&priv->lq_mngr.lock);
9116
9117 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
9118 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
9119
9120 INIT_LIST_HEAD(&priv->free_frames);
9121
9122 mutex_init(&priv->mutex);
9123 if (pci_enable_device(pdev)) {
9124 err = -ENODEV;
9125 goto out_ieee80211_free_hw;
9126 }
9127
9128 pci_set_master(pdev);
9129
6440adb5 9130 /* Clear the driver's (not device's) station table */
bb8c093b 9131 iwl4965_clear_stations_table(priv);
b481de9c
ZY
9132
9133 priv->data_retry_limit = -1;
9134 priv->ieee_channels = NULL;
9135 priv->ieee_rates = NULL;
9136 priv->phymode = -1;
9137
9138 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9139 if (!err)
9140 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
9141 if (err) {
9142 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
9143 goto out_pci_disable_device;
9144 }
9145
9146 pci_set_drvdata(pdev, priv);
9147 err = pci_request_regions(pdev, DRV_NAME);
9148 if (err)
9149 goto out_pci_disable_device;
6440adb5 9150
b481de9c
ZY
9151 /* We disable the RETRY_TIMEOUT register (0x41) to keep
9152 * PCI Tx retries from interfering with C3 CPU state */
9153 pci_write_config_byte(pdev, 0x41, 0x00);
6440adb5 9154
b481de9c
ZY
9155 priv->hw_base = pci_iomap(pdev, 0, 0);
9156 if (!priv->hw_base) {
9157 err = -ENODEV;
9158 goto out_pci_release_regions;
9159 }
9160
9161 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
9162 (unsigned long long) pci_resource_len(pdev, 0));
9163 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
9164
9165 /* Initialize module parameter values here */
9166
6440adb5 9167 /* Disable radio (SW RF KILL) via parameter when loading driver */
bb8c093b 9168 if (iwl4965_param_disable) {
b481de9c
ZY
9169 set_bit(STATUS_RF_KILL_SW, &priv->status);
9170 IWL_DEBUG_INFO("Radio disabled.\n");
9171 }
9172
9173 priv->iw_mode = IEEE80211_IF_TYPE_STA;
9174
9175 priv->ps_mode = 0;
9176 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
b481de9c
ZY
9177 priv->valid_antenna = 0x7; /* assume all 3 connected */
9178 priv->ps_mode = IWL_MIMO_PS_NONE;
b481de9c 9179
6440adb5 9180 /* Choose which receivers/antennas to use */
b481de9c
ZY
9181 iwl4965_set_rxon_chain(priv);
9182
9183 printk(KERN_INFO DRV_NAME
9184 ": Detected Intel Wireless WiFi Link 4965AGN\n");
9185
9186 /* Device-specific setup */
bb8c093b 9187 if (iwl4965_hw_set_hw_setting(priv)) {
b481de9c 9188 IWL_ERROR("failed to set hw settings\n");
b481de9c
ZY
9189 goto out_iounmap;
9190 }
9191
c8b0e6e1 9192#ifdef CONFIG_IWL4965_QOS
bb8c093b 9193 if (iwl4965_param_qos_enable)
b481de9c
ZY
9194 priv->qos_data.qos_enable = 1;
9195
bb8c093b 9196 iwl4965_reset_qos(priv);
b481de9c
ZY
9197
9198 priv->qos_data.qos_active = 0;
9199 priv->qos_data.qos_cap.val = 0;
c8b0e6e1 9200#endif /* CONFIG_IWL4965_QOS */
b481de9c 9201
bb8c093b
CH
9202 iwl4965_set_rxon_channel(priv, MODE_IEEE80211G, 6);
9203 iwl4965_setup_deferred_work(priv);
9204 iwl4965_setup_rx_handlers(priv);
b481de9c
ZY
9205
9206 priv->rates_mask = IWL_RATES_MASK;
9207 /* If power management is turned on, default to AC mode */
9208 priv->power_mode = IWL_POWER_AC;
9209 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
9210
bb8c093b 9211 iwl4965_disable_interrupts(priv);
49df2b33 9212
bb8c093b 9213 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c
ZY
9214 if (err) {
9215 IWL_ERROR("failed to create sysfs device attributes\n");
b481de9c
ZY
9216 goto out_release_irq;
9217 }
9218
5a66926a
ZY
9219 /* nic init */
9220 iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
9221 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
9222
9223 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
9224 err = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
9225 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
9226 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
9227 if (err < 0) {
9228 IWL_DEBUG_INFO("Failed to init the card\n");
9229 goto out_remove_sysfs;
9230 }
9231 /* Read the EEPROM */
9232 err = iwl4965_eeprom_init(priv);
b481de9c 9233 if (err) {
5a66926a
ZY
9234 IWL_ERROR("Unable to init EEPROM\n");
9235 goto out_remove_sysfs;
b481de9c 9236 }
5a66926a
ZY
9237 /* MAC Address location in EEPROM same for 3945/4965 */
9238 get_eeprom_mac(priv, priv->mac_addr);
9239 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
9240 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
b481de9c 9241
5a66926a
ZY
9242 iwl4965_rate_control_register(priv->hw);
9243 err = ieee80211_register_hw(priv->hw);
9244 if (err) {
9245 IWL_ERROR("Failed to register network device (error %d)\n", err);
9246 goto out_remove_sysfs;
9247 }
b481de9c 9248
5a66926a
ZY
9249 priv->hw->conf.beacon_int = 100;
9250 priv->mac80211_registered = 1;
9251 pci_save_state(pdev);
9252 pci_disable_device(pdev);
b481de9c
ZY
9253
9254 return 0;
9255
5a66926a 9256 out_remove_sysfs:
bb8c093b 9257 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c
ZY
9258
9259 out_release_irq:
b481de9c
ZY
9260 destroy_workqueue(priv->workqueue);
9261 priv->workqueue = NULL;
bb8c093b 9262 iwl4965_unset_hw_setting(priv);
b481de9c
ZY
9263
9264 out_iounmap:
9265 pci_iounmap(pdev, priv->hw_base);
9266 out_pci_release_regions:
9267 pci_release_regions(pdev);
9268 out_pci_disable_device:
9269 pci_disable_device(pdev);
9270 pci_set_drvdata(pdev, NULL);
9271 out_ieee80211_free_hw:
9272 ieee80211_free_hw(priv->hw);
9273 out:
9274 return err;
9275}
9276
bb8c093b 9277static void iwl4965_pci_remove(struct pci_dev *pdev)
b481de9c 9278{
bb8c093b 9279 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c
ZY
9280 struct list_head *p, *q;
9281 int i;
9282
9283 if (!priv)
9284 return;
9285
9286 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
9287
b481de9c 9288 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 9289
bb8c093b 9290 iwl4965_down(priv);
b481de9c
ZY
9291
9292 /* Free MAC hash list for ADHOC */
9293 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
9294 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
9295 list_del(p);
bb8c093b 9296 kfree(list_entry(p, struct iwl4965_ibss_seq, list));
b481de9c
ZY
9297 }
9298 }
9299
bb8c093b 9300 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c 9301
bb8c093b 9302 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
9303
9304 if (priv->rxq.bd)
bb8c093b
CH
9305 iwl4965_rx_queue_free(priv, &priv->rxq);
9306 iwl4965_hw_txq_ctx_free(priv);
b481de9c 9307
bb8c093b
CH
9308 iwl4965_unset_hw_setting(priv);
9309 iwl4965_clear_stations_table(priv);
b481de9c
ZY
9310
9311 if (priv->mac80211_registered) {
9312 ieee80211_unregister_hw(priv->hw);
bb8c093b 9313 iwl4965_rate_control_unregister(priv->hw);
b481de9c
ZY
9314 }
9315
948c171c
MA
9316 /*netif_stop_queue(dev); */
9317 flush_workqueue(priv->workqueue);
9318
bb8c093b 9319 /* ieee80211_unregister_hw calls iwl4965_mac_stop, which flushes
b481de9c
ZY
9320 * priv->workqueue... so we can't take down the workqueue
9321 * until now... */
9322 destroy_workqueue(priv->workqueue);
9323 priv->workqueue = NULL;
9324
b481de9c
ZY
9325 pci_iounmap(pdev, priv->hw_base);
9326 pci_release_regions(pdev);
9327 pci_disable_device(pdev);
9328 pci_set_drvdata(pdev, NULL);
9329
9330 kfree(priv->channel_info);
9331
9332 kfree(priv->ieee_channels);
9333 kfree(priv->ieee_rates);
9334
9335 if (priv->ibss_beacon)
9336 dev_kfree_skb(priv->ibss_beacon);
9337
9338 ieee80211_free_hw(priv->hw);
9339}
9340
9341#ifdef CONFIG_PM
9342
bb8c093b 9343static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 9344{
bb8c093b 9345 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9346
e655b9f0
ZY
9347 if (priv->is_open) {
9348 set_bit(STATUS_IN_SUSPEND, &priv->status);
9349 iwl4965_mac_stop(priv->hw);
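/* iwl4965_mac_stop() clears is_open; set it back so that resume
 * knows the interface has to be brought up again. */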
9350 priv->is_open = 1;
9351 }
b481de9c 9352
b481de9c
ZY
9353 pci_set_power_state(pdev, PCI_D3hot);
9354
b481de9c
ZY
9355 return 0;
9356}
9357
bb8c093b 9358static int iwl4965_pci_resume(struct pci_dev *pdev)
b481de9c 9359{
bb8c093b 9360 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9361
b481de9c 9362 pci_set_power_state(pdev, PCI_D0);
b481de9c 9363
e655b9f0
ZY
9364 if (priv->is_open)
9365 iwl4965_mac_start(priv->hw);
b481de9c 9366
e655b9f0 9367 clear_bit(STATUS_IN_SUSPEND, &priv->status);
b481de9c
ZY
9368 return 0;
9369}
9370
9371#endif /* CONFIG_PM */
9372
9373/*****************************************************************************
9374 *
9375 * driver and module entry point
9376 *
9377 *****************************************************************************/
9378
bb8c093b 9379static struct pci_driver iwl4965_driver = {
b481de9c 9380 .name = DRV_NAME,
bb8c093b
CH
9381 .id_table = iwl4965_hw_card_ids,
9382 .probe = iwl4965_pci_probe,
9383 .remove = __devexit_p(iwl4965_pci_remove),
b481de9c 9384#ifdef CONFIG_PM
bb8c093b
CH
9385 .suspend = iwl4965_pci_suspend,
9386 .resume = iwl4965_pci_resume,
b481de9c
ZY
9387#endif
9388};
9389
bb8c093b 9390static int __init iwl4965_init(void)
b481de9c
ZY
9391{
9392
9393 int ret;
9394 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
9395 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
bb8c093b 9396 ret = pci_register_driver(&iwl4965_driver);
b481de9c
ZY
9397 if (ret) {
9398 IWL_ERROR("Unable to initialize PCI module\n");
9399 return ret;
9400 }
c8b0e6e1 9401#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9402 ret = driver_create_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c
ZY
9403 if (ret) {
9404 IWL_ERROR("Unable to create driver sysfs file\n");
bb8c093b 9405 pci_unregister_driver(&iwl4965_driver);
b481de9c
ZY
9406 return ret;
9407 }
9408#endif
9409
9410 return ret;
9411}
9412
bb8c093b 9413static void __exit iwl4965_exit(void)
b481de9c 9414{
c8b0e6e1 9415#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9416 driver_remove_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c 9417#endif
bb8c093b 9418 pci_unregister_driver(&iwl4965_driver);
b481de9c
ZY
9419}
9420
bb8c093b 9421module_param_named(antenna, iwl4965_param_antenna, int, 0444);
b481de9c 9422MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
bb8c093b 9423module_param_named(disable, iwl4965_param_disable, int, 0444);
b481de9c 9424MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
bb8c093b 9425module_param_named(hwcrypto, iwl4965_param_hwcrypto, int, 0444);
b481de9c
ZY
9426MODULE_PARM_DESC(hwcrypto,
 9427 "use hardware crypto engine (default 0 [software])");
bb8c093b 9428module_param_named(debug, iwl4965_param_debug, int, 0444);
b481de9c 9429MODULE_PARM_DESC(debug, "debug output mask");
bb8c093b 9430module_param_named(disable_hw_scan, iwl4965_param_disable_hw_scan, int, 0444);
b481de9c
ZY
9431MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
9432
bb8c093b 9433module_param_named(queues_num, iwl4965_param_queues_num, int, 0444);
b481de9c
ZY
9434MODULE_PARM_DESC(queues_num, "number of hw queues.");
9435
9436/* QoS */
bb8c093b 9437module_param_named(qos_enable, iwl4965_param_qos_enable, int, 0444);
b481de9c 9438MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
9ee1ba47
RR
9439module_param_named(amsdu_size_8K, iwl4965_param_amsdu_size_8K, int, 0444);
9440MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
b481de9c 9441
bb8c093b
CH
9442module_exit(iwl4965_exit);
9443module_init(iwl4965_init);