/******************************************************************************
 *
 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/mac80211.h>

#include <asm/div64.h>

#include "iwl-4965.h"
#include "iwl-helpers.h"

#ifdef CONFIG_IWL4965_DEBUG
u32 iwl4965_debug_level;
#endif

static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
					     struct iwl4965_tx_queue *txq);

/******************************************************************************
 *
 * module boiler plate
 *
 ******************************************************************************/

/* module parameters */
static int iwl4965_param_disable_hw_scan; /* def: 0 = use 4965's h/w scan */
static int iwl4965_param_debug;    /* def: 0 = minimal debug log messages */
static int iwl4965_param_disable;  /* def: enable radio */
static int iwl4965_param_antenna;  /* def: 0 = both antennas (use diversity) */
int iwl4965_param_hwcrypto;        /* def: using software encryption */
static int iwl4965_param_qos_enable = 1; /* def: 1 = use quality of service */
int iwl4965_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 16 Tx queues */
int iwl4965_param_amsdu_size_8K;   /* def: enable 8K amsdu size */

/*
 * module name, copyright, version, etc.
 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
 */

#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link 4965AGN driver for Linux"

#ifdef CONFIG_IWL4965_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
#define VS "s"
#else
#define VS
#endif

#define IWLWIFI_VERSION "1.2.23k" VD VS
#define DRV_COPYRIGHT	"Copyright(c) 2003-2007 Intel Corporation"
#define DRV_VERSION     IWLWIFI_VERSION

/* Change firmware file name, using "-" and incrementing number,
 * *only* when uCode interface or architecture changes so that it
 * is not compatible with earlier drivers.
 * This number will also appear in << 8 position of 1st dword of uCode file */
#define IWL4965_UCODE_API "-1"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");

__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
{
	u16 fc = le16_to_cpu(hdr->frame_control);
	int hdr_len = ieee80211_get_hdrlen(fc);

	if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
		return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
	return NULL;
}

static const struct ieee80211_hw_mode *iwl4965_get_hw_mode(
		struct iwl4965_priv *priv, int mode)
{
	int i;

	for (i = 0; i < 3; i++)
		if (priv->modes[i].mode == mode)
			return &priv->modes[i];

	return NULL;
}

static int iwl4965_is_empty_essid(const char *essid, int essid_len)
{
	/* Single white space is for Linksys APs */
	if (essid_len == 1 && essid[0] == ' ')
		return 1;

	/* Otherwise, if the entire essid is 0, we assume it is hidden */
	while (essid_len) {
		essid_len--;
		if (essid[essid_len] != '\0')
			return 0;
	}

	return 1;
}

static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
{
	static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
	const char *s = essid;
	char *d = escaped;

	if (iwl4965_is_empty_essid(essid, essid_len)) {
		memcpy(escaped, "<hidden>", sizeof("<hidden>"));
		return escaped;
	}

	essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
	while (essid_len--) {
		if (*s == '\0') {
			*d++ = '\\';
			*d++ = '0';
			s++;
		} else
			*d++ = *s++;
	}
	*d = '\0';
	return escaped;
}
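/*
 * Illustrative note (not part of the original driver): the escaping above
 * only rewrites embedded NUL bytes, so, for example,
 *
 *	iwl4965_escape_essid("\0\0\0", 3)     returns "<hidden>"
 *	iwl4965_escape_essid(" ", 1)          returns "<hidden>" (Linksys quirk)
 *	iwl4965_escape_essid("ap\0lab", 6)    returns the 7 characters ap\0lab
 *
 * where the last result contains a literal backslash-zero in place of the
 * embedded NUL.
 */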

static void iwl4965_print_hex_dump(int level, void *p, u32 len)
{
#ifdef CONFIG_IWL4965_DEBUG
	if (!(iwl4965_debug_level & level))
		return;

	print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       p, len, 1);
#endif
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits.  If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * The 4965 operates with up to 17 queues:  One receive queue, one transmit
 * queue (#4) for sending commands to the device firmware, and 15 other
 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
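/*
 * Worked example (illustrative values, not the driver's actual constants):
 * take a queue with n_bd = 256 and n_window = 64, read_ptr = 250 and
 * write_ptr = 4, i.e. 10 descriptors currently in flight.
 * iwl4965_queue_space() below then computes
 *
 *	s = 250 - 4 = 246;   s -= 256  ->  -10   (read_ptr > write_ptr)
 *	s += 64              ->   54            (s was <= 0)
 *	s -= 2               ->   52            (2-entry reserve)
 *
 * so 52 of the 64 window slots are reported usable.
 */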

static int iwl4965_queue_space(const struct iwl4965_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl4965_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl4965_queue_inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);
}

/**
 * iwl4965_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}
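/*
 * Illustrative sketch (not part of the original driver): because n_bd is a
 * power of two, the mask with (n_bd - 1) implements the wrap-around cheaply,
 * e.g. with n_bd = 256:
 *
 *	iwl4965_queue_inc_wrap(255, 256) == 0     (256 & 0xff)
 *	iwl4965_queue_dec_wrap(0, 256)   == 255   (-1  & 0xff)
 *
 * This is why iwl4965_queue_init() below BUG()s on non-power-of-two sizes.
 */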

static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
{
	return q->write_ptr > q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
{
	/* This is for scan command, the big buffer at end of command array */
	if (is_huge)
		return q->n_window;	/* must be power of 2 */

	/* Otherwise, use normal size buffers */
	return index & (q->n_window - 1);
}

/**
 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl4965_queue_init(struct iwl4965_priv *priv, struct iwl4965_queue *q,
			      int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl4965_queue_inc_wrap
	 * and iwl4965_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl4965_tx_queue_alloc(struct iwl4965_priv *priv,
				  struct iwl4965_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->bd = pci_alloc_consistent(dev,
			sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);

	if (!txq->bd) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	if (txq->txb) {
		kfree(txq->txb);
		txq->txb = NULL;
	}

	return -ENOMEM;
}

/**
 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
			  struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
{
	struct pci_dev *dev = priv->pci_dev;
	int len;
	int rc = 0;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl4965_cmd) * slots_num;
	if (txq_id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;
	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
	if (!txq->cmd)
		return -ENOMEM;

	/* Alloc driver data array and TFD circular buffer */
	rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
	if (rc) {
		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

		return -ENOMEM;
	}
	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl4965_queue_inc_wrap and iwl4965_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl4965_hw_tx_queue_init(priv, txq);

	return 0;
}
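/*
 * Sizing sketch (illustrative, not part of the original driver): for the
 * command queue the allocation above works out to
 *
 *	len = sizeof(struct iwl4965_cmd) * slots_num + IWL_MAX_SCAN_SIZE;
 *
 * i.e. slots_num regular command buffers plus one oversized region at the
 * end of the array.  get_cmd_index() returns q->n_window for a "huge"
 * command, so the scan command always lands in that extra region instead
 * of one of the normal slots.
 */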

/**
 * iwl4965_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
{
	struct iwl4965_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl4965_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl4965_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
				    txq->q.n_bd, txq->bd, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	if (txq->txb) {
		kfree(txq->txb);
		txq->txb = NULL;
	}

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

/*************** STATION TABLE MANAGEMENT ****
 * mac80211 should be examined to determine if sta_info is duplicating
 * the functionality provided here
 */

/**************************************************************/

#if 0 /* temporary disable till we add real remove station */
/**
 * iwl4965_remove_station - Remove driver's knowledge of station.
 *
 * NOTE: This does not remove station from device's station table.
 */
static u8 iwl4965_remove_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
{
	int index = IWL_INVALID_STATION;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);

	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_setting.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
			if (priv->stations[i].used &&
			    !compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

	if (unlikely(index == IWL_INVALID_STATION))
		goto out;

	if (priv->stations[index].used) {
		priv->stations[index].used = 0;
		priv->num_stations--;
	}

	BUG_ON(priv->num_stations < 0);

out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return 0;
}
#endif

/**
 * iwl4965_clear_stations_table - Clear the driver's station table
 *
 * NOTE: This does not clear or otherwise alter the device's station table.
 */
static void iwl4965_clear_stations_table(struct iwl4965_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->num_stations = 0;
	memset(priv->stations, 0, sizeof(priv->stations));

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}

/**
 * iwl4965_add_station_flags - Add station to tables in driver and device
 */
u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr,
			     int is_ap, u8 flags, void *ht_data)
{
	int i;
	int index = IWL_INVALID_STATION;
	struct iwl4965_station_entry *station;
	unsigned long flags_spin;
	DECLARE_MAC_BUF(mac);

	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_setting.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
			if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

			if (!priv->stations[i].used &&
			    index == IWL_INVALID_STATION)
				index = i;
		}


	/* These two conditions have the same outcome, but keep them separate
	   since they have different meanings */
	if (unlikely(index == IWL_INVALID_STATION)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return index;
	}

	if (priv->stations[index].used &&
	    !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return index;
	}


	IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
	station = &priv->stations[index];
	station->used = 1;
	priv->num_stations++;

	/* Set up the REPLY_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = index;
	station->sta.station_flags = 0;

#ifdef CONFIG_IWL4965_HT
	/* BCAST station and IBSS stations do not work in HT mode */
	if (index != priv->hw_setting.bcast_sta_id &&
	    priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
		iwl4965_set_ht_add_station(priv, index,
					   (struct ieee80211_ht_info *) ht_data);
#endif /*CONFIG_IWL4965_HT*/

	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	/* Add station to device's station table */
	iwl4965_send_add_station(priv, &station->sta, flags);
	return index;

}

/*************** DRIVER STATUS FUNCTIONS   *****/

static inline int iwl4965_is_ready(struct iwl4965_priv *priv)
{
	/* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
	 * set but EXIT_PENDING is not */
	return test_bit(STATUS_READY, &priv->status) &&
	       test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
	       !test_bit(STATUS_EXIT_PENDING, &priv->status);
}

static inline int iwl4965_is_alive(struct iwl4965_priv *priv)
{
	return test_bit(STATUS_ALIVE, &priv->status);
}

static inline int iwl4965_is_init(struct iwl4965_priv *priv)
{
	return test_bit(STATUS_INIT, &priv->status);
}

static inline int iwl4965_is_rfkill(struct iwl4965_priv *priv)
{
	return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
	       test_bit(STATUS_RF_KILL_SW, &priv->status);
}

static inline int iwl4965_is_ready_rf(struct iwl4965_priv *priv)
{

	if (iwl4965_is_rfkill(priv))
		return 0;

	return iwl4965_is_ready(priv);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

#define IWL_CMD(x) case x : return #x

static const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_RATE_SCALE);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(RADAR_NOTIFICATION);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_4965_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
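/*
 * Illustrative note (not part of the original driver): IWL_CMD() above
 * stringifies its argument, so each line in get_cmd_string() expands to a
 * case label plus the matching string, e.g.
 *
 *	IWL_CMD(REPLY_ALIVE);
 * becomes
 *	case REPLY_ALIVE : return "REPLY_ALIVE";
 *
 * keeping the printable command names in sync with the command IDs without
 * a hand-maintained string table.
 */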

#define HOST_COMPLETE_TIMEOUT (HZ / 2)

/**
 * iwl4965_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed.  On success, it returns the index of the command in the
 * command queue.
 */
static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
{
	struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl4965_queue *q = &txq->q;
	struct iwl4965_tfd_frame *tfd;
	u32 *control_flags;
	struct iwl4965_cmd *out_cmd;
	u32 idx;
	u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
	dma_addr_t phys_addr;
	int ret;
	unsigned long flags;

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	control_flags = (u32 *) tfd;

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = &txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);

	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
			offsetof(struct iwl4965_cmd, hdr);
	iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl4965_tx_queue_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

static int iwl4965_send_cmd_async(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->meta.flags & CMD_ASYNC));

	/* An asynchronous command can not expect an SKB to be set. */
	BUG_ON(cmd->meta.flags & CMD_WANT_SKB);

	/* An asynchronous command MUST have a callback. */
	BUG_ON(!cmd->meta.u.callback);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EBUSY;

	ret = iwl4965_enqueue_hcmd(priv, cmd);
	if (ret < 0) {
		IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl4965_send_cmd_sync(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
{
	int cmd_idx;
	int ret;
	static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */

	BUG_ON(cmd->meta.flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->meta.u.callback != NULL);

	if (atomic_xchg(&entry, 1)) {
		IWL_ERROR("Error sending %s: Already sending a host command\n",
			  get_cmd_string(cmd->id));
		return -EBUSY;
	}

	set_bit(STATUS_HCMD_ACTIVE, &priv->status);

	if (cmd->meta.flags & CMD_WANT_SKB)
		cmd->meta.source = &cmd->meta;

	cmd_idx = iwl4965_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERROR("Error sending %s: time out after %dms.\n",
				  get_cmd_string(cmd->id),
				  jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_DEBUG_INFO("Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
		IWL_ERROR("Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto out;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->meta.flags & CMD_WANT_SKB) {
		struct iwl4965_cmd *qcmd;

		/* Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source). */
		qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
		qcmd->meta.flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->meta.u.skb) {
		dev_kfree_skb_any(cmd->meta.u.skb);
		cmd->meta.u.skb = NULL;
	}
out:
	atomic_set(&entry, 0);
	return ret;
}

int iwl4965_send_cmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
{
	if (cmd->meta.flags & CMD_ASYNC)
		return iwl4965_send_cmd_async(priv, cmd);

	return iwl4965_send_cmd_sync(priv, cmd);
}

int iwl4965_send_cmd_pdu(struct iwl4965_priv *priv, u8 id, u16 len, const void *data)
{
	struct iwl4965_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return iwl4965_send_cmd_sync(priv, &cmd);
}
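/*
 * Usage sketch (illustrative; it mirrors how this file itself uses the
 * helper later, e.g. iwl4965_send_bt_config()): a fixed-size command body
 * can be sent synchronously in one call, with queueing, waiting and error
 * handling done by the helpers above:
 *
 *	struct iwl4965_bt_cmd bt_cmd = { .flags = 3, .lead_time = 0xAA };
 *
 *	rc = iwl4965_send_cmd_pdu(priv, REPLY_BT_CONFIG,
 *				  sizeof(bt_cmd), &bt_cmd);
 */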

static int __must_check iwl4965_send_cmd_u32(struct iwl4965_priv *priv, u8 id, u32 val)
{
	struct iwl4965_host_cmd cmd = {
		.id = id,
		.len = sizeof(val),
		.data = &val,
	};

	return iwl4965_send_cmd_sync(priv, &cmd);
}

int iwl4965_send_statistics_request(struct iwl4965_priv *priv)
{
	return iwl4965_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
}

/**
 * iwl4965_rxon_add_station - add station into station table.
 *
 * there is only one AP station with id= IWL_AP_ID
 * NOTE: mutex must be held before calling this function
 */
static int iwl4965_rxon_add_station(struct iwl4965_priv *priv,
				    const u8 *addr, int is_ap)
{
	u8 sta_id;

	/* Add station to device's station table */
#ifdef CONFIG_IWL4965_HT
	struct ieee80211_conf *conf = &priv->hw->conf;
	struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;

	if ((is_ap) &&
	    (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
	    (priv->iw_mode == IEEE80211_IF_TYPE_STA))
		sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
						   0, cur_ht_config);
	else
#endif /* CONFIG_IWL4965_HT */
		sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
						   0, NULL);

	/* Set up default rate scaling table in device's station table */
	iwl4965_add_station(priv, addr, is_ap);

	return sta_id;
}

/**
 * iwl4965_set_rxon_channel - Set the phymode and channel values in staging RXON
 * @phymode: MODE_IEEE80211A selects 5.2GHz; all else selects 2.4GHz
 * @channel: Any channel valid for the requested phymode
 *
 * In addition to setting the staging RXON, priv->phymode is also set.
 *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the phymode
 */
static int iwl4965_set_rxon_channel(struct iwl4965_priv *priv, u8 phymode,
				    u16 channel)
{
	if (!iwl4965_get_channel_info(priv, phymode, channel)) {
		IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
			       channel, phymode);
		return -EINVAL;
	}

	if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
	    (priv->phymode == phymode))
		return 0;

	priv->staging_rxon.channel = cpu_to_le16(channel);
	if (phymode == MODE_IEEE80211A)
		priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;

	priv->phymode = phymode;

	IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);

	return 0;
}

/**
 * iwl4965_check_rxon_cmd - validate RXON structure is valid
 *
 * NOTE:  This is really only useful during development and can eventually
 * be #ifdef'd out once the driver is stable and folks aren't actively
 * making changes
 */
static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon)
{
	int error = 0;
	int counter = 1;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		error |= le32_to_cpu(rxon->flags &
				(RXON_FLG_TGJ_NARROW_BAND_MSK |
				 RXON_FLG_RADAR_DETECT_MSK));
		if (error)
			IWL_WARNING("check 24G fields %d | %d\n",
				    counter++, error);
	} else {
		error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
				0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
		if (error)
			IWL_WARNING("check 52 fields %d | %d\n",
				    counter++, error);
		error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
		if (error)
			IWL_WARNING("check 52 CCK %d | %d\n",
				    counter++, error);
	}
	error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
	if (error)
		IWL_WARNING("check mac addr %d | %d\n", counter++, error);

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
		  ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
	if (error)
		IWL_WARNING("check basic rate %d | %d\n", counter++, error);

	error |= (le16_to_cpu(rxon->assoc_id) > 2007);
	if (error)
		IWL_WARNING("check assoc id %d | %d\n", counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
	if (error)
		IWL_WARNING("check CCK and short slot %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
	if (error)
		IWL_WARNING("check CCK & auto detect %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
	if (error)
		IWL_WARNING("check TGG and auto detect %d | %d\n",
			    counter++, error);

	if (error)
		IWL_WARNING("Tuning to channel %d\n",
			    le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERROR("Not a valid iwl4965_rxon_assoc_cmd field values\n");
		return -1;
	}
	return 0;
}

/**
 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
static int iwl4965_full_rxon_required(struct iwl4965_priv *priv)
{

	/* These items are only settable from the full RXON command */
	if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
	    compare_ether_addr(priv->staging_rxon.bssid_addr,
			       priv->active_rxon.bssid_addr) ||
	    compare_ether_addr(priv->staging_rxon.node_addr,
			       priv->active_rxon.node_addr) ||
	    compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
			       priv->active_rxon.wlap_bssid_addr) ||
	    (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
	    (priv->staging_rxon.channel != priv->active_rxon.channel) ||
	    (priv->staging_rxon.air_propagation !=
	     priv->active_rxon.air_propagation) ||
	    (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
	    (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
	    (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
	    (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
		return 1;

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
	    (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
		return 1;

	/* Check if we are switching association toggle */
	if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
	    (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
		return 1;

	return 0;
}

static int iwl4965_send_rxon_assoc(struct iwl4965_priv *priv)
{
	int rc = 0;
	struct iwl4965_rx_packet *res = NULL;
	struct iwl4965_rxon_assoc_cmd rxon_assoc;
	struct iwl4965_host_cmd cmd = {
		.id = REPLY_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.meta.flags = CMD_WANT_SKB,
		.data = &rxon_assoc,
	};
	const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
	const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_ht_single_stream_basic_rates ==
	     rxon2->ofdm_ht_single_stream_basic_rates) &&
	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
	    (rxon1->rx_chain == rxon2->rx_chain) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = priv->staging_rxon.flags;
	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
	rxon_assoc.reserved = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
	    priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
	    priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;

	rc = iwl4965_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
		rc = -EIO;
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

/**
 * iwl4965_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data.  This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
{
	/* cast away the const for active_rxon in this function */
	struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
	DECLARE_MAC_BUF(mac);
	int rc = 0;

	if (!iwl4965_is_alive(priv))
		return -1;

	/* always get timestamp with Rx frame */
	priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

	rc = iwl4965_check_rxon_cmd(&priv->staging_rxon);
	if (rc) {
		IWL_ERROR("Invalid RXON configuration.  Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl4965_full_rxon_required(priv)) {
		rc = iwl4965_send_rxon_assoc(priv);
		if (rc) {
			IWL_ERROR("Error setting RXON_ASSOC "
				  "configuration (%d).\n", rc);
			return rc;
		}

		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));

		return 0;
	}

	/* station table will be cleared */
	priv->assoc_station_added = 0;

#ifdef CONFIG_IWL4965_SENSITIVITY
	priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
	if (!priv->error_recovering)
		priv->start_calib = 0;

	iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
#endif /* CONFIG_IWL4965_SENSITIVITY */

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl4965_is_associated(priv) &&
	    (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
		IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
					  sizeof(struct iwl4965_rxon_cmd),
					  &priv->active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERROR("Error clearing ASSOC_MSK on current "
				  "configuration (%d).\n", rc);
			return rc;
		}
	}

	IWL_DEBUG_INFO("Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %s\n",
		       ((priv->staging_rxon.filter_flags &
			 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
		       le16_to_cpu(priv->staging_rxon.channel),
		       print_mac(mac, priv->staging_rxon.bssid_addr));

	/* Apply the new configuration */
	rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
				  sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon);
	if (rc) {
		IWL_ERROR("Error setting new configuration (%d).\n", rc);
		return rc;
	}

	iwl4965_clear_stations_table(priv);

#ifdef CONFIG_IWL4965_SENSITIVITY
	if (!priv->error_recovering)
		priv->start_calib = 0;

	priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
	iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
#endif /* CONFIG_IWL4965_SENSITIVITY */

	memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = iwl4965_hw_reg_send_txpower(priv);
	if (rc) {
		IWL_ERROR("Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Add the broadcast address so we can send broadcast frames */
	if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) ==
	    IWL_INVALID_STATION) {
		IWL_ERROR("Error adding BROADCAST address for transmit.\n");
		return -EIO;
	}

	/* If we have set the ASSOC_MSK and we are in BSS mode then
	 * add the IWL_AP_ID to the station rate table */
	if (iwl4965_is_associated(priv) &&
	    (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
		if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
		    == IWL_INVALID_STATION) {
			IWL_ERROR("Error adding AP address for transmit.\n");
			return -EIO;
		}
		priv->assoc_station_added = 1;
	}

	return 0;
}

static int iwl4965_send_bt_config(struct iwl4965_priv *priv)
{
	struct iwl4965_bt_cmd bt_cmd = {
		.flags = 3,
		.lead_time = 0xAA,
		.max_kill = 1,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	return iwl4965_send_cmd_pdu(priv, REPLY_BT_CONFIG,
				    sizeof(struct iwl4965_bt_cmd), &bt_cmd);
}

static int iwl4965_send_scan_abort(struct iwl4965_priv *priv)
{
	int rc = 0;
	struct iwl4965_rx_packet *res;
	struct iwl4965_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.meta.flags = CMD_WANT_SKB,
	};

	/* If there isn't a scan actively going on in the hardware
	 * then we are in between scan bands and not actually
	 * actively scanning, so don't send the abort command */
	if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return 0;
	}

	rc = iwl4965_send_cmd_sync(priv, &cmd);
	if (rc) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return rc;
	}

	res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
	if (res->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		clear_bit(STATUS_SCAN_HW, &priv->status);
	}

	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

static int iwl4965_card_state_sync_callback(struct iwl4965_priv *priv,
					    struct iwl4965_cmd *cmd,
					    struct sk_buff *skb)
{
	return 1;
}

/*
 * CARD_STATE_CMD
 *
 * Use: Sets the device's internal card state to enable, disable, or halt
 *
 * When in the 'enable' state the card operates as normal.
 * When in the 'disable' state, the card enters into a low power mode.
 * When in the 'halt' state, the card is shut down and must be fully
 * restarted to come back on.
 */
static int iwl4965_send_card_state(struct iwl4965_priv *priv, u32 flags, u8 meta_flag)
{
	struct iwl4965_host_cmd cmd = {
		.id = REPLY_CARD_STATE_CMD,
		.len = sizeof(u32),
		.data = &flags,
		.meta.flags = meta_flag,
	};

	if (meta_flag & CMD_ASYNC)
		cmd.meta.u.callback = iwl4965_card_state_sync_callback;

	return iwl4965_send_cmd(priv, &cmd);
}

static int iwl4965_add_sta_sync_callback(struct iwl4965_priv *priv,
					 struct iwl4965_cmd *cmd, struct sk_buff *skb)
{
	struct iwl4965_rx_packet *res = NULL;

	if (!skb) {
		IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
		return 1;
	}

	res = (struct iwl4965_rx_packet *)skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
			  res->hdr.flags);
		return 1;
	}

	switch (res->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		break;
	default:
		break;
	}

	/* We didn't cache the SKB; let the caller free it */
	return 1;
}

int iwl4965_send_add_station(struct iwl4965_priv *priv,
			     struct iwl4965_addsta_cmd *sta, u8 flags)
{
	struct iwl4965_rx_packet *res = NULL;
	int rc = 0;
	struct iwl4965_host_cmd cmd = {
		.id = REPLY_ADD_STA,
		.len = sizeof(struct iwl4965_addsta_cmd),
		.meta.flags = flags,
		.data = sta,
	};

	if (flags & CMD_ASYNC)
		cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
	else
		cmd.meta.flags |= CMD_WANT_SKB;

	rc = iwl4965_send_cmd(priv, &cmd);

	if (rc || (flags & CMD_ASYNC))
		return rc;

	res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
			  res->hdr.flags);
		rc = -EIO;
	}

	if (rc == 0) {
		switch (res->u.add_sta.status) {
		case ADD_STA_SUCCESS_MSK:
			IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
			break;
		default:
			rc = -EIO;
			IWL_WARNING("REPLY_ADD_STA failed\n");
			break;
		}
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

static int iwl4965_update_sta_key_info(struct iwl4965_priv *priv,
				       struct ieee80211_key_conf *keyconf,
				       u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;

	switch (keyconf->alg) {
	case ALG_CCMP:
		key_flags |= STA_KEY_FLG_CCMP;
		key_flags |= cpu_to_le16(
				keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
		key_flags &= ~STA_KEY_FLG_INVALID;
		break;
	case ALG_TKIP:
	case ALG_WEP:
	default:
		return -EINVAL;
	}
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.alg = keyconf->alg;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
	       keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
	       keyconf->keylen);
	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
	return 0;
}

static int iwl4965_clear_sta_key_info(struct iwl4965_priv *priv, u8 sta_id)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl4965_hw_key));
	memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo));
	priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
	return 0;
}

static void iwl4965_clear_free_frames(struct iwl4965_priv *priv)
{
	struct list_head *element;

	IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct iwl4965_frame, list));
		priv->frames_count--;
	}

	if (priv->frames_count) {
		IWL_WARNING("%d frames still in use.  Did we lose one?\n",
			    priv->frames_count);
		priv->frames_count = 0;
	}
}

static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl4965_priv *priv)
{
	struct iwl4965_frame *frame;
	struct list_head *element;
	if (list_empty(&priv->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
			IWL_ERROR("Could not allocate frame!\n");
			return NULL;
		}

		priv->frames_count++;
		return frame;
	}

	element = priv->free_frames.next;
	list_del(element);
	return list_entry(element, struct iwl4965_frame, list);
}

static void iwl4965_free_frame(struct iwl4965_priv *priv, struct iwl4965_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}

unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv,
				       struct ieee80211_hdr *hdr,
				       const u8 *dest, int left)
{

	if (!iwl4965_is_associated(priv) || !priv->ibss_beacon ||
	    ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
	     (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
		return 0;

	if (priv->ibss_beacon->len > left)
		return 0;

	memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);

	return priv->ibss_beacon->len;
}

int iwl4965_rate_index_from_plcp(int plcp)
{
	int i = 0;

	/* 4965 HT rate format */
	if (plcp & RATE_MCS_HT_MSK) {
		i = (plcp & 0xff);

		if (i >= IWL_RATE_MIMO_6M_PLCP)
			i = i - IWL_RATE_MIMO_6M_PLCP;

		i += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht */
		if (i >= IWL_RATE_9M_INDEX)
			i += 1;
		if ((i >= IWL_FIRST_OFDM_RATE) &&
		    (i <= IWL_LAST_OFDM_RATE))
			return i;

	/* 4965 legacy rate format, search for match in table */
	} else {
		for (i = 0; i < ARRAY_SIZE(iwl4965_rates); i++)
			if (iwl4965_rates[i].plcp == (plcp & 0xFF))
				return i;
	}
	return -1;
}

static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
{
	u8 i;

	for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
	     i = iwl4965_rates[i].next_ieee) {
		if (rate_mask & (1 << i))
			return iwl4965_rates[i].plcp;
	}

	return IWL_RATE_INVALID;
}

static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv)
{
	struct iwl4965_frame *frame;
	unsigned int frame_size;
	int rc;
	u8 rate;

	frame = iwl4965_get_free_frame(priv);

	if (!frame) {
		IWL_ERROR("Could not obtain free frame buffer for beacon "
			  "command.\n");
		return -ENOMEM;
	}

	if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
		rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
						    0xFF0);
		if (rate == IWL_INVALID_RATE)
			rate = IWL_RATE_6M_PLCP;
	} else {
		rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
		if (rate == IWL_INVALID_RATE)
			rate = IWL_RATE_1M_PLCP;
	}

	frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);

	rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
				  &frame->u.cmd[0]);

	iwl4965_free_frame(priv, frame);

	return rc;
}
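/*
 * Illustrative note (an inference from the code above, not a statement from
 * the original author): the 0xF mask with a 1 Mbps fallback is used on the
 * 2.4 GHz band and the 0xFF0 mask with a 6 Mbps fallback elsewhere, which
 * suggests the low four bits of active_rate_basic cover the CCK rates and
 * the next bits the OFDM rates; the beacon is thus sent at the lowest basic
 * rate that is valid for the current band.
 */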
1630
1631/******************************************************************************
1632 *
1633 * EEPROM related functions
1634 *
1635 ******************************************************************************/
1636
bb8c093b 1637static void get_eeprom_mac(struct iwl4965_priv *priv, u8 *mac)
b481de9c
ZY
1638{
1639 memcpy(mac, priv->eeprom.mac_address, 6);
1640}
1641
1642/**
bb8c093b 1643 * iwl4965_eeprom_init - read EEPROM contents
b481de9c 1644 *
6440adb5 1645 * Load the EEPROM contents from adapter into priv->eeprom
b481de9c
ZY
1646 *
1647 * NOTE: This routine uses the non-debug IO access functions.
1648 */
bb8c093b 1649int iwl4965_eeprom_init(struct iwl4965_priv *priv)
b481de9c 1650{
0e5ce1f3 1651 __le16 *e = (__le16 *)&priv->eeprom;
bb8c093b 1652 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
b481de9c
ZY
1653 u32 r;
1654 int sz = sizeof(priv->eeprom);
1655 int rc;
1656 int i;
1657 u16 addr;
1658
1659 /* The EEPROM structure has several padding buffers within it
1660 * and when adding new EEPROM maps is subject to programmer errors
1661 * which may be very difficult to identify without explicitly
1662 * checking the resulting size of the eeprom map. */
1663 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1664
1665 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1666 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1667 return -ENOENT;
1668 }
1669
6440adb5 1670 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
bb8c093b 1671 rc = iwl4965_eeprom_acquire_semaphore(priv);
b481de9c 1672 if (rc < 0) {
91e17473 1673 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
b481de9c
ZY
1674 return -ENOENT;
1675 }
1676
1677 /* eeprom is an array of 16bit values */
1678 for (addr = 0; addr < sz; addr += sizeof(u16)) {
bb8c093b
CH
1679 _iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
1680 _iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
b481de9c
ZY
1681
1682 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1683 i += IWL_EEPROM_ACCESS_DELAY) {
bb8c093b 1684 r = _iwl4965_read_direct32(priv, CSR_EEPROM_REG);
b481de9c
ZY
1685 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1686 break;
1687 udelay(IWL_EEPROM_ACCESS_DELAY);
1688 }
1689
1690 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1691 IWL_ERROR("Timed out reading EEPROM[%d]\n", addr);
1692 rc = -ETIMEDOUT;
1693 goto done;
1694 }
0e5ce1f3 1695 e[addr / 2] = cpu_to_le16(r >> 16);
b481de9c
ZY
1696 }
1697 rc = 0;
1698
1699done:
bb8c093b 1700 iwl4965_eeprom_release_semaphore(priv);
b481de9c
ZY
1701 return rc;
1702}
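
/*
 * Illustrative sketch (not part of the original source): the loop above
 * reads one 16-bit word per iteration using the CSR_EEPROM_REG handshake.
 * The same handshake, isolated into a hypothetical helper for clarity,
 * would look like this.  It assumes the caller already holds the EEPROM
 * semaphore; the timeout error reporting done by the real loop is omitted.
 */
static inline u16 iwl4965_eeprom_read_word_sketch(struct iwl4965_priv *priv,
						  u16 addr)
{
	u32 r = 0;
	int i;

	/* The (even) byte offset is written shifted left by one, exactly as
	 * in the loop above; clearing the CMD bit starts the read cycle. */
	_iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
	_iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);

	/* Poll until the hardware marks the data valid (or give up). */
	for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
	     i += IWL_EEPROM_ACCESS_DELAY) {
		r = _iwl4965_read_direct32(priv, CSR_EEPROM_REG);
		if (r & CSR_EEPROM_REG_READ_VALID_MSK)
			break;
		udelay(IWL_EEPROM_ACCESS_DELAY);
	}

	/* The 16 data bits come back in the upper half of the register. */
	return (u16)(r >> 16);
}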
1703
1704/******************************************************************************
1705 *
1706 * Misc. internal state and helper functions
1707 *
1708 ******************************************************************************/
c8b0e6e1 1709#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
1710
1711/**
bb8c093b 1712 * iwl4965_report_frame - dump frame to syslog during debug sessions
b481de9c 1713 *
9fbab516 1714 * You may hack this function to show different aspects of received frames,
b481de9c
ZY
1715 * including selective frame dumps.
1716 * group100 parameter selects whether to show 1 out of 100 good frames.
1717 *
9fbab516
BC
1718 * TODO: This was originally written for 3945, need to audit for
1719 * proper operation with 4965.
b481de9c 1720 */
bb8c093b
CH
1721void iwl4965_report_frame(struct iwl4965_priv *priv,
1722 struct iwl4965_rx_packet *pkt,
b481de9c
ZY
1723 struct ieee80211_hdr *header, int group100)
1724{
1725 u32 to_us;
1726 u32 print_summary = 0;
1727 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1728 u32 hundred = 0;
1729 u32 dataframe = 0;
1730 u16 fc;
1731 u16 seq_ctl;
1732 u16 channel;
1733 u16 phy_flags;
1734 int rate_sym;
1735 u16 length;
1736 u16 status;
1737 u16 bcn_tmr;
1738 u32 tsf_low;
1739 u64 tsf;
1740 u8 rssi;
1741 u8 agc;
1742 u16 sig_avg;
1743 u16 noise_diff;
bb8c093b
CH
1744 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1745 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1746 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
b481de9c
ZY
1747 u8 *data = IWL_RX_DATA(pkt);
1748
1749 /* MAC header */
1750 fc = le16_to_cpu(header->frame_control);
1751 seq_ctl = le16_to_cpu(header->seq_ctrl);
1752
1753 /* metadata */
1754 channel = le16_to_cpu(rx_hdr->channel);
1755 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1756 rate_sym = rx_hdr->rate;
1757 length = le16_to_cpu(rx_hdr->len);
1758
1759 /* end-of-frame status and timestamp */
1760 status = le32_to_cpu(rx_end->status);
1761 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1762 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1763 tsf = le64_to_cpu(rx_end->timestamp);
1764
1765 /* signal statistics */
1766 rssi = rx_stats->rssi;
1767 agc = rx_stats->agc;
1768 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1769 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1770
1771 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1772
1773 /* if data frame is to us and all is good,
1774 * (optionally) print summary for only 1 out of every 100 */
1775 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1776 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1777 dataframe = 1;
1778 if (!group100)
1779 print_summary = 1; /* print each frame */
1780 else if (priv->framecnt_to_us < 100) {
1781 priv->framecnt_to_us++;
1782 print_summary = 0;
1783 } else {
1784 priv->framecnt_to_us = 0;
1785 print_summary = 1;
1786 hundred = 1;
1787 }
1788 } else {
1789 /* print summary for all other frames */
1790 print_summary = 1;
1791 }
1792
1793 if (print_summary) {
1794 char *title;
1795 u32 rate;
1796
1797 if (hundred)
1798 title = "100Frames";
1799 else if (fc & IEEE80211_FCTL_RETRY)
1800 title = "Retry";
1801 else if (ieee80211_is_assoc_response(fc))
1802 title = "AscRsp";
1803 else if (ieee80211_is_reassoc_response(fc))
1804 title = "RasRsp";
1805 else if (ieee80211_is_probe_response(fc)) {
1806 title = "PrbRsp";
1807 print_dump = 1; /* dump frame contents */
1808 } else if (ieee80211_is_beacon(fc)) {
1809 title = "Beacon";
1810 print_dump = 1; /* dump frame contents */
1811 } else if (ieee80211_is_atim(fc))
1812 title = "ATIM";
1813 else if (ieee80211_is_auth(fc))
1814 title = "Auth";
1815 else if (ieee80211_is_deauth(fc))
1816 title = "DeAuth";
1817 else if (ieee80211_is_disassoc(fc))
1818 title = "DisAssoc";
1819 else
1820 title = "Frame";
1821
bb8c093b 1822 rate = iwl4965_rate_index_from_plcp(rate_sym);
b481de9c
ZY
1823 if (rate == -1)
1824 rate = 0;
1825 else
bb8c093b 1826 rate = iwl4965_rates[rate].ieee / 2;
b481de9c
ZY
1827
1828 /* print frame summary.
1829 * MAC addresses show just the last byte (for brevity),
1830 * but you can hack it to show more, if you'd like to. */
1831 if (dataframe)
1832 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1833 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1834 title, fc, header->addr1[5],
1835 length, rssi, channel, rate);
1836 else {
1837 /* src/dst addresses assume managed mode */
1838 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1839 "src=0x%02x, rssi=%u, tim=%lu usec, "
1840 "phy=0x%02x, chnl=%d\n",
1841 title, fc, header->addr1[5],
1842 header->addr3[5], rssi,
1843 tsf_low - priv->scan_start_tsf,
1844 phy_flags, channel);
1845 }
1846 }
1847 if (print_dump)
bb8c093b 1848 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
b481de9c
ZY
1849}
1850#endif
1851
bb8c093b 1852static void iwl4965_unset_hw_setting(struct iwl4965_priv *priv)
b481de9c
ZY
1853{
1854 if (priv->hw_setting.shared_virt)
1855 pci_free_consistent(priv->pci_dev,
bb8c093b 1856 sizeof(struct iwl4965_shared),
b481de9c
ZY
1857 priv->hw_setting.shared_virt,
1858 priv->hw_setting.shared_phys);
1859}
1860
1861/**
bb8c093b 1862 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
b481de9c
ZY
1863 *
1864 * Return: bitmap with a bit set for each supported rate inserted into the IE
1865 */
bb8c093b 1866static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
c7c46676 1867 u16 basic_rate, int *left)
b481de9c
ZY
1868{
1869 u16 ret_rates = 0, bit;
1870 int i;
c7c46676
TW
1871 u8 *cnt = ie;
1872 u8 *rates = ie + 1;
b481de9c
ZY
1873
1874 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1875 if (bit & supported_rate) {
1876 ret_rates |= bit;
bb8c093b 1877 rates[*cnt] = iwl4965_rates[i].ieee |
c7c46676
TW
1878 ((bit & basic_rate) ? 0x80 : 0x00);
1879 (*cnt)++;
1880 (*left)--;
1881 if ((*left <= 0) ||
1882 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
b481de9c
ZY
1883 break;
1884 }
1885 }
1886
1887 return ret_rates;
1888}
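
/*
 * Illustrative example (not in the original source) of how the helper above
 * lays out a Supported Rates IE: rates are written in IEEE units of
 * 0.5 Mbps, basic rates are OR'd with 0x80, and ie[0] receives the count.
 * The buffer size and rate masks below are assumptions for the example only.
 */
static inline void iwl4965_rates_ie_example(void)
{
	u8 ie[IWL_SUPPORTED_RATES_IE_LEN + 1];
	int left = IWL_SUPPORTED_RATES_IE_LEN;
	u16 written;

	written = iwl4965_supported_rate_to_ie(ie, IWL_CCK_RATES_MASK,
					       IWL_CCK_BASIC_RATES_MASK, &left);
	/*
	 * If 1/2/5.5/11 Mbps are supported and, say, 1 and 2 Mbps are basic,
	 * the result is ie = { 4, 0x82, 0x84, 0x0b, 0x16 }: count 4, then
	 * 1 Mbps (2 x 0.5 Mbps, basic), 2 Mbps (basic), 5.5 Mbps, 11 Mbps.
	 * 'written' holds the bitmap of rates actually emitted.
	 */
	(void)written;
}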
1889
c8b0e6e1 1890#ifdef CONFIG_IWL4965_HT
bb8c093b 1891void static iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8fb88032
RR
1892 struct ieee80211_ht_cap *ht_cap,
1893 u8 use_current_config);
b481de9c
ZY
1894#endif
1895
1896/**
bb8c093b 1897 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
b481de9c 1898 */
bb8c093b 1899static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv,
b481de9c
ZY
1900 struct ieee80211_mgmt *frame,
1901 int left, int is_direct)
1902{
1903 int len = 0;
1904 u8 *pos = NULL;
bee488db 1905 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
8fb88032
RR
1906#ifdef CONFIG_IWL4965_HT
1907 struct ieee80211_hw_mode *mode;
1908#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
1909
1910 /* Make sure there is enough space for the probe request,
1911 * two mandatory IEs and the data */
1912 left -= 24;
1913 if (left < 0)
1914 return 0;
1915 len += 24;
1916
1917 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
bb8c093b 1918 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c 1919 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
bb8c093b 1920 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c
ZY
1921 frame->seq_ctrl = 0;
1922
1923 /* fill in our indirect SSID IE */
1924 /* ...next IE... */
1925
1926 left -= 2;
1927 if (left < 0)
1928 return 0;
1929 len += 2;
1930 pos = &(frame->u.probe_req.variable[0]);
1931 *pos++ = WLAN_EID_SSID;
1932 *pos++ = 0;
1933
1934 /* fill in our direct SSID IE... */
1935 if (is_direct) {
1936 /* ...next IE... */
1937 left -= 2 + priv->essid_len;
1938 if (left < 0)
1939 return 0;
1940 /* ... fill it in... */
1941 *pos++ = WLAN_EID_SSID;
1942 *pos++ = priv->essid_len;
1943 memcpy(pos, priv->essid, priv->essid_len);
1944 pos += priv->essid_len;
1945 len += 2 + priv->essid_len;
1946 }
1947
1948 /* fill in supported rate */
1949 /* ...next IE... */
1950 left -= 2;
1951 if (left < 0)
1952 return 0;
c7c46676 1953
b481de9c
ZY
1954 /* ... fill it in... */
1955 *pos++ = WLAN_EID_SUPP_RATES;
1956 *pos = 0;
c7c46676 1957
bee488db 1958 /* exclude 60M rate */
1959 active_rates = priv->rates_mask;
1960 active_rates &= ~IWL_RATE_60M_MASK;
1961
1962 active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;
b481de9c 1963
c7c46676 1964 cck_rates = IWL_CCK_RATES_MASK & active_rates;
bb8c093b 1965 ret_rates = iwl4965_supported_rate_to_ie(pos, cck_rates,
bee488db 1966 active_rate_basic, &left);
c7c46676
TW
1967 active_rates &= ~ret_rates;
1968
bb8c093b 1969 ret_rates = iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1970 active_rate_basic, &left);
c7c46676
TW
1971 active_rates &= ~ret_rates;
1972
b481de9c
ZY
1973 len += 2 + *pos;
1974 pos += (*pos) + 1;
c7c46676 1975 if (active_rates == 0)
b481de9c
ZY
1976 goto fill_end;
1977
1978 /* fill in supported extended rate */
1979 /* ...next IE... */
1980 left -= 2;
1981 if (left < 0)
1982 return 0;
1983 /* ... fill it in... */
1984 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1985 *pos = 0;
bb8c093b 1986 iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1987 active_rate_basic, &left);
b481de9c
ZY
1988 if (*pos > 0)
1989 len += 2 + *pos;
1990
c8b0e6e1 1991#ifdef CONFIG_IWL4965_HT
8fb88032
RR
1992 mode = priv->hw->conf.mode;
1993 if (mode->ht_info.ht_supported) {
b481de9c
ZY
1994 pos += (*pos) + 1;
1995 *pos++ = WLAN_EID_HT_CAPABILITY;
8fb88032
RR
1996 *pos++ = sizeof(struct ieee80211_ht_cap);
1997 iwl4965_set_ht_capab(priv->hw,
1998 (struct ieee80211_ht_cap *)pos, 0);
1999 len += 2 + sizeof(struct ieee80211_ht_cap);
b481de9c 2000 }
c8b0e6e1 2001#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
2002
2003 fill_end:
2004 return (u16)len;
2005}
2006
2007/*
2008 * QoS support
2009 */
c8b0e6e1 2010#ifdef CONFIG_IWL4965_QOS
bb8c093b
CH
2011static int iwl4965_send_qos_params_command(struct iwl4965_priv *priv,
2012 struct iwl4965_qosparam_cmd *qos)
b481de9c
ZY
2013{
2014
bb8c093b
CH
2015 return iwl4965_send_cmd_pdu(priv, REPLY_QOS_PARAM,
2016 sizeof(struct iwl4965_qosparam_cmd), qos);
b481de9c
ZY
2017}
2018
bb8c093b 2019static void iwl4965_reset_qos(struct iwl4965_priv *priv)
b481de9c
ZY
2020{
2021 u16 cw_min = 15;
2022 u16 cw_max = 1023;
2023 u8 aifs = 2;
2024 u8 is_legacy = 0;
2025 unsigned long flags;
2026 int i;
2027
2028 spin_lock_irqsave(&priv->lock, flags);
2029 priv->qos_data.qos_active = 0;
2030
2031 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
2032 if (priv->qos_data.qos_enable)
2033 priv->qos_data.qos_active = 1;
2034 if (!(priv->active_rate & 0xfff0)) {
2035 cw_min = 31;
2036 is_legacy = 1;
2037 }
2038 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2039 if (priv->qos_data.qos_enable)
2040 priv->qos_data.qos_active = 1;
2041 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
2042 cw_min = 31;
2043 is_legacy = 1;
2044 }
2045
2046 if (priv->qos_data.qos_active)
2047 aifs = 3;
2048
2049 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
2050 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
2051 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
2052 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
2053 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
2054
2055 if (priv->qos_data.qos_active) {
2056 i = 1;
2057 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
2058 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
2059 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
2060 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2061 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2062
2063 i = 2;
2064 priv->qos_data.def_qos_parm.ac[i].cw_min =
2065 cpu_to_le16((cw_min + 1) / 2 - 1);
2066 priv->qos_data.def_qos_parm.ac[i].cw_max =
2067 cpu_to_le16(cw_max);
2068 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2069 if (is_legacy)
2070 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2071 cpu_to_le16(6016);
2072 else
2073 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2074 cpu_to_le16(3008);
2075 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2076
2077 i = 3;
2078 priv->qos_data.def_qos_parm.ac[i].cw_min =
2079 cpu_to_le16((cw_min + 1) / 4 - 1);
2080 priv->qos_data.def_qos_parm.ac[i].cw_max =
2081 cpu_to_le16((cw_max + 1) / 2 - 1);
2082 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2083 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2084 if (is_legacy)
2085 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2086 cpu_to_le16(3264);
2087 else
2088 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2089 cpu_to_le16(1504);
2090 } else {
2091 for (i = 1; i < 4; i++) {
2092 priv->qos_data.def_qos_parm.ac[i].cw_min =
2093 cpu_to_le16(cw_min);
2094 priv->qos_data.def_qos_parm.ac[i].cw_max =
2095 cpu_to_le16(cw_max);
2096 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2097 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2098 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2099 }
2100 }
2101 IWL_DEBUG_QOS("set QoS to default\n");
2102
2103 spin_unlock_irqrestore(&priv->lock, flags);
2104}
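
/*
 * Worked example (not in the original source), derived from the defaults
 * above: with QoS active and cw_min = 15, cw_max = 1023, the four access
 * categories come out as
 *   ac[0]: cw_min 15,               cw_max 1023,             aifsn 3, txop 0
 *   ac[1]: cw_min 15,               cw_max 1023,             aifsn 7, txop 0
 *   ac[2]: cw_min (15+1)/2-1 = 7,   cw_max 1023,             aifsn 2,
 *          txop 3008 usec (6016 usec on a legacy/CCK-only network)
 *   ac[3]: cw_min (15+1)/4-1 = 3,   cw_max (1023+1)/2-1 = 511, aifsn 2,
 *          txop 1504 usec (3264 usec on a legacy/CCK-only network)
 * The progressively smaller contention windows on ac[2]/ac[3] give those
 * queues higher medium-access priority.
 */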
2105
bb8c093b 2106static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
b481de9c
ZY
2107{
2108 unsigned long flags;
2109
b481de9c
ZY
2110 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2111 return;
2112
2113 if (!priv->qos_data.qos_enable)
2114 return;
2115
2116 spin_lock_irqsave(&priv->lock, flags);
2117 priv->qos_data.def_qos_parm.qos_flags = 0;
2118
2119 if (priv->qos_data.qos_cap.q_AP.queue_request &&
2120 !priv->qos_data.qos_cap.q_AP.txop_request)
2121 priv->qos_data.def_qos_parm.qos_flags |=
2122 QOS_PARAM_FLG_TXOP_TYPE_MSK;
b481de9c
ZY
2123 if (priv->qos_data.qos_active)
2124 priv->qos_data.def_qos_parm.qos_flags |=
2125 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2126
c8b0e6e1 2127#ifdef CONFIG_IWL4965_HT
fd105e79 2128 if (priv->current_ht_config.is_ht)
f1f1f5c7 2129 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
c8b0e6e1 2130#endif /* CONFIG_IWL4965_HT */
f1f1f5c7 2131
b481de9c
ZY
2132 spin_unlock_irqrestore(&priv->lock, flags);
2133
bb8c093b 2134 if (force || iwl4965_is_associated(priv)) {
f1f1f5c7
TW
2135 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2136 priv->qos_data.qos_active,
2137 priv->qos_data.def_qos_parm.qos_flags);
b481de9c 2138
bb8c093b 2139 iwl4965_send_qos_params_command(priv,
b481de9c
ZY
2140 &(priv->qos_data.def_qos_parm));
2141 }
2142}
2143
c8b0e6e1 2144#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
2145/*
2146 * Power management (not Tx power!) functions
2147 */
2148#define MSEC_TO_USEC 1024
2149
2150#define NOSLP __constant_cpu_to_le16(0), 0, 0
2151#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2152#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2153#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2154 __constant_cpu_to_le32(X1), \
2155 __constant_cpu_to_le32(X2), \
2156 __constant_cpu_to_le32(X3), \
2157 __constant_cpu_to_le32(X4)}
2158
2159
2160/* default power management (not Tx power) table values */
2161/* for tim 0-10 */
bb8c093b 2162static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
b481de9c
ZY
2163 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2164 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2165 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2166 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2167 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2168 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2169};
2170
2171/* for tim > 10 */
bb8c093b 2172static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
b481de9c
ZY
2173 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2174 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2175 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2176 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2177 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2178 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2179 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2180 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2181 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2182 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2183};
2184
bb8c093b 2185int iwl4965_power_init_handle(struct iwl4965_priv *priv)
b481de9c
ZY
2186{
2187 int rc = 0, i;
bb8c093b
CH
2188 struct iwl4965_power_mgr *pow_data;
2189 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
b481de9c
ZY
2190 u16 pci_pm;
2191
2192 IWL_DEBUG_POWER("Initialize power \n");
2193
2194 pow_data = &(priv->power_data);
2195
2196 memset(pow_data, 0, sizeof(*pow_data));
2197
2198 pow_data->active_index = IWL_POWER_RANGE_0;
2199 pow_data->dtim_val = 0xffff;
2200
2201 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2202 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2203
2204 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2205 if (rc != 0)
2206 return 0;
2207 else {
bb8c093b 2208 struct iwl4965_powertable_cmd *cmd;
b481de9c
ZY
2209
2210 IWL_DEBUG_POWER("adjust power command flags\n");
2211
2212 for (i = 0; i < IWL_POWER_AC; i++) {
2213 cmd = &pow_data->pwr_range_0[i].cmd;
2214
2215 if (pci_pm & 0x1)
2216 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2217 else
2218 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2219 }
2220 }
2221 return rc;
2222}
2223
bb8c093b
CH
2224static int iwl4965_update_power_cmd(struct iwl4965_priv *priv,
2225 struct iwl4965_powertable_cmd *cmd, u32 mode)
b481de9c
ZY
2226{
2227 int rc = 0, i;
2228 u8 skip;
2229 u32 max_sleep = 0;
bb8c093b 2230 struct iwl4965_power_vec_entry *range;
b481de9c 2231 u8 period = 0;
bb8c093b 2232 struct iwl4965_power_mgr *pow_data;
b481de9c
ZY
2233
2234 if (mode > IWL_POWER_INDEX_5) {
2235 IWL_DEBUG_POWER("Error: invalid power mode\n");
2236 return -1;
2237 }
2238 pow_data = &(priv->power_data);
2239
2240 if (pow_data->active_index == IWL_POWER_RANGE_0)
2241 range = &pow_data->pwr_range_0[0];
2242 else
2243 range = &pow_data->pwr_range_1[1];
2244
bb8c093b 2245 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
b481de9c
ZY
2246
2247#ifdef IWL_MAC80211_DISABLE
2248 if (priv->assoc_network != NULL) {
2249 unsigned long flags;
2250
2251 period = priv->assoc_network->tim.tim_period;
2252 }
2253#endif /*IWL_MAC80211_DISABLE */
2254 skip = range[mode].no_dtim;
2255
2256 if (period == 0) {
2257 period = 1;
2258 skip = 0;
2259 }
2260
2261 if (skip == 0) {
2262 max_sleep = period;
2263 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2264 } else {
2265 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2266 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2267 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2268 }
2269
2270 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2271 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2272 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2273 }
2274
2275 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2276 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2277 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2278 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2279 le32_to_cpu(cmd->sleep_interval[0]),
2280 le32_to_cpu(cmd->sleep_interval[1]),
2281 le32_to_cpu(cmd->sleep_interval[2]),
2282 le32_to_cpu(cmd->sleep_interval[3]),
2283 le32_to_cpu(cmd->sleep_interval[4]));
2284
2285 return rc;
2286}
2287
bb8c093b 2288static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode)
b481de9c 2289{
9a62f73b 2290 u32 uninitialized_var(final_mode);
b481de9c 2291 int rc;
bb8c093b 2292 struct iwl4965_powertable_cmd cmd;
b481de9c
ZY
2293
2294 /* If on battery, set to 3,
01ebd063 2295 * if plugged into AC power, set to CAM ("continuously aware mode"),
b481de9c
ZY
2296 * else user level */
2297 switch (mode) {
2298 case IWL_POWER_BATTERY:
2299 final_mode = IWL_POWER_INDEX_3;
2300 break;
2301 case IWL_POWER_AC:
2302 final_mode = IWL_POWER_MODE_CAM;
2303 break;
2304 default:
2305 final_mode = mode;
2306 break;
2307 }
2308
2309 cmd.keep_alive_beacons = 0;
2310
bb8c093b 2311 iwl4965_update_power_cmd(priv, &cmd, final_mode);
b481de9c 2312
bb8c093b 2313 rc = iwl4965_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
b481de9c
ZY
2314
2315 if (final_mode == IWL_POWER_MODE_CAM)
2316 clear_bit(STATUS_POWER_PMI, &priv->status);
2317 else
2318 set_bit(STATUS_POWER_PMI, &priv->status);
2319
2320 return rc;
2321}
2322
bb8c093b 2323int iwl4965_is_network_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
2324{
2325 /* Filter incoming packets to determine if they are targeted toward
2326 * this network, discarding packets coming from ourselves */
2327 switch (priv->iw_mode) {
2328 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2329 /* packets from our adapter are dropped (echo) */
2330 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2331 return 0;
2332 /* {broad,multi}cast packets to our IBSS go through */
2333 if (is_multicast_ether_addr(header->addr1))
2334 return !compare_ether_addr(header->addr3, priv->bssid);
2335 /* packets to our adapter go through */
2336 return !compare_ether_addr(header->addr1, priv->mac_addr);
2337 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2338 /* packets from our adapter are dropped (echo) */
2339 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2340 return 0;
2341 /* {broad,multi}cast packets to our BSS go through */
2342 if (is_multicast_ether_addr(header->addr1))
2343 return !compare_ether_addr(header->addr2, priv->bssid);
2344 /* packets to our adapter go through */
2345 return !compare_ether_addr(header->addr1, priv->mac_addr);
2346 }
2347
2348 return 1;
2349}
2350
2351#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2352
bb8c093b 2353static const char *iwl4965_get_tx_fail_reason(u32 status)
b481de9c
ZY
2354{
2355 switch (status & TX_STATUS_MSK) {
2356 case TX_STATUS_SUCCESS:
2357 return "SUCCESS";
2358 TX_STATUS_ENTRY(SHORT_LIMIT);
2359 TX_STATUS_ENTRY(LONG_LIMIT);
2360 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2361 TX_STATUS_ENTRY(MGMNT_ABORT);
2362 TX_STATUS_ENTRY(NEXT_FRAG);
2363 TX_STATUS_ENTRY(LIFE_EXPIRE);
2364 TX_STATUS_ENTRY(DEST_PS);
2365 TX_STATUS_ENTRY(ABORTED);
2366 TX_STATUS_ENTRY(BT_RETRY);
2367 TX_STATUS_ENTRY(STA_INVALID);
2368 TX_STATUS_ENTRY(FRAG_DROPPED);
2369 TX_STATUS_ENTRY(TID_DISABLE);
2370 TX_STATUS_ENTRY(FRAME_FLUSHED);
2371 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2372 TX_STATUS_ENTRY(TX_LOCKED);
2373 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2374 }
2375
2376 return "UNKNOWN";
2377}
2378
2379/**
bb8c093b 2380 * iwl4965_scan_cancel - Cancel any currently executing HW scan
b481de9c
ZY
2381 *
2382 * NOTE: priv->mutex is not required before calling this function
2383 */
bb8c093b 2384static int iwl4965_scan_cancel(struct iwl4965_priv *priv)
b481de9c
ZY
2385{
2386 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2387 clear_bit(STATUS_SCANNING, &priv->status);
2388 return 0;
2389 }
2390
2391 if (test_bit(STATUS_SCANNING, &priv->status)) {
2392 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2393 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2394 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2395 queue_work(priv->workqueue, &priv->abort_scan);
2396
2397 } else
2398 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2399
2400 return test_bit(STATUS_SCANNING, &priv->status);
2401 }
2402
2403 return 0;
2404}
2405
2406/**
bb8c093b 2407 * iwl4965_scan_cancel_timeout - Cancel any currently executing HW scan
b481de9c
ZY
2408 * @ms: amount of time to wait (in milliseconds) for scan to abort
2409 *
2410 * NOTE: priv->mutex must be held before calling this function
2411 */
bb8c093b 2412static int iwl4965_scan_cancel_timeout(struct iwl4965_priv *priv, unsigned long ms)
b481de9c
ZY
2413{
2414 unsigned long now = jiffies;
2415 int ret;
2416
bb8c093b 2417 ret = iwl4965_scan_cancel(priv);
b481de9c
ZY
2418 if (ret && ms) {
2419 mutex_unlock(&priv->mutex);
2420 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2421 test_bit(STATUS_SCANNING, &priv->status))
2422 msleep(1);
2423 mutex_lock(&priv->mutex);
2424
2425 return test_bit(STATUS_SCANNING, &priv->status);
2426 }
2427
2428 return ret;
2429}
2430
bb8c093b 2431static void iwl4965_sequence_reset(struct iwl4965_priv *priv)
b481de9c
ZY
2432{
2433 /* Reset ieee stats */
2434
2435 /* We don't reset the net_device_stats (ieee->stats) on
2436 * re-association */
2437
2438 priv->last_seq_num = -1;
2439 priv->last_frag_num = -1;
2440 priv->last_packet_time = 0;
2441
bb8c093b 2442 iwl4965_scan_cancel(priv);
b481de9c
ZY
2443}
2444
2445#define MAX_UCODE_BEACON_INTERVAL 4096
2446#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2447
bb8c093b 2448static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
b481de9c
ZY
2449{
2450 u16 new_val = 0;
2451 u16 beacon_factor = 0;
2452
2453 beacon_factor =
2454 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2455 / MAX_UCODE_BEACON_INTERVAL;
2456 new_val = beacon_val / beacon_factor;
2457
2458 return cpu_to_le16(new_val);
2459}
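
/*
 * Worked example (not in the original source): the uCode cannot handle a
 * beacon interval above MAX_UCODE_BEACON_INTERVAL (4096 TU), so the value
 * is divided down.  For beacon_val = 8000:
 *   beacon_factor = (8000 + 4096) / 4096 = 2
 *   new_val       = 8000 / 2            = 4000   (<= 4096)
 * A typical beacon_val of 100 gives beacon_factor 1 and is left unchanged.
 */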
2460
bb8c093b 2461static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv)
b481de9c
ZY
2462{
2463 u64 interval_tm_unit;
2464 u64 tsf, result;
2465 unsigned long flags;
2466 struct ieee80211_conf *conf = NULL;
2467 u16 beacon_int = 0;
2468
2469 conf = ieee80211_get_hw_conf(priv->hw);
2470
2471 spin_lock_irqsave(&priv->lock, flags);
2472 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2473 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2474
2475 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2476
2477 tsf = priv->timestamp1;
2478 tsf = ((tsf << 32) | priv->timestamp0);
2479
2480 beacon_int = priv->beacon_int;
2481 spin_unlock_irqrestore(&priv->lock, flags);
2482
2483 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2484 if (beacon_int == 0) {
2485 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2486 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2487 } else {
2488 priv->rxon_timing.beacon_interval =
2489 cpu_to_le16(beacon_int);
2490 priv->rxon_timing.beacon_interval =
bb8c093b 2491 iwl4965_adjust_beacon_interval(
b481de9c
ZY
2492 le16_to_cpu(priv->rxon_timing.beacon_interval));
2493 }
2494
2495 priv->rxon_timing.atim_window = 0;
2496 } else {
2497 priv->rxon_timing.beacon_interval =
bb8c093b 2498 iwl4965_adjust_beacon_interval(conf->beacon_int);
b481de9c
ZY
2499 /* TODO: we need to get atim_window from upper stack
2500 * for now we set to 0 */
2501 priv->rxon_timing.atim_window = 0;
2502 }
2503
2504 interval_tm_unit =
2505 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2506 result = do_div(tsf, interval_tm_unit);
2507 priv->rxon_timing.beacon_init_val =
2508 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2509
2510 IWL_DEBUG_ASSOC
2511 ("beacon interval %d beacon timer %d beacon tim %d\n",
2512 le16_to_cpu(priv->rxon_timing.beacon_interval),
2513 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2514 le16_to_cpu(priv->rxon_timing.atim_window));
2515}
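
/*
 * Worked example (not in the original source): with a 100 TU beacon
 * interval, interval_tm_unit = 100 * 1024 = 102400 usec.  do_div() leaves
 * the quotient in 'tsf' and returns the remainder, i.e. how far into the
 * current beacon interval the TSF is.  If that remainder is 30000 usec,
 * beacon_init_val = 102400 - 30000 = 72400 usec until the next expected
 * beacon.
 */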
2516
bb8c093b 2517static int iwl4965_scan_initiate(struct iwl4965_priv *priv)
b481de9c
ZY
2518{
2519 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2520 IWL_ERROR("APs don't scan.\n");
2521 return 0;
2522 }
2523
bb8c093b 2524 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
2525 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2526 return -EIO;
2527 }
2528
2529 if (test_bit(STATUS_SCANNING, &priv->status)) {
2530 IWL_DEBUG_SCAN("Scan already in progress.\n");
2531 return -EAGAIN;
2532 }
2533
2534 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2535 IWL_DEBUG_SCAN("Scan request while abort pending. "
2536 "Queuing.\n");
2537 return -EAGAIN;
2538 }
2539
2540 IWL_DEBUG_INFO("Starting scan...\n");
2541 priv->scan_bands = 2;
2542 set_bit(STATUS_SCANNING, &priv->status);
2543 priv->scan_start = jiffies;
2544 priv->scan_pass_start = priv->scan_start;
2545
2546 queue_work(priv->workqueue, &priv->request_scan);
2547
2548 return 0;
2549}
2550
bb8c093b 2551static int iwl4965_set_rxon_hwcrypto(struct iwl4965_priv *priv, int hw_decrypt)
b481de9c 2552{
bb8c093b 2553 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
b481de9c
ZY
2554
2555 if (hw_decrypt)
2556 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2557 else
2558 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2559
2560 return 0;
2561}
2562
bb8c093b 2563static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode)
b481de9c
ZY
2564{
2565 if (phymode == MODE_IEEE80211A) {
2566 priv->staging_rxon.flags &=
2567 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2568 | RXON_FLG_CCK_MSK);
2569 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2570 } else {
bb8c093b 2571 /* Copied from iwl4965_bg_post_associate() */
b481de9c
ZY
2572 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2573 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2574 else
2575 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2576
2577 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2578 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2579
2580 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2581 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2582 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2583 }
2584}
2585
2586/*
01ebd063 2587 * initialize rxon structure with default values from eeprom
b481de9c 2588 */
bb8c093b 2589static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
b481de9c 2590{
bb8c093b 2591 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
2592
2593 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2594
2595 switch (priv->iw_mode) {
2596 case IEEE80211_IF_TYPE_AP:
2597 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2598 break;
2599
2600 case IEEE80211_IF_TYPE_STA:
2601 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2602 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2603 break;
2604
2605 case IEEE80211_IF_TYPE_IBSS:
2606 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2607 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2608 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2609 RXON_FILTER_ACCEPT_GRP_MSK;
2610 break;
2611
2612 case IEEE80211_IF_TYPE_MNTR:
2613 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2614 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2615 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2616 break;
2617 }
2618
2619#if 0
2620 /* TODO: Figure out when short_preamble would be set and cache from
2621 * that */
2622 if (!hw_to_local(priv->hw)->short_preamble)
2623 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2624 else
2625 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2626#endif
2627
bb8c093b 2628 ch_info = iwl4965_get_channel_info(priv, priv->phymode,
b481de9c
ZY
2629 le16_to_cpu(priv->staging_rxon.channel));
2630
2631 if (!ch_info)
2632 ch_info = &priv->channel_info[0];
2633
2634 /*
2635 * In some cases all A-band channels are non-IBSS;
2636 * in that case force a B/G channel.
2637 */
2638 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2639 !(is_channel_ibss(ch_info)))
2640 ch_info = &priv->channel_info[0];
2641
2642 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2643 if (is_channel_a_band(ch_info))
2644 priv->phymode = MODE_IEEE80211A;
2645 else
2646 priv->phymode = MODE_IEEE80211G;
2647
bb8c093b 2648 iwl4965_set_flags_for_phymode(priv, priv->phymode);
b481de9c
ZY
2649
2650 priv->staging_rxon.ofdm_basic_rates =
2651 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2652 priv->staging_rxon.cck_basic_rates =
2653 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2654
2655 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2656 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2657 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2658 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2659 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2660 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2661 iwl4965_set_rxon_chain(priv);
2662}
2663
bb8c093b 2664static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode)
b481de9c 2665{
b481de9c 2666 if (mode == IEEE80211_IF_TYPE_IBSS) {
bb8c093b 2667 const struct iwl4965_channel_info *ch_info;
b481de9c 2668
bb8c093b 2669 ch_info = iwl4965_get_channel_info(priv,
b481de9c
ZY
2670 priv->phymode,
2671 le16_to_cpu(priv->staging_rxon.channel));
2672
2673 if (!ch_info || !is_channel_ibss(ch_info)) {
2674 IWL_ERROR("channel %d not IBSS channel\n",
2675 le16_to_cpu(priv->staging_rxon.channel));
2676 return -EINVAL;
2677 }
2678 }
2679
b481de9c
ZY
2680 priv->iw_mode = mode;
2681
bb8c093b 2682 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
2683 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2684
bb8c093b 2685 iwl4965_clear_stations_table(priv);
b481de9c 2686
fde3571f
MA
2687 /* don't commit RXON if RF-kill is on */
2688 if (!iwl4965_is_ready_rf(priv))
2689 return -EAGAIN;
2690
2691 cancel_delayed_work(&priv->scan_check);
2692 if (iwl4965_scan_cancel_timeout(priv, 100)) {
2693 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2694 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2695 return -EAGAIN;
2696 }
2697
bb8c093b 2698 iwl4965_commit_rxon(priv);
b481de9c
ZY
2699
2700 return 0;
2701}
2702
bb8c093b 2703static void iwl4965_build_tx_cmd_hwcrypto(struct iwl4965_priv *priv,
b481de9c 2704 struct ieee80211_tx_control *ctl,
bb8c093b 2705 struct iwl4965_cmd *cmd,
b481de9c
ZY
2706 struct sk_buff *skb_frag,
2707 int last_frag)
2708{
bb8c093b 2709 struct iwl4965_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
b481de9c
ZY
2710
2711 switch (keyinfo->alg) {
2712 case ALG_CCMP:
2713 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2714 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2715 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2716 break;
2717
2718 case ALG_TKIP:
2719#if 0
2720 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2721
2722 if (last_frag)
2723 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2724 8);
2725 else
2726 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2727#endif
2728 break;
2729
2730 case ALG_WEP:
2731 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2732 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2733
2734 if (keyinfo->keylen == 13)
2735 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2736
2737 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2738
2739 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2740 "with key %d\n", ctl->key_idx);
2741 break;
2742
b481de9c
ZY
2743 default:
2744 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2745 break;
2746 }
2747}
2748
2749/*
2750 * Handle building the REPLY_TX command.
2751 */
bb8c093b
CH
2752static void iwl4965_build_tx_cmd_basic(struct iwl4965_priv *priv,
2753 struct iwl4965_cmd *cmd,
b481de9c
ZY
2754 struct ieee80211_tx_control *ctrl,
2755 struct ieee80211_hdr *hdr,
2756 int is_unicast, u8 std_id)
2757{
2758 __le16 *qc;
2759 u16 fc = le16_to_cpu(hdr->frame_control);
2760 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2761
2762 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2763 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2764 tx_flags |= TX_CMD_FLG_ACK_MSK;
2765 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2766 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2767 if (ieee80211_is_probe_response(fc) &&
2768 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2769 tx_flags |= TX_CMD_FLG_TSF_MSK;
2770 } else {
2771 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2772 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2773 }
2774
87e4f7df
TW
2775 if (ieee80211_is_back_request(fc))
2776 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
2777
2778
b481de9c
ZY
2779 cmd->cmd.tx.sta_id = std_id;
2780 if (ieee80211_get_morefrag(hdr))
2781 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2782
2783 qc = ieee80211_get_qos_ctrl(hdr);
2784 if (qc) {
2785 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2786 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2787 } else
2788 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2789
2790 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2791 tx_flags |= TX_CMD_FLG_RTS_MSK;
2792 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2793 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2794 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2795 tx_flags |= TX_CMD_FLG_CTS_MSK;
2796 }
2797
2798 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2799 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2800
2801 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2802 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2803 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2804 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
bc434dd2 2805 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 2806 else
bc434dd2 2807 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
b481de9c
ZY
2808 } else
2809 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2810
2811 cmd->cmd.tx.driver_txop = 0;
2812 cmd->cmd.tx.tx_flags = tx_flags;
2813 cmd->cmd.tx.next_frame_len = 0;
2814}
2815
6440adb5
BC
2816/**
2817 * iwl4965_get_sta_id - Find station's index within station table
2818 *
2819 * If new IBSS station, create new entry in station table
2820 */
9fbab516
BC
2821static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2822 struct ieee80211_hdr *hdr)
b481de9c
ZY
2823{
2824 int sta_id;
2825 u16 fc = le16_to_cpu(hdr->frame_control);
0795af57 2826 DECLARE_MAC_BUF(mac);
b481de9c 2827
6440adb5 2828 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
2829 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2830 is_multicast_ether_addr(hdr->addr1))
2831 return priv->hw_setting.bcast_sta_id;
2832
2833 switch (priv->iw_mode) {
2834
6440adb5
BC
2835 /* If we are a client station in a BSS network, use the special
2836 * AP station entry (that's the only station we communicate with) */
b481de9c
ZY
2837 case IEEE80211_IF_TYPE_STA:
2838 return IWL_AP_ID;
2839
2840 /* If we are an AP, then find the station, or use BCAST */
2841 case IEEE80211_IF_TYPE_AP:
bb8c093b 2842 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2843 if (sta_id != IWL_INVALID_STATION)
2844 return sta_id;
2845 return priv->hw_setting.bcast_sta_id;
2846
6440adb5
BC
2847 /* If this frame is going out to an IBSS network, find the station,
2848 * or create a new station table entry */
b481de9c 2849 case IEEE80211_IF_TYPE_IBSS:
bb8c093b 2850 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2851 if (sta_id != IWL_INVALID_STATION)
2852 return sta_id;
2853
6440adb5 2854 /* Create new station table entry */
67d62035
RR
2855 sta_id = iwl4965_add_station_flags(priv, hdr->addr1,
2856 0, CMD_ASYNC, NULL);
b481de9c
ZY
2857
2858 if (sta_id != IWL_INVALID_STATION)
2859 return sta_id;
2860
0795af57 2861 IWL_DEBUG_DROP("Station %s not in station map. "
b481de9c 2862 "Defaulting to broadcast...\n",
0795af57 2863 print_mac(mac, hdr->addr1));
bb8c093b 2864 iwl4965_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
b481de9c
ZY
2865 return priv->hw_setting.bcast_sta_id;
2866
2867 default:
01ebd063 2868 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
b481de9c
ZY
2869 return priv->hw_setting.bcast_sta_id;
2870 }
2871}
2872
2873/*
2874 * start REPLY_TX command process
2875 */
bb8c093b 2876static int iwl4965_tx_skb(struct iwl4965_priv *priv,
b481de9c
ZY
2877 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2878{
2879 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bb8c093b 2880 struct iwl4965_tfd_frame *tfd;
b481de9c
ZY
2881 u32 *control_flags;
2882 int txq_id = ctl->queue;
bb8c093b
CH
2883 struct iwl4965_tx_queue *txq = NULL;
2884 struct iwl4965_queue *q = NULL;
b481de9c
ZY
2885 dma_addr_t phys_addr;
2886 dma_addr_t txcmd_phys;
87e4f7df 2887 dma_addr_t scratch_phys;
bb8c093b 2888 struct iwl4965_cmd *out_cmd = NULL;
b481de9c
ZY
2889 u16 len, idx, len_org;
2890 u8 id, hdr_len, unicast;
2891 u8 sta_id;
2892 u16 seq_number = 0;
2893 u16 fc;
2894 __le16 *qc;
2895 u8 wait_write_ptr = 0;
2896 unsigned long flags;
2897 int rc;
2898
2899 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 2900 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
2901 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2902 goto drop_unlock;
2903 }
2904
32bfd35d
JB
2905 if (!priv->vif) {
2906 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
b481de9c
ZY
2907 goto drop_unlock;
2908 }
2909
2910 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2911 IWL_ERROR("ERROR: No TX rate available.\n");
2912 goto drop_unlock;
2913 }
2914
2915 unicast = !is_multicast_ether_addr(hdr->addr1);
2916 id = 0;
2917
2918 fc = le16_to_cpu(hdr->frame_control);
2919
c8b0e6e1 2920#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
2921 if (ieee80211_is_auth(fc))
2922 IWL_DEBUG_TX("Sending AUTH frame\n");
2923 else if (ieee80211_is_assoc_request(fc))
2924 IWL_DEBUG_TX("Sending ASSOC frame\n");
2925 else if (ieee80211_is_reassoc_request(fc))
2926 IWL_DEBUG_TX("Sending REASSOC frame\n");
2927#endif
2928
7878a5a4
MA
2929 /* drop all data frames if we are not associated */
2930 if (!iwl4965_is_associated(priv) && !priv->assoc_id &&
b481de9c 2931 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
bb8c093b 2932 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n");
b481de9c
ZY
2933 goto drop_unlock;
2934 }
2935
2936 spin_unlock_irqrestore(&priv->lock, flags);
2937
2938 hdr_len = ieee80211_get_hdrlen(fc);
6440adb5
BC
2939
2940 /* Find (or create) index into station table for destination station */
bb8c093b 2941 sta_id = iwl4965_get_sta_id(priv, hdr);
b481de9c 2942 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
2943 DECLARE_MAC_BUF(mac);
2944
2945 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2946 print_mac(mac, hdr->addr1));
b481de9c
ZY
2947 goto drop;
2948 }
2949
2950 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2951
2952 qc = ieee80211_get_qos_ctrl(hdr);
2953 if (qc) {
2954 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2955 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2956 IEEE80211_SCTL_SEQ;
2957 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2958 (hdr->seq_ctrl &
2959 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2960 seq_number += 0x10;
c8b0e6e1
CH
2961#ifdef CONFIG_IWL4965_HT
2962#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
2963 /* aggregation is on for this <sta,tid> */
2964 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2965 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
c8b0e6e1
CH
2966#endif /* CONFIG_IWL4965_HT_AGG */
2967#endif /* CONFIG_IWL4965_HT */
b481de9c 2968 }
6440adb5
BC
2969
2970 /* Descriptor for chosen Tx queue */
b481de9c
ZY
2971 txq = &priv->txq[txq_id];
2972 q = &txq->q;
2973
2974 spin_lock_irqsave(&priv->lock, flags);
2975
6440adb5 2976 /* Set up first empty TFD within this queue's circular TFD buffer */
fc4b6853 2977 tfd = &txq->bd[q->write_ptr];
b481de9c
ZY
2978 memset(tfd, 0, sizeof(*tfd));
2979 control_flags = (u32 *) tfd;
fc4b6853 2980 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 2981
6440adb5 2982 /* Set up driver data for this TFD */
bb8c093b 2983 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
fc4b6853
TW
2984 txq->txb[q->write_ptr].skb[0] = skb;
2985 memcpy(&(txq->txb[q->write_ptr].status.control),
b481de9c 2986 ctl, sizeof(struct ieee80211_tx_control));
6440adb5
BC
2987
2988 /* Set up first empty entry in queue's array of Tx/cmd buffers */
b481de9c
ZY
2989 out_cmd = &txq->cmd[idx];
2990 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2991 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
6440adb5
BC
2992
2993 /*
2994 * Set up the Tx-command (not MAC!) header.
2995 * Store the chosen Tx queue and TFD index within the sequence field;
2996 * after Tx, uCode's Tx response will return this value so driver can
2997 * locate the frame within the tx queue and do post-tx processing.
2998 */
b481de9c
ZY
2999 out_cmd->hdr.cmd = REPLY_TX;
3000 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 3001 INDEX_TO_SEQ(q->write_ptr)));
6440adb5
BC
3002
3003 /* Copy MAC header from skb into command buffer */
b481de9c
ZY
3004 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
3005
6440adb5
BC
3006 /*
3007 * Use the first empty entry in this queue's command buffer array
3008 * to contain the Tx command and MAC header concatenated together
3009 * (payload data will be in another buffer).
3010 * Size of this varies, due to varying MAC header length.
3011 * If end is not dword aligned, we'll have 2 extra bytes at the end
3012 * of the MAC header (device reads on dword boundaries).
3013 * We'll tell device about this padding later.
3014 */
b481de9c 3015 len = priv->hw_setting.tx_cmd_len +
bb8c093b 3016 sizeof(struct iwl4965_cmd_header) + hdr_len;
b481de9c
ZY
3017
3018 len_org = len;
3019 len = (len + 3) & ~3;
3020
3021 if (len_org != len)
3022 len_org = 1;
3023 else
3024 len_org = 0;
3025
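	/*
	 * Worked example (not in the original source): MAC header lengths
	 * are even, so a misaligned total is always 2 bytes short of a
	 * dword boundary.  E.g. if tx_cmd_len + cmd header + hdr_len = 62,
	 * then (62 + 3) & ~3 = 64, len_org is set to 1, and the
	 * TX_CMD_FLG_MH_PAD_MSK flag set further below tells the device
	 * about the 2 padding bytes.  A total of 64 stays 64 and needs no
	 * padding.
	 */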
6440adb5
BC
3026 /* Physical address of this Tx command's header (not MAC header!),
3027 * within command buffer array. */
bb8c093b
CH
3028 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl4965_cmd) * idx +
3029 offsetof(struct iwl4965_cmd, hdr);
b481de9c 3030
6440adb5
BC
3031 /* Add buffer containing Tx command and MAC(!) header to TFD's
3032 * first entry */
bb8c093b 3033 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
b481de9c
ZY
3034
3035 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
bb8c093b 3036 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
b481de9c 3037
6440adb5
BC
3038 /* Set up TFD's 2nd entry to point directly to remainder of skb,
3039 * if any (802.11 null frames have no payload). */
b481de9c
ZY
3040 len = skb->len - hdr_len;
3041 if (len) {
3042 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
3043 len, PCI_DMA_TODEVICE);
bb8c093b 3044 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
b481de9c
ZY
3045 }
3046
6440adb5 3047 /* Tell 4965 about any 2-byte padding after MAC header */
b481de9c
ZY
3048 if (len_org)
3049 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
3050
6440adb5 3051 /* Total # bytes to be transmitted */
b481de9c
ZY
3052 len = (u16)skb->len;
3053 out_cmd->cmd.tx.len = cpu_to_le16(len);
3054
3055 /* TODO need this for burst mode later on */
bb8c093b 3056 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
b481de9c
ZY
3057
3058 /* set is_hcca to 0; it probably will never be implemented */
bb8c093b 3059 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
b481de9c 3060
87e4f7df
TW
3061 scratch_phys = txcmd_phys + sizeof(struct iwl4965_cmd_header) +
3062 offsetof(struct iwl4965_tx_cmd, scratch);
3063 out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys);
3064 out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
3065
3066#ifdef CONFIG_IWL4965_HT_AGG
3067#ifdef CONFIG_IWL4965_HT
3068 /* TODO: move this functionality to rate scaling */
3069 iwl4965_tl_get_stats(priv, hdr);
3070#endif /* CONFIG_IWL4965_HT_AGG */
3071#endif /*CONFIG_IWL4965_HT */
3072
b481de9c
ZY
3073
3074 if (!ieee80211_get_morefrag(hdr)) {
3075 txq->need_update = 1;
3076 if (qc) {
3077 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
3078 priv->stations[sta_id].tid[tid].seq_number = seq_number;
3079 }
3080 } else {
3081 wait_write_ptr = 1;
3082 txq->need_update = 0;
3083 }
3084
bb8c093b 3085 iwl4965_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
b481de9c
ZY
3086 sizeof(out_cmd->cmd.tx));
3087
bb8c093b 3088 iwl4965_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
b481de9c
ZY
3089 ieee80211_get_hdrlen(fc));
3090
6440adb5 3091 /* Set up entry for this TFD in Tx byte-count array */
b481de9c
ZY
3092 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
3093
6440adb5 3094 /* Tell device the write index *just past* this latest filled TFD */
bb8c093b
CH
3095 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
3096 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3097 spin_unlock_irqrestore(&priv->lock, flags);
3098
3099 if (rc)
3100 return rc;
3101
bb8c093b 3102 if ((iwl4965_queue_space(q) < q->high_mark)
b481de9c
ZY
3103 && priv->mac80211_registered) {
3104 if (wait_write_ptr) {
3105 spin_lock_irqsave(&priv->lock, flags);
3106 txq->need_update = 1;
bb8c093b 3107 iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3108 spin_unlock_irqrestore(&priv->lock, flags);
3109 }
3110
3111 ieee80211_stop_queue(priv->hw, ctl->queue);
3112 }
3113
3114 return 0;
3115
3116drop_unlock:
3117 spin_unlock_irqrestore(&priv->lock, flags);
3118drop:
3119 return -1;
3120}
3121
bb8c093b 3122static void iwl4965_set_rate(struct iwl4965_priv *priv)
b481de9c
ZY
3123{
3124 const struct ieee80211_hw_mode *hw = NULL;
3125 struct ieee80211_rate *rate;
3126 int i;
3127
bb8c093b 3128 hw = iwl4965_get_hw_mode(priv, priv->phymode);
c4ba9621
SA
3129 if (!hw) {
3130 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
3131 return;
3132 }
b481de9c
ZY
3133
3134 priv->active_rate = 0;
3135 priv->active_rate_basic = 0;
3136
3137 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3138 hw->mode == MODE_IEEE80211A ?
3139 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3140
3141 for (i = 0; i < hw->num_rates; i++) {
3142 rate = &(hw->rates[i]);
3143 if ((rate->val < IWL_RATE_COUNT) &&
3144 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3145 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
bb8c093b 3146 rate->val, iwl4965_rates[rate->val].plcp,
b481de9c
ZY
3147 (rate->flags & IEEE80211_RATE_BASIC) ?
3148 "*" : "");
3149 priv->active_rate |= (1 << rate->val);
3150 if (rate->flags & IEEE80211_RATE_BASIC)
3151 priv->active_rate_basic |= (1 << rate->val);
3152 } else
3153 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
bb8c093b 3154 rate->val, iwl4965_rates[rate->val].plcp);
b481de9c
ZY
3155 }
3156
3157 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3158 priv->active_rate, priv->active_rate_basic);
3159
3160 /*
3161 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
3162 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
3163 * OFDM
3164 */
3165 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3166 priv->staging_rxon.cck_basic_rates =
3167 ((priv->active_rate_basic &
3168 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3169 else
3170 priv->staging_rxon.cck_basic_rates =
3171 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3172
3173 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3174 priv->staging_rxon.ofdm_basic_rates =
3175 ((priv->active_rate_basic &
3176 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3177 IWL_FIRST_OFDM_RATE) & 0xFF;
3178 else
3179 priv->staging_rxon.ofdm_basic_rates =
3180 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3181}
3182
bb8c093b 3183static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
b481de9c
ZY
3184{
3185 unsigned long flags;
3186
3187 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3188 return;
3189
3190 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3191 disable_radio ? "OFF" : "ON");
3192
3193 if (disable_radio) {
bb8c093b 3194 iwl4965_scan_cancel(priv);
b481de9c
ZY
3195 /* FIXME: This is a workaround for AP */
3196 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3197 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3198 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
3199 CSR_UCODE_SW_BIT_RFKILL);
3200 spin_unlock_irqrestore(&priv->lock, flags);
bb8c093b 3201 iwl4965_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
b481de9c
ZY
3202 set_bit(STATUS_RF_KILL_SW, &priv->status);
3203 }
3204 return;
3205 }
3206
3207 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3208 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
3209
3210 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3211 spin_unlock_irqrestore(&priv->lock, flags);
3212
3213 /* wake up ucode */
3214 msleep(10);
3215
3216 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
3217 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
3218 if (!iwl4965_grab_nic_access(priv))
3219 iwl4965_release_nic_access(priv);
b481de9c
ZY
3220 spin_unlock_irqrestore(&priv->lock, flags);
3221
3222 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3223 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3224 "disabled by HW switch\n");
3225 return;
3226 }
3227
3228 queue_work(priv->workqueue, &priv->restart);
3229 return;
3230}
3231
bb8c093b 3232void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
b481de9c
ZY
3233 u32 decrypt_res, struct ieee80211_rx_status *stats)
3234{
3235 u16 fc =
3236 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3237
3238 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3239 return;
3240
3241 if (!(fc & IEEE80211_FCTL_PROTECTED))
3242 return;
3243
3244 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3245 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3246 case RX_RES_STATUS_SEC_TYPE_TKIP:
3247 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3248 RX_RES_STATUS_BAD_ICV_MIC)
3249 stats->flag |= RX_FLAG_MMIC_ERROR;
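			/* fall through -- TKIP shares the decrypt-status check below */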
3250 case RX_RES_STATUS_SEC_TYPE_WEP:
3251 case RX_RES_STATUS_SEC_TYPE_CCMP:
3252 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3253 RX_RES_STATUS_DECRYPT_OK) {
3254 IWL_DEBUG_RX("hw decrypt successful\n");
3255 stats->flag |= RX_FLAG_DECRYPTED;
3256 }
3257 break;
3258
3259 default:
3260 break;
3261 }
3262}
3263
b481de9c
ZY
3264
3265#define IWL_PACKET_RETRY_TIME HZ
3266
bb8c093b 3267int iwl4965_is_duplicate_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
3268{
3269 u16 sc = le16_to_cpu(header->seq_ctrl);
3270 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3271 u16 frag = sc & IEEE80211_SCTL_FRAG;
3272 u16 *last_seq, *last_frag;
3273 unsigned long *last_time;
3274
3275 switch (priv->iw_mode) {
3276 case IEEE80211_IF_TYPE_IBSS:{
3277 struct list_head *p;
bb8c093b 3278 struct iwl4965_ibss_seq *entry = NULL;
b481de9c
ZY
3279 u8 *mac = header->addr2;
3280 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3281
3282 __list_for_each(p, &priv->ibss_mac_hash[index]) {
bb8c093b 3283 entry = list_entry(p, struct iwl4965_ibss_seq, list);
b481de9c
ZY
3284 if (!compare_ether_addr(entry->mac, mac))
3285 break;
3286 }
3287 if (p == &priv->ibss_mac_hash[index]) {
3288 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3289 if (!entry) {
bc434dd2 3290 IWL_ERROR("Cannot malloc new mac entry\n");
b481de9c
ZY
3291 return 0;
3292 }
3293 memcpy(entry->mac, mac, ETH_ALEN);
3294 entry->seq_num = seq;
3295 entry->frag_num = frag;
3296 entry->packet_time = jiffies;
bc434dd2 3297 list_add(&entry->list, &priv->ibss_mac_hash[index]);
b481de9c
ZY
3298 return 0;
3299 }
3300 last_seq = &entry->seq_num;
3301 last_frag = &entry->frag_num;
3302 last_time = &entry->packet_time;
3303 break;
3304 }
3305 case IEEE80211_IF_TYPE_STA:
3306 last_seq = &priv->last_seq_num;
3307 last_frag = &priv->last_frag_num;
3308 last_time = &priv->last_packet_time;
3309 break;
3310 default:
3311 return 0;
3312 }
3313 if ((*last_seq == seq) &&
3314 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3315 if (*last_frag == frag)
3316 goto drop;
3317 if (*last_frag + 1 != frag)
3318 /* out-of-order fragment */
3319 goto drop;
3320 } else
3321 *last_seq = seq;
3322
3323 *last_frag = frag;
3324 *last_time = jiffies;
3325 return 0;
3326
3327 drop:
3328 return 1;
3329}
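
/*
 * Worked example (not in the original source): for seq_ctrl = 0x01a5,
 *   seq  = (0x01a5 & IEEE80211_SCTL_SEQ) >> 4 = 0x1a (sequence number 26)
 *   frag = (0x01a5 & IEEE80211_SCTL_FRAG)     = 0x5  (fragment number 5)
 * A frame repeating the last <seq, frag> pair within IWL_PACKET_RETRY_TIME
 * is treated as a duplicate retry and dropped.
 */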
3330
c8b0e6e1 3331#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
3332
3333#include "iwl-spectrum.h"
3334
3335#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3336#define BEACON_TIME_MASK_HIGH 0xFF000000
3337#define TIME_UNIT 1024
3338
3339/*
3340 * extended beacon time format
3341 * a time in usec is packed into a 32-bit value in 8:24 format:
3342 * the high byte is the beacon count,
3343 * the low 3 bytes are the time in usec within one beacon interval
3344 */
3345
bb8c093b 3346static u32 iwl4965_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
3347{
3348 u32 quot;
3349 u32 rem;
3350 u32 interval = beacon_interval * 1024;
3351
3352 if (!interval || !usec)
3353 return 0;
3354
3355 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3356 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3357
3358 return (quot << 24) + rem;
3359}
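
/*
 * Worked example (not in the original source): with a 100 TU beacon
 * interval (102400 usec), usec = 250000 gives
 *   quot = 250000 / 102400 = 2     (two full beacon intervals)
 *   rem  = 250000 % 102400 = 45200 (usec into the next interval)
 * so the packed 8:24 result is (2 << 24) + 45200 = 0x0200b090.
 */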
3360
3361/* base is usually the value we get from the uCode with each received frame,
3362 * i.e. the HW timer counter counting down
3363 */
3364
bb8c093b 3365static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
3366{
3367 u32 base_low = base & BEACON_TIME_MASK_LOW;
3368 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3369 u32 interval = beacon_interval * TIME_UNIT;
3370 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3371 (addon & BEACON_TIME_MASK_HIGH);
3372
3373 if (base_low > addon_low)
3374 res += base_low - addon_low;
3375 else if (base_low < addon_low) {
3376 res += interval + base_low - addon_low;
3377 res += (1 << 24);
3378 } else
3379 res += (1 << 24);
3380
3381 return cpu_to_le32(res);
3382}
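/*
 * Worked example for the carry logic above (illustrative values, not from
 * the original source): with beacon_interval = 100 (102400 usec) and the
 * low fields base_low = 20000, addon_low = 30000, the subtraction would go
 * negative, so the result wraps within one interval
 * (102400 + 20000 - 30000 = 92400 usec) and one beacon period is carried
 * into the high byte via the (1 << 24) addition.
 */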
3383
bb8c093b 3384static int iwl4965_get_measurement(struct iwl4965_priv *priv,
b481de9c
ZY
3385 struct ieee80211_measurement_params *params,
3386 u8 type)
3387{
bb8c093b
CH
3388 struct iwl4965_spectrum_cmd spectrum;
3389 struct iwl4965_rx_packet *res;
3390 struct iwl4965_host_cmd cmd = {
b481de9c
ZY
3391 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3392 .data = (void *)&spectrum,
3393 .meta.flags = CMD_WANT_SKB,
3394 };
3395 u32 add_time = le64_to_cpu(params->start_time);
3396 int rc;
3397 int spectrum_resp_status;
3398 int duration = le16_to_cpu(params->duration);
3399
bb8c093b 3400 if (iwl4965_is_associated(priv))
b481de9c 3401 add_time =
bb8c093b 3402 iwl4965_usecs_to_beacons(
b481de9c
ZY
3403 le64_to_cpu(params->start_time) - priv->last_tsf,
3404 le16_to_cpu(priv->rxon_timing.beacon_interval));
3405
3406 memset(&spectrum, 0, sizeof(spectrum));
3407
3408 spectrum.channel_count = cpu_to_le16(1);
3409 spectrum.flags =
3410 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3411 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3412 cmd.len = sizeof(spectrum);
3413 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3414
bb8c093b 3415 if (iwl4965_is_associated(priv))
b481de9c 3416 spectrum.start_time =
bb8c093b 3417 iwl4965_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
3418 add_time,
3419 le16_to_cpu(priv->rxon_timing.beacon_interval));
3420 else
3421 spectrum.start_time = 0;
3422
3423 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3424 spectrum.channels[0].channel = params->channel;
3425 spectrum.channels[0].type = type;
3426 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3427 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3428 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3429
bb8c093b 3430 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
3431 if (rc)
3432 return rc;
3433
bb8c093b 3434 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
b481de9c
ZY
3435 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3436 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
3437 rc = -EIO;
3438 }
3439
3440 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3441 switch (spectrum_resp_status) {
3442 case 0: /* Command will be handled */
3443 if (res->u.spectrum.id != 0xff) {
3444 IWL_DEBUG_INFO
3445 ("Replaced existing measurement: %d\n",
3446 res->u.spectrum.id);
3447 priv->measurement_status &= ~MEASUREMENT_READY;
3448 }
3449 priv->measurement_status |= MEASUREMENT_ACTIVE;
3450 rc = 0;
3451 break;
3452
3453 case 1: /* Command will not be handled */
3454 rc = -EAGAIN;
3455 break;
3456 }
3457
3458 dev_kfree_skb_any(cmd.meta.u.skb);
3459
3460 return rc;
3461}
3462#endif
3463
bb8c093b
CH
3464static void iwl4965_txstatus_to_ieee(struct iwl4965_priv *priv,
3465 struct iwl4965_tx_info *tx_sta)
b481de9c
ZY
3466{
3467
3468 tx_sta->status.ack_signal = 0;
3469 tx_sta->status.excessive_retries = 0;
3470 tx_sta->status.queue_length = 0;
3471 tx_sta->status.queue_number = 0;
3472
3473 if (in_interrupt())
3474 ieee80211_tx_status_irqsafe(priv->hw,
3475 tx_sta->skb[0], &(tx_sta->status));
3476 else
3477 ieee80211_tx_status(priv->hw,
3478 tx_sta->skb[0], &(tx_sta->status));
3479
3480 tx_sta->skb[0] = NULL;
3481}
3482
3483/**
6440adb5 3484 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
b481de9c 3485 *
6440adb5
BC
3486 * When FW advances 'R' index, all entries between old and new 'R' index
3487 * need to be reclaimed. As a result, some free space forms. If there is
3488 * enough free space (> low mark), wake the stack that feeds us.
b481de9c 3489 */
bb8c093b 3490int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
b481de9c 3491{
bb8c093b
CH
3492 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
3493 struct iwl4965_queue *q = &txq->q;
b481de9c
ZY
3494 int nfreed = 0;
3495
3496 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3497 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3498 "is out of range [0-%d] %d %d.\n", txq_id,
fc4b6853 3499 index, q->n_bd, q->write_ptr, q->read_ptr);
b481de9c
ZY
3500 return 0;
3501 }
3502
bb8c093b 3503 for (index = iwl4965_queue_inc_wrap(index, q->n_bd);
fc4b6853 3504 q->read_ptr != index;
bb8c093b 3505 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd)) {
b481de9c 3506 if (txq_id != IWL_CMD_QUEUE_NUM) {
bb8c093b 3507 iwl4965_txstatus_to_ieee(priv,
fc4b6853 3508 &(txq->txb[txq->q.read_ptr]));
bb8c093b 3509 iwl4965_hw_txq_free_tfd(priv, txq);
b481de9c
ZY
3510 } else if (nfreed > 1) {
3511 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
fc4b6853 3512 q->write_ptr, q->read_ptr);
b481de9c
ZY
3513 queue_work(priv->workqueue, &priv->restart);
3514 }
3515 nfreed++;
3516 }
3517
bb8c093b 3518 if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
b481de9c
ZY
3519 (txq_id != IWL_CMD_QUEUE_NUM) &&
3520 priv->mac80211_registered)
3521 ieee80211_wake_queue(priv->hw, txq_id);
3522
3523
3524 return nfreed;
3525}
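/*
 * Worked example (illustrative values, not from the original source): with
 * n_bd = 256, q->read_ptr = 10 and a firmware-reported index of 13, the
 * loop above advances read_ptr through 10, 11, 12 and 13, freeing four
 * TFDs and returning nfreed = 4; the mac80211 queue is then woken if the
 * queue has risen above its low mark.
 */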
3526
bb8c093b 3527static int iwl4965_is_tx_success(u32 status)
b481de9c
ZY
3528{
3529 status &= TX_STATUS_MSK;
3530 return (status == TX_STATUS_SUCCESS)
3531 || (status == TX_STATUS_DIRECT_DONE);
3532}
3533
3534/******************************************************************************
3535 *
3536 * Generic RX handler implementations
3537 *
3538 ******************************************************************************/
c8b0e6e1
CH
3539#ifdef CONFIG_IWL4965_HT
3540#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3541
bb8c093b 3542static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv,
b481de9c
ZY
3543 struct ieee80211_hdr *hdr)
3544{
3545 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
3546 return IWL_AP_ID;
3547 else {
3548 u8 *da = ieee80211_get_DA(hdr);
bb8c093b 3549 return iwl4965_hw_find_station(priv, da);
b481de9c
ZY
3550 }
3551}
3552
bb8c093b
CH
3553static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
3554 struct iwl4965_priv *priv, int txq_id, int idx)
b481de9c
ZY
3555{
3556 if (priv->txq[txq_id].txb[idx].skb[0])
3557 return (struct ieee80211_hdr *)priv->txq[txq_id].
3558 txb[idx].skb[0]->data;
3559 return NULL;
3560}
3561
bb8c093b 3562static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
b481de9c
ZY
3563{
3564 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
3565 tx_resp->frame_count);
3566 return le32_to_cpu(*scd_ssn) & MAX_SN;
3567
3568}
6440adb5
BC
3569
3570/**
3571 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
3572 */
bb8c093b
CH
3573static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3574 struct iwl4965_ht_agg *agg,
3575 struct iwl4965_tx_resp *tx_resp,
b481de9c
ZY
3576 u16 start_idx)
3577{
3578 u32 status;
3579 __le32 *frame_status = &tx_resp->status;
3580 struct ieee80211_tx_status *tx_status = NULL;
3581 struct ieee80211_hdr *hdr = NULL;
3582 int i, sh;
3583 int txq_id, idx;
3584 u16 seq;
3585
3586 if (agg->wait_for_ba)
6440adb5 3587 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
b481de9c
ZY
3588
3589 agg->frame_count = tx_resp->frame_count;
3590 agg->start_idx = start_idx;
3591 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3592 agg->bitmap0 = agg->bitmap1 = 0;
3593
6440adb5 3594 /* # frames attempted by Tx command */
b481de9c 3595 if (agg->frame_count == 1) {
6440adb5 3596 /* Only one frame was attempted; no block-ack will arrive */
bb8c093b 3597 struct iwl4965_tx_queue *txq ;
b481de9c
ZY
3598 status = le32_to_cpu(frame_status[0]);
3599
3600 txq_id = agg->txq_id;
3601 txq = &priv->txq[txq_id];
3602 /* FIXME: code repetition */
3603 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
3604 agg->frame_count, agg->start_idx);
3605
fc4b6853 3606 tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status);
b481de9c
ZY
3607 tx_status->retry_count = tx_resp->failure_frame;
3608 tx_status->queue_number = status & 0xff;
3609 tx_status->queue_length = tx_resp->bt_kill_count;
3610 tx_status->queue_length |= tx_resp->failure_rts;
3611
bb8c093b 3612 tx_status->flags = iwl4965_is_tx_success(status)?
b481de9c
ZY
3613 IEEE80211_TX_STATUS_ACK : 0;
3614 tx_status->control.tx_rate =
bb8c093b 3615 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3616 /* FIXME: code repetition end */
3617
3618 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3619 status & 0xff, tx_resp->failure_frame);
3620 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
bb8c093b 3621 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
b481de9c
ZY
3622
3623 agg->wait_for_ba = 0;
3624 } else {
6440adb5 3625 /* Two or more frames were attempted; expect block-ack */
b481de9c
ZY
3626 u64 bitmap = 0;
3627 int start = agg->start_idx;
3628
6440adb5 3629 /* Construct bit-map of pending frames within Tx window */
b481de9c
ZY
3630 for (i = 0; i < agg->frame_count; i++) {
3631 u16 sc;
3632 status = le32_to_cpu(frame_status[i]);
3633 seq = status >> 16;
3634 idx = SEQ_TO_INDEX(seq);
3635 txq_id = SEQ_TO_QUEUE(seq);
3636
3637 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3638 AGG_TX_STATE_ABORT_MSK))
3639 continue;
3640
3641 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3642 agg->frame_count, txq_id, idx);
3643
bb8c093b 3644 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
b481de9c
ZY
3645
3646 sc = le16_to_cpu(hdr->seq_ctrl);
3647 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3648 IWL_ERROR("BUG_ON idx doesn't match seq control"
3649 " idx=%d, seq_idx=%d, seq=%d\n",
3650 idx, SEQ_TO_SN(sc),
3651 hdr->seq_ctrl);
3652 return -1;
3653 }
3654
3655 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3656 i, idx, SEQ_TO_SN(sc));
3657
3658 sh = idx - start;
3659 if (sh > 64) {
3660 sh = (start - idx) + 0xff;
3661 bitmap = bitmap << sh;
3662 sh = 0;
3663 start = idx;
3664 } else if (sh < -64)
3665 sh = 0xff - (start - idx);
3666 else if (sh < 0) {
3667 sh = start - idx;
3668 start = idx;
3669 bitmap = bitmap << sh;
3670 sh = 0;
3671 }
3672 bitmap |= (1 << sh);
3673 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3674 start, (u32)(bitmap & 0xFFFFFFFF));
3675 }
3676
3677 agg->bitmap0 = bitmap & 0xFFFFFFFF;
3678 agg->bitmap1 = bitmap >> 32;
3679 agg->start_idx = start;
3680 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3681 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
3682 agg->frame_count, agg->start_idx,
3683 agg->bitmap0);
3684
3685 if (bitmap)
3686 agg->wait_for_ba = 1;
3687 }
3688 return 0;
3689}
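/*
 * Worked example for the bitmap construction above (illustrative values,
 * not from the original source): if the aggregation attempted frames at
 * indexes 5, 6 and 8 with start_idx = 5, the shifts are sh = 0, 1 and 3,
 * so bitmap0 ends up as 0x0B (bits 0, 1 and 3 set), start stays at 5 and
 * wait_for_ba is set because the bitmap is non-zero.
 */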
3690#endif
3691#endif
3692
6440adb5
BC
3693/**
3694 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3695 */
bb8c093b
CH
3696static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3697 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3698{
bb8c093b 3699 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3700 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3701 int txq_id = SEQ_TO_QUEUE(sequence);
3702 int index = SEQ_TO_INDEX(sequence);
bb8c093b 3703 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
b481de9c 3704 struct ieee80211_tx_status *tx_status;
bb8c093b 3705 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
b481de9c 3706 u32 status = le32_to_cpu(tx_resp->status);
c8b0e6e1
CH
3707#ifdef CONFIG_IWL4965_HT
3708#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
3709 int tid, sta_id;
3710#endif
3711#endif
3712
3713 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3714 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3715 "is out of range [0-%d] %d %d\n", txq_id,
fc4b6853
TW
3716 index, txq->q.n_bd, txq->q.write_ptr,
3717 txq->q.read_ptr);
b481de9c
ZY
3718 return;
3719 }
3720
c8b0e6e1
CH
3721#ifdef CONFIG_IWL4965_HT
3722#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3723 if (txq->sched_retry) {
bb8c093b 3724 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
b481de9c 3725 struct ieee80211_hdr *hdr =
bb8c093b
CH
3726 iwl4965_tx_queue_get_hdr(priv, txq_id, index);
3727 struct iwl4965_ht_agg *agg = NULL;
b481de9c
ZY
3728 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3729
3730 if (qc == NULL) {
3731 IWL_ERROR("BUG_ON qc is null!!!!\n");
3732 return;
3733 }
3734
3735 tid = le16_to_cpu(*qc) & 0xf;
3736
bb8c093b 3737 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
b481de9c
ZY
3738 if (unlikely(sta_id == IWL_INVALID_STATION)) {
3739 IWL_ERROR("Station not known for\n");
3740 return;
3741 }
3742
3743 agg = &priv->stations[sta_id].tid[tid].agg;
3744
3745 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
3746
3747 if ((tx_resp->frame_count == 1) &&
bb8c093b 3748 !iwl4965_is_tx_success(status)) {
b481de9c
ZY
3749 /* TODO: send BAR */
3750 }
3751
fc4b6853 3752 if ((txq->q.read_ptr != (scd_ssn & 0xff))) {
bb8c093b 3753 index = iwl4965_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
b481de9c
ZY
3754 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3755 "%d index %d\n", scd_ssn , index);
bb8c093b 3756 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
3757 }
3758 } else {
c8b0e6e1
CH
3759#endif /* CONFIG_IWL4965_HT_AGG */
3760#endif /* CONFIG_IWL4965_HT */
fc4b6853 3761 tx_status = &(txq->txb[txq->q.read_ptr].status);
b481de9c
ZY
3762
3763 tx_status->retry_count = tx_resp->failure_frame;
3764 tx_status->queue_number = status;
3765 tx_status->queue_length = tx_resp->bt_kill_count;
3766 tx_status->queue_length |= tx_resp->failure_rts;
3767
3768 tx_status->flags =
bb8c093b 3769 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
b481de9c
ZY
3770
3771 tx_status->control.tx_rate =
bb8c093b 3772 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3773
3774 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
bb8c093b 3775 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
b481de9c
ZY
3776 status, le32_to_cpu(tx_resp->rate_n_flags),
3777 tx_resp->failure_frame);
3778
3779 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3780 if (index != -1)
bb8c093b 3781 iwl4965_tx_queue_reclaim(priv, txq_id, index);
c8b0e6e1
CH
3782#ifdef CONFIG_IWL4965_HT
3783#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3784 }
c8b0e6e1
CH
3785#endif /* CONFIG_IWL4965_HT_AGG */
3786#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
3787
3788 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3789 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3790}
3791
3792
bb8c093b
CH
3793static void iwl4965_rx_reply_alive(struct iwl4965_priv *priv,
3794 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3795{
bb8c093b
CH
3796 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3797 struct iwl4965_alive_resp *palive;
b481de9c
ZY
3798 struct delayed_work *pwork;
3799
3800 palive = &pkt->u.alive_frame;
3801
3802 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3803 "0x%01X 0x%01X\n",
3804 palive->is_valid, palive->ver_type,
3805 palive->ver_subtype);
3806
3807 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3808 IWL_DEBUG_INFO("Initialization Alive received.\n");
3809 memcpy(&priv->card_alive_init,
3810 &pkt->u.alive_frame,
bb8c093b 3811 sizeof(struct iwl4965_init_alive_resp));
b481de9c
ZY
3812 pwork = &priv->init_alive_start;
3813 } else {
3814 IWL_DEBUG_INFO("Runtime Alive received.\n");
3815 memcpy(&priv->card_alive, &pkt->u.alive_frame,
bb8c093b 3816 sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
3817 pwork = &priv->alive_start;
3818 }
3819
3820 /* We delay the ALIVE response by 5ms to
3821 * give the HW RF Kill time to activate... */
3822 if (palive->is_valid == UCODE_VALID_OK)
3823 queue_delayed_work(priv->workqueue, pwork,
3824 msecs_to_jiffies(5));
3825 else
3826 IWL_WARNING("uCode did not respond OK.\n");
3827}
3828
bb8c093b
CH
3829static void iwl4965_rx_reply_add_sta(struct iwl4965_priv *priv,
3830 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3831{
bb8c093b 3832 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3833
3834 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3835 return;
3836}
3837
bb8c093b
CH
3838static void iwl4965_rx_reply_error(struct iwl4965_priv *priv,
3839 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3840{
bb8c093b 3841 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3842
3843 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3844 "seq 0x%04X ser 0x%08X\n",
3845 le32_to_cpu(pkt->u.err_resp.error_type),
3846 get_cmd_string(pkt->u.err_resp.cmd_id),
3847 pkt->u.err_resp.cmd_id,
3848 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3849 le32_to_cpu(pkt->u.err_resp.error_info));
3850}
3851
3852#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3853
bb8c093b 3854static void iwl4965_rx_csa(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3855{
bb8c093b
CH
3856 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3857 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
3858 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
3859 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3860 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3861 rxon->channel = csa->channel;
3862 priv->staging_rxon.channel = csa->channel;
3863}
3864
bb8c093b
CH
3865static void iwl4965_rx_spectrum_measure_notif(struct iwl4965_priv *priv,
3866 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3867{
c8b0e6e1 3868#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
bb8c093b
CH
3869 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3870 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
3871
3872 if (!report->state) {
3873 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3874 "Spectrum Measure Notification: Start\n");
3875 return;
3876 }
3877
3878 memcpy(&priv->measure_report, report, sizeof(*report));
3879 priv->measurement_status |= MEASUREMENT_READY;
3880#endif
3881}
3882
bb8c093b
CH
3883static void iwl4965_rx_pm_sleep_notif(struct iwl4965_priv *priv,
3884 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3885{
c8b0e6e1 3886#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3887 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3888 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
3889 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3890 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3891#endif
3892}
3893
bb8c093b
CH
3894static void iwl4965_rx_pm_debug_statistics_notif(struct iwl4965_priv *priv,
3895 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3896{
bb8c093b 3897 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3898 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3899 "notification for %s:\n",
3900 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
bb8c093b 3901 iwl4965_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
b481de9c
ZY
3902}
3903
bb8c093b 3904static void iwl4965_bg_beacon_update(struct work_struct *work)
b481de9c 3905{
bb8c093b
CH
3906 struct iwl4965_priv *priv =
3907 container_of(work, struct iwl4965_priv, beacon_update);
b481de9c
ZY
3908 struct sk_buff *beacon;
3909
3910 	/* Pull updated AP beacon from mac80211. This will fail if not in AP mode. */
32bfd35d 3911 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL);
b481de9c
ZY
3912
3913 if (!beacon) {
3914 IWL_ERROR("update beacon failed\n");
3915 return;
3916 }
3917
3918 mutex_lock(&priv->mutex);
3919 	/* A new beacon skb is allocated every time; dispose of the previous one. */
3920 if (priv->ibss_beacon)
3921 dev_kfree_skb(priv->ibss_beacon);
3922
3923 priv->ibss_beacon = beacon;
3924 mutex_unlock(&priv->mutex);
3925
bb8c093b 3926 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
3927}
3928
bb8c093b
CH
3929static void iwl4965_rx_beacon_notif(struct iwl4965_priv *priv,
3930 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3931{
c8b0e6e1 3932#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3933 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3934 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3935 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
b481de9c
ZY
3936
3937 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3938 "tsf %d %d rate %d\n",
3939 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3940 beacon->beacon_notify_hdr.failure_frame,
3941 le32_to_cpu(beacon->ibss_mgr_status),
3942 le32_to_cpu(beacon->high_tsf),
3943 le32_to_cpu(beacon->low_tsf), rate);
3944#endif
3945
3946 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3947 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3948 queue_work(priv->workqueue, &priv->beacon_update);
3949}
3950
3951/* Service response to REPLY_SCAN_CMD (0x80) */
bb8c093b
CH
3952static void iwl4965_rx_reply_scan(struct iwl4965_priv *priv,
3953 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3954{
c8b0e6e1 3955#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3956 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3957 struct iwl4965_scanreq_notification *notif =
3958 (struct iwl4965_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
3959
3960 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3961#endif
3962}
3963
3964/* Service SCAN_START_NOTIFICATION (0x82) */
bb8c093b
CH
3965static void iwl4965_rx_scan_start_notif(struct iwl4965_priv *priv,
3966 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3967{
bb8c093b
CH
3968 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3969 struct iwl4965_scanstart_notification *notif =
3970 (struct iwl4965_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
3971 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3972 IWL_DEBUG_SCAN("Scan start: "
3973 "%d [802.11%s] "
3974 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3975 notif->channel,
3976 notif->band ? "bg" : "a",
3977 notif->tsf_high,
3978 notif->tsf_low, notif->status, notif->beacon_timer);
3979}
3980
3981/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
bb8c093b
CH
3982static void iwl4965_rx_scan_results_notif(struct iwl4965_priv *priv,
3983 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3984{
bb8c093b
CH
3985 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3986 struct iwl4965_scanresults_notification *notif =
3987 (struct iwl4965_scanresults_notification *)pkt->u.raw;
b481de9c
ZY
3988
3989 IWL_DEBUG_SCAN("Scan ch.res: "
3990 "%d [802.11%s] "
3991 "(TSF: 0x%08X:%08X) - %d "
3992 "elapsed=%lu usec (%dms since last)\n",
3993 notif->channel,
3994 notif->band ? "bg" : "a",
3995 le32_to_cpu(notif->tsf_high),
3996 le32_to_cpu(notif->tsf_low),
3997 le32_to_cpu(notif->statistics[0]),
3998 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3999 jiffies_to_msecs(elapsed_jiffies
4000 (priv->last_scan_jiffies, jiffies)));
4001
4002 priv->last_scan_jiffies = jiffies;
7878a5a4 4003 priv->next_scan_jiffies = 0;
b481de9c
ZY
4004}
4005
4006/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
bb8c093b
CH
4007static void iwl4965_rx_scan_complete_notif(struct iwl4965_priv *priv,
4008 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4009{
bb8c093b
CH
4010 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4011 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
b481de9c
ZY
4012
4013 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
4014 scan_notif->scanned_channels,
4015 scan_notif->tsf_low,
4016 scan_notif->tsf_high, scan_notif->status);
4017
4018 /* The HW is no longer scanning */
4019 clear_bit(STATUS_SCAN_HW, &priv->status);
4020
4021 /* The scan completion notification came in, so kill that timer... */
4022 cancel_delayed_work(&priv->scan_check);
4023
4024 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
4025 (priv->scan_bands == 2) ? "2.4" : "5.2",
4026 jiffies_to_msecs(elapsed_jiffies
4027 (priv->scan_pass_start, jiffies)));
4028
4029 /* Remove this scanned band from the list
4030 * of pending bands to scan */
4031 priv->scan_bands--;
4032
4033 	/* If a request to abort was given, or the scan did not succeed,
4034 * then we reset the scan state machine and terminate,
4035 * re-queuing another scan if one has been requested */
4036 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4037 IWL_DEBUG_INFO("Aborted scan completed.\n");
4038 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
4039 } else {
4040 /* If there are more bands on this scan pass reschedule */
4041 if (priv->scan_bands > 0)
4042 goto reschedule;
4043 }
4044
4045 priv->last_scan_jiffies = jiffies;
7878a5a4 4046 priv->next_scan_jiffies = 0;
b481de9c
ZY
4047 IWL_DEBUG_INFO("Setting scan to off\n");
4048
4049 clear_bit(STATUS_SCANNING, &priv->status);
4050
4051 IWL_DEBUG_INFO("Scan took %dms\n",
4052 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
4053
4054 queue_work(priv->workqueue, &priv->scan_completed);
4055
4056 return;
4057
4058reschedule:
4059 priv->scan_pass_start = jiffies;
4060 queue_work(priv->workqueue, &priv->request_scan);
4061}
4062
4063/* Handle notification from uCode that card's power state is changing
4064 * due to software, hardware, or critical temperature RFKILL */
bb8c093b
CH
4065static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv,
4066 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4067{
bb8c093b 4068 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
4069 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4070 unsigned long status = priv->status;
4071
4072 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
4073 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4074 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
4075
4076 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4077 RF_CARD_DISABLED)) {
4078
bb8c093b 4079 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
4080 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4081
bb8c093b
CH
4082 if (!iwl4965_grab_nic_access(priv)) {
4083 iwl4965_write_direct32(
b481de9c
ZY
4084 priv, HBUS_TARG_MBX_C,
4085 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4086
bb8c093b 4087 iwl4965_release_nic_access(priv);
b481de9c
ZY
4088 }
4089
4090 if (!(flags & RXON_CARD_DISABLED)) {
bb8c093b 4091 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 4092 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
bb8c093b
CH
4093 if (!iwl4965_grab_nic_access(priv)) {
4094 iwl4965_write_direct32(
b481de9c
ZY
4095 priv, HBUS_TARG_MBX_C,
4096 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4097
bb8c093b 4098 iwl4965_release_nic_access(priv);
b481de9c
ZY
4099 }
4100 }
4101
4102 if (flags & RF_CARD_DISABLED) {
bb8c093b 4103 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c 4104 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
bb8c093b
CH
4105 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
4106 if (!iwl4965_grab_nic_access(priv))
4107 iwl4965_release_nic_access(priv);
b481de9c
ZY
4108 }
4109 }
4110
4111 if (flags & HW_CARD_DISABLED)
4112 set_bit(STATUS_RF_KILL_HW, &priv->status);
4113 else
4114 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4115
4116
4117 if (flags & SW_CARD_DISABLED)
4118 set_bit(STATUS_RF_KILL_SW, &priv->status);
4119 else
4120 clear_bit(STATUS_RF_KILL_SW, &priv->status);
4121
4122 if (!(flags & RXON_CARD_DISABLED))
bb8c093b 4123 iwl4965_scan_cancel(priv);
b481de9c
ZY
4124
4125 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
4126 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
4127 (test_bit(STATUS_RF_KILL_SW, &status) !=
4128 test_bit(STATUS_RF_KILL_SW, &priv->status)))
4129 queue_work(priv->workqueue, &priv->rf_kill);
4130 else
4131 wake_up_interruptible(&priv->wait_command_queue);
4132}
4133
4134/**
bb8c093b 4135 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
4136 *
4137 * Setup the RX handlers for each of the reply types sent from the uCode
4138 * to the host.
4139 *
4140 * This function chains into the hardware specific files for them to setup
4141 * any hardware specific handlers as well.
4142 */
bb8c093b 4143static void iwl4965_setup_rx_handlers(struct iwl4965_priv *priv)
b481de9c 4144{
bb8c093b
CH
4145 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
4146 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
4147 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
4148 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
b481de9c 4149 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
4150 iwl4965_rx_spectrum_measure_notif;
4151 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl4965_rx_pm_sleep_notif;
b481de9c 4152 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
4153 iwl4965_rx_pm_debug_statistics_notif;
4154 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
b481de9c 4155
9fbab516
BC
4156 /*
4157 * The same handler is used for both the REPLY to a discrete
4158 * statistics request from the host as well as for the periodic
4159 * statistics notifications (after received beacons) from the uCode.
b481de9c 4160 */
bb8c093b
CH
4161 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
4162 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
b481de9c 4163
bb8c093b
CH
4164 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
4165 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
b481de9c 4166 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 4167 iwl4965_rx_scan_results_notif;
b481de9c 4168 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
4169 iwl4965_rx_scan_complete_notif;
4170 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
4171 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
b481de9c 4172
9fbab516 4173 /* Set up hardware specific Rx handlers */
bb8c093b 4174 iwl4965_hw_rx_handler_setup(priv);
b481de9c
ZY
4175}
4176
4177/**
bb8c093b 4178 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
4179 * @rxb: Rx buffer to reclaim
4180 *
4181 * If an Rx buffer has an async callback associated with it, the callback
4182 * will be executed. The attached skb (if present) will only be freed
4183 * if the callback returns 1.
4184 */
bb8c093b
CH
4185static void iwl4965_tx_cmd_complete(struct iwl4965_priv *priv,
4186 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4187{
bb8c093b 4188 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4189 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4190 int txq_id = SEQ_TO_QUEUE(sequence);
4191 int index = SEQ_TO_INDEX(sequence);
4192 int huge = sequence & SEQ_HUGE_FRAME;
4193 int cmd_index;
bb8c093b 4194 struct iwl4965_cmd *cmd;
b481de9c
ZY
4195
4196 /* If a Tx command is being handled and it isn't in the actual
4197 	 * command queue, then a command routing bug has been introduced
4198 * in the queue management code. */
4199 if (txq_id != IWL_CMD_QUEUE_NUM)
4200 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
4201 txq_id, pkt->hdr.cmd);
4202 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
4203
4204 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
4205 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
4206
4207 /* Input error checking is done when commands are added to queue. */
4208 if (cmd->meta.flags & CMD_WANT_SKB) {
4209 cmd->meta.source->u.skb = rxb->skb;
4210 rxb->skb = NULL;
4211 } else if (cmd->meta.u.callback &&
4212 !cmd->meta.u.callback(priv, cmd, rxb->skb))
4213 rxb->skb = NULL;
4214
bb8c093b 4215 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
4216
4217 if (!(cmd->meta.flags & CMD_ASYNC)) {
4218 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4219 wake_up_interruptible(&priv->wait_command_queue);
4220 }
4221}
4222
4223/************************** RX-FUNCTIONS ****************************/
4224/*
4225 * Rx theory of operation
4226 *
9fbab516
BC
4227 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
4228 * each of which point to Receive Buffers to be filled by 4965. These get
4229 * used not only for Rx frames, but for any command response or notification
4230 * from the 4965. The driver and 4965 manage the Rx buffers by means
4231 * of indexes into the circular buffer.
b481de9c
ZY
4232 *
4233 * Rx Queue Indexes
4234 * The host/firmware share two index registers for managing the Rx buffers.
4235 *
4236 * The READ index maps to the first position that the firmware may be writing
4237 * to -- the driver can read up to (but not including) this position and get
4238 * good data.
4239 * The READ index is managed by the firmware once the card is enabled.
4240 *
4241 * The WRITE index maps to the last position the driver has read from -- the
4242 * position preceding WRITE is the last slot the firmware can place a packet.
4243 *
4244 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4245 * WRITE = READ.
4246 *
9fbab516 4247 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
4248 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4249 *
9fbab516 4250 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
4251 * and fire the RX interrupt. The driver can then query the READ index and
4252 * process as many packets as possible, moving the WRITE index forward as it
4253 * resets the Rx queue buffers with new memory.
4254 *
4255 * The management in the driver is as follows:
4256 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
4257 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 4258 * to replenish the iwl->rxq->rx_free.
bb8c093b 4259 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
4260 * iwl->rxq is replenished and the READ INDEX is updated (updating the
4261 * 'processed' and 'read' driver indexes as well)
4262 * + A received packet is processed and handed to the kernel network stack,
4263 * detached from the iwl->rxq. The driver 'processed' index is updated.
4264 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
4265 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
4266 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
4267 * were enough free buffers and RX_STALLED is set it is cleared.
4268 *
4269 *
4270 * Driver sequence:
4271 *
9fbab516
BC
4272 * iwl4965_rx_queue_alloc() Allocates rx_free
4273 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 4274 * iwl4965_rx_queue_restock
9fbab516 4275 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
4276 * queue, updates firmware pointers, and updates
4277 * the WRITE index. If insufficient rx_free buffers
bb8c093b 4278 * are available, schedules iwl4965_rx_replenish
b481de9c
ZY
4279 *
4280 * -- enable interrupts --
9fbab516 4281 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
b481de9c
ZY
4282 * READ INDEX, detaching the SKB from the pool.
4283 * Moves the packet buffer from queue to rx_used.
bb8c093b 4284 * Calls iwl4965_rx_queue_restock to refill any empty
b481de9c
ZY
4285 * slots.
4286 * ...
4287 *
4288 */
4289
4290/**
bb8c093b 4291 * iwl4965_rx_queue_space - Return number of free slots available in queue.
b481de9c 4292 */
bb8c093b 4293static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
b481de9c
ZY
4294{
4295 int s = q->read - q->write;
4296 if (s <= 0)
4297 s += RX_QUEUE_SIZE;
4298 /* keep some buffer to not confuse full and empty queue */
4299 s -= 2;
4300 if (s < 0)
4301 s = 0;
4302 return s;
4303}
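/*
 * Worked example (illustrative values, assuming RX_QUEUE_SIZE == 256):
 * with read = 10 and write = 200, s = 10 - 200 = -190, which wraps to
 * -190 + 256 = 66; after reserving 2 slots so that a full queue cannot be
 * confused with an empty one, 64 slots are reported as free.
 */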
4304
4305/**
bb8c093b 4306 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
b481de9c 4307 */
bb8c093b 4308int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_rx_queue *q)
b481de9c
ZY
4309{
4310 u32 reg = 0;
4311 int rc = 0;
4312 unsigned long flags;
4313
4314 spin_lock_irqsave(&q->lock, flags);
4315
4316 if (q->need_update == 0)
4317 goto exit_unlock;
4318
6440adb5 4319 /* If power-saving is in use, make sure device is awake */
b481de9c 4320 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
bb8c093b 4321 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4322
4323 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
bb8c093b 4324 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4325 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4326 goto exit_unlock;
4327 }
4328
bb8c093b 4329 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4330 if (rc)
4331 goto exit_unlock;
4332
6440adb5 4333 /* Device expects a multiple of 8 */
bb8c093b 4334 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
b481de9c 4335 q->write & ~0x7);
bb8c093b 4336 iwl4965_release_nic_access(priv);
6440adb5
BC
4337
4338 /* Else device is assumed to be awake */
b481de9c 4339 } else
6440adb5 4340 /* Device expects a multiple of 8 */
bb8c093b 4341 iwl4965_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
b481de9c
ZY
4342
4343
4344 q->need_update = 0;
4345
4346 exit_unlock:
4347 spin_unlock_irqrestore(&q->lock, flags);
4348 return rc;
4349}
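/*
 * Note on the "& ~0x7" masking above (illustrative, not from the original
 * source): the device accepts the Rx write pointer only in multiples of 8,
 * so a driver-side write index of 13 is reported to FH_RSCSR_CHNL0_WPTR
 * as 8; the remaining restocked buffers are announced once the index
 * crosses the next multiple of 8.
 */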
4350
4351/**
9fbab516 4352 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 4353 */
bb8c093b 4354static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl4965_priv *priv,
b481de9c
ZY
4355 dma_addr_t dma_addr)
4356{
4357 return cpu_to_le32((u32)(dma_addr >> 8));
4358}
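/*
 * Worked example (illustrative address, not from the original source): the
 * RBD stores the Rx buffer address in 256-byte units, so a DMA address of
 * 0x12345600 becomes the descriptor value 0x00123456; the low 8 bits are
 * dropped, which assumes the Rx buffer is 256-byte aligned.
 */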
4359
4360
4361/**
bb8c093b 4362 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 4363 *
9fbab516 4364 * If there are slots in the RX queue that need to be restocked,
b481de9c 4365 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 4366 * as we can, pulling from rx_free.
b481de9c
ZY
4367 *
4368 * This moves the 'write' index forward to catch up with 'processed', and
4369 * also updates the memory address in the firmware to reference the new
4370 * target buffer.
4371 */
bb8c093b 4372static int iwl4965_rx_queue_restock(struct iwl4965_priv *priv)
b481de9c 4373{
bb8c093b 4374 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4375 struct list_head *element;
bb8c093b 4376 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4377 unsigned long flags;
4378 int write, rc;
4379
4380 spin_lock_irqsave(&rxq->lock, flags);
4381 write = rxq->write & ~0x7;
bb8c093b 4382 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 4383 /* Get next free Rx buffer, remove from free list */
b481de9c 4384 element = rxq->rx_free.next;
bb8c093b 4385 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
b481de9c 4386 list_del(element);
6440adb5
BC
4387
4388 /* Point to Rx buffer via next RBD in circular buffer */
bb8c093b 4389 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
b481de9c
ZY
4390 rxq->queue[rxq->write] = rxb;
4391 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4392 rxq->free_count--;
4393 }
4394 spin_unlock_irqrestore(&rxq->lock, flags);
4395 /* If the pre-allocated buffer pool is dropping low, schedule to
4396 * refill it */
4397 if (rxq->free_count <= RX_LOW_WATERMARK)
4398 queue_work(priv->workqueue, &priv->rx_replenish);
4399
4400
6440adb5
BC
4401 /* If we've added more space for the firmware to place data, tell it.
4402 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
4403 if ((write != (rxq->write & ~0x7))
4404 || (abs(rxq->write - rxq->read) > 7)) {
4405 spin_lock_irqsave(&rxq->lock, flags);
4406 rxq->need_update = 1;
4407 spin_unlock_irqrestore(&rxq->lock, flags);
bb8c093b 4408 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
4409 if (rc)
4410 return rc;
4411 }
4412
4413 return 0;
4414}
4415
4416/**
bb8c093b 4417 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
4418 *
4419 * When moving to rx_free an SKB is allocated for the slot.
4420 *
bb8c093b 4421 * Also restock the Rx queue via iwl4965_rx_queue_restock.
01ebd063 4422 * This is called as a scheduled work item (except during initialization).
b481de9c 4423 */
5c0eef96 4424static void iwl4965_rx_allocate(struct iwl4965_priv *priv)
b481de9c 4425{
bb8c093b 4426 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4427 struct list_head *element;
bb8c093b 4428 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4429 unsigned long flags;
4430 spin_lock_irqsave(&rxq->lock, flags);
4431 while (!list_empty(&rxq->rx_used)) {
4432 element = rxq->rx_used.next;
bb8c093b 4433 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
6440adb5
BC
4434
4435 /* Alloc a new receive buffer */
b481de9c 4436 rxb->skb =
9ee1ba47
RR
4437 alloc_skb(priv->hw_setting.rx_buf_size,
4438 __GFP_NOWARN | GFP_ATOMIC);
b481de9c
ZY
4439 if (!rxb->skb) {
4440 if (net_ratelimit())
4441 printk(KERN_CRIT DRV_NAME
4442 ": Can not allocate SKB buffers\n");
4443 /* We don't reschedule replenish work here -- we will
4444 * call the restock method and if it still needs
4445 * more buffers it will schedule replenish */
4446 break;
4447 }
4448 priv->alloc_rxb_skb++;
4449 list_del(element);
6440adb5
BC
4450
4451 /* Get physical address of RB/SKB */
b481de9c
ZY
4452 rxb->dma_addr =
4453 pci_map_single(priv->pci_dev, rxb->skb->data,
9ee1ba47 4454 priv->hw_setting.rx_buf_size, PCI_DMA_FROMDEVICE);
b481de9c
ZY
4455 list_add_tail(&rxb->list, &rxq->rx_free);
4456 rxq->free_count++;
4457 }
4458 spin_unlock_irqrestore(&rxq->lock, flags);
5c0eef96
MA
4459}
4460
4461/*
4462 * This should be called while priv->lock is held.
4463 */
4fd1f841 4464static void __iwl4965_rx_replenish(void *data)
5c0eef96
MA
4465{
4466 struct iwl4965_priv *priv = data;
4467
4468 iwl4965_rx_allocate(priv);
4469 iwl4965_rx_queue_restock(priv);
4470}
4471
4472
4473void iwl4965_rx_replenish(void *data)
4474{
4475 struct iwl4965_priv *priv = data;
4476 unsigned long flags;
4477
4478 iwl4965_rx_allocate(priv);
b481de9c
ZY
4479
4480 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 4481 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4482 spin_unlock_irqrestore(&priv->lock, flags);
4483}
4484
4485/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
9fbab516 4486 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
b481de9c
ZY
4487 * This free routine walks the list of POOL entries, and if the SKB is
4488 * non-NULL it is unmapped and freed.
4489 */
bb8c093b 4490static void iwl4965_rx_queue_free(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4491{
4492 int i;
4493 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4494 if (rxq->pool[i].skb != NULL) {
4495 pci_unmap_single(priv->pci_dev,
4496 rxq->pool[i].dma_addr,
9ee1ba47
RR
4497 priv->hw_setting.rx_buf_size,
4498 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4499 dev_kfree_skb(rxq->pool[i].skb);
4500 }
4501 }
4502
4503 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4504 rxq->dma_addr);
4505 rxq->bd = NULL;
4506}
4507
bb8c093b 4508int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv)
b481de9c 4509{
bb8c093b 4510 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4511 struct pci_dev *dev = priv->pci_dev;
4512 int i;
4513
4514 spin_lock_init(&rxq->lock);
4515 INIT_LIST_HEAD(&rxq->rx_free);
4516 INIT_LIST_HEAD(&rxq->rx_used);
6440adb5
BC
4517
4518 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
b481de9c
ZY
4519 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4520 if (!rxq->bd)
4521 return -ENOMEM;
6440adb5 4522
b481de9c
ZY
4523 /* Fill the rx_used queue with _all_ of the Rx buffers */
4524 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4525 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
6440adb5 4526
b481de9c
ZY
4527 /* Set us so that we have processed and used all buffers, but have
4528 * not restocked the Rx queue with fresh buffers */
4529 rxq->read = rxq->write = 0;
4530 rxq->free_count = 0;
4531 rxq->need_update = 0;
4532 return 0;
4533}
4534
bb8c093b 4535void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4536{
4537 unsigned long flags;
4538 int i;
4539 spin_lock_irqsave(&rxq->lock, flags);
4540 INIT_LIST_HEAD(&rxq->rx_free);
4541 INIT_LIST_HEAD(&rxq->rx_used);
4542 /* Fill the rx_used queue with _all_ of the Rx buffers */
4543 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4544 /* In the reset function, these buffers may have been allocated
4545 * to an SKB, so we need to unmap and free potential storage */
4546 if (rxq->pool[i].skb != NULL) {
4547 pci_unmap_single(priv->pci_dev,
4548 rxq->pool[i].dma_addr,
9ee1ba47
RR
4549 priv->hw_setting.rx_buf_size,
4550 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4551 priv->alloc_rxb_skb--;
4552 dev_kfree_skb(rxq->pool[i].skb);
4553 rxq->pool[i].skb = NULL;
4554 }
4555 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4556 }
4557
4558 /* Set us so that we have processed and used all buffers, but have
4559 * not restocked the Rx queue with fresh buffers */
4560 rxq->read = rxq->write = 0;
4561 rxq->free_count = 0;
4562 spin_unlock_irqrestore(&rxq->lock, flags);
4563}
4564
4565/* Convert linear signal-to-noise ratio into dB */
4566static u8 ratio2dB[100] = {
4567/* 0 1 2 3 4 5 6 7 8 9 */
4568 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4569 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4570 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4571 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4572 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4573 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4574 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4575 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4576 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4577 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4578};
4579
4580/* Calculates a relative dB value from a ratio of linear
4581 * (i.e. not dB) signal levels.
4582 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 4583int iwl4965_calc_db_from_ratio(int sig_ratio)
b481de9c 4584{
c899a575
AB
4585 /* 1000:1 or higher just report as 60 dB */
4586 if (sig_ratio >= 1000)
b481de9c
ZY
4587 return 60;
4588
c899a575 4589 /* 100:1 or higher, divide by 10 and use table,
b481de9c 4590 * add 20 dB to make up for divide by 10 */
c899a575 4591 if (sig_ratio >= 100)
b481de9c
ZY
4592 return (20 + (int)ratio2dB[sig_ratio/10]);
4593
4594 /* We shouldn't see this */
4595 if (sig_ratio < 1)
4596 return 0;
4597
4598 /* Use table for ratios 1:1 - 99:1 */
4599 return (int)ratio2dB[sig_ratio];
4600}
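/*
 * Worked examples for the lookup above (illustrative values, not from the
 * original source): a linear S/N ratio of 40:1 returns ratio2dB[40] = 32 dB,
 * while 250:1 is scaled down to ratio2dB[25] = 28 dB and then gets 20 dB
 * added back for the divide-by-10, giving 48 dB.
 */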
4601
4602#define PERFECT_RSSI (-20) /* dBm */
4603#define WORST_RSSI (-95) /* dBm */
4604#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4605
4606/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4607 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4608 * about formulas used below. */
bb8c093b 4609int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
4610{
4611 int sig_qual;
4612 int degradation = PERFECT_RSSI - rssi_dbm;
4613
4614 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4615 * as indicator; formula is (signal dbm - noise dbm).
4616 * SNR at or above 40 is a great signal (100%).
4617 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4618 * Weakest usable signal is usually 10 - 15 dB SNR. */
4619 if (noise_dbm) {
4620 if (rssi_dbm - noise_dbm >= 40)
4621 return 100;
4622 else if (rssi_dbm < noise_dbm)
4623 return 0;
4624 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4625
4626 /* Else use just the signal level.
4627 * This formula is a least squares fit of data points collected and
4628 * compared with a reference system that had a percentage (%) display
4629 * for signal quality. */
4630 } else
4631 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4632 (15 * RSSI_RANGE + 62 * degradation)) /
4633 (RSSI_RANGE * RSSI_RANGE);
4634
4635 if (sig_qual > 100)
4636 sig_qual = 100;
4637 else if (sig_qual < 1)
4638 sig_qual = 0;
4639
4640 return sig_qual;
4641}
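/*
 * Worked examples (illustrative values, not from the original source):
 * with a noise figure, rssi_dbm = -60 and noise_dbm = -90 give a 30 dB SNR,
 * scaled to (30 * 5) / 2 = 75%.  Without a noise figure, rssi_dbm = -60
 * gives degradation = 40 and RSSI_RANGE = 75, so the least-squares fit
 * yields (562500 - 144200) / 5625 = 74%.
 */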
4642
4643/**
9fbab516 4644 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
4645 *
4646 * Uses the priv->rx_handlers callback function array to invoke
4647 * the appropriate handlers, including command responses,
4648 * frame-received notifications, and other notifications.
4649 */
bb8c093b 4650static void iwl4965_rx_handle(struct iwl4965_priv *priv)
b481de9c 4651{
bb8c093b
CH
4652 struct iwl4965_rx_mem_buffer *rxb;
4653 struct iwl4965_rx_packet *pkt;
4654 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4655 u32 r, i;
4656 int reclaim;
4657 unsigned long flags;
5c0eef96
MA
4658 u8 fill_rx = 0;
4659 u32 count = 0;
b481de9c 4660
6440adb5
BC
4661 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4662 * buffer that the driver may process (last buffer filled by ucode). */
bb8c093b 4663 r = iwl4965_hw_get_rx_read(priv);
b481de9c
ZY
4664 i = rxq->read;
4665
4666 /* Rx interrupt, but nothing sent from uCode */
4667 if (i == r)
4668 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4669
5c0eef96
MA
4670 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
4671 fill_rx = 1;
4672
b481de9c
ZY
4673 while (i != r) {
4674 rxb = rxq->queue[i];
4675
9fbab516 4676 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
4677 * then a bug has been introduced in the queue refilling
4678 * routines -- catch it here */
4679 BUG_ON(rxb == NULL);
4680
4681 rxq->queue[i] = NULL;
4682
4683 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
9ee1ba47 4684 priv->hw_setting.rx_buf_size,
b481de9c 4685 PCI_DMA_FROMDEVICE);
bb8c093b 4686 pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4687
4688 /* Reclaim a command buffer only if this packet is a response
4689 * to a (driver-originated) command.
4690 * If the packet (e.g. Rx frame) originated from uCode,
4691 * there is no command buffer to reclaim.
4692 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4693 * but apparently a few don't get set; catch them here. */
4694 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4695 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4696 (pkt->hdr.cmd != REPLY_4965_RX) &&
cfe01709 4697 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
b481de9c
ZY
4698 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4699 (pkt->hdr.cmd != REPLY_TX);
4700
4701 /* Based on type of command response or notification,
4702 * handle those that need handling via function in
bb8c093b 4703 * rx_handlers table. See iwl4965_setup_rx_handlers() */
b481de9c
ZY
4704 if (priv->rx_handlers[pkt->hdr.cmd]) {
4705 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4706 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4707 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4708 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4709 } else {
4710 /* No handling needed */
4711 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4712 "r %d i %d No handler needed for %s, 0x%02x\n",
4713 r, i, get_cmd_string(pkt->hdr.cmd),
4714 pkt->hdr.cmd);
4715 }
4716
4717 if (reclaim) {
9fbab516
BC
4718 /* Invoke any callbacks, transfer the skb to caller, and
4719 * fire off the (possibly) blocking iwl4965_send_cmd()
b481de9c
ZY
4720 * as we reclaim the driver command queue */
4721 if (rxb && rxb->skb)
bb8c093b 4722 iwl4965_tx_cmd_complete(priv, rxb);
b481de9c
ZY
4723 else
4724 IWL_WARNING("Claim null rxb?\n");
4725 }
4726
4727 /* For now we just don't re-use anything. We can tweak this
4728 * later to try and re-use notification packets and SKBs that
4729 * fail to Rx correctly */
4730 if (rxb->skb != NULL) {
4731 priv->alloc_rxb_skb--;
4732 dev_kfree_skb_any(rxb->skb);
4733 rxb->skb = NULL;
4734 }
4735
4736 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
9ee1ba47
RR
4737 priv->hw_setting.rx_buf_size,
4738 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4739 spin_lock_irqsave(&rxq->lock, flags);
4740 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4741 spin_unlock_irqrestore(&rxq->lock, flags);
4742 i = (i + 1) & RX_QUEUE_MASK;
5c0eef96
MA
4743 /* If there are a lot of unused frames,
4744 		 * restock the Rx queue so the uCode won't assert. */
4745 if (fill_rx) {
4746 count++;
4747 if (count >= 8) {
4748 priv->rxq.read = i;
4749 __iwl4965_rx_replenish(priv);
4750 count = 0;
4751 }
4752 }
b481de9c
ZY
4753 }
4754
4755 /* Backtrack one entry */
4756 priv->rxq.read = i;
bb8c093b 4757 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4758}
4759
6440adb5
BC
4760/**
4761 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4762 */
bb8c093b
CH
4763static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
4764 struct iwl4965_tx_queue *txq)
b481de9c
ZY
4765{
4766 u32 reg = 0;
4767 int rc = 0;
4768 int txq_id = txq->q.id;
4769
4770 if (txq->need_update == 0)
4771 return rc;
4772
4773 /* if we're trying to save power */
4774 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4775 /* wake up nic if it's powered down ...
4776 * uCode will wake up, and interrupt us again, so next
4777 * time we'll skip this part. */
bb8c093b 4778 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4779
4780 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4781 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
bb8c093b 4782 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4783 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4784 return rc;
4785 }
4786
4787 /* restore this queue's parameters in nic hardware. */
bb8c093b 4788 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4789 if (rc)
4790 return rc;
bb8c093b 4791 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
fc4b6853 4792 txq->q.write_ptr | (txq_id << 8));
bb8c093b 4793 iwl4965_release_nic_access(priv);
b481de9c
ZY
4794
4795 /* else not in power-save mode, uCode will never sleep when we're
4796 * trying to tx (during RFKILL, we're not trying to tx). */
4797 } else
bb8c093b 4798 iwl4965_write32(priv, HBUS_TARG_WRPTR,
fc4b6853 4799 txq->q.write_ptr | (txq_id << 8));
b481de9c
ZY
4800
4801 txq->need_update = 0;
4802
4803 return rc;
4804}
4805
c8b0e6e1 4806#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 4807static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
b481de9c 4808{
0795af57
JP
4809 DECLARE_MAC_BUF(mac);
4810
b481de9c 4811 IWL_DEBUG_RADIO("RX CONFIG:\n");
bb8c093b 4812 iwl4965_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
4813 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4814 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4815 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4816 le32_to_cpu(rxon->filter_flags));
4817 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4818 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4819 rxon->ofdm_basic_rates);
4820 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
0795af57
JP
4821 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
4822 print_mac(mac, rxon->node_addr));
4823 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4824 print_mac(mac, rxon->bssid_addr));
b481de9c
ZY
4825 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4826}
4827#endif
4828
bb8c093b 4829static void iwl4965_enable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4830{
4831 IWL_DEBUG_ISR("Enabling interrupts\n");
4832 set_bit(STATUS_INT_ENABLED, &priv->status);
bb8c093b 4833 iwl4965_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
4834}
4835
bb8c093b 4836static inline void iwl4965_disable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4837{
4838 clear_bit(STATUS_INT_ENABLED, &priv->status);
4839
4840 /* disable interrupts from uCode/NIC to host */
bb8c093b 4841 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
4842
4843 /* acknowledge/clear/reset any interrupts still pending
4844 * from uCode or flow handler (Rx/Tx DMA) */
bb8c093b
CH
4845 iwl4965_write32(priv, CSR_INT, 0xffffffff);
4846 iwl4965_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
4847 IWL_DEBUG_ISR("Disabled interrupts\n");
4848}
4849
4850static const char *desc_lookup(int i)
4851{
4852 switch (i) {
4853 case 1:
4854 return "FAIL";
4855 case 2:
4856 return "BAD_PARAM";
4857 case 3:
4858 return "BAD_CHECKSUM";
4859 case 4:
4860 return "NMI_INTERRUPT";
4861 case 5:
4862 return "SYSASSERT";
4863 case 6:
4864 return "FATAL_ERROR";
4865 }
4866
4867 return "UNKNOWN";
4868}
4869
4870#define ERROR_START_OFFSET (1 * sizeof(u32))
4871#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4872
bb8c093b 4873static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
b481de9c
ZY
4874{
4875 u32 data2, line;
4876 u32 desc, time, count, base, data1;
4877 u32 blink1, blink2, ilink1, ilink2;
4878 int rc;
4879
4880 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4881
bb8c093b 4882 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4883 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4884 return;
4885 }
4886
bb8c093b 4887 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4888 if (rc) {
4889 IWL_WARNING("Can not read from adapter at this time.\n");
4890 return;
4891 }
4892
bb8c093b 4893 count = iwl4965_read_targ_mem(priv, base);
b481de9c
ZY
4894
4895 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4896 IWL_ERROR("Start IWL Error Log Dump:\n");
4897 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4898 priv->status, priv->config, count);
4899 }
4900
bb8c093b
CH
4901 desc = iwl4965_read_targ_mem(priv, base + 1 * sizeof(u32));
4902 blink1 = iwl4965_read_targ_mem(priv, base + 3 * sizeof(u32));
4903 blink2 = iwl4965_read_targ_mem(priv, base + 4 * sizeof(u32));
4904 ilink1 = iwl4965_read_targ_mem(priv, base + 5 * sizeof(u32));
4905 ilink2 = iwl4965_read_targ_mem(priv, base + 6 * sizeof(u32));
4906 data1 = iwl4965_read_targ_mem(priv, base + 7 * sizeof(u32));
4907 data2 = iwl4965_read_targ_mem(priv, base + 8 * sizeof(u32));
4908 line = iwl4965_read_targ_mem(priv, base + 9 * sizeof(u32));
4909 time = iwl4965_read_targ_mem(priv, base + 11 * sizeof(u32));
b481de9c
ZY
4910
4911 IWL_ERROR("Desc Time "
4912 "data1 data2 line\n");
4913 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4914 desc_lookup(desc), desc, time, data1, data2, line);
4915 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4916 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4917 ilink1, ilink2);
4918
bb8c093b 4919 iwl4965_release_nic_access(priv);
b481de9c
ZY
4920}
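
/*
 * Editor's sketch, illustration only (not part of the driver): the SRAM
 * word offsets read above imply roughly the following error-log record
 * layout starting at 'base'.  The field names are assumptions taken from
 * the local variables; the driver reads the words individually rather
 * than mapping a structure like this one.
 */
struct iwl4965_error_log_view {		/* hypothetical layout sketch */
	u32 count;	/* base + 0:  number of valid entries        */
	u32 desc;	/* base + 1:  error type, see desc_lookup()  */
	u32 unused0;	/* base + 2:  not read by the dump above     */
	u32 blink1;	/* base + 3 */
	u32 blink2;	/* base + 4 */
	u32 ilink1;	/* base + 5 */
	u32 ilink2;	/* base + 6 */
	u32 data1;	/* base + 7 */
	u32 data2;	/* base + 8 */
	u32 line;	/* base + 9:  source line reported by uCode  */
	u32 unused1;	/* base + 10: not read by the dump above     */
	u32 time;	/* base + 11: uCode timestamp                */
};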
4921
4922#define EVENT_START_OFFSET (4 * sizeof(u32))
4923
4924/**
bb8c093b 4925 * iwl4965_print_event_log - Dump error event log to syslog
b481de9c 4926 *
bb8c093b 4927 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained!
b481de9c 4928 */
bb8c093b 4929static void iwl4965_print_event_log(struct iwl4965_priv *priv, u32 start_idx,
b481de9c
ZY
4930 u32 num_events, u32 mode)
4931{
4932 u32 i;
4933 u32 base; /* SRAM byte address of event log header */
4934 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4935 u32 ptr; /* SRAM byte address of log data */
4936 u32 ev, time, data; /* event log data */
4937
4938 if (num_events == 0)
4939 return;
4940
4941 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4942
4943 if (mode == 0)
4944 event_size = 2 * sizeof(u32);
4945 else
4946 event_size = 3 * sizeof(u32);
4947
4948 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4949
4950 /* "time" is actually "data" for mode 0 (no timestamp).
4951 * place event id # at far right for easier visual parsing. */
4952 for (i = 0; i < num_events; i++) {
bb8c093b 4953 ev = iwl4965_read_targ_mem(priv, ptr);
b481de9c 4954 ptr += sizeof(u32);
bb8c093b 4955 time = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4956 ptr += sizeof(u32);
4957 if (mode == 0)
4958 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4959 else {
bb8c093b 4960 data = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4961 ptr += sizeof(u32);
4962 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4963 }
4964 }
4965}
4966
bb8c093b 4967static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv)
b481de9c
ZY
4968{
4969 int rc;
4970 u32 base; /* SRAM byte address of event log header */
4971 u32 capacity; /* event log capacity in # entries */
4972 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4973 u32 num_wraps; /* # times uCode wrapped to top of log */
4974 u32 next_entry; /* index of next entry to be written by uCode */
4975 u32 size; /* # entries that we'll print */
4976
4977 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 4978 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4979 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4980 return;
4981 }
4982
bb8c093b 4983 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4984 if (rc) {
4985 IWL_WARNING("Cannot read from adapter at this time.\n");
4986 return;
4987 }
4988
4989 /* event log header */
bb8c093b
CH
4990 capacity = iwl4965_read_targ_mem(priv, base);
4991 mode = iwl4965_read_targ_mem(priv, base + (1 * sizeof(u32)));
4992 num_wraps = iwl4965_read_targ_mem(priv, base + (2 * sizeof(u32)));
4993 next_entry = iwl4965_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
4994
4995 size = num_wraps ? capacity : next_entry;
4996
4997 /* bail out if nothing in log */
4998 if (size == 0) {
583fab37 4999 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
bb8c093b 5000 iwl4965_release_nic_access(priv);
b481de9c
ZY
5001 return;
5002 }
5003
583fab37 5004 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
5005 size, num_wraps);
5006
5007 /* if uCode has wrapped back to top of log, start at the oldest entry,
5008 * i.e. the next one that uCode would fill. */
5009 if (num_wraps)
bb8c093b 5010 iwl4965_print_event_log(priv, next_entry,
b481de9c
ZY
5011 capacity - next_entry, mode);
5012
5013 /* (then/else) start at top of log */
bb8c093b 5014 iwl4965_print_event_log(priv, 0, next_entry, mode);
b481de9c 5015
bb8c093b 5016 iwl4965_release_nic_access(priv);
b481de9c
ZY
5017}
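
/*
 * Editor's sketch, illustration only: the event log is a ring buffer of
 * 'capacity' entries.  Once the uCode has wrapped, the oldest entry is the
 * one it would overwrite next (next_entry), so a chronological dump prints
 * [next_entry, capacity) followed by [0, next_entry), exactly as above.
 * A generic version of that ordering, assuming a print_one() callback:
 */
static void example_dump_ring_oldest_first(u32 capacity, u32 next_entry,
					   u32 num_wraps,
					   void (*print_one)(u32 index))
{
	u32 i;

	if (num_wraps)			/* wrapped: oldest entry is next_entry */
		for (i = next_entry; i < capacity; i++)
			print_one(i);

	for (i = 0; i < next_entry; i++)	/* remainder (or the whole log) */
		print_one(i);
}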
5018
5019/**
bb8c093b 5020 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 5021 */
bb8c093b 5022static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
b481de9c 5023{
bb8c093b 5024 /* Set the FW error flag -- cleared on iwl4965_down */
b481de9c
ZY
5025 set_bit(STATUS_FW_ERROR, &priv->status);
5026
5027 /* Cancel currently queued command. */
5028 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
5029
c8b0e6e1 5030#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5031 if (iwl4965_debug_level & IWL_DL_FW_ERRORS) {
5032 iwl4965_dump_nic_error_log(priv);
5033 iwl4965_dump_nic_event_log(priv);
5034 iwl4965_print_rx_config_cmd(&priv->staging_rxon);
b481de9c
ZY
5035 }
5036#endif
5037
5038 wake_up_interruptible(&priv->wait_command_queue);
5039
5040 /* Keep the restart process from trying to send host
5041 * commands by clearing the INIT status bit */
5042 clear_bit(STATUS_READY, &priv->status);
5043
5044 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5045 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
5046 "Restarting adapter due to uCode error.\n");
5047
bb8c093b 5048 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5049 memcpy(&priv->recovery_rxon, &priv->active_rxon,
5050 sizeof(priv->recovery_rxon));
5051 priv->error_recovering = 1;
5052 }
5053 queue_work(priv->workqueue, &priv->restart);
5054 }
5055}
5056
bb8c093b 5057static void iwl4965_error_recovery(struct iwl4965_priv *priv)
b481de9c
ZY
5058{
5059 unsigned long flags;
5060
5061 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
5062 sizeof(priv->staging_rxon));
5063 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5064 iwl4965_commit_rxon(priv);
b481de9c 5065
bb8c093b 5066 iwl4965_rxon_add_station(priv, priv->bssid, 1);
b481de9c
ZY
5067
5068 spin_lock_irqsave(&priv->lock, flags);
5069 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
5070 priv->error_recovering = 0;
5071 spin_unlock_irqrestore(&priv->lock, flags);
5072}
5073
bb8c093b 5074static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
b481de9c
ZY
5075{
5076 u32 inta, handled = 0;
5077 u32 inta_fh;
5078 unsigned long flags;
c8b0e6e1 5079#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
5080 u32 inta_mask;
5081#endif
5082
5083 spin_lock_irqsave(&priv->lock, flags);
5084
5085 /* Ack/clear/reset pending uCode interrupts.
5086 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
5087 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
bb8c093b
CH
5088 inta = iwl4965_read32(priv, CSR_INT);
5089 iwl4965_write32(priv, CSR_INT, inta);
b481de9c
ZY
5090
5091 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
5092 * Any new interrupts that happen after this, either while we're
5093 * in this tasklet, or later, will show up in next ISR/tasklet. */
bb8c093b
CH
5094 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
5095 iwl4965_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 5096
c8b0e6e1 5097#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5098 if (iwl4965_debug_level & IWL_DL_ISR) {
9fbab516
BC
5099 /* just for debug */
5100 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
b481de9c
ZY
5101 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5102 inta, inta_mask, inta_fh);
5103 }
5104#endif
5105
5106 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
5107 * atomic, make sure that inta covers all the interrupts that
5108 * we've discovered, even if FH interrupt came in just after
5109 * reading CSR_INT. */
5110 if (inta_fh & CSR_FH_INT_RX_MASK)
5111 inta |= CSR_INT_BIT_FH_RX;
5112 if (inta_fh & CSR_FH_INT_TX_MASK)
5113 inta |= CSR_INT_BIT_FH_TX;
5114
5115 /* Now service all interrupt bits discovered above. */
5116 if (inta & CSR_INT_BIT_HW_ERR) {
5117 IWL_ERROR("Microcode HW error detected. Restarting.\n");
5118
5119 /* Tell the device to stop sending interrupts */
bb8c093b 5120 iwl4965_disable_interrupts(priv);
b481de9c 5121
bb8c093b 5122 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5123
5124 handled |= CSR_INT_BIT_HW_ERR;
5125
5126 spin_unlock_irqrestore(&priv->lock, flags);
5127
5128 return;
5129 }
5130
c8b0e6e1 5131#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5132 if (iwl4965_debug_level & (IWL_DL_ISR)) {
b481de9c
ZY
5133 /* NIC fires this, but we don't use it, redundant with WAKEUP */
5134 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
5135 IWL_DEBUG_ISR("Microcode started or stopped.\n");
5136
5137 /* Alive notification via Rx interrupt will do the real work */
5138 if (inta & CSR_INT_BIT_ALIVE)
5139 IWL_DEBUG_ISR("Alive interrupt\n");
5140 }
5141#endif
5142 /* Safely ignore these bits for debug checks below */
5143 inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
5144
9fbab516 5145 /* HW RF KILL switch toggled */
b481de9c
ZY
5146 if (inta & CSR_INT_BIT_RF_KILL) {
5147 int hw_rf_kill = 0;
bb8c093b 5148 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
5149 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5150 hw_rf_kill = 1;
5151
5152 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
5153 "RF_KILL bit toggled to %s.\n",
5154 hw_rf_kill ? "disable radio":"enable radio");
5155
5156 /* Queue restart only if RF_KILL switch was set to "kill"
5157 * when we loaded driver, and is now set to "enable".
5158 * After we're Alive, RF_KILL gets handled by
5159 * iwl_rx_card_state_notif() */
53e49093
ZY
5160 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
5161 clear_bit(STATUS_RF_KILL_HW, &priv->status);
b481de9c 5162 queue_work(priv->workqueue, &priv->restart);
53e49093 5163 }
b481de9c
ZY
5164
5165 handled |= CSR_INT_BIT_RF_KILL;
5166 }
5167
9fbab516 5168 /* Chip got too hot and stopped itself */
b481de9c
ZY
5169 if (inta & CSR_INT_BIT_CT_KILL) {
5170 IWL_ERROR("Microcode CT kill error detected.\n");
5171 handled |= CSR_INT_BIT_CT_KILL;
5172 }
5173
5174 /* Error detected by uCode */
5175 if (inta & CSR_INT_BIT_SW_ERR) {
5176 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
5177 inta);
bb8c093b 5178 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5179 handled |= CSR_INT_BIT_SW_ERR;
5180 }
5181
5182 /* uCode wakes up after power-down sleep */
5183 if (inta & CSR_INT_BIT_WAKEUP) {
5184 IWL_DEBUG_ISR("Wakeup interrupt\n");
bb8c093b
CH
5185 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq);
5186 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]);
5187 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]);
5188 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]);
5189 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]);
5190 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]);
5191 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
5192
5193 handled |= CSR_INT_BIT_WAKEUP;
5194 }
5195
5196 /* All uCode command responses, including Tx command responses,
5197 * Rx "responses" (frame-received notification), and other
5198 * notifications from uCode come through here */
5199 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 5200 iwl4965_rx_handle(priv);
b481de9c
ZY
5201 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
5202 }
5203
5204 if (inta & CSR_INT_BIT_FH_TX) {
5205 IWL_DEBUG_ISR("Tx interrupt\n");
5206 handled |= CSR_INT_BIT_FH_TX;
5207 }
5208
5209 if (inta & ~handled)
5210 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
5211
5212 if (inta & ~CSR_INI_SET_MASK) {
5213 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
5214 inta & ~CSR_INI_SET_MASK);
5215 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
5216 }
5217
5218 /* Re-enable all interrupts */
bb8c093b 5219 iwl4965_enable_interrupts(priv);
b481de9c 5220
c8b0e6e1 5221#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5222 if (iwl4965_debug_level & (IWL_DL_ISR)) {
5223 inta = iwl4965_read32(priv, CSR_INT);
5224 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
5225 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5226 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5227 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5228 }
5229#endif
5230 spin_unlock_irqrestore(&priv->lock, flags);
5231}
5232
bb8c093b 5233static irqreturn_t iwl4965_isr(int irq, void *data)
b481de9c 5234{
bb8c093b 5235 struct iwl4965_priv *priv = data;
b481de9c
ZY
5236 u32 inta, inta_mask;
5237 u32 inta_fh;
5238 if (!priv)
5239 return IRQ_NONE;
5240
5241 spin_lock(&priv->lock);
5242
5243 /* Disable (but don't clear!) interrupts here to avoid
5244 * back-to-back ISRs and sporadic interrupts from our NIC.
5245 * If we have something to service, the tasklet will re-enable ints.
5246 * If we *don't* have something, we'll re-enable before leaving here. */
bb8c093b
CH
5247 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); /* just for debug */
5248 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
5249
5250 /* Discover which interrupts are active/pending */
bb8c093b
CH
5251 inta = iwl4965_read32(priv, CSR_INT);
5252 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5253
5254 /* Ignore interrupt if there's nothing in NIC to service.
5255 * This may be due to IRQ shared with another device,
5256 * or due to sporadic interrupts thrown from our NIC. */
5257 if (!inta && !inta_fh) {
5258 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5259 goto none;
5260 }
5261
5262 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
66fbb541
ON
5263 /* Hardware disappeared. It might have already raised
5264 * an interrupt */
b481de9c 5265 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
66fbb541 5266 goto unplugged;
b481de9c
ZY
5267 }
5268
5269 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5270 inta, inta_mask, inta_fh);
5271
bb8c093b 5272 /* iwl4965_irq_tasklet() will service interrupts and re-enable them */
b481de9c 5273 tasklet_schedule(&priv->irq_tasklet);
b481de9c 5274
66fbb541
ON
5275 unplugged:
5276 spin_unlock(&priv->lock);
b481de9c
ZY
5277 return IRQ_HANDLED;
5278
5279 none:
5280 /* re-enable interrupts here since we don't have anything to service. */
bb8c093b 5281 iwl4965_enable_interrupts(priv);
b481de9c
ZY
5282 spin_unlock(&priv->lock);
5283 return IRQ_NONE;
5284}
5285
5286/************************** EEPROM BANDS ****************************
5287 *
bb8c093b 5288 * The iwl4965_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
5289 * EEPROM contents to the specific channel number supported for each
5290 * band.
5291 *
bb8c093b 5292 * For example, iwl4965_priv->eeprom.band_3_channels[4] from the band_3
b481de9c
ZY
5293 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5294 * The specific geography and calibration information for that channel
5295 * is contained in the eeprom map itself.
5296 *
5297 * During init, we copy the eeprom information and channel map
5298 * information into priv->channel_info_24/52 and priv->channel_map_24/52
5299 *
5300 * channel_map_24/52 provides the index in the channel_info array for a
5301 * given channel. We have to have two separate maps as there is channel
5302 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5303 * band_2
5304 *
5305 * A value of 0xff stored in the channel_map indicates that the channel
5306 * is not supported by the hardware at all.
5307 *
5308 * A value of 0xfe in the channel_map indicates that the channel is not
5309 * valid for Tx with the current hardware. This means that
5310 * while the system can tune and receive on a given channel, it may not
5311 * be able to associate or transmit any frames on that
5312 * channel. There is no corresponding channel information for that
5313 * entry.
5314 *
5315 *********************************************************************/
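
/*
 * Editor's sketch, hypothetical only: how a channel_map_24/52 style lookup
 * would use the 0xff/0xfe markers described above.  'channel_map' and
 * 'channel_info' stand in for the priv->channel_map_24/52 and
 * priv->channel_info arrays; the driver's real lookup path is
 * iwl4965_get_channel_info() further below, which searches channel_info
 * directly.
 */
static const struct iwl4965_channel_info *
example_channel_map_lookup(const u8 *channel_map,
			   const struct iwl4965_channel_info *channel_info,
			   u8 channel)
{
	u8 index = channel_map[channel];

	if (index == 0xff)	/* channel not supported by the hardware */
		return NULL;
	if (index == 0xfe)	/* can tune/receive, but no Tx/associate;
				 * no channel_info entry exists for it */
		return NULL;
	return &channel_info[index];
}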
5316
5317/* 2.4 GHz */
bb8c093b 5318static const u8 iwl4965_eeprom_band_1[14] = {
b481de9c
ZY
5319 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5320};
5321
5322/* 5.2 GHz bands */
9fbab516 5323static const u8 iwl4965_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
5324 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5325};
5326
9fbab516 5327static const u8 iwl4965_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
5328 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5329};
5330
bb8c093b 5331static const u8 iwl4965_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
5332 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5333};
5334
bb8c093b 5335static const u8 iwl4965_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
5336 145, 149, 153, 157, 161, 165
5337};
5338
bb8c093b 5339static u8 iwl4965_eeprom_band_6[] = { /* 2.4 FAT channel */
b481de9c
ZY
5340 1, 2, 3, 4, 5, 6, 7
5341};
5342
bb8c093b 5343static u8 iwl4965_eeprom_band_7[] = { /* 5.2 FAT channel */
b481de9c
ZY
5344 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5345};
5346
9fbab516
BC
5347static void iwl4965_init_band_reference(const struct iwl4965_priv *priv,
5348 int band,
b481de9c 5349 int *eeprom_ch_count,
bb8c093b 5350 const struct iwl4965_eeprom_channel
b481de9c
ZY
5351 **eeprom_ch_info,
5352 const u8 **eeprom_ch_index)
5353{
5354 switch (band) {
5355 case 1: /* 2.4GHz band */
bb8c093b 5356 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_1);
b481de9c 5357 *eeprom_ch_info = priv->eeprom.band_1_channels;
bb8c093b 5358 *eeprom_ch_index = iwl4965_eeprom_band_1;
b481de9c 5359 break;
9fbab516 5360 case 2: /* 4.9GHz band */
bb8c093b 5361 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_2);
b481de9c 5362 *eeprom_ch_info = priv->eeprom.band_2_channels;
bb8c093b 5363 *eeprom_ch_index = iwl4965_eeprom_band_2;
b481de9c
ZY
5364 break;
5365 case 3: /* 5.2GHz band */
bb8c093b 5366 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_3);
b481de9c 5367 *eeprom_ch_info = priv->eeprom.band_3_channels;
bb8c093b 5368 *eeprom_ch_index = iwl4965_eeprom_band_3;
b481de9c 5369 break;
9fbab516 5370 case 4: /* 5.5GHz band */
bb8c093b 5371 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_4);
b481de9c 5372 *eeprom_ch_info = priv->eeprom.band_4_channels;
bb8c093b 5373 *eeprom_ch_index = iwl4965_eeprom_band_4;
b481de9c 5374 break;
9fbab516 5375 case 5: /* 5.7GHz band */
bb8c093b 5376 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c 5377 *eeprom_ch_info = priv->eeprom.band_5_channels;
bb8c093b 5378 *eeprom_ch_index = iwl4965_eeprom_band_5;
b481de9c 5379 break;
9fbab516 5380 case 6: /* 2.4GHz FAT channels */
bb8c093b 5381 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_6);
b481de9c 5382 *eeprom_ch_info = priv->eeprom.band_24_channels;
bb8c093b 5383 *eeprom_ch_index = iwl4965_eeprom_band_6;
b481de9c 5384 break;
9fbab516 5385 case 7: /* 5 GHz FAT channels */
bb8c093b 5386 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_7);
b481de9c 5387 *eeprom_ch_info = priv->eeprom.band_52_channels;
bb8c093b 5388 *eeprom_ch_index = iwl4965_eeprom_band_7;
b481de9c
ZY
5389 break;
5390 default:
5391 BUG();
5392 return;
5393 }
5394}
5395
6440adb5
BC
5396/**
5397 * iwl4965_get_channel_info - Find driver's private channel info
5398 *
5399 * Based on band and channel number.
5400 */
bb8c093b 5401const struct iwl4965_channel_info *iwl4965_get_channel_info(const struct iwl4965_priv *priv,
b481de9c
ZY
5402 int phymode, u16 channel)
5403{
5404 int i;
5405
5406 switch (phymode) {
5407 case MODE_IEEE80211A:
5408 for (i = 14; i < priv->channel_count; i++) {
5409 if (priv->channel_info[i].channel == channel)
5410 return &priv->channel_info[i];
5411 }
5412 break;
5413
5414 case MODE_IEEE80211B:
5415 case MODE_IEEE80211G:
5416 if (channel >= 1 && channel <= 14)
5417 return &priv->channel_info[channel - 1];
5418 break;
5419
5420 }
5421
5422 return NULL;
5423}
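
/*
 * Editor's sketch, hypothetical helper (not in the driver): typical use of
 * iwl4965_get_channel_info() to check whether a 5.2 GHz channel is usable
 * at all on this SKU.
 */
static int example_channel_is_usable(const struct iwl4965_priv *priv,
				     u16 channel)
{
	const struct iwl4965_channel_info *ch_info =
	    iwl4965_get_channel_info(priv, MODE_IEEE80211A, channel);

	return ch_info && is_channel_valid(ch_info);
}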
5424
5425#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5426 ? # x " " : "")
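
/*
 * Editor's note, illustration only: after preprocessing and string pasting,
 * CHECK_AND_PRINT(IBSS) becomes
 *
 *	((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_IBSS) ? "IBSS " : "")
 *
 * so the debug output below lists only the flag names that are set.
 */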
5427
6440adb5
BC
5428/**
5429 * iwl4965_init_channel_map - Set up driver's info for all possible channels
5430 */
bb8c093b 5431static int iwl4965_init_channel_map(struct iwl4965_priv *priv)
b481de9c
ZY
5432{
5433 int eeprom_ch_count = 0;
5434 const u8 *eeprom_ch_index = NULL;
bb8c093b 5435 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 5436 int band, ch;
bb8c093b 5437 struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5438
5439 if (priv->channel_count) {
5440 IWL_DEBUG_INFO("Channel map already initialized.\n");
5441 return 0;
5442 }
5443
5444 if (priv->eeprom.version < 0x2f) {
5445 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5446 priv->eeprom.version);
5447 return -EINVAL;
5448 }
5449
5450 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5451
5452 priv->channel_count =
bb8c093b
CH
5453 ARRAY_SIZE(iwl4965_eeprom_band_1) +
5454 ARRAY_SIZE(iwl4965_eeprom_band_2) +
5455 ARRAY_SIZE(iwl4965_eeprom_band_3) +
5456 ARRAY_SIZE(iwl4965_eeprom_band_4) +
5457 ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c
ZY
5458
5459 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5460
bb8c093b 5461 priv->channel_info = kzalloc(sizeof(struct iwl4965_channel_info) *
b481de9c
ZY
5462 priv->channel_count, GFP_KERNEL);
5463 if (!priv->channel_info) {
5464 IWL_ERROR("Could not allocate channel_info\n");
5465 priv->channel_count = 0;
5466 return -ENOMEM;
5467 }
5468
5469 ch_info = priv->channel_info;
5470
5471 /* Loop through the 5 EEPROM bands adding them in order to the
5472 * channel map we maintain (which contains more information than
5473 * just what is in the EEPROM) */
5474 for (band = 1; band <= 5; band++) {
5475
bb8c093b 5476 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5477 &eeprom_ch_info, &eeprom_ch_index);
5478
5479 /* Loop through each band adding each of the channels */
5480 for (ch = 0; ch < eeprom_ch_count; ch++) {
5481 ch_info->channel = eeprom_ch_index[ch];
5482 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5483 MODE_IEEE80211A;
5484
5485 /* permanently store EEPROM's channel regulatory flags
5486 * and max power in channel info database. */
5487 ch_info->eeprom = eeprom_ch_info[ch];
5488
5489 /* Copy the run-time flags so they are there even on
5490 * invalid channels */
5491 ch_info->flags = eeprom_ch_info[ch].flags;
5492
5493 if (!(is_channel_valid(ch_info))) {
5494 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5495 "No traffic\n",
5496 ch_info->channel,
5497 ch_info->flags,
5498 is_channel_a_band(ch_info) ?
5499 "5.2" : "2.4");
5500 ch_info++;
5501 continue;
5502 }
5503
5504 /* Initialize regulatory-based run-time data */
5505 ch_info->max_power_avg = ch_info->curr_txpow =
5506 eeprom_ch_info[ch].max_power_avg;
5507 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5508 ch_info->min_power = 0;
5509
5510 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5511 " %ddBm): Ad-Hoc %ssupported\n",
5512 ch_info->channel,
5513 is_channel_a_band(ch_info) ?
5514 "5.2" : "2.4",
5515 CHECK_AND_PRINT(IBSS),
5516 CHECK_AND_PRINT(ACTIVE),
5517 CHECK_AND_PRINT(RADAR),
5518 CHECK_AND_PRINT(WIDE),
5519 CHECK_AND_PRINT(NARROW),
5520 CHECK_AND_PRINT(DFS),
5521 eeprom_ch_info[ch].flags,
5522 eeprom_ch_info[ch].max_power_avg,
5523 ((eeprom_ch_info[ch].
5524 flags & EEPROM_CHANNEL_IBSS)
5525 && !(eeprom_ch_info[ch].
5526 flags & EEPROM_CHANNEL_RADAR))
5527 ? "" : "not ");
5528
5529 /* Set the user_txpower_limit to the highest power
5530 * supported by any channel */
5531 if (eeprom_ch_info[ch].max_power_avg >
5532 priv->user_txpower_limit)
5533 priv->user_txpower_limit =
5534 eeprom_ch_info[ch].max_power_avg;
5535
5536 ch_info++;
5537 }
5538 }
5539
6440adb5 5540 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
b481de9c
ZY
5541 for (band = 6; band <= 7; band++) {
5542 int phymode;
5543 u8 fat_extension_chan;
5544
bb8c093b 5545 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5546 &eeprom_ch_info, &eeprom_ch_index);
5547
6440adb5 5548 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
b481de9c 5549 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
6440adb5 5550
b481de9c
ZY
5551 /* Loop through each band adding each of the channels */
5552 for (ch = 0; ch < eeprom_ch_count; ch++) {
5553
5554 if ((band == 6) &&
5555 ((eeprom_ch_index[ch] == 5) ||
5556 (eeprom_ch_index[ch] == 6) ||
5557 (eeprom_ch_index[ch] == 7)))
5558 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5559 else
5560 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5561
6440adb5 5562 /* Set up driver's info for lower half */
b481de9c
ZY
5563 iwl4965_set_fat_chan_info(priv, phymode,
5564 eeprom_ch_index[ch],
5565 &(eeprom_ch_info[ch]),
5566 fat_extension_chan);
5567
6440adb5 5568 /* Set up driver's info for upper half */
b481de9c
ZY
5569 iwl4965_set_fat_chan_info(priv, phymode,
5570 (eeprom_ch_index[ch] + 4),
5571 &(eeprom_ch_info[ch]),
5572 HT_IE_EXT_CHANNEL_BELOW);
5573 }
5574 }
5575
5576 return 0;
5577}
5578
5579/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5580 * sending probe req. This should be set long enough to hear probe responses
5581 * from more than one AP. */
5582#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5583#define IWL_ACTIVE_DWELL_TIME_52 (10)
5584
5585/* For faster active scanning, scan will move to the next channel if fewer than
5586 * PLCP_QUIET_THRESH packets are heard on this channel within
5587 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5588 * time if it's a quiet channel (nothing responded to our probe, and there's
5589 * no other traffic).
5590 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5591#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5592#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5593
5594/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5595 * Must be set longer than active dwell time.
5596 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5597#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5598#define IWL_PASSIVE_DWELL_TIME_52 (10)
5599#define IWL_PASSIVE_DWELL_BASE (100)
5600#define IWL_CHANNEL_TUNE_TIME 5
5601
bb8c093b 5602static inline u16 iwl4965_get_active_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c
ZY
5603{
5604 if (phymode == MODE_IEEE80211A)
5605 return IWL_ACTIVE_DWELL_TIME_52;
5606 else
5607 return IWL_ACTIVE_DWELL_TIME_24;
5608}
5609
bb8c093b 5610static u16 iwl4965_get_passive_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c 5611{
bb8c093b 5612 u16 active = iwl4965_get_active_dwell_time(priv, phymode);
b481de9c
ZY
5613 u16 passive = (phymode != MODE_IEEE80211A) ?
5614 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5615 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5616
bb8c093b 5617 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5618 /* If we're associated, we clamp the maximum passive
5619 * dwell time to be 98% of the beacon interval (minus
5620 * 2 * channel tune time) */
5621 passive = priv->beacon_int;
5622 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5623 passive = IWL_PASSIVE_DWELL_BASE;
5624 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5625 }
5626
5627 if (passive <= active)
5628 passive = active + 1;
5629
5630 return passive;
5631}
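
/*
 * Editor's note, worked example only (assumes beacon_int is in the same
 * units as the dwell constants): for 2.4 GHz, active dwell is 20 and the
 * idle passive dwell is 100 + 20 = 120.  When associated with
 * beacon_int = 100, the clamp above gives 100 * 98 / 100 - 2 * 5 = 88,
 * which is still greater than the active dwell of 20, so 88 is used.
 */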
5632
bb8c093b 5633static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
b481de9c 5634 u8 is_active, u8 direct_mask,
bb8c093b 5635 struct iwl4965_scan_channel *scan_ch)
b481de9c
ZY
5636{
5637 const struct ieee80211_channel *channels = NULL;
5638 const struct ieee80211_hw_mode *hw_mode;
bb8c093b 5639 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5640 u16 passive_dwell = 0;
5641 u16 active_dwell = 0;
5642 int added, i;
5643
bb8c093b 5644 hw_mode = iwl4965_get_hw_mode(priv, phymode);
b481de9c
ZY
5645 if (!hw_mode)
5646 return 0;
5647
5648 channels = hw_mode->channels;
5649
bb8c093b
CH
5650 active_dwell = iwl4965_get_active_dwell_time(priv, phymode);
5651 passive_dwell = iwl4965_get_passive_dwell_time(priv, phymode);
b481de9c
ZY
5652
5653 for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5654 if (channels[i].chan ==
5655 le16_to_cpu(priv->active_rxon.channel)) {
bb8c093b 5656 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5657 IWL_DEBUG_SCAN
5658 ("Skipping current channel %d\n",
5659 le16_to_cpu(priv->active_rxon.channel));
5660 continue;
5661 }
5662 } else if (priv->only_active_channel)
5663 continue;
5664
5665 scan_ch->channel = channels[i].chan;
5666
9fbab516
BC
5667 ch_info = iwl4965_get_channel_info(priv, phymode,
5668 scan_ch->channel);
b481de9c
ZY
5669 if (!is_channel_valid(ch_info)) {
5670 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5671 scan_ch->channel);
5672 continue;
5673 }
5674
5675 if (!is_active || is_channel_passive(ch_info) ||
5676 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5677 scan_ch->type = 0; /* passive */
5678 else
5679 scan_ch->type = 1; /* active */
5680
5681 if (scan_ch->type & 1)
5682 scan_ch->type |= (direct_mask << 1);
5683
5684 if (is_channel_narrow(ch_info))
5685 scan_ch->type |= (1 << 7);
5686
5687 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5688 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5689
9fbab516 5690 /* Set txpower levels to defaults */
b481de9c
ZY
5691 scan_ch->tpc.dsp_atten = 110;
5692 /* scan_pwr_info->tpc.dsp_atten; */
5693
5694 /*scan_pwr_info->tpc.tx_gain; */
5695 if (phymode == MODE_IEEE80211A)
5696 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5697 else {
5698 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5699 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516 5700 * power level:
8a1b0245 5701 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
b481de9c
ZY
5702 */
5703 }
5704
5705 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5706 scan_ch->channel,
5707 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5708 (scan_ch->type & 1) ?
5709 active_dwell : passive_dwell);
5710
5711 scan_ch++;
5712 added++;
5713 }
5714
5715 IWL_DEBUG_SCAN("total channels to scan %d\n", added);
5716 return added;
5717}
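
/*
 * Editor's note, illustration only: the scan_ch->type field built above is
 * packed as
 *
 *	bit 0       1 = active scan (send probe request), 0 = passive
 *	bits 1..    direct_mask << 1 (direct-probe SSID bits, meaningful
 *	            only when bit 0 is set)
 *	bit 7       set when the channel is "narrow"
 */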
5718
bb8c093b 5719static void iwl4965_reset_channel_flag(struct iwl4965_priv *priv)
b481de9c
ZY
5720{
5721 int i, j;
5722 for (i = 0; i < 3; i++) {
5723 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5724 for (j = 0; j < hw_mode->num_channels; j++)
5725 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5726 }
5727}
5728
bb8c093b 5729static void iwl4965_init_hw_rates(struct iwl4965_priv *priv,
b481de9c
ZY
5730 struct ieee80211_rate *rates)
5731{
5732 int i;
5733
5734 for (i = 0; i < IWL_RATE_COUNT; i++) {
bb8c093b 5735 rates[i].rate = iwl4965_rates[i].ieee * 5;
b481de9c
ZY
5736 rates[i].val = i; /* Rate scaling will work on indexes */
5737 rates[i].val2 = i;
5738 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5739 /* Only OFDM have the bits-per-symbol set */
5740 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5741 rates[i].flags |= IEEE80211_RATE_OFDM;
5742 else {
5743 /*
5744 * If CCK 1M then set rate flag to CCK else CCK_2
5745 * which is CCK | PREAMBLE2
5746 */
bb8c093b 5747 rates[i].flags |= (iwl4965_rates[i].plcp == 10) ?
b481de9c
ZY
5748 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5749 }
5750
5751 /* Set up which ones are basic rates... */
5752 if (IWL_BASIC_RATES_MASK & (1 << i))
5753 rates[i].flags |= IEEE80211_RATE_BASIC;
5754 }
b481de9c
ZY
5755}
5756
5757/**
bb8c093b 5758 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
b481de9c 5759 */
bb8c093b 5760static int iwl4965_init_geos(struct iwl4965_priv *priv)
b481de9c 5761{
bb8c093b 5762 struct iwl4965_channel_info *ch;
b481de9c
ZY
5763 struct ieee80211_hw_mode *modes;
5764 struct ieee80211_channel *channels;
5765 struct ieee80211_channel *geo_ch;
5766 struct ieee80211_rate *rates;
5767 int i = 0;
5768 enum {
5769 A = 0,
5770 B = 1,
5771 G = 2,
b481de9c 5772 };
326eeee8 5773 int mode_count = 3;
b481de9c
ZY
5774
5775 if (priv->modes) {
5776 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5777 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5778 return 0;
5779 }
5780
5781 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5782 GFP_KERNEL);
5783 if (!modes)
5784 return -ENOMEM;
5785
5786 channels = kzalloc(sizeof(struct ieee80211_channel) *
5787 priv->channel_count, GFP_KERNEL);
5788 if (!channels) {
5789 kfree(modes);
5790 return -ENOMEM;
5791 }
5792
5793 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5794 GFP_KERNEL);
5795 if (!rates) {
5796 kfree(modes);
5797 kfree(channels);
5798 return -ENOMEM;
5799 }
5800
5801 /* 0 = 802.11a
5802 * 1 = 802.11b
5803 * 2 = 802.11g
5804 */
5805
5806 /* 5.2GHz channels start after the 2.4GHz channels */
5807 modes[A].mode = MODE_IEEE80211A;
bb8c093b 5808 modes[A].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)];
b481de9c
ZY
5809 modes[A].rates = rates;
5810 modes[A].num_rates = 8; /* just OFDM */
5811 modes[A].rates = &rates[4];
5812 modes[A].num_channels = 0;
326eeee8
RR
5813#ifdef CONFIG_IWL4965_HT
5814 iwl4965_init_ht_hw_capab(&modes[A].ht_info, MODE_IEEE80211A);
5815#endif
b481de9c
ZY
5816
5817 modes[B].mode = MODE_IEEE80211B;
5818 modes[B].channels = channels;
5819 modes[B].rates = rates;
5820 modes[B].num_rates = 4; /* just CCK */
5821 modes[B].num_channels = 0;
5822
5823 modes[G].mode = MODE_IEEE80211G;
5824 modes[G].channels = channels;
5825 modes[G].rates = rates;
5826 modes[G].num_rates = 12; /* OFDM & CCK */
5827 modes[G].num_channels = 0;
326eeee8
RR
5828#ifdef CONFIG_IWL4965_HT
5829 iwl4965_init_ht_hw_capab(&modes[G].ht_info, MODE_IEEE80211G);
5830#endif
b481de9c
ZY
5831
5832 priv->ieee_channels = channels;
5833 priv->ieee_rates = rates;
5834
bb8c093b 5835 iwl4965_init_hw_rates(priv, rates);
b481de9c
ZY
5836
5837 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5838 ch = &priv->channel_info[i];
5839
5840 if (!is_channel_valid(ch)) {
5841 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5842 "skipping.\n",
5843 ch->channel, is_channel_a_band(ch) ?
5844 "5.2" : "2.4");
5845 continue;
5846 }
5847
5848 if (is_channel_a_band(ch)) {
5849 geo_ch = &modes[A].channels[modes[A].num_channels++];
b481de9c
ZY
5850 } else {
5851 geo_ch = &modes[B].channels[modes[B].num_channels++];
5852 modes[G].num_channels++;
b481de9c
ZY
5853 }
5854
5855 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5856 geo_ch->chan = ch->channel;
5857 geo_ch->power_level = ch->max_power_avg;
5858 geo_ch->antenna_max = 0xff;
5859
5860 if (is_channel_valid(ch)) {
5861 geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5862 if (ch->flags & EEPROM_CHANNEL_IBSS)
5863 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5864
5865 if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5866 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5867
5868 if (ch->flags & EEPROM_CHANNEL_RADAR)
5869 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5870
5871 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5872 priv->max_channel_txpower_limit =
5873 ch->max_power_avg;
5874 }
5875
5876 geo_ch->val = geo_ch->flag;
5877 }
5878
5879 if ((modes[A].num_channels == 0) && priv->is_abg) {
5880 printk(KERN_INFO DRV_NAME
5881 ": Incorrectly detected BG card as ABG. Please send "
5882 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5883 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5884 priv->is_abg = 0;
5885 }
5886
5887 printk(KERN_INFO DRV_NAME
5888 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5889 modes[G].num_channels, modes[A].num_channels);
5890
5891 /*
5892 * NOTE: We register these in order of preference -- the
5893 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5894 * a phymode based on rates or AP capabilities, but seems to
5895 * configure it purely on whether the channel being configured
5896 * is supported by a mode -- and the first match is taken
5897 */
5898
5899 if (modes[G].num_channels)
5900 ieee80211_register_hwmode(priv->hw, &modes[G]);
5901 if (modes[B].num_channels)
5902 ieee80211_register_hwmode(priv->hw, &modes[B]);
5903 if (modes[A].num_channels)
5904 ieee80211_register_hwmode(priv->hw, &modes[A]);
5905
5906 priv->modes = modes;
5907 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5908
5909 return 0;
5910}
5911
5912/******************************************************************************
5913 *
5914 * uCode download functions
5915 *
5916 ******************************************************************************/
5917
bb8c093b 5918static void iwl4965_dealloc_ucode_pci(struct iwl4965_priv *priv)
b481de9c 5919{
98c92211
TW
5920 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
5921 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
5922 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
5923 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
5924 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
5925 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c
ZY
5926}
5927
5928/**
bb8c093b 5929 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
5930 * looking at all data.
5931 */
4fd1f841 5932static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 *image,
9fbab516 5933 u32 len)
b481de9c
ZY
5934{
5935 u32 val;
5936 u32 save_len = len;
5937 int rc = 0;
5938 u32 errcnt;
5939
5940 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5941
bb8c093b 5942 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
5943 if (rc)
5944 return rc;
5945
bb8c093b 5946 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
b481de9c
ZY
5947
5948 errcnt = 0;
5949 for (; len > 0; len -= sizeof(u32), image++) {
5950 /* read data comes through single port, auto-incr addr */
5951 /* NOTE: Use the debugless read so we don't flood kernel log
5952 * if IWL_DL_IO is set */
bb8c093b 5953 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
5954 if (val != le32_to_cpu(*image)) {
5955 IWL_ERROR("uCode INST section is invalid at "
5956 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5957 save_len - len, val, le32_to_cpu(*image));
5958 rc = -EIO;
5959 errcnt++;
5960 if (errcnt >= 20)
5961 break;
5962 }
5963 }
5964
bb8c093b 5965 iwl4965_release_nic_access(priv);
b481de9c
ZY
5966
5967 if (!errcnt)
5968 IWL_DEBUG_INFO
5969 ("ucode image in INSTRUCTION memory is good\n");
5970
5971 return rc;
5972}
5973
5974
5975/**
bb8c093b 5976 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
5977 * using sample data 100 bytes apart. If these sample points are good,
5978 * it's a pretty good bet that everything between them is good, too.
5979 */
bb8c093b 5980static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
5981{
5982 u32 val;
5983 int rc = 0;
5984 u32 errcnt = 0;
5985 u32 i;
5986
5987 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5988
bb8c093b 5989 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
5990 if (rc)
5991 return rc;
5992
5993 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
5994 /* read data comes through single port, auto-incr addr */
5995 /* NOTE: Use the debugless read so we don't flood kernel log
5996 * if IWL_DL_IO is set */
bb8c093b 5997 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR,
b481de9c 5998 i + RTC_INST_LOWER_BOUND);
bb8c093b 5999 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
6000 if (val != le32_to_cpu(*image)) {
6001#if 0 /* Enable this if you want to see details */
6002 IWL_ERROR("uCode INST section is invalid at "
6003 "offset 0x%x, is 0x%x, s/b 0x%x\n",
6004 i, val, *image);
6005#endif
6006 rc = -EIO;
6007 errcnt++;
6008 if (errcnt >= 3)
6009 break;
6010 }
6011 }
6012
bb8c093b 6013 iwl4965_release_nic_access(priv);
b481de9c
ZY
6014
6015 return rc;
6016}
6017
6018
6019/**
bb8c093b 6020 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
6021 * and verify its contents
6022 */
bb8c093b 6023static int iwl4965_verify_ucode(struct iwl4965_priv *priv)
b481de9c
ZY
6024{
6025 __le32 *image;
6026 u32 len;
6027 int rc = 0;
6028
6029 /* Try bootstrap */
6030 image = (__le32 *)priv->ucode_boot.v_addr;
6031 len = priv->ucode_boot.len;
bb8c093b 6032 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6033 if (rc == 0) {
6034 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
6035 return 0;
6036 }
6037
6038 /* Try initialize */
6039 image = (__le32 *)priv->ucode_init.v_addr;
6040 len = priv->ucode_init.len;
bb8c093b 6041 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6042 if (rc == 0) {
6043 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
6044 return 0;
6045 }
6046
6047 /* Try runtime/protocol */
6048 image = (__le32 *)priv->ucode_code.v_addr;
6049 len = priv->ucode_code.len;
bb8c093b 6050 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6051 if (rc == 0) {
6052 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
6053 return 0;
6054 }
6055
6056 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
6057
9fbab516
BC
6058 /* Since nothing seems to match, show first several data entries in
6059 * instruction SRAM, so maybe visual inspection will give a clue.
6060 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
6061 image = (__le32 *)priv->ucode_boot.v_addr;
6062 len = priv->ucode_boot.len;
bb8c093b 6063 rc = iwl4965_verify_inst_full(priv, image, len);
b481de9c
ZY
6064
6065 return rc;
6066}
6067
6068
6069/* check contents of special bootstrap uCode SRAM */
bb8c093b 6070static int iwl4965_verify_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6071{
6072 __le32 *image = priv->ucode_boot.v_addr;
6073 u32 len = priv->ucode_boot.len;
6074 u32 reg;
6075 u32 val;
6076
6077 IWL_DEBUG_INFO("Begin verify bsm\n");
6078
6079 /* verify BSM SRAM contents */
bb8c093b 6080 val = iwl4965_read_prph(priv, BSM_WR_DWCOUNT_REG);
b481de9c
ZY
6081 for (reg = BSM_SRAM_LOWER_BOUND;
6082 reg < BSM_SRAM_LOWER_BOUND + len;
6083 reg += sizeof(u32), image++) {
bb8c093b 6084 val = iwl4965_read_prph(priv, reg);
b481de9c
ZY
6085 if (val != le32_to_cpu(*image)) {
6086 IWL_ERROR("BSM uCode verification failed at "
6087 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
6088 BSM_SRAM_LOWER_BOUND,
6089 reg - BSM_SRAM_LOWER_BOUND, len,
6090 val, le32_to_cpu(*image));
6091 return -EIO;
6092 }
6093 }
6094
6095 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
6096
6097 return 0;
6098}
6099
6100/**
bb8c093b 6101 * iwl4965_load_bsm - Load bootstrap instructions
b481de9c
ZY
6102 *
6103 * BSM operation:
6104 *
6105 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
6106 * in special SRAM that does not power down during RFKILL. When powering back
6107 * up after power-saving sleeps (or during initial uCode load), the BSM loads
6108 * the bootstrap program into the on-board processor, and starts it.
6109 *
6110 * The bootstrap program loads (via DMA) instructions and data for a new
6111 * program from host DRAM locations indicated by the host driver in the
6112 * BSM_DRAM_* registers. Once the new program is loaded, it starts
6113 * automatically.
6114 *
6115 * When initializing the NIC, the host driver points the BSM to the
6116 * "initialize" uCode image. This uCode sets up some internal data, then
6117 * notifies host via "initialize alive" that it is complete.
6118 *
6119 * The host then replaces the BSM_DRAM_* pointer values to point to the
6120 * normal runtime uCode instructions and a backup uCode data cache buffer
6121 * (filled initially with starting data values for the on-board processor),
6122 * then triggers the "initialize" uCode to load and launch the runtime uCode,
6123 * which begins normal operation.
6124 *
6125 * When doing a power-save shutdown, runtime uCode saves data SRAM into
6126 * the backup data cache in DRAM before SRAM is powered down.
6127 *
6128 * When powering back up, the BSM loads the bootstrap program. This reloads
6129 * the runtime uCode instructions and the backup data cache into SRAM,
6130 * and re-launches the runtime uCode from where it left off.
6131 */
bb8c093b 6132static int iwl4965_load_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6133{
6134 __le32 *image = priv->ucode_boot.v_addr;
6135 u32 len = priv->ucode_boot.len;
6136 dma_addr_t pinst;
6137 dma_addr_t pdata;
6138 u32 inst_len;
6139 u32 data_len;
6140 int rc;
6141 int i;
6142 u32 done;
6143 u32 reg_offset;
6144
6145 IWL_DEBUG_INFO("Begin load bsm\n");
6146
6147 /* make sure bootstrap program is no larger than BSM's SRAM size */
6148 if (len > IWL_MAX_BSM_SIZE)
6149 return -EINVAL;
6150
6151 /* Tell bootstrap uCode where to find the "Initialize" uCode
9fbab516 6152 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
bb8c093b 6153 * NOTE: iwl4965_initialize_alive_start() will replace these values,
b481de9c
ZY
6154 * after the "initialize" uCode has run, to point to
6155 * runtime/protocol instructions and backup data cache. */
6156 pinst = priv->ucode_init.p_addr >> 4;
6157 pdata = priv->ucode_init_data.p_addr >> 4;
6158 inst_len = priv->ucode_init.len;
6159 data_len = priv->ucode_init_data.len;
6160
bb8c093b 6161 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6162 if (rc)
6163 return rc;
6164
bb8c093b
CH
6165 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6166 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6167 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
6168 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
b481de9c
ZY
6169
6170 /* Fill BSM memory with bootstrap instructions */
6171 for (reg_offset = BSM_SRAM_LOWER_BOUND;
6172 reg_offset < BSM_SRAM_LOWER_BOUND + len;
6173 reg_offset += sizeof(u32), image++)
bb8c093b 6174 _iwl4965_write_prph(priv, reg_offset,
b481de9c
ZY
6175 le32_to_cpu(*image));
6176
bb8c093b 6177 rc = iwl4965_verify_bsm(priv);
b481de9c 6178 if (rc) {
bb8c093b 6179 iwl4965_release_nic_access(priv);
b481de9c
ZY
6180 return rc;
6181 }
6182
6183 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
bb8c093b
CH
6184 iwl4965_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
6185 iwl4965_write_prph(priv, BSM_WR_MEM_DST_REG,
b481de9c 6186 RTC_INST_LOWER_BOUND);
bb8c093b 6187 iwl4965_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
b481de9c
ZY
6188
6189 /* Load bootstrap code into instruction SRAM now,
6190 * to prepare to load "initialize" uCode */
bb8c093b 6191 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6192 BSM_WR_CTRL_REG_BIT_START);
6193
6194 /* Wait for load of bootstrap uCode to finish */
6195 for (i = 0; i < 100; i++) {
bb8c093b 6196 done = iwl4965_read_prph(priv, BSM_WR_CTRL_REG);
b481de9c
ZY
6197 if (!(done & BSM_WR_CTRL_REG_BIT_START))
6198 break;
6199 udelay(10);
6200 }
6201 if (i < 100)
6202 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
6203 else {
6204 IWL_ERROR("BSM write did not complete!\n");
6205 return -EIO;
6206 }
6207
6208 /* Enable future boot loads whenever power management unit triggers it
6209 * (e.g. when powering back up after power-save shutdown) */
bb8c093b 6210 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6211 BSM_WR_CTRL_REG_BIT_START_EN);
6212
bb8c093b 6213 iwl4965_release_nic_access(priv);
b481de9c
ZY
6214
6215 return 0;
6216}
6217
bb8c093b 6218static void iwl4965_nic_start(struct iwl4965_priv *priv)
b481de9c
ZY
6219{
6220 /* Remove all resets to allow NIC to operate */
bb8c093b 6221 iwl4965_write32(priv, CSR_RESET, 0);
b481de9c
ZY
6222}
6223
90e759d1 6224
b481de9c 6225/**
bb8c093b 6226 * iwl4965_read_ucode - Read uCode images from disk file.
b481de9c
ZY
6227 *
6228 * Copy into buffers for card to fetch via bus-mastering
6229 */
bb8c093b 6230static int iwl4965_read_ucode(struct iwl4965_priv *priv)
b481de9c 6231{
bb8c093b 6232 struct iwl4965_ucode *ucode;
90e759d1 6233 int ret;
b481de9c
ZY
6234 const struct firmware *ucode_raw;
6235 const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode";
6236 u8 *src;
6237 size_t len;
6238 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
6239
6240 /* Ask kernel firmware_class module to get the boot firmware off disk.
6241 * request_firmware() is synchronous, file is in memory on return. */
90e759d1
TW
6242 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
6243 if (ret < 0) {
6244 IWL_ERROR("%s firmware file req failed: Reason %d\n",
6245 name, ret);
b481de9c
ZY
6246 goto error;
6247 }
6248
6249 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
6250 name, ucode_raw->size);
6251
6252 /* Make sure that we got at least our header! */
6253 if (ucode_raw->size < sizeof(*ucode)) {
6254 IWL_ERROR("File size way too small!\n");
90e759d1 6255 ret = -EINVAL;
b481de9c
ZY
6256 goto err_release;
6257 }
6258
6259 /* Data from ucode file: header followed by uCode images */
6260 ucode = (void *)ucode_raw->data;
6261
6262 ver = le32_to_cpu(ucode->ver);
6263 inst_size = le32_to_cpu(ucode->inst_size);
6264 data_size = le32_to_cpu(ucode->data_size);
6265 init_size = le32_to_cpu(ucode->init_size);
6266 init_data_size = le32_to_cpu(ucode->init_data_size);
6267 boot_size = le32_to_cpu(ucode->boot_size);
6268
6269 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
6270 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
6271 inst_size);
6272 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
6273 data_size);
6274 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
6275 init_size);
6276 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
6277 init_data_size);
6278 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
6279 boot_size);
6280
6281 /* Verify size of file vs. image size info in file's header */
6282 if (ucode_raw->size < sizeof(*ucode) +
6283 inst_size + data_size + init_size +
6284 init_data_size + boot_size) {
6285
6286 IWL_DEBUG_INFO("uCode file size %d too small\n",
6287 (int)ucode_raw->size);
90e759d1 6288 ret = -EINVAL;
b481de9c
ZY
6289 goto err_release;
6290 }
6291
6292 /* Verify that uCode images will fit in card's SRAM */
6293 if (inst_size > IWL_MAX_INST_SIZE) {
90e759d1
TW
6294 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
6295 inst_size);
6296 ret = -EINVAL;
b481de9c
ZY
6297 goto err_release;
6298 }
6299
6300 if (data_size > IWL_MAX_DATA_SIZE) {
90e759d1
TW
6301 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
6302 data_size);
6303 ret = -EINVAL;
b481de9c
ZY
6304 goto err_release;
6305 }
6306 if (init_size > IWL_MAX_INST_SIZE) {
6307 IWL_DEBUG_INFO
90e759d1
TW
6308 ("uCode init instr len %d too large to fit in\n",
6309 init_size);
6310 ret = -EINVAL;
b481de9c
ZY
6311 goto err_release;
6312 }
6313 if (init_data_size > IWL_MAX_DATA_SIZE) {
6314 IWL_DEBUG_INFO
90e759d1
TW
6315 ("uCode init data len %d too large to fit in\n",
6316 init_data_size);
6317 ret = -EINVAL;
b481de9c
ZY
6318 goto err_release;
6319 }
6320 if (boot_size > IWL_MAX_BSM_SIZE) {
6321 IWL_DEBUG_INFO
90e759d1
TW
6322 ("uCode boot instr len %d too large to fit in\n",
6323 boot_size);
6324 ret = -EINVAL;
b481de9c
ZY
6325 goto err_release;
6326 }
6327
6328 /* Allocate ucode buffers for card's bus-master loading ... */
6329
6330 /* Runtime instructions and 2 copies of data:
6331 * 1) unmodified from disk
6332 * 2) backup cache for save/restore during power-downs */
6333 priv->ucode_code.len = inst_size;
98c92211 6334 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
6335
6336 priv->ucode_data.len = data_size;
98c92211 6337 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
6338
6339 priv->ucode_data_backup.len = data_size;
98c92211 6340 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c
ZY
6341
6342 /* Initialization instructions and data */
90e759d1
TW
6343 if (init_size && init_data_size) {
6344 priv->ucode_init.len = init_size;
98c92211 6345 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
90e759d1
TW
6346
6347 priv->ucode_init_data.len = init_data_size;
98c92211 6348 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
90e759d1
TW
6349
6350 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
6351 goto err_pci_alloc;
6352 }
b481de9c
ZY
6353
6354 /* Bootstrap (instructions only, no data) */
90e759d1
TW
6355 if (boot_size) {
6356 priv->ucode_boot.len = boot_size;
98c92211 6357 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 6358
90e759d1
TW
6359 if (!priv->ucode_boot.v_addr)
6360 goto err_pci_alloc;
6361 }
b481de9c
ZY
6362
6363 /* Copy images into buffers for card's bus-master reads ... */
6364
6365 /* Runtime instructions (first block of data in file) */
6366 src = &ucode->data[0];
6367 len = priv->ucode_code.len;
90e759d1 6368 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
6369 memcpy(priv->ucode_code.v_addr, src, len);
6370 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
6371 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
6372
6373 /* Runtime data (2nd block)
bb8c093b 6374 * NOTE: Copy into backup buffer will be done in iwl4965_up() */
b481de9c
ZY
6375 src = &ucode->data[inst_size];
6376 len = priv->ucode_data.len;
90e759d1 6377 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
6378 memcpy(priv->ucode_data.v_addr, src, len);
6379 memcpy(priv->ucode_data_backup.v_addr, src, len);
6380
6381 /* Initialization instructions (3rd block) */
6382 if (init_size) {
6383 src = &ucode->data[inst_size + data_size];
6384 len = priv->ucode_init.len;
90e759d1
TW
6385 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
6386 len);
b481de9c
ZY
6387 memcpy(priv->ucode_init.v_addr, src, len);
6388 }
6389
6390 /* Initialization data (4th block) */
6391 if (init_data_size) {
6392 src = &ucode->data[inst_size + data_size + init_size];
6393 len = priv->ucode_init_data.len;
90e759d1
TW
6394 IWL_DEBUG_INFO("Copying (but not loading) init data len %Zd\n",
6395 len);
b481de9c
ZY
6396 memcpy(priv->ucode_init_data.v_addr, src, len);
6397 }
6398
6399 /* Bootstrap instructions (5th block) */
6400 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
6401 len = priv->ucode_boot.len;
90e759d1 6402 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %Zd\n", len);
b481de9c
ZY
6403 memcpy(priv->ucode_boot.v_addr, src, len);
6404
6405 /* We have our copies now, allow OS release its copies */
6406 release_firmware(ucode_raw);
6407 return 0;
6408
6409 err_pci_alloc:
6410 IWL_ERROR("failed to allocate pci memory\n");
90e759d1 6411 ret = -ENOMEM;
bb8c093b 6412 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
6413
6414 err_release:
6415 release_firmware(ucode_raw);
6416
6417 error:
90e759d1 6418 return ret;
b481de9c
ZY
6419}
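
/*
 * Editor's note, illustration only: the .ucode file layout implied by the
 * parsing above is a header (a version word plus the five little-endian
 * image sizes) followed by the five images packed back to back.  Offsets
 * are relative to the start of ucode->data[]:
 *
 *	runtime inst  at 0
 *	runtime data  at inst_size
 *	init inst     at inst_size + data_size
 *	init data     at inst_size + data_size + init_size
 *	boot inst     at inst_size + data_size + init_size + init_data_size
 */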
6420
6421
6422/**
bb8c093b 6423 * iwl4965_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
6424 *
6425 * Tell initialization uCode where to find runtime uCode.
6426 *
6427 * BSM registers initially contain pointers to initialization uCode.
6428 * We need to replace them to load runtime uCode inst and data,
6429 * and to save runtime data when powering down.
6430 */
bb8c093b 6431static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv)
b481de9c
ZY
6432{
6433 dma_addr_t pinst;
6434 dma_addr_t pdata;
6435 int rc = 0;
6436 unsigned long flags;
6437
6438 /* bits 35:4 for 4965 */
6439 pinst = priv->ucode_code.p_addr >> 4;
6440 pdata = priv->ucode_data_backup.p_addr >> 4;
6441
6442 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 6443 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6444 if (rc) {
6445 spin_unlock_irqrestore(&priv->lock, flags);
6446 return rc;
6447 }
6448
6449 /* Tell bootstrap uCode where to find image to load */
bb8c093b
CH
6450 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6451 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6452 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
6453 priv->ucode_data.len);
6454
6455 /* Inst bytecount must be last to set up, bit 31 signals uCode
6456 * that all new ptr/size info is in place */
bb8c093b 6457 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
6458 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6459
bb8c093b 6460 iwl4965_release_nic_access(priv);
b481de9c
ZY
6461
6462 spin_unlock_irqrestore(&priv->lock, flags);
6463
6464 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6465
6466 return rc;
6467}
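
/*
 * Editor's sketch, illustration only: the ">> 4" used above (and in
 * iwl4965_load_bsm()) packs a DRAM physical address into a 32-bit BSM
 * register as bits 35:4; the low four bits are dropped, so the buffers are
 * presumably 16-byte aligned.  Hypothetical helpers showing the encoding
 * and the matching decode:
 */
static inline u32 example_bsm_encode_dram_addr(dma_addr_t addr)
{
	return (u32)(addr >> 4);	/* keep bits 35:4 */
}

static inline u64 example_bsm_decode_dram_addr(u32 reg)
{
	return (u64)reg << 4;		/* recover the byte address */
}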
6468
6469/**
bb8c093b 6470 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
6471 *
6472 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6473 *
6474 * The 4965 "initialize" ALIVE reply contains calibration data for:
6475 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6476 * (3945 does not contain this data).
6477 *
6478 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6479*/
bb8c093b 6480static void iwl4965_init_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6481{
6482 /* Check alive response for "valid" sign from uCode */
6483 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6484 /* We had an error bringing up the hardware, so take it
6485 * all the way back down so we can try again */
6486 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6487 goto restart;
6488 }
6489
6490 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6491 * This is a paranoid check, because we would not have gotten the
6492 * "initialize" alive if code weren't properly loaded. */
bb8c093b 6493 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6494 /* Runtime instruction load was bad;
6495 * take it all the way back down so we can try again */
6496 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6497 goto restart;
6498 }
6499
6500 /* Calculate temperature */
6501 priv->temperature = iwl4965_get_temperature(priv);
6502
6503 /* Send pointers to protocol/runtime uCode image ... init code will
6504 * load and launch runtime uCode, which will send us another "Alive"
6505 * notification. */
6506 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 6507 if (iwl4965_set_ucode_ptrs(priv)) {
b481de9c
ZY
6508 /* Runtime instruction load won't happen;
6509 * take it all the way back down so we can try again */
6510 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6511 goto restart;
6512 }
6513 return;
6514
6515 restart:
6516 queue_work(priv->workqueue, &priv->restart);
6517}
6518
6519
6520/**
bb8c093b 6521 * iwl4965_alive_start - called after REPLY_ALIVE notification received
b481de9c 6522 * from protocol/runtime uCode (initialization uCode's
bb8c093b 6523 * Alive gets handled by iwl4965_init_alive_start()).
b481de9c 6524 */
bb8c093b 6525static void iwl4965_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6526{
6527 int rc = 0;
6528
6529 IWL_DEBUG_INFO("Runtime Alive received.\n");
6530
6531 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
6532 /* We had an error bringing up the hardware, so take it
6533 * all the way back down so we can try again */
6534 IWL_DEBUG_INFO("Alive failed.\n");
6535 goto restart;
6536 }
6537
6538 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
6539 * This is a paranoid check, because we would not have gotten the
6540 * "runtime" alive if code weren't properly loaded. */
bb8c093b 6541 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6542 /* Runtime instruction load was bad;
6543 * take it all the way back down so we can try again */
6544 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
6545 goto restart;
6546 }
6547
bb8c093b 6548 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6549
6550 rc = iwl4965_alive_notify(priv);
6551 if (rc) {
6552 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
6553 rc);
6554 goto restart;
6555 }
6556
9fbab516 6557 /* After the ALIVE response, we can send host commands to 4965 uCode */
b481de9c
ZY
6558 set_bit(STATUS_ALIVE, &priv->status);
6559
6560 /* Clear out the uCode error bit if it is set */
6561 clear_bit(STATUS_FW_ERROR, &priv->status);
6562
bb8c093b 6563 rc = iwl4965_init_channel_map(priv);
b481de9c
ZY
6564 if (rc) {
6565 IWL_ERROR("initializing regulatory failed: %d\n", rc);
6566 return;
6567 }
6568
bb8c093b 6569 iwl4965_init_geos(priv);
5a66926a 6570 iwl4965_reset_channel_flag(priv);
b481de9c 6571
bb8c093b 6572 if (iwl4965_is_rfkill(priv))
b481de9c
ZY
6573 return;
6574
5a66926a 6575 ieee80211_start_queues(priv->hw);
b481de9c
ZY
6576
6577 priv->active_rate = priv->rates_mask;
6578 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
6579
bb8c093b 6580 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 6581
bb8c093b
CH
6582 if (iwl4965_is_associated(priv)) {
6583 struct iwl4965_rxon_cmd *active_rxon =
6584 (struct iwl4965_rxon_cmd *)(&priv->active_rxon);
b481de9c
ZY
6585
6586 memcpy(&priv->staging_rxon, &priv->active_rxon,
6587 sizeof(priv->staging_rxon));
6588 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6589 } else {
6590 /* Initialize our rx_config data */
bb8c093b 6591 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
6592 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
6593 }
6594
9fbab516 6595 /* Configure Bluetooth device coexistence support */
bb8c093b 6596 iwl4965_send_bt_config(priv);
b481de9c
ZY
6597
6598 /* Configure the adapter for unassociated operation */
bb8c093b 6599 iwl4965_commit_rxon(priv);
b481de9c
ZY
6600
6601 /* At this point, the NIC is initialized and operational */
6602 priv->notif_missed_beacons = 0;
6603 set_bit(STATUS_READY, &priv->status);
6604
6605 iwl4965_rf_kill_ct_config(priv);
5a66926a 6606
b481de9c 6607 IWL_DEBUG_INFO("ALIVE processing complete.\n");
5a66926a 6608 wake_up_interruptible(&priv->wait_command_queue);
b481de9c
ZY
6609
6610 if (priv->error_recovering)
bb8c093b 6611 iwl4965_error_recovery(priv);
b481de9c
ZY
6612
6613 return;
6614
6615 restart:
6616 queue_work(priv->workqueue, &priv->restart);
6617}
6618
bb8c093b 6619static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv);
b481de9c 6620
bb8c093b 6621static void __iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6622{
6623 unsigned long flags;
6624 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
6625 struct ieee80211_conf *conf = NULL;
6626
6627 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
6628
6629 conf = ieee80211_get_hw_conf(priv->hw);
6630
6631 if (!exit_pending)
6632 set_bit(STATUS_EXIT_PENDING, &priv->status);
6633
bb8c093b 6634 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6635
6636 /* Unblock any waiting calls */
6637 wake_up_interruptible_all(&priv->wait_command_queue);
6638
b481de9c
ZY
6639 /* Wipe out the EXIT_PENDING status bit if we are not actually
6640 * exiting the module */
6641 if (!exit_pending)
6642 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6643
6644 /* stop and reset the on-board processor */
bb8c093b 6645 iwl4965_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
6646
6647 /* tell the device to stop sending interrupts */
bb8c093b 6648 iwl4965_disable_interrupts(priv);
b481de9c
ZY
6649
6650 if (priv->mac80211_registered)
6651 ieee80211_stop_queues(priv->hw);
6652
bb8c093b 6653 /* If we have not previously called iwl4965_init() then
b481de9c 6654 * clear all bits but the RF Kill and SUSPEND bits and return */
bb8c093b 6655 if (!iwl4965_is_init(priv)) {
b481de9c
ZY
6656 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6657 STATUS_RF_KILL_HW |
6658 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6659 STATUS_RF_KILL_SW |
6660 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6661 STATUS_IN_SUSPEND;
6662 goto exit;
6663 }
6664
6665 /* ...otherwise clear out all the status bits but the RF Kill and
6666 * SUSPEND bits and continue taking the NIC down. */
6667 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6668 STATUS_RF_KILL_HW |
6669 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6670 STATUS_RF_KILL_SW |
6671 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6672 STATUS_IN_SUSPEND |
6673 test_bit(STATUS_FW_ERROR, &priv->status) <<
6674 STATUS_FW_ERROR;
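	/* Note on the two status assignments above: test_bit() returns 0 or 1,
	 * so shifting the result back by the bit's index rebuilds a mask that
	 * contains only that flag.  ORing the masks together and assigning
	 * the result clears every other status bit in a single step. */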
6675
6676 spin_lock_irqsave(&priv->lock, flags);
9fbab516
BC
6677 iwl4965_clear_bit(priv, CSR_GP_CNTRL,
6678 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
6679 spin_unlock_irqrestore(&priv->lock, flags);
6680
bb8c093b
CH
6681 iwl4965_hw_txq_ctx_stop(priv);
6682 iwl4965_hw_rxq_stop(priv);
b481de9c
ZY
6683
6684 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
6685 if (!iwl4965_grab_nic_access(priv)) {
6686 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 6687 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 6688 iwl4965_release_nic_access(priv);
b481de9c
ZY
6689 }
6690 spin_unlock_irqrestore(&priv->lock, flags);
6691
6692 udelay(5);
6693
bb8c093b
CH
6694 iwl4965_hw_nic_stop_master(priv);
6695 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6696 iwl4965_hw_nic_reset(priv);
b481de9c
ZY
6697
6698 exit:
bb8c093b 6699 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
6700
6701 if (priv->ibss_beacon)
6702 dev_kfree_skb(priv->ibss_beacon);
6703 priv->ibss_beacon = NULL;
6704
6705 /* clear out any free frames */
bb8c093b 6706 iwl4965_clear_free_frames(priv);
b481de9c
ZY
6707}
6708
bb8c093b 6709static void iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6710{
6711 mutex_lock(&priv->mutex);
bb8c093b 6712 __iwl4965_down(priv);
b481de9c 6713 mutex_unlock(&priv->mutex);
b24d22b1 6714
bb8c093b 6715 iwl4965_cancel_deferred_work(priv);
b481de9c
ZY
6716}
6717
6718#define MAX_HW_RESTARTS 5
6719
bb8c093b 6720static int __iwl4965_up(struct iwl4965_priv *priv)
b481de9c
ZY
6721{
6722 int rc, i;
b481de9c
ZY
6723
6724 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6725 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6726 return -EIO;
6727 }
6728
6729 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6730 IWL_WARNING("Radio disabled by SW RF kill (module "
6731 "parameter)\n");
e655b9f0
ZY
6732 return -ENODEV;
6733 }
6734
6735 /* If platform's RF_KILL switch is NOT set to KILL */
6736 if (iwl4965_read32(priv, CSR_GP_CNTRL) &
6737 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6738 clear_bit(STATUS_RF_KILL_HW, &priv->status);
6739 else {
6740 set_bit(STATUS_RF_KILL_HW, &priv->status);
6741 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
6742 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
6743 return -ENODEV;
6744 }
b481de9c
ZY
6745 }
6746
a781cf94
RC
6747 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
6748 IWL_ERROR("ucode not available for device bringup\n");
6749 return -EIO;
6750 }
6751
bb8c093b 6752 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 6753
bb8c093b 6754 rc = iwl4965_hw_nic_init(priv);
b481de9c
ZY
6755 if (rc) {
6756 IWL_ERROR("Unable to int nic\n");
6757 return rc;
6758 }
6759
6760 /* make sure rfkill handshake bits are cleared */
bb8c093b
CH
6761 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6762 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
6763 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6764
6765 /* clear (again), then enable host interrupts */
bb8c093b
CH
6766 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
6767 iwl4965_enable_interrupts(priv);
b481de9c
ZY
6768
6769 /* really make sure rfkill handshake bits are cleared */
bb8c093b
CH
6770 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6771 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
6772
6773 /* Copy original ucode data image from disk into backup cache.
6774 * This will be used to initialize the on-board processor's
6775 * data SRAM for a clean start when the runtime program first loads. */
6776 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
5a66926a 6777 priv->ucode_data.len);
b481de9c 6778
e655b9f0
ZY
6779 /* We return success when we resume from suspend and rf_kill is on. */
6780 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
b481de9c 6781 return 0;
b481de9c
ZY
6782
6783 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6784
bb8c093b 6785 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6786
6787 /* load bootstrap state machine,
6788 * load bootstrap program into processor's memory,
6789 * prepare to load the "initialize" uCode */
bb8c093b 6790 rc = iwl4965_load_bsm(priv);
b481de9c
ZY
6791
6792 if (rc) {
6793 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6794 continue;
6795 }
6796
6797 /* start card; "initialize" will load runtime ucode */
bb8c093b 6798 iwl4965_nic_start(priv);
b481de9c 6799
b481de9c
ZY
6800 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6801
6802 return 0;
6803 }
6804
6805 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 6806 __iwl4965_down(priv);
b481de9c
ZY
6807
6808 /* tried to restart and config the device for as long as our
6809 * patience could withstand */
6810 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6811 return -EIO;
6812}
6813
6814
6815/*****************************************************************************
6816 *
6817 * Workqueue callbacks
6818 *
6819 *****************************************************************************/
6820
bb8c093b 6821static void iwl4965_bg_init_alive_start(struct work_struct *data)
b481de9c 6822{
bb8c093b
CH
6823 struct iwl4965_priv *priv =
6824 container_of(data, struct iwl4965_priv, init_alive_start.work);
b481de9c
ZY
6825
6826 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6827 return;
6828
6829 mutex_lock(&priv->mutex);
bb8c093b 6830 iwl4965_init_alive_start(priv);
b481de9c
ZY
6831 mutex_unlock(&priv->mutex);
6832}
6833
bb8c093b 6834static void iwl4965_bg_alive_start(struct work_struct *data)
b481de9c 6835{
bb8c093b
CH
6836 struct iwl4965_priv *priv =
6837 container_of(data, struct iwl4965_priv, alive_start.work);
b481de9c
ZY
6838
6839 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6840 return;
6841
6842 mutex_lock(&priv->mutex);
bb8c093b 6843 iwl4965_alive_start(priv);
b481de9c
ZY
6844 mutex_unlock(&priv->mutex);
6845}
6846
bb8c093b 6847static void iwl4965_bg_rf_kill(struct work_struct *work)
b481de9c 6848{
bb8c093b 6849 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, rf_kill);
b481de9c
ZY
6850
6851 wake_up_interruptible(&priv->wait_command_queue);
6852
6853 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6854 return;
6855
6856 mutex_lock(&priv->mutex);
6857
bb8c093b 6858 if (!iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6859 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6860 "HW and/or SW RF Kill no longer active, restarting "
6861 "device\n");
6862 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6863 queue_work(priv->workqueue, &priv->restart);
6864 } else {
6865
6866 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6867 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6868 "disabled by SW switch\n");
6869 else
6870 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6871 "Kill switch must be turned off for "
6872 "wireless networking to work.\n");
6873 }
6874 mutex_unlock(&priv->mutex);
6875}
6876
6877#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6878
bb8c093b 6879static void iwl4965_bg_scan_check(struct work_struct *data)
b481de9c 6880{
bb8c093b
CH
6881 struct iwl4965_priv *priv =
6882 container_of(data, struct iwl4965_priv, scan_check.work);
b481de9c
ZY
6883
6884 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6885 return;
6886
6887 mutex_lock(&priv->mutex);
6888 if (test_bit(STATUS_SCANNING, &priv->status) ||
6889 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6890 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6891 "Scan completion watchdog resetting adapter (%dms)\n",
6892 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
052c4b9f 6893
b481de9c 6894 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 6895 iwl4965_send_scan_abort(priv);
b481de9c
ZY
6896 }
6897 mutex_unlock(&priv->mutex);
6898}
6899
bb8c093b 6900static void iwl4965_bg_request_scan(struct work_struct *data)
b481de9c 6901{
bb8c093b
CH
6902 struct iwl4965_priv *priv =
6903 container_of(data, struct iwl4965_priv, request_scan);
6904 struct iwl4965_host_cmd cmd = {
b481de9c 6905 .id = REPLY_SCAN_CMD,
bb8c093b 6906 .len = sizeof(struct iwl4965_scan_cmd),
b481de9c
ZY
6907 .meta.flags = CMD_SIZE_HUGE,
6908 };
6909 int rc = 0;
bb8c093b 6910 struct iwl4965_scan_cmd *scan;
b481de9c
ZY
6911 struct ieee80211_conf *conf = NULL;
6912 u8 direct_mask;
6913 int phymode;
6914
6915 conf = ieee80211_get_hw_conf(priv->hw);
6916
6917 mutex_lock(&priv->mutex);
6918
bb8c093b 6919 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
6920 IWL_WARNING("request scan called when driver not ready.\n");
6921 goto done;
6922 }
6923
6924 /* Make sure the scan wasn't cancelled before this queued work
6925 * was given the chance to run... */
6926 if (!test_bit(STATUS_SCANNING, &priv->status))
6927 goto done;
6928
6929 /* This should never be called or scheduled if there is currently
6930 * a scan active in the hardware. */
6931 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6932 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
6933 "Ignoring second request.\n");
6934 rc = -EIO;
6935 goto done;
6936 }
6937
6938 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6939 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6940 goto done;
6941 }
6942
6943 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6944 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6945 goto done;
6946 }
6947
bb8c093b 6948 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6949 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6950 goto done;
6951 }
6952
6953 if (!test_bit(STATUS_READY, &priv->status)) {
6954 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
6955 goto done;
6956 }
6957
6958 if (!priv->scan_bands) {
6959 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
6960 goto done;
6961 }
6962
6963 if (!priv->scan) {
bb8c093b 6964 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
b481de9c
ZY
6965 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
6966 if (!priv->scan) {
6967 rc = -ENOMEM;
6968 goto done;
6969 }
6970 }
6971 scan = priv->scan;
bb8c093b 6972 memset(scan, 0, sizeof(struct iwl4965_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
6973
6974 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
6975 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
6976
bb8c093b 6977 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
6978 u16 interval = 0;
6979 u32 extra;
6980 u32 suspend_time = 100;
6981 u32 scan_suspend_time = 100;
6982 unsigned long flags;
6983
6984 IWL_DEBUG_INFO("Scanning while associated...\n");
6985
6986 spin_lock_irqsave(&priv->lock, flags);
6987 interval = priv->beacon_int;
6988 spin_unlock_irqrestore(&priv->lock, flags);
6989
6990 scan->suspend_time = 0;
052c4b9f 6991 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
6992 if (!interval)
6993 interval = suspend_time;
6994
6995 extra = (suspend_time / interval) << 22;
6996 scan_suspend_time = (extra |
6997 ((suspend_time % interval) * 1024));
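		/* The suspend_time field appears to pack whole beacon
		 * intervals into the bits above bit 22 and the remainder,
		 * converted to usec (TU * 1024), into the low 22 bits.
		 * Example: suspend_time = 100 and interval = 100 gives
		 * (1 << 22) | 0 = 0x400000. */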
6998 scan->suspend_time = cpu_to_le32(scan_suspend_time);
6999 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
7000 scan_suspend_time, interval);
7001 }
7002
7003 /* We should add the ability for user to lock to PASSIVE ONLY */
7004 if (priv->one_direct_scan) {
7005 IWL_DEBUG_SCAN
7006 ("Kicking off one direct scan for '%s'\n",
bb8c093b 7007 iwl4965_escape_essid(priv->direct_ssid,
b481de9c
ZY
7008 priv->direct_ssid_len));
7009 scan->direct_scan[0].id = WLAN_EID_SSID;
7010 scan->direct_scan[0].len = priv->direct_ssid_len;
7011 memcpy(scan->direct_scan[0].ssid,
7012 priv->direct_ssid, priv->direct_ssid_len);
7013 direct_mask = 1;
bb8c093b 7014 } else if (!iwl4965_is_associated(priv) && priv->essid_len) {
b481de9c
ZY
7015 scan->direct_scan[0].id = WLAN_EID_SSID;
7016 scan->direct_scan[0].len = priv->essid_len;
7017 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
7018 direct_mask = 1;
7019 } else
7020 direct_mask = 0;
7021
7022 /* We don't build a direct scan probe request; the uCode will do
7023 * that based on the direct_mask added to each channel entry */
7024 scan->tx_cmd.len = cpu_to_le16(
bb8c093b 7025 iwl4965_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
b481de9c
ZY
7026 IWL_MAX_SCAN_SIZE - sizeof(scan), 0));
7027 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
7028 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
7029 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
7030
7031 /* flags + rate selection */
7032
7033 scan->tx_cmd.tx_flags |= cpu_to_le32(0x200);
7034
7035 switch (priv->scan_bands) {
7036 case 2:
7037 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
7038 scan->tx_cmd.rate_n_flags =
bb8c093b 7039 iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
b481de9c
ZY
7040 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
7041
7042 scan->good_CRC_th = 0;
7043 phymode = MODE_IEEE80211G;
7044 break;
7045
7046 case 1:
7047 scan->tx_cmd.rate_n_flags =
bb8c093b 7048 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
b481de9c
ZY
7049 RATE_MCS_ANT_B_MSK);
7050 scan->good_CRC_th = IWL_GOOD_CRC_TH;
7051 phymode = MODE_IEEE80211A;
7052 break;
7053
7054 default:
7055 IWL_WARNING("Invalid scan band count\n");
7056 goto done;
7057 }
7058
7059 /* select Rx chains */
7060
7061 /* Force use of chains B and C (0x6) for scan Rx.
7062 * Avoid A (0x1) because of its off-channel reception on A-band.
7063 * MIMO is not used here, but value is required to make uCode happy. */
7064 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
7065 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
7066 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
7067 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
7068
7069 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
7070 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
7071
7072 if (direct_mask)
7073 IWL_DEBUG_SCAN
7074 ("Initiating direct scan for %s.\n",
bb8c093b 7075 iwl4965_escape_essid(priv->essid, priv->essid_len));
b481de9c
ZY
7076 else
7077 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
7078
7079 scan->channel_count =
bb8c093b 7080 iwl4965_get_channels_for_scan(
b481de9c
ZY
7081 priv, phymode, 1, /* active */
7082 direct_mask,
7083 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
7084
7085 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 7086 scan->channel_count * sizeof(struct iwl4965_scan_channel);
b481de9c
ZY
7087 cmd.data = scan;
7088 scan->len = cpu_to_le16(cmd.len);
7089
7090 set_bit(STATUS_SCAN_HW, &priv->status);
bb8c093b 7091 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
7092 if (rc)
7093 goto done;
7094
7095 queue_delayed_work(priv->workqueue, &priv->scan_check,
7096 IWL_SCAN_CHECK_WATCHDOG);
7097
7098 mutex_unlock(&priv->mutex);
7099 return;
7100
7101 done:
01ebd063 7102 /* inform mac80211 scan aborted */
b481de9c
ZY
7103 queue_work(priv->workqueue, &priv->scan_completed);
7104 mutex_unlock(&priv->mutex);
7105}
7106
bb8c093b 7107static void iwl4965_bg_up(struct work_struct *data)
b481de9c 7108{
bb8c093b 7109 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, up);
b481de9c
ZY
7110
7111 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7112 return;
7113
7114 mutex_lock(&priv->mutex);
bb8c093b 7115 __iwl4965_up(priv);
b481de9c
ZY
7116 mutex_unlock(&priv->mutex);
7117}
7118
bb8c093b 7119static void iwl4965_bg_restart(struct work_struct *data)
b481de9c 7120{
bb8c093b 7121 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, restart);
b481de9c
ZY
7122
7123 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7124 return;
7125
bb8c093b 7126 iwl4965_down(priv);
b481de9c
ZY
7127 queue_work(priv->workqueue, &priv->up);
7128}
7129
bb8c093b 7130static void iwl4965_bg_rx_replenish(struct work_struct *data)
b481de9c 7131{
bb8c093b
CH
7132 struct iwl4965_priv *priv =
7133 container_of(data, struct iwl4965_priv, rx_replenish);
b481de9c
ZY
7134
7135 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7136 return;
7137
7138 mutex_lock(&priv->mutex);
bb8c093b 7139 iwl4965_rx_replenish(priv);
b481de9c
ZY
7140 mutex_unlock(&priv->mutex);
7141}
7142
7878a5a4
MA
7143#define IWL_DELAY_NEXT_SCAN (HZ*2)
7144
bb8c093b 7145static void iwl4965_bg_post_associate(struct work_struct *data)
b481de9c 7146{
bb8c093b 7147 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv,
b481de9c
ZY
7148 post_associate.work);
7149
7150 int rc = 0;
7151 struct ieee80211_conf *conf = NULL;
0795af57 7152 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7153
7154 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7155 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
7156 return;
7157 }
7158
0795af57
JP
7159 IWL_DEBUG_ASSOC("Associated as %d to: %s\n",
7160 priv->assoc_id,
7161 print_mac(mac, priv->active_rxon.bssid_addr));
b481de9c
ZY
7162
7163
7164 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7165 return;
7166
7167 mutex_lock(&priv->mutex);
7168
32bfd35d 7169 if (!priv->vif || !priv->is_open) {
948c171c
MA
7170 mutex_unlock(&priv->mutex);
7171 return;
7172 }
bb8c093b 7173 iwl4965_scan_cancel_timeout(priv, 200);
052c4b9f 7174
b481de9c
ZY
7175 conf = ieee80211_get_hw_conf(priv->hw);
7176
7177 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7178 iwl4965_commit_rxon(priv);
b481de9c 7179
bb8c093b
CH
7180 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7181 iwl4965_setup_rxon_timing(priv);
7182 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7183 sizeof(priv->rxon_timing), &priv->rxon_timing);
7184 if (rc)
7185 IWL_WARNING("REPLY_RXON_TIMING failed - "
7186 "Attempting to continue.\n");
7187
7188 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7189
c8b0e6e1 7190#ifdef CONFIG_IWL4965_HT
fd105e79
RR
7191 if (priv->current_ht_config.is_ht)
7192 iwl4965_set_rxon_ht(priv, &priv->current_ht_config);
c8b0e6e1 7193#endif /* CONFIG_IWL4965_HT*/
b481de9c
ZY
7194 iwl4965_set_rxon_chain(priv);
7195 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7196
7197 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
7198 priv->assoc_id, priv->beacon_int);
7199
7200 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7201 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7202 else
7203 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7204
7205 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7206 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
7207 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
7208 else
7209 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7210
7211 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7212 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7213
7214 }
7215
bb8c093b 7216 iwl4965_commit_rxon(priv);
b481de9c
ZY
7217
7218 switch (priv->iw_mode) {
7219 case IEEE80211_IF_TYPE_STA:
bb8c093b 7220 iwl4965_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
7221 break;
7222
7223 case IEEE80211_IF_TYPE_IBSS:
7224
7225 /* clear out the station table */
bb8c093b 7226 iwl4965_clear_stations_table(priv);
b481de9c 7227
bb8c093b
CH
7228 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
7229 iwl4965_rxon_add_station(priv, priv->bssid, 0);
7230 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
7231 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7232
7233 break;
7234
7235 default:
7236 IWL_ERROR("%s Should not be called in %d mode\n",
7237 __FUNCTION__, priv->iw_mode);
7238 break;
7239 }
7240
bb8c093b 7241 iwl4965_sequence_reset(priv);
b481de9c 7242
c8b0e6e1 7243#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c
ZY
7244 /* Enable Rx differential gain and sensitivity calibrations */
7245 iwl4965_chain_noise_reset(priv);
7246 priv->start_calib = 1;
c8b0e6e1 7247#endif /* CONFIG_IWL4965_SENSITIVITY */
b481de9c
ZY
7248
7249 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7250 priv->assoc_station_added = 1;
7251
c8b0e6e1 7252#ifdef CONFIG_IWL4965_QOS
bb8c093b 7253 iwl4965_activate_qos(priv, 0);
c8b0e6e1 7254#endif /* CONFIG_IWL4965_QOS */
7878a5a4
MA
7255 /* we have just associated, don't start scan too early */
7256 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
b481de9c
ZY
7257 mutex_unlock(&priv->mutex);
7258}
7259
bb8c093b 7260static void iwl4965_bg_abort_scan(struct work_struct *work)
b481de9c 7261{
bb8c093b 7262 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, abort_scan);
b481de9c 7263
bb8c093b 7264 if (!iwl4965_is_ready(priv))
b481de9c
ZY
7265 return;
7266
7267 mutex_lock(&priv->mutex);
7268
7269 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 7270 iwl4965_send_scan_abort(priv);
b481de9c
ZY
7271
7272 mutex_unlock(&priv->mutex);
7273}
7274
76bb77e0
ZY
7275static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
7276
bb8c093b 7277static void iwl4965_bg_scan_completed(struct work_struct *work)
b481de9c 7278{
bb8c093b
CH
7279 struct iwl4965_priv *priv =
7280 container_of(work, struct iwl4965_priv, scan_completed);
b481de9c
ZY
7281
7282 	IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete\n");
7283
7284 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7285 return;
7286
a0646470
ZY
7287 if (test_bit(STATUS_CONF_PENDING, &priv->status))
7288 iwl4965_mac_config(priv->hw, ieee80211_get_hw_conf(priv->hw));
76bb77e0 7289
b481de9c
ZY
7290 ieee80211_scan_completed(priv->hw);
7291
7292 /* Since setting the TXPOWER may have been deferred while
7293 * performing the scan, fire one off */
7294 mutex_lock(&priv->mutex);
bb8c093b 7295 iwl4965_hw_reg_send_txpower(priv);
b481de9c
ZY
7296 mutex_unlock(&priv->mutex);
7297}
7298
7299/*****************************************************************************
7300 *
7301 * mac80211 entry point functions
7302 *
7303 *****************************************************************************/
7304
5a66926a
ZY
7305#define UCODE_READY_TIMEOUT (2 * HZ)
7306
bb8c093b 7307static int iwl4965_mac_start(struct ieee80211_hw *hw)
b481de9c 7308{
bb8c093b 7309 struct iwl4965_priv *priv = hw->priv;
5a66926a 7310 int ret;
b481de9c
ZY
7311
7312 IWL_DEBUG_MAC80211("enter\n");
7313
5a66926a
ZY
7314 if (pci_enable_device(priv->pci_dev)) {
7315 IWL_ERROR("Fail to pci_enable_device\n");
7316 return -ENODEV;
7317 }
7318 pci_restore_state(priv->pci_dev);
7319 pci_enable_msi(priv->pci_dev);
7320
7321 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED,
7322 DRV_NAME, priv);
7323 if (ret) {
7324 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
7325 goto out_disable_msi;
7326 }
7327
b481de9c
ZY
7328 /* we should be verifying the device is ready to be opened */
7329 mutex_lock(&priv->mutex);
7330
5a66926a
ZY
7331 memset(&priv->staging_rxon, 0, sizeof(struct iwl4965_rxon_cmd));
7332 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
7333 * ucode filename and max sizes are card-specific. */
b481de9c 7334
5a66926a
ZY
7335 if (!priv->ucode_code.len) {
7336 ret = iwl4965_read_ucode(priv);
7337 if (ret) {
7338 IWL_ERROR("Could not read microcode: %d\n", ret);
7339 mutex_unlock(&priv->mutex);
7340 goto out_release_irq;
7341 }
7342 }
b481de9c 7343
e655b9f0 7344 ret = __iwl4965_up(priv);
5a66926a 7345
b481de9c 7346 mutex_unlock(&priv->mutex);
5a66926a 7347
e655b9f0
ZY
7348 if (ret)
7349 goto out_release_irq;
7350
7351 IWL_DEBUG_INFO("Start UP work done.\n");
7352
7353 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
7354 return 0;
7355
5a66926a
ZY
7356 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
7357 * mac80211 will not be run successfully. */
7358 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
7359 test_bit(STATUS_READY, &priv->status),
7360 UCODE_READY_TIMEOUT);
7361 if (!ret) {
7362 if (!test_bit(STATUS_READY, &priv->status)) {
7363 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n",
7364 jiffies_to_msecs(UCODE_READY_TIMEOUT));
7365 ret = -ETIMEDOUT;
7366 goto out_release_irq;
7367 }
7368 }
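	/* wait_event_interruptible_timeout() returns 0 only when the timeout
	 * expired with the condition still false, so the extra STATUS_READY
	 * test above guards against the flag being set between the final
	 * condition check and the return. */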
7369
e655b9f0 7370 priv->is_open = 1;
b481de9c
ZY
7371 IWL_DEBUG_MAC80211("leave\n");
7372 return 0;
5a66926a
ZY
7373
7374out_release_irq:
7375 free_irq(priv->pci_dev->irq, priv);
7376out_disable_msi:
7377 pci_disable_msi(priv->pci_dev);
e655b9f0
ZY
7378 pci_disable_device(priv->pci_dev);
7379 priv->is_open = 0;
7380 IWL_DEBUG_MAC80211("leave - failed\n");
5a66926a 7381 return ret;
b481de9c
ZY
7382}
7383
bb8c093b 7384static void iwl4965_mac_stop(struct ieee80211_hw *hw)
b481de9c 7385{
bb8c093b 7386 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7387
7388 IWL_DEBUG_MAC80211("enter\n");
948c171c 7389
e655b9f0
ZY
7390 if (!priv->is_open) {
7391 IWL_DEBUG_MAC80211("leave - skip\n");
7392 return;
7393 }
7394
b481de9c 7395 priv->is_open = 0;
5a66926a
ZY
7396
7397 if (iwl4965_is_ready_rf(priv)) {
e655b9f0
ZY
7398 /* stop mac, cancel any scan request and clear
7399 		 * the RXON_FILTER_ASSOC_MSK bit
7400 */
5a66926a
ZY
7401 mutex_lock(&priv->mutex);
7402 iwl4965_scan_cancel_timeout(priv, 100);
7403 cancel_delayed_work(&priv->post_associate);
fde3571f 7404 mutex_unlock(&priv->mutex);
fde3571f
MA
7405 }
7406
5a66926a
ZY
7407 iwl4965_down(priv);
7408
7409 flush_workqueue(priv->workqueue);
7410 free_irq(priv->pci_dev->irq, priv);
7411 pci_disable_msi(priv->pci_dev);
7412 pci_save_state(priv->pci_dev);
7413 pci_disable_device(priv->pci_dev);
948c171c 7414
b481de9c 7415 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7416}
7417
bb8c093b 7418static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
7419 struct ieee80211_tx_control *ctl)
7420{
bb8c093b 7421 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7422
7423 IWL_DEBUG_MAC80211("enter\n");
7424
7425 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
7426 IWL_DEBUG_MAC80211("leave - monitor\n");
7427 return -1;
7428 }
7429
7430 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7431 ctl->tx_rate);
7432
bb8c093b 7433 if (iwl4965_tx_skb(priv, skb, ctl))
b481de9c
ZY
7434 dev_kfree_skb_any(skb);
7435
7436 IWL_DEBUG_MAC80211("leave\n");
7437 return 0;
7438}
7439
bb8c093b 7440static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7441 struct ieee80211_if_init_conf *conf)
7442{
bb8c093b 7443 struct iwl4965_priv *priv = hw->priv;
b481de9c 7444 unsigned long flags;
0795af57 7445 DECLARE_MAC_BUF(mac);
b481de9c 7446
32bfd35d 7447 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
b481de9c 7448
32bfd35d
JB
7449 if (priv->vif) {
7450 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
b481de9c
ZY
7451 return 0;
7452 }
7453
7454 spin_lock_irqsave(&priv->lock, flags);
32bfd35d 7455 priv->vif = conf->vif;
b481de9c
ZY
7456
7457 spin_unlock_irqrestore(&priv->lock, flags);
7458
7459 mutex_lock(&priv->mutex);
864792e3
TW
7460
7461 if (conf->mac_addr) {
7462 IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr));
7463 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
7464 }
b481de9c 7465
5a66926a
ZY
7466 if (iwl4965_is_ready(priv))
7467 iwl4965_set_mode(priv, conf->type);
7468
b481de9c
ZY
7469 mutex_unlock(&priv->mutex);
7470
5a66926a 7471 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7472 return 0;
7473}
7474
7475/**
bb8c093b 7476 * iwl4965_mac_config - mac80211 config callback
b481de9c
ZY
7477 *
7478 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
7479 * be set inappropriately and the driver currently sets the hardware up to
7480 * use it whenever needed.
7481 */
bb8c093b 7482static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
b481de9c 7483{
bb8c093b
CH
7484 struct iwl4965_priv *priv = hw->priv;
7485 const struct iwl4965_channel_info *ch_info;
b481de9c 7486 unsigned long flags;
76bb77e0 7487 int ret = 0;
b481de9c
ZY
7488
7489 mutex_lock(&priv->mutex);
7490 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);
7491
12342c47
ZY
7492 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
7493
bb8c093b 7494 if (!iwl4965_is_ready(priv)) {
b481de9c 7495 IWL_DEBUG_MAC80211("leave - not ready\n");
76bb77e0
ZY
7496 ret = -EIO;
7497 goto out;
b481de9c
ZY
7498 }
7499
bb8c093b 7500 if (unlikely(!iwl4965_param_disable_hw_scan &&
b481de9c 7501 test_bit(STATUS_SCANNING, &priv->status))) {
a0646470
ZY
7502 IWL_DEBUG_MAC80211("leave - scanning\n");
7503 set_bit(STATUS_CONF_PENDING, &priv->status);
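		/* The deferred configuration is replayed by
		 * iwl4965_bg_scan_completed() once the scan finishes. */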
b481de9c 7504 mutex_unlock(&priv->mutex);
a0646470 7505 return 0;
b481de9c
ZY
7506 }
7507
7508 spin_lock_irqsave(&priv->lock, flags);
7509
bb8c093b 7510 ch_info = iwl4965_get_channel_info(priv, conf->phymode, conf->channel);
b481de9c
ZY
7511 if (!is_channel_valid(ch_info)) {
7512 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7513 conf->channel, conf->phymode);
7514 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7515 spin_unlock_irqrestore(&priv->lock, flags);
76bb77e0
ZY
7516 ret = -EINVAL;
7517 goto out;
b481de9c
ZY
7518 }
7519
c8b0e6e1 7520#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
7521 	/* if we are switching from ht to 2.4, clear flags
7522 * from any ht related info since 2.4 does not
7523 * support ht */
7524 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel)
7525#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7526 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
7527#endif
7528 )
7529 priv->staging_rxon.flags = 0;
c8b0e6e1 7530#endif /* CONFIG_IWL4965_HT */
b481de9c 7531
bb8c093b 7532 iwl4965_set_rxon_channel(priv, conf->phymode, conf->channel);
b481de9c 7533
bb8c093b 7534 iwl4965_set_flags_for_phymode(priv, conf->phymode);
b481de9c
ZY
7535
7536 /* The list of supported rates and rate mask can be different
7537 * for each phymode; since the phymode may have changed, reset
7538 * the rate mask to what mac80211 lists */
bb8c093b 7539 iwl4965_set_rate(priv);
b481de9c
ZY
7540
7541 spin_unlock_irqrestore(&priv->lock, flags);
7542
7543#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7544 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 7545 iwl4965_hw_channel_switch(priv, conf->channel);
76bb77e0 7546 goto out;
b481de9c
ZY
7547 }
7548#endif
7549
bb8c093b 7550 iwl4965_radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
7551
7552 if (!conf->radio_enabled) {
7553 IWL_DEBUG_MAC80211("leave - radio disabled\n");
76bb77e0 7554 goto out;
b481de9c
ZY
7555 }
7556
bb8c093b 7557 if (iwl4965_is_rfkill(priv)) {
b481de9c 7558 IWL_DEBUG_MAC80211("leave - RF kill\n");
76bb77e0
ZY
7559 ret = -EIO;
7560 goto out;
b481de9c
ZY
7561 }
7562
bb8c093b 7563 iwl4965_set_rate(priv);
b481de9c
ZY
7564
7565 if (memcmp(&priv->active_rxon,
7566 &priv->staging_rxon, sizeof(priv->staging_rxon)))
bb8c093b 7567 iwl4965_commit_rxon(priv);
b481de9c
ZY
7568 else
7569 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
7570
7571 IWL_DEBUG_MAC80211("leave\n");
7572
a0646470
ZY
7573out:
7574 clear_bit(STATUS_CONF_PENDING, &priv->status);
5a66926a 7575 mutex_unlock(&priv->mutex);
76bb77e0 7576 return ret;
b481de9c
ZY
7577}
7578
bb8c093b 7579static void iwl4965_config_ap(struct iwl4965_priv *priv)
b481de9c
ZY
7580{
7581 int rc = 0;
7582
7583 if (priv->status & STATUS_EXIT_PENDING)
7584 return;
7585
7586 /* The following should be done only at AP bring up */
7587 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7588
7589 /* RXON - unassoc (to set timing command) */
7590 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7591 iwl4965_commit_rxon(priv);
b481de9c
ZY
7592
7593 /* RXON Timing */
bb8c093b
CH
7594 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7595 iwl4965_setup_rxon_timing(priv);
7596 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7597 sizeof(priv->rxon_timing), &priv->rxon_timing);
7598 if (rc)
7599 IWL_WARNING("REPLY_RXON_TIMING failed - "
7600 "Attempting to continue.\n");
7601
7602 iwl4965_set_rxon_chain(priv);
7603
7604 /* FIXME: what should be the assoc_id for AP? */
7605 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7606 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7607 priv->staging_rxon.flags |=
7608 RXON_FLG_SHORT_PREAMBLE_MSK;
7609 else
7610 priv->staging_rxon.flags &=
7611 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7612
7613 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7614 if (priv->assoc_capability &
7615 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7616 priv->staging_rxon.flags |=
7617 RXON_FLG_SHORT_SLOT_MSK;
7618 else
7619 priv->staging_rxon.flags &=
7620 ~RXON_FLG_SHORT_SLOT_MSK;
7621
7622 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7623 priv->staging_rxon.flags &=
7624 ~RXON_FLG_SHORT_SLOT_MSK;
7625 }
7626 /* restore RXON assoc */
7627 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 7628 iwl4965_commit_rxon(priv);
c8b0e6e1 7629#ifdef CONFIG_IWL4965_QOS
bb8c093b 7630 iwl4965_activate_qos(priv, 1);
b481de9c 7631#endif
bb8c093b 7632 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
e1493deb 7633 }
bb8c093b 7634 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7635
7636 /* FIXME - we need to add code here to detect a totally new
7637 * configuration, reset the AP, unassoc, rxon timing, assoc,
7638 * clear sta table, add BCAST sta... */
7639}
7640
32bfd35d
JB
7641static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7642 struct ieee80211_vif *vif,
b481de9c
ZY
7643 struct ieee80211_if_conf *conf)
7644{
bb8c093b 7645 struct iwl4965_priv *priv = hw->priv;
0795af57 7646 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7647 unsigned long flags;
7648 int rc;
7649
7650 if (conf == NULL)
7651 return -EIO;
7652
7653 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7654 (!conf->beacon || !conf->ssid_len)) {
7655 IWL_DEBUG_MAC80211
7656 ("Leaving in AP mode because HostAPD is not ready.\n");
7657 return 0;
7658 }
7659
5a66926a
ZY
7660 if (!iwl4965_is_alive(priv))
7661 return -EAGAIN;
7662
b481de9c
ZY
7663 mutex_lock(&priv->mutex);
7664
b481de9c 7665 if (conf->bssid)
0795af57
JP
7666 IWL_DEBUG_MAC80211("bssid: %s\n",
7667 print_mac(mac, conf->bssid));
b481de9c 7668
4150c572
JB
7669/*
7670 * very dubious code was here; the probe filtering flag is never set:
7671 *
b481de9c
ZY
7672 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7673 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572
JB
7674 */
7675 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
b481de9c
ZY
7676 IWL_DEBUG_MAC80211("leave - scanning\n");
7677 mutex_unlock(&priv->mutex);
7678 return 0;
7679 }
7680
32bfd35d
JB
7681 if (priv->vif != vif) {
7682 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
b481de9c
ZY
7683 mutex_unlock(&priv->mutex);
7684 return 0;
7685 }
7686
7687 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7688 if (!conf->bssid) {
7689 conf->bssid = priv->mac_addr;
7690 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
0795af57
JP
7691 IWL_DEBUG_MAC80211("bssid was set to: %s\n",
7692 print_mac(mac, conf->bssid));
b481de9c
ZY
7693 }
7694 if (priv->ibss_beacon)
7695 dev_kfree_skb(priv->ibss_beacon);
7696
7697 priv->ibss_beacon = conf->beacon;
7698 }
7699
fde3571f
MA
7700 if (iwl4965_is_rfkill(priv))
7701 goto done;
7702
b481de9c
ZY
7703 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
7704 !is_multicast_ether_addr(conf->bssid)) {
7705 /* If there is currently a HW scan going on in the background
7706 * then we need to cancel it else the RXON below will fail. */
bb8c093b 7707 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
7708 IWL_WARNING("Aborted scan still in progress "
7709 "after 100ms\n");
7710 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
7711 mutex_unlock(&priv->mutex);
7712 return -EAGAIN;
7713 }
7714 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
7715
7716 /* TODO: Audit driver for usage of these members and see
7717 * if mac80211 deprecates them (priv->bssid looks like it
7718 * shouldn't be there, but I haven't scanned the IBSS code
7719 * to verify) - jpk */
7720 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
7721
7722 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b 7723 iwl4965_config_ap(priv);
b481de9c 7724 else {
bb8c093b 7725 rc = iwl4965_commit_rxon(priv);
b481de9c 7726 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
bb8c093b 7727 iwl4965_rxon_add_station(
b481de9c
ZY
7728 priv, priv->active_rxon.bssid_addr, 1);
7729 }
7730
7731 } else {
bb8c093b 7732 iwl4965_scan_cancel_timeout(priv, 100);
b481de9c 7733 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7734 iwl4965_commit_rxon(priv);
b481de9c
ZY
7735 }
7736
fde3571f 7737 done:
b481de9c
ZY
7738 spin_lock_irqsave(&priv->lock, flags);
7739 if (!conf->ssid_len)
7740 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7741 else
7742 memcpy(priv->essid, conf->ssid, conf->ssid_len);
7743
7744 priv->essid_len = conf->ssid_len;
7745 spin_unlock_irqrestore(&priv->lock, flags);
7746
7747 IWL_DEBUG_MAC80211("leave\n");
7748 mutex_unlock(&priv->mutex);
7749
7750 return 0;
7751}
7752
bb8c093b 7753static void iwl4965_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
7754 unsigned int changed_flags,
7755 unsigned int *total_flags,
7756 int mc_count, struct dev_addr_list *mc_list)
7757{
7758 /*
7759 * XXX: dummy
bb8c093b 7760 * see also iwl4965_connection_init_rx_config
4150c572
JB
7761 */
7762 *total_flags = 0;
7763}
7764
bb8c093b 7765static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7766 struct ieee80211_if_init_conf *conf)
7767{
bb8c093b 7768 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7769
7770 IWL_DEBUG_MAC80211("enter\n");
7771
7772 mutex_lock(&priv->mutex);
948c171c 7773
fde3571f
MA
7774 if (iwl4965_is_ready_rf(priv)) {
7775 iwl4965_scan_cancel_timeout(priv, 100);
7776 cancel_delayed_work(&priv->post_associate);
7777 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7778 iwl4965_commit_rxon(priv);
7779 }
32bfd35d
JB
7780 if (priv->vif == conf->vif) {
7781 priv->vif = NULL;
b481de9c
ZY
7782 memset(priv->bssid, 0, ETH_ALEN);
7783 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7784 priv->essid_len = 0;
7785 }
7786 mutex_unlock(&priv->mutex);
7787
7788 IWL_DEBUG_MAC80211("leave\n");
7789
7790}
471b3efd
JB
7791
7792static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
7793 struct ieee80211_vif *vif,
7794 struct ieee80211_bss_conf *bss_conf,
7795 u32 changes)
220173b0 7796{
bb8c093b 7797 struct iwl4965_priv *priv = hw->priv;
220173b0 7798
471b3efd
JB
7799 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
7800 if (bss_conf->use_short_preamble)
220173b0
TW
7801 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7802 else
7803 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7804 }
7805
471b3efd
JB
7806 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
7807 if (bss_conf->use_cts_prot && (priv->phymode != MODE_IEEE80211A))
220173b0
TW
7808 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
7809 else
7810 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
7811 }
7812
471b3efd
JB
7813 if (changes & BSS_CHANGED_ASSOC) {
7814 /*
7815 * TODO:
7816 * do stuff instead of sniffing assoc resp
7817 */
7818 }
7819
bb8c093b
CH
7820 if (iwl4965_is_associated(priv))
7821 iwl4965_send_rxon_assoc(priv);
220173b0 7822}
b481de9c 7823
bb8c093b 7824static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
7825{
7826 int rc = 0;
7827 unsigned long flags;
bb8c093b 7828 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7829
7830 IWL_DEBUG_MAC80211("enter\n");
7831
052c4b9f 7832 mutex_lock(&priv->mutex);
b481de9c
ZY
7833 spin_lock_irqsave(&priv->lock, flags);
7834
bb8c093b 7835 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7836 rc = -EIO;
7837 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7838 goto out_unlock;
7839 }
7840
7841 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
7842 rc = -EIO;
7843 IWL_ERROR("ERROR: APs don't scan\n");
7844 goto out_unlock;
7845 }
7846
7878a5a4
MA
7847 /* we don't schedule scan within next_scan_jiffies period */
7848 if (priv->next_scan_jiffies &&
7849 time_after(priv->next_scan_jiffies, jiffies)) {
7850 rc = -EAGAIN;
7851 goto out_unlock;
7852 }
b481de9c 7853 /* if we just finished scan ask for delay */
7878a5a4
MA
7854 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
7855 IWL_DELAY_NEXT_SCAN, jiffies)) {
b481de9c
ZY
7856 rc = -EAGAIN;
7857 goto out_unlock;
7858 }
7859 if (len) {
7878a5a4 7860 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
bb8c093b 7861 iwl4965_escape_essid(ssid, len), (int)len);
b481de9c
ZY
7862
7863 priv->one_direct_scan = 1;
7864 priv->direct_ssid_len = (u8)
7865 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
7866 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
948c171c
MA
7867 } else
7868 priv->one_direct_scan = 0;
b481de9c 7869
bb8c093b 7870 rc = iwl4965_scan_initiate(priv);
b481de9c
ZY
7871
7872 IWL_DEBUG_MAC80211("leave\n");
7873
7874out_unlock:
7875 spin_unlock_irqrestore(&priv->lock, flags);
052c4b9f 7876 mutex_unlock(&priv->mutex);
b481de9c
ZY
7877
7878 return rc;
7879}
7880
bb8c093b 7881static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
b481de9c
ZY
7882 const u8 *local_addr, const u8 *addr,
7883 struct ieee80211_key_conf *key)
7884{
bb8c093b 7885 struct iwl4965_priv *priv = hw->priv;
0795af57 7886 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7887 int rc = 0;
7888 u8 sta_id;
7889
7890 IWL_DEBUG_MAC80211("enter\n");
7891
bb8c093b 7892 if (!iwl4965_param_hwcrypto) {
b481de9c
ZY
7893 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7894 return -EOPNOTSUPP;
7895 }
7896
7897 if (is_zero_ether_addr(addr))
7898 /* only support pairwise keys */
7899 return -EOPNOTSUPP;
7900
bb8c093b 7901 sta_id = iwl4965_hw_find_station(priv, addr);
b481de9c 7902 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
7903 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7904 print_mac(mac, addr));
b481de9c
ZY
7905 return -EINVAL;
7906 }
7907
7908 mutex_lock(&priv->mutex);
7909
bb8c093b 7910 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 7911
b481de9c
ZY
7912 switch (cmd) {
7913 case SET_KEY:
bb8c093b 7914 rc = iwl4965_update_sta_key_info(priv, key, sta_id);
b481de9c 7915 if (!rc) {
bb8c093b
CH
7916 iwl4965_set_rxon_hwcrypto(priv, 1);
7917 iwl4965_commit_rxon(priv);
b481de9c
ZY
7918 key->hw_key_idx = sta_id;
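			/* mac80211 hands hw_key_idx back to the driver on tx;
			 * storing the station id here presumably lets the tx
			 * path find the per-station key without another
			 * address lookup. */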
7919 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7920 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7921 }
7922 break;
7923 case DISABLE_KEY:
bb8c093b 7924 rc = iwl4965_clear_sta_key_info(priv, sta_id);
b481de9c 7925 if (!rc) {
bb8c093b
CH
7926 iwl4965_set_rxon_hwcrypto(priv, 0);
7927 iwl4965_commit_rxon(priv);
b481de9c
ZY
7928 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7929 }
7930 break;
7931 default:
7932 rc = -EINVAL;
7933 }
7934
7935 IWL_DEBUG_MAC80211("leave\n");
7936 mutex_unlock(&priv->mutex);
7937
7938 return rc;
7939}
7940
bb8c093b 7941static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
b481de9c
ZY
7942 const struct ieee80211_tx_queue_params *params)
7943{
bb8c093b 7944 struct iwl4965_priv *priv = hw->priv;
c8b0e6e1 7945#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7946 unsigned long flags;
7947 int q;
0054b34d 7948#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
7949
7950 IWL_DEBUG_MAC80211("enter\n");
7951
bb8c093b 7952 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7953 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7954 return -EIO;
7955 }
7956
7957 if (queue >= AC_NUM) {
7958 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7959 return 0;
7960 }
7961
c8b0e6e1 7962#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7963 if (!priv->qos_data.qos_enable) {
7964 priv->qos_data.qos_active = 0;
7965 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7966 return 0;
7967 }
7968 q = AC_NUM - 1 - queue;
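	/* mac80211 numbers its queues from highest to lowest priority, while
	 * the uCode EDCA parameter table appears to expect the opposite
	 * order; hence the index inversion. */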
7969
7970 spin_lock_irqsave(&priv->lock, flags);
7971
7972 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
7973 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
7974 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
7975 priv->qos_data.def_qos_parm.ac[q].edca_txop =
7976 cpu_to_le16((params->burst_time * 100));
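	/* burst_time is presumably given in units of 0.1 ms; multiplying by
	 * 100 converts it to the microseconds expected in the EDCA TXOP
	 * field. */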
7977
7978 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
7979 priv->qos_data.qos_active = 1;
7980
7981 spin_unlock_irqrestore(&priv->lock, flags);
7982
7983 mutex_lock(&priv->mutex);
7984 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b
CH
7985 iwl4965_activate_qos(priv, 1);
7986 else if (priv->assoc_id && iwl4965_is_associated(priv))
7987 iwl4965_activate_qos(priv, 0);
b481de9c
ZY
7988
7989 mutex_unlock(&priv->mutex);
7990
c8b0e6e1 7991#endif /*CONFIG_IWL4965_QOS */
b481de9c
ZY
7992
7993 IWL_DEBUG_MAC80211("leave\n");
7994 return 0;
7995}
7996
bb8c093b 7997static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
7998 struct ieee80211_tx_queue_stats *stats)
7999{
bb8c093b 8000 struct iwl4965_priv *priv = hw->priv;
b481de9c 8001 int i, avail;
bb8c093b
CH
8002 struct iwl4965_tx_queue *txq;
8003 struct iwl4965_queue *q;
b481de9c
ZY
8004 unsigned long flags;
8005
8006 IWL_DEBUG_MAC80211("enter\n");
8007
bb8c093b 8008 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8009 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8010 return -EIO;
8011 }
8012
8013 spin_lock_irqsave(&priv->lock, flags);
8014
8015 for (i = 0; i < AC_NUM; i++) {
8016 txq = &priv->txq[i];
8017 q = &txq->q;
bb8c093b 8018 avail = iwl4965_queue_space(q);
b481de9c
ZY
8019
8020 stats->data[i].len = q->n_window - avail;
8021 stats->data[i].limit = q->n_window - q->high_mark;
8022 stats->data[i].count = q->n_window;
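		/* len is the number of slots currently in use, limit appears
		 * to be the usable depth before the high-watermark stop
		 * point, and count is the total size of the tx window. */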
8023
8024 }
8025 spin_unlock_irqrestore(&priv->lock, flags);
8026
8027 IWL_DEBUG_MAC80211("leave\n");
8028
8029 return 0;
8030}
8031
bb8c093b 8032static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
b481de9c
ZY
8033 struct ieee80211_low_level_stats *stats)
8034{
8035 IWL_DEBUG_MAC80211("enter\n");
8036 IWL_DEBUG_MAC80211("leave\n");
8037
8038 return 0;
8039}
8040
bb8c093b 8041static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
b481de9c
ZY
8042{
8043 IWL_DEBUG_MAC80211("enter\n");
8044 IWL_DEBUG_MAC80211("leave\n");
8045
8046 return 0;
8047}
8048
bb8c093b 8049static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 8050{
bb8c093b 8051 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8052 unsigned long flags;
8053
8054 mutex_lock(&priv->mutex);
8055 IWL_DEBUG_MAC80211("enter\n");
8056
8057 priv->lq_mngr.lq_ready = 0;
c8b0e6e1 8058#ifdef CONFIG_IWL4965_HT
b481de9c 8059 spin_lock_irqsave(&priv->lock, flags);
fd105e79 8060 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
b481de9c 8061 spin_unlock_irqrestore(&priv->lock, flags);
c8b0e6e1 8062#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
8063/* if (priv->lq_mngr.agg_ctrl.granted_ba)
8064 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/
8065
bb8c093b 8066 memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl4965_agg_control));
b481de9c
ZY
8067 priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10;
8068 priv->lq_mngr.agg_ctrl.ba_timeout = 5000;
8069 priv->lq_mngr.agg_ctrl.auto_agg = 1;
8070
8071 if (priv->lq_mngr.agg_ctrl.auto_agg)
8072 priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED;
c8b0e6e1
CH
8073#endif /*CONFIG_IWL4965_HT_AGG */
8074#endif /* CONFIG_IWL4965_HT */
b481de9c 8075
c8b0e6e1 8076#ifdef CONFIG_IWL4965_QOS
bb8c093b 8077 iwl4965_reset_qos(priv);
b481de9c
ZY
8078#endif
8079
8080 cancel_delayed_work(&priv->post_associate);
8081
8082 spin_lock_irqsave(&priv->lock, flags);
8083 priv->assoc_id = 0;
8084 priv->assoc_capability = 0;
8085 priv->call_post_assoc_from_beacon = 0;
8086 priv->assoc_station_added = 0;
8087
8088 	/* new association: get rid of ibss beacon skb */
8089 if (priv->ibss_beacon)
8090 dev_kfree_skb(priv->ibss_beacon);
8091
8092 priv->ibss_beacon = NULL;
8093
8094 priv->beacon_int = priv->hw->conf.beacon_int;
8095 priv->timestamp1 = 0;
8096 priv->timestamp0 = 0;
8097 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
8098 priv->beacon_int = 0;
8099
8100 spin_unlock_irqrestore(&priv->lock, flags);
8101
fde3571f
MA
8102 if (!iwl4965_is_ready_rf(priv)) {
8103 IWL_DEBUG_MAC80211("leave - not ready\n");
8104 mutex_unlock(&priv->mutex);
8105 return;
8106 }
8107
052c4b9f 8108 	/* we are restarting the association process,
 8109 	 * so clear the RXON_FILTER_ASSOC_MSK bit
8110 */
8111 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
bb8c093b 8112 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 8113 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 8114 iwl4965_commit_rxon(priv);
052c4b9f 8115 }
8116
b481de9c
ZY
8117 /* Per mac80211.h: This is only used in IBSS mode... */
8118 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
052c4b9f 8119
b481de9c
ZY
8120 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
8121 mutex_unlock(&priv->mutex);
8122 return;
8123 }
8124
b481de9c
ZY
8125 priv->only_active_channel = 0;
8126
bb8c093b 8127 iwl4965_set_rate(priv);
b481de9c
ZY
8128
8129 mutex_unlock(&priv->mutex);
8130
8131 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
8132}
8133
bb8c093b 8134static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
8135 struct ieee80211_tx_control *control)
8136{
bb8c093b 8137 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8138 unsigned long flags;
8139
8140 mutex_lock(&priv->mutex);
8141 IWL_DEBUG_MAC80211("enter\n");
8142
bb8c093b 8143 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8144 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8145 mutex_unlock(&priv->mutex);
8146 return -EIO;
8147 }
8148
8149 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
8150 IWL_DEBUG_MAC80211("leave - not IBSS\n");
8151 mutex_unlock(&priv->mutex);
8152 return -EIO;
8153 }
8154
8155 spin_lock_irqsave(&priv->lock, flags);
8156
8157 if (priv->ibss_beacon)
8158 dev_kfree_skb(priv->ibss_beacon);
8159
8160 priv->ibss_beacon = skb;
8161
8162 priv->assoc_id = 0;
8163
8164 IWL_DEBUG_MAC80211("leave\n");
8165 spin_unlock_irqrestore(&priv->lock, flags);
8166
c8b0e6e1 8167#ifdef CONFIG_IWL4965_QOS
bb8c093b 8168 iwl4965_reset_qos(priv);
b481de9c
ZY
8169#endif
8170
8171 queue_work(priv->workqueue, &priv->post_associate.work);
8172
8173 mutex_unlock(&priv->mutex);
8174
8175 return 0;
8176}
8177
c8b0e6e1 8178#ifdef CONFIG_IWL4965_HT
b481de9c 8179
fd105e79
RR
8180static void iwl4965_ht_info_fill(struct ieee80211_conf *conf,
8181 struct iwl4965_priv *priv)
b481de9c 8182{
fd105e79
RR
8183 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
8184 struct ieee80211_ht_info *ht_conf = &conf->ht_conf;
8185 struct ieee80211_ht_bss_info *ht_bss_conf = &conf->ht_bss_conf;
b481de9c
ZY
8186
8187 IWL_DEBUG_MAC80211("enter: \n");
8188
fd105e79
RR
8189 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) {
8190 iwl_conf->is_ht = 0;
8191 return;
b481de9c
ZY
8192 }
8193
fd105e79
RR
8194 iwl_conf->is_ht = 1;
8195 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
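	/* IEEE80211_HT_CAP_MIMO_PS occupies bits 2-3 of the HT capability
	 * field, so the shift extracts the two-bit SM power-save mode. */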
8196
8197 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
8198 iwl_conf->sgf |= 0x1;
8199 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
8200 iwl_conf->sgf |= 0x2;
8201
8202 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
8203 iwl_conf->max_amsdu_size =
8204 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
8205 iwl_conf->supported_chan_width =
8206 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
8207 iwl_conf->tx_mimo_ps_mode =
8208 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8209 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
8210
8211 iwl_conf->control_channel = ht_bss_conf->primary_channel;
8212 iwl_conf->extension_chan_offset =
8213 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
8214 iwl_conf->tx_chan_width =
8215 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
8216 iwl_conf->ht_protection =
8217 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
8218 iwl_conf->non_GF_STA_present =
8219 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
8220
8221 IWL_DEBUG_MAC80211("control channel %d\n",
8222 iwl_conf->control_channel);
b481de9c 8223 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
8224}
8225
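/*
 * mac80211 conf_ht callback: refresh the driver's HT configuration from
 * mac80211's, reprogram the RX chain and, if we are already associated as
 * a station, (re)schedule the post_associate work so the new HT settings
 * take effect.
 */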
bb8c093b 8226static int iwl4965_mac_conf_ht(struct ieee80211_hw *hw,
fd105e79 8227 struct ieee80211_conf *conf)
b481de9c 8228{
bb8c093b 8229 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8230
8231 IWL_DEBUG_MAC80211("enter: \n");
8232
fd105e79 8233 iwl4965_ht_info_fill(conf, priv);
b481de9c
ZY
8234 iwl4965_set_rxon_chain(priv);
8235
8236 if (priv && priv->assoc_id &&
8237 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
8238 unsigned long flags;
8239
8240 spin_lock_irqsave(&priv->lock, flags);
8241 if (priv->beacon_int)
8242 queue_work(priv->workqueue, &priv->post_associate.work);
8243 else
8244 priv->call_post_assoc_from_beacon = 1;
8245 spin_unlock_irqrestore(&priv->lock, flags);
8246 }
8247
fd105e79
RR
8248 IWL_DEBUG_MAC80211("leave:\n");
8249 return 0;
b481de9c
ZY
8250}
8251
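/*
 * Build an HT capabilities IE for mac80211, either from the current HT
 * configuration or from the hw mode defaults; the A-MPDU parameters are
 * always taken from the hw mode.
 */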
bb8c093b 8252static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8fb88032
RR
8253 struct ieee80211_ht_cap *ht_cap,
8254 u8 use_current_config)
b481de9c 8255{
8fb88032
RR
8256 struct ieee80211_conf *conf = &hw->conf;
8257 struct ieee80211_hw_mode *mode = conf->mode;
b481de9c 8258
8fb88032
RR
8259 if (use_current_config) {
8260 ht_cap->cap_info = cpu_to_le16(conf->ht_conf.cap);
8261 memcpy(ht_cap->supp_mcs_set,
8262 conf->ht_conf.supp_mcs_set, 16);
8263 } else {
8264 ht_cap->cap_info = cpu_to_le16(mode->ht_info.cap);
8265 memcpy(ht_cap->supp_mcs_set,
8266 mode->ht_info.supp_mcs_set, 16);
8267 }
8268 ht_cap->ampdu_params_info =
8269 (mode->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) |
8270 ((mode->ht_info.ampdu_density << 2) &
8271 IEEE80211_HT_CAP_AMPDU_DENSITY);
b481de9c
ZY
8272}
8273
c8b0e6e1 8274#endif /*CONFIG_IWL4965_HT*/
b481de9c
ZY
8275
8276/*****************************************************************************
8277 *
8278 * sysfs attributes
8279 *
8280 *****************************************************************************/
8281
c8b0e6e1 8282#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
8283
8284/*
8285 * The following adds a new attribute to the sysfs representation
8286 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
8287 * used for controlling the debug level.
8288 *
8289 * See the level definitions in iwl for details.
8290 */
8291
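/*
 * Illustrative usage from userspace; the exact sysfs path depends on
 * DRV_NAME and is shown here only as an example, and the mask value is
 * arbitrary:
 *
 *   echo 0x43fff > /sys/bus/pci/drivers/<DRV_NAME>/debug_level
 *   cat /sys/bus/pci/drivers/<DRV_NAME>/debug_level
 */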
8292static ssize_t show_debug_level(struct device_driver *d, char *buf)
8293{
bb8c093b 8294 return sprintf(buf, "0x%08X\n", iwl4965_debug_level);
b481de9c
ZY
8295}
8296static ssize_t store_debug_level(struct device_driver *d,
8297 const char *buf, size_t count)
8298{
8299 char *p = (char *)buf;
8300 u32 val;
8301
8302 val = simple_strtoul(p, &p, 0);
8303 if (p == buf)
8304 printk(KERN_INFO DRV_NAME
8305 ": %s is not in hex or decimal form.\n", buf);
8306 else
bb8c093b 8307 iwl4965_debug_level = val;
b481de9c
ZY
8308
8309 return strnlen(buf, count);
8310}
8311
8312static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
8313 show_debug_level, store_debug_level);
8314
c8b0e6e1 8315#endif /* CONFIG_IWL4965_DEBUG */
b481de9c
ZY
8316
8317static ssize_t show_rf_kill(struct device *d,
8318 struct device_attribute *attr, char *buf)
8319{
8320 /*
8321 * 0 - RF kill not enabled
8322 * 1 - SW based RF kill active (sysfs)
8323 * 2 - HW based RF kill active
8324 * 3 - Both HW and SW based RF kill active
8325 */
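/*
 * Encoded as a bitmask: bit 0 = SW kill (toggled by writing '1'/'0'
 * through store_rf_kill() below), bit 1 = HW kill reported by the device.
 */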
bb8c093b 8326 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8327 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
8328 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
8329
8330 return sprintf(buf, "%i\n", val);
8331}
8332
8333static ssize_t store_rf_kill(struct device *d,
8334 struct device_attribute *attr,
8335 const char *buf, size_t count)
8336{
bb8c093b 8337 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8338
8339 mutex_lock(&priv->mutex);
bb8c093b 8340 iwl4965_radio_kill_sw(priv, buf[0] == '1');
b481de9c
ZY
8341 mutex_unlock(&priv->mutex);
8342
8343 return count;
8344}
8345
8346static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
8347
8348static ssize_t show_temperature(struct device *d,
8349 struct device_attribute *attr, char *buf)
8350{
bb8c093b 8351 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c 8352
bb8c093b 8353 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8354 return -EAGAIN;
8355
bb8c093b 8356 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv));
b481de9c
ZY
8357}
8358
8359static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
8360
8361static ssize_t show_rs_window(struct device *d,
8362 struct device_attribute *attr,
8363 char *buf)
8364{
bb8c093b
CH
8365 struct iwl4965_priv *priv = d->driver_data;
8366 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
b481de9c
ZY
8367}
8368static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
8369
8370static ssize_t show_tx_power(struct device *d,
8371 struct device_attribute *attr, char *buf)
8372{
bb8c093b 8373 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8374 return sprintf(buf, "%d\n", priv->user_txpower_limit);
8375}
8376
8377static ssize_t store_tx_power(struct device *d,
8378 struct device_attribute *attr,
8379 const char *buf, size_t count)
8380{
bb8c093b 8381 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8382 char *p = (char *)buf;
8383 u32 val;
8384
8385 val = simple_strtoul(p, &p, 10);
8386 if (p == buf)
8387 printk(KERN_INFO DRV_NAME
8388 ": %s is not in decimal form.\n", buf);
8389 else
bb8c093b 8390 iwl4965_hw_reg_set_txpower(priv, val);
b481de9c
ZY
8391
8392 return count;
8393}
8394
8395static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
8396
8397static ssize_t show_flags(struct device *d,
8398 struct device_attribute *attr, char *buf)
8399{
bb8c093b 8400 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8401
8402 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
8403}
8404
8405static ssize_t store_flags(struct device *d,
8406 struct device_attribute *attr,
8407 const char *buf, size_t count)
8408{
bb8c093b 8409 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8410 u32 flags = simple_strtoul(buf, NULL, 0);
8411
8412 mutex_lock(&priv->mutex);
8413 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
8414 /* Cancel any currently running scans... */
bb8c093b 8415 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8416 IWL_WARNING("Could not cancel scan.\n");
8417 else {
8418 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
8419 flags);
8420 priv->staging_rxon.flags = cpu_to_le32(flags);
bb8c093b 8421 iwl4965_commit_rxon(priv);
b481de9c
ZY
8422 }
8423 }
8424 mutex_unlock(&priv->mutex);
8425
8426 return count;
8427}
8428
8429static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
8430
8431static ssize_t show_filter_flags(struct device *d,
8432 struct device_attribute *attr, char *buf)
8433{
bb8c093b 8434 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8435
8436 return sprintf(buf, "0x%04X\n",
8437 le32_to_cpu(priv->active_rxon.filter_flags));
8438}
8439
8440static ssize_t store_filter_flags(struct device *d,
8441 struct device_attribute *attr,
8442 const char *buf, size_t count)
8443{
bb8c093b 8444 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8445 u32 filter_flags = simple_strtoul(buf, NULL, 0);
8446
8447 mutex_lock(&priv->mutex);
8448 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
8449 /* Cancel any currently running scans... */
bb8c093b 8450 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8451 IWL_WARNING("Could not cancel scan.\n");
8452 else {
8453 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
8454 "0x%04X\n", filter_flags);
8455 priv->staging_rxon.filter_flags =
8456 cpu_to_le32(filter_flags);
bb8c093b 8457 iwl4965_commit_rxon(priv);
b481de9c
ZY
8458 }
8459 }
8460 mutex_unlock(&priv->mutex);
8461
8462 return count;
8463}
8464
8465static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
8466 store_filter_flags);
8467
8468static ssize_t show_tune(struct device *d,
8469 struct device_attribute *attr, char *buf)
8470{
bb8c093b 8471 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8472
8473 return sprintf(buf, "0x%04X\n",
8474 (priv->phymode << 8) |
8475 le16_to_cpu(priv->active_rxon.channel));
8476}
8477
bb8c093b 8478static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode);
b481de9c
ZY
8479
8480static ssize_t store_tune(struct device *d,
8481 struct device_attribute *attr,
8482 const char *buf, size_t count)
8483{
bb8c093b 8484 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8485 char *p = (char *)buf;
8486 u16 tune = simple_strtoul(p, &p, 0);
8487 u8 phymode = (tune >> 8) & 0xff;
8488 u16 channel = tune & 0xff;
8489
8490 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
8491
8492 mutex_lock(&priv->mutex);
8493 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
8494 (priv->phymode != phymode)) {
bb8c093b 8495 const struct iwl4965_channel_info *ch_info;
b481de9c 8496
bb8c093b 8497 ch_info = iwl4965_get_channel_info(priv, phymode, channel);
b481de9c
ZY
8498 if (!ch_info) {
8499 IWL_WARNING("Requested invalid phymode/channel "
8500 "combination: %d %d\n", phymode, channel);
8501 mutex_unlock(&priv->mutex);
8502 return -EINVAL;
8503 }
8504
8505 /* Cancel any currently running scans... */
bb8c093b 8506 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8507 IWL_WARNING("Could not cancel scan.\n");
8508 else {
8509 IWL_DEBUG_INFO("Committing phymode and "
8510 "rxon.channel = %d %d\n",
8511 phymode, channel);
8512
bb8c093b
CH
8513 iwl4965_set_rxon_channel(priv, phymode, channel);
8514 iwl4965_set_flags_for_phymode(priv, phymode);
b481de9c 8515
bb8c093b
CH
8516 iwl4965_set_rate(priv);
8517 iwl4965_commit_rxon(priv);
b481de9c
ZY
8518 }
8519 }
8520 mutex_unlock(&priv->mutex);
8521
8522 return count;
8523}
8524
8525static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
8526
c8b0e6e1 8527#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
8528
8529static ssize_t show_measurement(struct device *d,
8530 struct device_attribute *attr, char *buf)
8531{
bb8c093b
CH
8532 struct iwl4965_priv *priv = dev_get_drvdata(d);
8533 struct iwl4965_spectrum_notification measure_report;
b481de9c
ZY
8534 u32 size = sizeof(measure_report), len = 0, ofs = 0;
8535 u8 *data = (u8 *)&measure_report;
8536 unsigned long flags;
8537
8538 spin_lock_irqsave(&priv->lock, flags);
8539 if (!(priv->measurement_status & MEASUREMENT_READY)) {
8540 spin_unlock_irqrestore(&priv->lock, flags);
8541 return 0;
8542 }
8543 memcpy(&measure_report, &priv->measure_report, size);
8544 priv->measurement_status = 0;
8545 spin_unlock_irqrestore(&priv->lock, flags);
8546
8547 while (size && (PAGE_SIZE - len)) {
8548 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8549 PAGE_SIZE - len, 1);
8550 len = strlen(buf);
8551 if (PAGE_SIZE - len)
8552 buf[len++] = '\n';
8553
8554 ofs += 16;
8555 size -= min(size, 16U);
8556 }
8557
8558 return len;
8559}
8560
8561static ssize_t store_measurement(struct device *d,
8562 struct device_attribute *attr,
8563 const char *buf, size_t count)
8564{
bb8c093b 8565 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8566 struct ieee80211_measurement_params params = {
8567 .channel = le16_to_cpu(priv->active_rxon.channel),
8568 .start_time = cpu_to_le64(priv->last_tsf),
8569 .duration = cpu_to_le16(1),
8570 };
8571 u8 type = IWL_MEASURE_BASIC;
8572 u8 buffer[32];
8573 u8 channel;
8574
8575 if (count) {
8576 char *p = buffer;
8577 strncpy(buffer, buf, min(sizeof(buffer) - 1, count));
8577 buffer[min(sizeof(buffer) - 1, count)] = '\0'; /* ensure NUL termination */
8578 channel = simple_strtoul(p, NULL, 0);
8579 if (channel)
8580 params.channel = channel;
8581
8582 p = buffer;
8583 while (*p && *p != ' ')
8584 p++;
8585 if (*p)
8586 type = simple_strtoul(p + 1, NULL, 0);
8587 }
8588
8589 IWL_DEBUG_INFO("Invoking measurement of type %d on "
8590 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 8591 iwl4965_get_measurement(priv, &params, type);
b481de9c
ZY
8592
8593 return count;
8594}
8595
8596static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
8597 show_measurement, store_measurement);
c8b0e6e1 8598#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */
b481de9c
ZY
8599
8600static ssize_t store_retry_rate(struct device *d,
8601 struct device_attribute *attr,
8602 const char *buf, size_t count)
8603{
bb8c093b 8604 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8605
8606 priv->retry_rate = simple_strtoul(buf, NULL, 0);
8607 if (priv->retry_rate <= 0)
8608 priv->retry_rate = 1;
8609
8610 return count;
8611}
8612
8613static ssize_t show_retry_rate(struct device *d,
8614 struct device_attribute *attr, char *buf)
8615{
bb8c093b 8616 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8617 return sprintf(buf, "%d", priv->retry_rate);
8618}
8619
8620static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
8621 store_retry_rate);
8622
8623static ssize_t store_power_level(struct device *d,
8624 struct device_attribute *attr,
8625 const char *buf, size_t count)
8626{
bb8c093b 8627 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8628 int rc;
8629 int mode;
8630
8631 mode = simple_strtoul(buf, NULL, 0);
8632 mutex_lock(&priv->mutex);
8633
bb8c093b 8634 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
8635 rc = -EAGAIN;
8636 goto out;
8637 }
8638
8639 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
8640 mode = IWL_POWER_AC;
8641 else
8642 mode |= IWL_POWER_ENABLED;
8643
8644 if (mode != priv->power_mode) {
bb8c093b 8645 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
b481de9c
ZY
8646 if (rc) {
8647 IWL_DEBUG_MAC80211("failed setting power mode.\n");
8648 goto out;
8649 }
8650 priv->power_mode = mode;
8651 }
8652
8653 rc = count;
8654
8655 out:
8656 mutex_unlock(&priv->mutex);
8657 return rc;
8658}
8659
8660#define MAX_WX_STRING 80
8661
8662 /* Values are in microseconds */
8663static const s32 timeout_duration[] = {
8664 350000,
8665 250000,
8666 75000,
8667 37000,
8668 25000,
8669};
8670static const s32 period_duration[] = {
8671 400000,
8672 700000,
8673 1000000,
8674 1000000,
8675 1000000
8676};
8677
8678static ssize_t show_power_level(struct device *d,
8679 struct device_attribute *attr, char *buf)
8680{
bb8c093b 8681 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8682 int level = IWL_POWER_LEVEL(priv->power_mode);
8683 char *p = buf;
8684
8685 p += sprintf(p, "%d ", level);
8686 switch (level) {
8687 case IWL_POWER_MODE_CAM:
8688 case IWL_POWER_AC:
8689 p += sprintf(p, "(AC)");
8690 break;
8691 case IWL_POWER_BATTERY:
8692 p += sprintf(p, "(BATTERY)");
8693 break;
8694 default:
8695 p += sprintf(p,
8696 "(Timeout %dms, Period %dms)",
8697 timeout_duration[level - 1] / 1000,
8698 period_duration[level - 1] / 1000);
8699 }
8700
8701 if (!(priv->power_mode & IWL_POWER_ENABLED))
8702 p += sprintf(p, " OFF\n");
8703 else
8704 p += sprintf(p, "\n");
8705
8706 return p - buf;
8707
8708}
8709
8710static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8711 store_power_level);
8712
8713static ssize_t show_channels(struct device *d,
8714 struct device_attribute *attr, char *buf)
8715{
bb8c093b 8716 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8717 int len = 0, i;
8718 struct ieee80211_channel *channels = NULL;
8719 const struct ieee80211_hw_mode *hw_mode = NULL;
8720 int count = 0;
8721
bb8c093b 8722 if (!iwl4965_is_ready(priv))
b481de9c
ZY
8723 return -EAGAIN;
8724
bb8c093b 8725 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211G);
b481de9c 8726 if (!hw_mode)
bb8c093b 8727 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211B);
b481de9c
ZY
8728 if (hw_mode) {
8729 channels = hw_mode->channels;
8730 count = hw_mode->num_channels;
8731 }
8732
8733 len +=
8734 sprintf(&buf[len],
8735 "Displaying %d channels in 2.4GHz band "
8736 "(802.11bg):\n", count);
8737
8738 for (i = 0; i < count; i++)
8739 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8740 channels[i].chan,
8741 channels[i].power_level,
8742 channels[i].flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8743 " (IEEE 802.11h required)" : "",
8744 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) ||
8745 (channels[i].flag & IEEE80211_CHAN_W_RADAR_DETECT)) ?
8746 "" : ", IBSS",
8747 channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8748 "active/passive" : "passive only");
8753
bb8c093b 8754 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211A);
b481de9c
ZY
8755 if (hw_mode) {
8756 channels = hw_mode->channels;
8757 count = hw_mode->num_channels;
8758 } else {
8759 channels = NULL;
8760 count = 0;
8761 }
8762
8763 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8764 "(802.11a):\n", count);
8765
8766 for (i = 0; i < count; i++)
8767 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8768 channels[i].chan,
8769 channels[i].power_level,
8770 channels[i].flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8771 " (IEEE 802.11h required)" : "",
8772 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) ||
8773 (channels[i].flag & IEEE80211_CHAN_W_RADAR_DETECT)) ?
8774 "" : ", IBSS",
8775 channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8776 "active/passive" : "passive only");
8781
8782 return len;
8783}
8784
8785static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8786
8787static ssize_t show_statistics(struct device *d,
8788 struct device_attribute *attr, char *buf)
8789{
bb8c093b
CH
8790 struct iwl4965_priv *priv = dev_get_drvdata(d);
8791 u32 size = sizeof(struct iwl4965_notif_statistics);
b481de9c
ZY
8792 u32 len = 0, ofs = 0;
8793 u8 *data = (u8 *)&priv->statistics;
8794 int rc = 0;
8795
bb8c093b 8796 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8797 return -EAGAIN;
8798
8799 mutex_lock(&priv->mutex);
bb8c093b 8800 rc = iwl4965_send_statistics_request(priv);
b481de9c
ZY
8801 mutex_unlock(&priv->mutex);
8802
8803 if (rc) {
8804 len = sprintf(buf,
8805 "Error sending statistics request: 0x%08X\n", rc);
8806 return len;
8807 }
8808
8809 while (size && (PAGE_SIZE - len)) {
8810 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8811 PAGE_SIZE - len, 1);
8812 len = strlen(buf);
8813 if (PAGE_SIZE - len)
8814 buf[len++] = '\n';
8815
8816 ofs += 16;
8817 size -= min(size, 16U);
8818 }
8819
8820 return len;
8821}
8822
8823static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8824
8825static ssize_t show_antenna(struct device *d,
8826 struct device_attribute *attr, char *buf)
8827{
bb8c093b 8828 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c 8829
bb8c093b 8830 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8831 return -EAGAIN;
8832
8833 return sprintf(buf, "%d\n", priv->antenna);
8834}
8835
8836static ssize_t store_antenna(struct device *d,
8837 struct device_attribute *attr,
8838 const char *buf, size_t count)
8839{
8840 int ant;
bb8c093b 8841 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8842
8843 if (count == 0)
8844 return 0;
8845
8846 if (sscanf(buf, "%1i", &ant) != 1) {
8847 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8848 return count;
8849 }
8850
8851 if ((ant >= 0) && (ant <= 2)) {
8852 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 8853 priv->antenna = (enum iwl4965_antenna)ant;
b481de9c
ZY
8854 } else
8855 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8856
8857
8858 return count;
8859}
8860
8861static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
8862
8863static ssize_t show_status(struct device *d,
8864 struct device_attribute *attr, char *buf)
8865{
bb8c093b
CH
8866 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8867 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8868 return -EAGAIN;
8869 return sprintf(buf, "0x%08x\n", (int)priv->status);
8870}
8871
8872static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
8873
8874static ssize_t dump_error_log(struct device *d,
8875 struct device_attribute *attr,
8876 const char *buf, size_t count)
8877{
8878 char *p = (char *)buf;
8879
8880 if (p[0] == '1')
bb8c093b 8881 iwl4965_dump_nic_error_log((struct iwl4965_priv *)d->driver_data);
b481de9c
ZY
8882
8883 return strnlen(buf, count);
8884}
8885
8886static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
8887
8888static ssize_t dump_event_log(struct device *d,
8889 struct device_attribute *attr,
8890 const char *buf, size_t count)
8891{
8892 char *p = (char *)buf;
8893
8894 if (p[0] == '1')
bb8c093b 8895 iwl4965_dump_nic_event_log((struct iwl4965_priv *)d->driver_data);
b481de9c
ZY
8896
8897 return strnlen(buf, count);
8898}
8899
8900static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
8901
8902/*****************************************************************************
8903 *
8904 * driver setup and teardown
8905 *
8906 *****************************************************************************/
8907
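/*
 * Create the driver workqueue and wire up all work items, delayed work
 * items and the IRQ tasklet; iwl4965_cancel_deferred_work() below undoes
 * the per-work setup on teardown.
 */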
bb8c093b 8908static void iwl4965_setup_deferred_work(struct iwl4965_priv *priv)
b481de9c
ZY
8909{
8910 priv->workqueue = create_workqueue(DRV_NAME);
8911
8912 init_waitqueue_head(&priv->wait_command_queue);
8913
bb8c093b
CH
8914 INIT_WORK(&priv->up, iwl4965_bg_up);
8915 INIT_WORK(&priv->restart, iwl4965_bg_restart);
8916 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
8917 INIT_WORK(&priv->scan_completed, iwl4965_bg_scan_completed);
8918 INIT_WORK(&priv->request_scan, iwl4965_bg_request_scan);
8919 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
8920 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
8921 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
8922 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
8923 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
8924 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
8925 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
8926
8927 iwl4965_hw_setup_deferred_work(priv);
b481de9c
ZY
8928
8929 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 8930 iwl4965_irq_tasklet, (unsigned long)priv);
b481de9c
ZY
8931}
8932
bb8c093b 8933static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv)
b481de9c 8934{
bb8c093b 8935 iwl4965_hw_cancel_deferred_work(priv);
b481de9c 8936
3ae6a054 8937 cancel_delayed_work_sync(&priv->init_alive_start);
b481de9c
ZY
8938 cancel_delayed_work(&priv->scan_check);
8939 cancel_delayed_work(&priv->alive_start);
8940 cancel_delayed_work(&priv->post_associate);
8941 cancel_work_sync(&priv->beacon_update);
8942}
8943
bb8c093b 8944static struct attribute *iwl4965_sysfs_entries[] = {
b481de9c
ZY
8945 &dev_attr_antenna.attr,
8946 &dev_attr_channels.attr,
8947 &dev_attr_dump_errors.attr,
8948 &dev_attr_dump_events.attr,
8949 &dev_attr_flags.attr,
8950 &dev_attr_filter_flags.attr,
c8b0e6e1 8951#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
8952 &dev_attr_measurement.attr,
8953#endif
8954 &dev_attr_power_level.attr,
8955 &dev_attr_retry_rate.attr,
8956 &dev_attr_rf_kill.attr,
8957 &dev_attr_rs_window.attr,
8958 &dev_attr_statistics.attr,
8959 &dev_attr_status.attr,
8960 &dev_attr_temperature.attr,
8961 &dev_attr_tune.attr,
8962 &dev_attr_tx_power.attr,
8963
8964 NULL
8965};
8966
bb8c093b 8967static struct attribute_group iwl4965_attribute_group = {
b481de9c 8968 .name = NULL, /* put in device directory */
bb8c093b 8969 .attrs = iwl4965_sysfs_entries,
b481de9c
ZY
8970};
8971
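/*
 * mac80211 callbacks provided by this driver; note that .hw_scan is
 * cleared at probe time when the disable_hw_scan module parameter is set.
 */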
bb8c093b
CH
8972static struct ieee80211_ops iwl4965_hw_ops = {
8973 .tx = iwl4965_mac_tx,
8974 .start = iwl4965_mac_start,
8975 .stop = iwl4965_mac_stop,
8976 .add_interface = iwl4965_mac_add_interface,
8977 .remove_interface = iwl4965_mac_remove_interface,
8978 .config = iwl4965_mac_config,
8979 .config_interface = iwl4965_mac_config_interface,
8980 .configure_filter = iwl4965_configure_filter,
8981 .set_key = iwl4965_mac_set_key,
8982 .get_stats = iwl4965_mac_get_stats,
8983 .get_tx_stats = iwl4965_mac_get_tx_stats,
8984 .conf_tx = iwl4965_mac_conf_tx,
8985 .get_tsf = iwl4965_mac_get_tsf,
8986 .reset_tsf = iwl4965_mac_reset_tsf,
8987 .beacon_update = iwl4965_mac_beacon_update,
471b3efd 8988 .bss_info_changed = iwl4965_bss_info_changed,
c8b0e6e1 8989#ifdef CONFIG_IWL4965_HT
bb8c093b 8990 .conf_ht = iwl4965_mac_conf_ht,
9ab46173 8991 .ampdu_action = iwl4965_mac_ampdu_action,
c8b0e6e1 8992#ifdef CONFIG_IWL4965_HT_AGG
bb8c093b
CH
8993 .ht_tx_agg_start = iwl4965_mac_ht_tx_agg_start,
8994 .ht_tx_agg_stop = iwl4965_mac_ht_tx_agg_stop,
c8b0e6e1
CH
8995#endif /* CONFIG_IWL4965_HT_AGG */
8996#endif /* CONFIG_IWL4965_HT */
bb8c093b 8997 .hw_scan = iwl4965_mac_hw_scan
b481de9c
ZY
8998};
8999
bb8c093b 9000static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
b481de9c
ZY
9001{
9002 int err = 0;
bb8c093b 9003 struct iwl4965_priv *priv;
b481de9c
ZY
9004 struct ieee80211_hw *hw;
9005 int i;
5a66926a 9006 DECLARE_MAC_BUF(mac);
b481de9c 9007
6440adb5
BC
9008 /* Disabling hardware scan means that mac80211 will perform scans
9009 * "the hard way", rather than using device's scan. */
bb8c093b 9010 if (iwl4965_param_disable_hw_scan) {
b481de9c 9011 IWL_DEBUG_INFO("Disabling hw_scan\n");
bb8c093b 9012 iwl4965_hw_ops.hw_scan = NULL;
b481de9c
ZY
9013 }
9014
bb8c093b
CH
9015 if ((iwl4965_param_queues_num > IWL_MAX_NUM_QUEUES) ||
9016 (iwl4965_param_queues_num < IWL_MIN_NUM_QUEUES)) {
b481de9c
ZY
9017 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
9018 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
9019 err = -EINVAL;
9020 goto out;
9021 }
9022
9023 /* mac80211 allocates memory for this device instance, including
9024 * space for this driver's private structure */
bb8c093b 9025 hw = ieee80211_alloc_hw(sizeof(struct iwl4965_priv), &iwl4965_hw_ops);
b481de9c
ZY
9026 if (hw == NULL) {
9027 IWL_ERROR("Can not allocate network device\n");
9028 err = -ENOMEM;
9029 goto out;
9030 }
9031 SET_IEEE80211_DEV(hw, &pdev->dev);
9032
f51359a8
JB
9033 hw->rate_control_algorithm = "iwl-4965-rs";
9034
b481de9c
ZY
9035 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
9036 priv = hw->priv;
9037 priv->hw = hw;
9038
9039 priv->pci_dev = pdev;
bb8c093b 9040 priv->antenna = (enum iwl4965_antenna)iwl4965_param_antenna;
c8b0e6e1 9041#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9042 iwl4965_debug_level = iwl4965_param_debug;
b481de9c
ZY
9043 atomic_set(&priv->restrict_refcnt, 0);
9044#endif
9045 priv->retry_rate = 1;
9046
9047 priv->ibss_beacon = NULL;
9048
9049 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
9050 * the range of signal quality values that we'll provide.
9051 * Negative values for level/noise indicate that we'll provide dBm.
9052 * For WE, at least, non-0 values here *enable* display of values
9053 * in app (iwconfig). */
9054 hw->max_rssi = -20; /* signal level, negative indicates dBm */
9055 hw->max_noise = -20; /* noise level, negative indicates dBm */
9056 hw->max_signal = 100; /* link quality indication (%) */
9057
9058 /* Tell mac80211 our Tx characteristics */
9059 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
9060
6440adb5 9061 /* Default value; 4 EDCA QOS priorities */
b481de9c 9062 hw->queues = 4;
c8b0e6e1
CH
9063#ifdef CONFIG_IWL4965_HT
9064#ifdef CONFIG_IWL4965_HT_AGG
6440adb5 9065 /* Enhanced value; more queues, to support 11n aggregation */
b481de9c 9066 hw->queues = 16;
c8b0e6e1
CH
9067#endif /* CONFIG_IWL4965_HT_AGG */
9068#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
9069
9070 spin_lock_init(&priv->lock);
9071 spin_lock_init(&priv->power_data.lock);
9072 spin_lock_init(&priv->sta_lock);
9073 spin_lock_init(&priv->hcmd_lock);
9074 spin_lock_init(&priv->lq_mngr.lock);
9075
9076 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
9077 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
9078
9079 INIT_LIST_HEAD(&priv->free_frames);
9080
9081 mutex_init(&priv->mutex);
9082 if (pci_enable_device(pdev)) {
9083 err = -ENODEV;
9084 goto out_ieee80211_free_hw;
9085 }
9086
9087 pci_set_master(pdev);
9088
6440adb5 9089 /* Clear the driver's (not device's) station table */
bb8c093b 9090 iwl4965_clear_stations_table(priv);
b481de9c
ZY
9091
9092 priv->data_retry_limit = -1;
9093 priv->ieee_channels = NULL;
9094 priv->ieee_rates = NULL;
9095 priv->phymode = -1;
9096
9097 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9098 if (!err)
9099 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
9100 if (err) {
9101 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
9102 goto out_pci_disable_device;
9103 }
9104
9105 pci_set_drvdata(pdev, priv);
9106 err = pci_request_regions(pdev, DRV_NAME);
9107 if (err)
9108 goto out_pci_disable_device;
6440adb5 9109
b481de9c
ZY
9110 /* We disable the RETRY_TIMEOUT register (0x41) to keep
9111 * PCI Tx retries from interfering with C3 CPU state */
9112 pci_write_config_byte(pdev, 0x41, 0x00);
6440adb5 9113
b481de9c
ZY
9114 priv->hw_base = pci_iomap(pdev, 0, 0);
9115 if (!priv->hw_base) {
9116 err = -ENODEV;
9117 goto out_pci_release_regions;
9118 }
9119
9120 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
9121 (unsigned long long) pci_resource_len(pdev, 0));
9122 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
9123
9124 /* Initialize module parameter values here */
9125
6440adb5 9126 /* Disable radio (SW RF KILL) via parameter when loading driver */
bb8c093b 9127 if (iwl4965_param_disable) {
b481de9c
ZY
9128 set_bit(STATUS_RF_KILL_SW, &priv->status);
9129 IWL_DEBUG_INFO("Radio disabled.\n");
9130 }
9131
9132 priv->iw_mode = IEEE80211_IF_TYPE_STA;
9133
9134 priv->ps_mode = 0;
9135 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
b481de9c
ZY
9136 priv->valid_antenna = 0x7; /* assume all 3 connected */
9137 priv->ps_mode = IWL_MIMO_PS_NONE;
b481de9c 9138
6440adb5 9139 /* Choose which receivers/antennas to use */
b481de9c
ZY
9140 iwl4965_set_rxon_chain(priv);
9141
9142 printk(KERN_INFO DRV_NAME
9143 ": Detected Intel Wireless WiFi Link 4965AGN\n");
9144
9145 /* Device-specific setup */
bb8c093b 9146 if (iwl4965_hw_set_hw_setting(priv)) {
b481de9c 9147 IWL_ERROR("failed to set hw settings\n");
b481de9c
ZY
9148 goto out_iounmap;
9149 }
9150
c8b0e6e1 9151#ifdef CONFIG_IWL4965_QOS
bb8c093b 9152 if (iwl4965_param_qos_enable)
b481de9c
ZY
9153 priv->qos_data.qos_enable = 1;
9154
bb8c093b 9155 iwl4965_reset_qos(priv);
b481de9c
ZY
9156
9157 priv->qos_data.qos_active = 0;
9158 priv->qos_data.qos_cap.val = 0;
c8b0e6e1 9159#endif /* CONFIG_IWL4965_QOS */
b481de9c 9160
bb8c093b
CH
9161 iwl4965_set_rxon_channel(priv, MODE_IEEE80211G, 6);
9162 iwl4965_setup_deferred_work(priv);
9163 iwl4965_setup_rx_handlers(priv);
b481de9c
ZY
9164
9165 priv->rates_mask = IWL_RATES_MASK;
9166 /* If power management is turned on, default to AC mode */
9167 priv->power_mode = IWL_POWER_AC;
9168 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
9169
bb8c093b 9170 iwl4965_disable_interrupts(priv);
49df2b33 9171
bb8c093b 9172 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c
ZY
9173 if (err) {
9174 IWL_ERROR("failed to create sysfs device attributes\n");
b481de9c
ZY
9175 goto out_release_irq;
9176 }
9177
5a66926a
ZY
9178 /* nic init */
9179 iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
9180 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
9181
9182 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
9183 err = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
9184 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
9185 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
9186 if (err < 0) {
9187 IWL_DEBUG_INFO("Failed to init the card\n");
9188 goto out_remove_sysfs;
9189 }
9190 /* Read the EEPROM */
9191 err = iwl4965_eeprom_init(priv);
b481de9c 9192 if (err) {
5a66926a
ZY
9193 IWL_ERROR("Unable to init EEPROM\n");
9194 goto out_remove_sysfs;
b481de9c 9195 }
5a66926a
ZY
9196 /* MAC Address location in EEPROM same for 3945/4965 */
9197 get_eeprom_mac(priv, priv->mac_addr);
9198 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
9199 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
b481de9c 9200
5a66926a
ZY
9201 iwl4965_rate_control_register(priv->hw);
9202 err = ieee80211_register_hw(priv->hw);
9203 if (err) {
9204 IWL_ERROR("Failed to register network device (error %d)\n", err);
9205 goto out_remove_sysfs;
9206 }
b481de9c 9207
5a66926a
ZY
9208 priv->hw->conf.beacon_int = 100;
9209 priv->mac80211_registered = 1;
9210 pci_save_state(pdev);
9211 pci_disable_device(pdev);
b481de9c
ZY
9212
9213 return 0;
9214
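/* Error unwind: release everything acquired above, in reverse order. */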
5a66926a 9215 out_remove_sysfs:
bb8c093b 9216 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c
ZY
9217
9218 out_release_irq:
b481de9c
ZY
9219 destroy_workqueue(priv->workqueue);
9220 priv->workqueue = NULL;
bb8c093b 9221 iwl4965_unset_hw_setting(priv);
b481de9c
ZY
9222
9223 out_iounmap:
9224 pci_iounmap(pdev, priv->hw_base);
9225 out_pci_release_regions:
9226 pci_release_regions(pdev);
9227 out_pci_disable_device:
9228 pci_disable_device(pdev);
9229 pci_set_drvdata(pdev, NULL);
9230 out_ieee80211_free_hw:
9231 ieee80211_free_hw(priv->hw);
9232 out:
9233 return err;
9234}
9235
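/*
 * PCI remove: roughly the reverse of iwl4965_pci_probe(): bring the device
 * down, free rx/tx queues and driver state, unregister from mac80211 and
 * release the PCI resources.
 */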
bb8c093b 9236static void iwl4965_pci_remove(struct pci_dev *pdev)
b481de9c 9237{
bb8c093b 9238 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c
ZY
9239 struct list_head *p, *q;
9240 int i;
9241
9242 if (!priv)
9243 return;
9244
9245 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
9246
b481de9c 9247 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 9248
bb8c093b 9249 iwl4965_down(priv);
b481de9c
ZY
9250
9251 /* Free MAC hash list for ADHOC */
9252 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
9253 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
9254 list_del(p);
bb8c093b 9255 kfree(list_entry(p, struct iwl4965_ibss_seq, list));
b481de9c
ZY
9256 }
9257 }
9258
bb8c093b 9259 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c 9260
bb8c093b 9261 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
9262
9263 if (priv->rxq.bd)
bb8c093b
CH
9264 iwl4965_rx_queue_free(priv, &priv->rxq);
9265 iwl4965_hw_txq_ctx_free(priv);
b481de9c 9266
bb8c093b
CH
9267 iwl4965_unset_hw_setting(priv);
9268 iwl4965_clear_stations_table(priv);
b481de9c
ZY
9269
9270 if (priv->mac80211_registered) {
9271 ieee80211_unregister_hw(priv->hw);
bb8c093b 9272 iwl4965_rate_control_unregister(priv->hw);
b481de9c
ZY
9273 }
9274
948c171c
MA
9275 /*netif_stop_queue(dev); */
9276 flush_workqueue(priv->workqueue);
9277
bb8c093b 9278 /* ieee80211_unregister_hw calls iwl4965_mac_stop, which flushes
b481de9c
ZY
9279 * priv->workqueue, so we cannot destroy the workqueue
9280 * until this point. */
9281 destroy_workqueue(priv->workqueue);
9282 priv->workqueue = NULL;
9283
b481de9c
ZY
9284 pci_iounmap(pdev, priv->hw_base);
9285 pci_release_regions(pdev);
9286 pci_disable_device(pdev);
9287 pci_set_drvdata(pdev, NULL);
9288
9289 kfree(priv->channel_info);
9290
9291 kfree(priv->ieee_channels);
9292 kfree(priv->ieee_rates);
9293
9294 if (priv->ibss_beacon)
9295 dev_kfree_skb(priv->ibss_beacon);
9296
9297 ieee80211_free_hw(priv->hw);
9298}
9299
9300#ifdef CONFIG_PM
9301
bb8c093b 9302static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 9303{
bb8c093b 9304 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9305
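/*
 * Note: iwl4965_mac_stop() is expected to clear is_open, so it is
 * deliberately set back to 1 here so that iwl4965_pci_resume() knows
 * to bring the interface back up.
 */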
e655b9f0
ZY
9306 if (priv->is_open) {
9307 set_bit(STATUS_IN_SUSPEND, &priv->status);
9308 iwl4965_mac_stop(priv->hw);
9309 priv->is_open = 1;
9310 }
b481de9c 9311
b481de9c
ZY
9312 pci_set_power_state(pdev, PCI_D3hot);
9313
b481de9c
ZY
9314 return 0;
9315}
9316
bb8c093b 9317static int iwl4965_pci_resume(struct pci_dev *pdev)
b481de9c 9318{
bb8c093b 9319 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9320
b481de9c 9321 pci_set_power_state(pdev, PCI_D0);
b481de9c 9322
e655b9f0
ZY
9323 if (priv->is_open)
9324 iwl4965_mac_start(priv->hw);
b481de9c 9325
e655b9f0 9326 clear_bit(STATUS_IN_SUSPEND, &priv->status);
b481de9c
ZY
9327 return 0;
9328}
9329
9330#endif /* CONFIG_PM */
9331
9332/*****************************************************************************
9333 *
9334 * driver and module entry point
9335 *
9336 *****************************************************************************/
9337
bb8c093b 9338static struct pci_driver iwl4965_driver = {
b481de9c 9339 .name = DRV_NAME,
bb8c093b
CH
9340 .id_table = iwl4965_hw_card_ids,
9341 .probe = iwl4965_pci_probe,
9342 .remove = __devexit_p(iwl4965_pci_remove),
b481de9c 9343#ifdef CONFIG_PM
bb8c093b
CH
9344 .suspend = iwl4965_pci_suspend,
9345 .resume = iwl4965_pci_resume,
b481de9c
ZY
9346#endif
9347};
9348
bb8c093b 9349static int __init iwl4965_init(void)
b481de9c
ZY
9350{
9351
9352 int ret;
9353 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
9354 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
bb8c093b 9355 ret = pci_register_driver(&iwl4965_driver);
b481de9c
ZY
9356 if (ret) {
9357 IWL_ERROR("Unable to initialize PCI module\n");
9358 return ret;
9359 }
c8b0e6e1 9360#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9361 ret = driver_create_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c
ZY
9362 if (ret) {
9363 IWL_ERROR("Unable to create driver sysfs file\n");
bb8c093b 9364 pci_unregister_driver(&iwl4965_driver);
b481de9c
ZY
9365 return ret;
9366 }
9367#endif
9368
9369 return ret;
9370}
9371
bb8c093b 9372static void __exit iwl4965_exit(void)
b481de9c 9373{
c8b0e6e1 9374#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9375 driver_remove_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c 9376#endif
bb8c093b 9377 pci_unregister_driver(&iwl4965_driver);
b481de9c
ZY
9378}
9379
bb8c093b 9380module_param_named(antenna, iwl4965_param_antenna, int, 0444);
b481de9c 9381MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
bb8c093b 9382module_param_named(disable, iwl4965_param_disable, int, 0444);
b481de9c 9383MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
bb8c093b 9384module_param_named(hwcrypto, iwl4965_param_hwcrypto, int, 0444);
b481de9c
ZY
9385MODULE_PARM_DESC(hwcrypto,
9386 "using hardware crypto engine (default 0 [software])\n");
bb8c093b 9387module_param_named(debug, iwl4965_param_debug, int, 0444);
b481de9c 9388MODULE_PARM_DESC(debug, "debug output mask");
bb8c093b 9389module_param_named(disable_hw_scan, iwl4965_param_disable_hw_scan, int, 0444);
b481de9c
ZY
9390MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
9391
bb8c093b 9392module_param_named(queues_num, iwl4965_param_queues_num, int, 0444);
b481de9c
ZY
9393MODULE_PARM_DESC(queues_num, "number of hw queues.");
9394
9395/* QoS */
bb8c093b 9396module_param_named(qos_enable, iwl4965_param_qos_enable, int, 0444);
b481de9c 9397MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
9ee1ba47
RR
9398module_param_named(amsdu_size_8K, iwl4965_param_amsdu_size_8K, int, 0444);
9399MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
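/*
 * Illustrative load example only (module name and values are examples;
 * all parameters default as documented above):
 *
 *   modprobe iwl4965 disable_hw_scan=1 qos_enable=0 debug=0x43fff
 */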
b481de9c 9400
bb8c093b
CH
9401module_exit(iwl4965_exit);
9402module_init(iwl4965_init);