drivers/net/wireless/iwlwifi/iwl4965-base.c
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/dma-mapping.h>
36#include <linux/delay.h>
37#include <linux/skbuff.h>
38#include <linux/netdevice.h>
39#include <linux/wireless.h>
40#include <linux/firmware.h>
41#include <linux/etherdevice.h>
42#include <linux/if_arp.h>
43
44#include <net/ieee80211_radiotap.h>
45#include <net/mac80211.h>
46
47#include <asm/div64.h>
48
49#include "iwl-4965.h"
50#include "iwl-helpers.h"
51
52#ifdef CONFIG_IWL4965_DEBUG
53u32 iwl4965_debug_level;
54#endif
55
56static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
57 struct iwl4965_tx_queue *txq);
58
59/******************************************************************************
60 *
61 * module boiler plate
62 *
63 ******************************************************************************/
64
65/* module parameters */
66static int iwl4965_param_disable_hw_scan; /* def: 0 = use 4965's h/w scan */
67static int iwl4965_param_debug; /* def: 0 = minimal debug log messages */
68static int iwl4965_param_disable; /* def: enable radio */
69static int iwl4965_param_antenna; /* def: 0 = both antennas (use diversity) */
70int iwl4965_param_hwcrypto; /* def: using software encryption */
71static int iwl4965_param_qos_enable = 1; /* def: 1 = use quality of service */
72int iwl4965_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 16 Tx queues */
73
74/*
75 * module name, copyright, version, etc.
76 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
77 */
78
79#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
80
81#ifdef CONFIG_IWL4965_DEBUG
82#define VD "d"
83#else
84#define VD
85#endif
86
87#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
88#define VS "s"
89#else
90#define VS
91#endif
92
93#define IWLWIFI_VERSION "1.1.19k" VD VS
94#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
95#define DRV_VERSION IWLWIFI_VERSION
96
97/* Change firmware file name, using "-" and incrementing number,
98 * *only* when uCode interface or architecture changes so that it
99 * is not compatible with earlier drivers.
100 * This number will also appear in << 8 position of 1st dword of uCode file */
101#define IWL4965_UCODE_API "-1"
102
103MODULE_DESCRIPTION(DRV_DESCRIPTION);
104MODULE_VERSION(DRV_VERSION);
105MODULE_AUTHOR(DRV_COPYRIGHT);
106MODULE_LICENSE("GPL");
107
108__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
109{
110 u16 fc = le16_to_cpu(hdr->frame_control);
111 int hdr_len = ieee80211_get_hdrlen(fc);
112
113 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
114 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
115 return NULL;
116}
117
118static const struct ieee80211_hw_mode *iwl4965_get_hw_mode(
119 struct iwl4965_priv *priv, int mode)
120{
121 int i;
122
123 for (i = 0; i < 3; i++)
124 if (priv->modes[i].mode == mode)
125 return &priv->modes[i];
126
127 return NULL;
128}
129
130static int iwl4965_is_empty_essid(const char *essid, int essid_len)
131{
132 /* Single white space is for Linksys APs */
133 if (essid_len == 1 && essid[0] == ' ')
134 return 1;
135
136 /* Otherwise, if the entire essid is 0, we assume it is hidden */
137 while (essid_len) {
138 essid_len--;
139 if (essid[essid_len] != '\0')
140 return 0;
141 }
142
143 return 1;
144}
145
146static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
147{
148 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
149 const char *s = essid;
150 char *d = escaped;
151
152 if (iwl4965_is_empty_essid(essid, essid_len)) {
153 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
154 return escaped;
155 }
156
157 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
158 while (essid_len--) {
159 if (*s == '\0') {
160 *d++ = '\\';
161 *d++ = '0';
162 s++;
163 } else
164 *d++ = *s++;
165 }
166 *d = '\0';
167 return escaped;
168}
169
170static void iwl4965_print_hex_dump(int level, void *p, u32 len)
171{
172#ifdef CONFIG_IWL4965_DEBUG
173 if (!(iwl4965_debug_level & level))
174 return;
175
176 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
177 p, len, 1);
178#endif
179}
180
181/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
182 * DMA services
183 *
184 * Theory of operation
185 *
186 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
187 * of buffer descriptors, each of which points to one or more data buffers for
188 * the device to read from or fill. Driver and device exchange status of each
189 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
190 * entries in each circular buffer, to protect against confusing empty and full
191 * queue states.
192 *
193 * The device reads or writes the data in the queues via the device's several
194 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
195 *
196 * For Tx queue, there are low mark and high mark limits. If, after queuing
197 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
198 * When reclaiming packets (on 'tx done' IRQ), if free space becomes > high
199 * mark, the Tx queue is resumed.
200 *
201 * The 4965 operates with up to 17 queues: One receive queue, one transmit
202 * queue (#4) for sending commands to the device firmware, and 15 other
203 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
204 *
205 * See more detailed info in iwl-4965-hw.h.
206 ***************************************************/
207
208static int iwl4965_queue_space(const struct iwl4965_queue *q)
209{
210 int s = q->read_ptr - q->write_ptr;
211
212 if (q->read_ptr > q->write_ptr)
213 s -= q->n_bd;
214
215 if (s <= 0)
216 s += q->n_window;
217 /* keep some reserve to not confuse empty and full situations */
218 s -= 2;
219 if (s < 0)
220 s = 0;
221 return s;
222}
223
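/*
 * Illustration with hypothetical numbers (not in the original source):
 * with n_bd = 256, n_window = 256, read_ptr = 10 and write_ptr = 250,
 * s = 10 - 250 = -240; since s <= 0 it becomes -240 + 256 = 16, and the
 * 2-entry reserve leaves iwl4965_queue_space() reporting 14 free slots.
 */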
224/**
225 * iwl4965_queue_inc_wrap - increment queue index, wrap back to beginning
226 * @index -- current index
227 * @n_bd -- total number of entries in queue (must be power of 2)
228 */
229static inline int iwl4965_queue_inc_wrap(int index, int n_bd)
230{
231 return ++index & (n_bd - 1);
232}
233
234/**
235 * iwl4965_queue_dec_wrap - decrement queue index, wrap back to end
236 * @index -- current index
237 * @n_bd -- total number of entries in queue (must be power of 2)
238 */
239static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
240{
241 return --index & (n_bd - 1);
242}
243
244static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
245{
246 return q->write_ptr > q->read_ptr ?
247 (i >= q->read_ptr && i < q->write_ptr) :
248 !(i < q->read_ptr && i >= q->write_ptr);
249}
250
251static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
252{
253 /* This is for scan command, the big buffer at end of command array */
254 if (is_huge)
255 return q->n_window; /* must be power of 2 */
256
257 /* Otherwise, use normal size buffers */
258 return index & (q->n_window - 1);
259}
260
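/*
 * Illustration with hypothetical numbers (not in the original source):
 * if the command queue uses n_window = 32 slots, a normal command at
 * write_ptr = 37 maps to slot 37 & 31 = 5, while a huge (scan) command
 * is always indexed at slot n_window (32), i.e. the oversized buffer
 * appended after the regular command array.
 */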
261/**
262 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
263 */
264static int iwl4965_queue_init(struct iwl4965_priv *priv, struct iwl4965_queue *q,
265 int count, int slots_num, u32 id)
266{
267 q->n_bd = count;
268 q->n_window = slots_num;
269 q->id = id;
270
271 /* count must be power-of-two size, otherwise iwl4965_queue_inc_wrap
272 * and iwl4965_queue_dec_wrap are broken. */
273 BUG_ON(!is_power_of_2(count));
274
275 /* slots_num must be power-of-two size, otherwise
276 * get_cmd_index is broken. */
277 BUG_ON(!is_power_of_2(slots_num));
278
279 q->low_mark = q->n_window / 4;
280 if (q->low_mark < 4)
281 q->low_mark = 4;
282
283 q->high_mark = q->n_window / 8;
284 if (q->high_mark < 2)
285 q->high_mark = 2;
286
287 q->write_ptr = q->read_ptr = 0;
288
289 return 0;
290}
291
292/**
293 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
294 */
295static int iwl4965_tx_queue_alloc(struct iwl4965_priv *priv,
296 struct iwl4965_tx_queue *txq, u32 id)
297{
298 struct pci_dev *dev = priv->pci_dev;
299
300 /* Driver private data, only for Tx (not command) queues,
301 * not shared with device. */
302 if (id != IWL_CMD_QUEUE_NUM) {
303 txq->txb = kmalloc(sizeof(txq->txb[0]) *
304 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
305 if (!txq->txb) {
306 IWL_ERROR("kmalloc for auxiliary BD "
307 "structures failed\n");
308 goto error;
309 }
310 } else
311 txq->txb = NULL;
312
313 /* Circular buffer of transmit frame descriptors (TFDs),
314 * shared with device */
315 txq->bd = pci_alloc_consistent(dev,
316 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
317 &txq->q.dma_addr);
318
319 if (!txq->bd) {
320 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
321 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
322 goto error;
323 }
324 txq->q.id = id;
325
326 return 0;
327
328 error:
329 if (txq->txb) {
330 kfree(txq->txb);
331 txq->txb = NULL;
332 }
333
334 return -ENOMEM;
335}
336
337/**
338 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
339 */
340int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
341 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
342{
343 struct pci_dev *dev = priv->pci_dev;
344 int len;
345 int rc = 0;
346
347 /*
348 * Alloc buffer array for commands (Tx or other types of commands).
349 * For the command queue (#4), allocate command space + one big
350 * command for scan, since scan command is very huge; the system will
351 * not have two scans at the same time, so only one is needed.
352 * For data Tx queues (all other queues), no super-size command
353 * space is needed.
354 */
355 len = sizeof(struct iwl4965_cmd) * slots_num;
356 if (txq_id == IWL_CMD_QUEUE_NUM)
357 len += IWL_MAX_SCAN_SIZE;
358 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
359 if (!txq->cmd)
360 return -ENOMEM;
361
362 /* Alloc driver data array and TFD circular buffer */
363 rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
364 if (rc) {
365 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
366
367 return -ENOMEM;
368 }
369 txq->need_update = 0;
370
371 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
372 * iwl4965_queue_inc_wrap and iwl4965_queue_dec_wrap are broken. */
373 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
374
375 /* Initialize queue's high/low-water marks, and head/tail indexes */
376 iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
377
378 /* Tell device where to find queue */
379 iwl4965_hw_tx_queue_init(priv, txq);
380
381 return 0;
382}
383
384/**
385 * iwl4965_tx_queue_free - Deallocate DMA queue.
386 * @txq: Transmit queue to deallocate.
387 *
388 * Empty queue by removing and destroying all BD's.
389 * Free all buffers.
390 * 0-fill, but do not free "txq" descriptor structure.
391 */
392void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
393{
394 struct iwl4965_queue *q = &txq->q;
395 struct pci_dev *dev = priv->pci_dev;
396 int len;
397
398 if (q->n_bd == 0)
399 return;
400
401 /* first, empty all BD's */
402 for (; q->write_ptr != q->read_ptr;
403 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd))
404 iwl4965_hw_txq_free_tfd(priv, txq);
405
406 len = sizeof(struct iwl4965_cmd) * q->n_window;
407 if (q->id == IWL_CMD_QUEUE_NUM)
408 len += IWL_MAX_SCAN_SIZE;
409
410 /* De-alloc array of command/tx buffers */
411 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
412
413 /* De-alloc circular buffer of TFDs */
414 if (txq->q.n_bd)
415 pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
416 txq->q.n_bd, txq->bd, txq->q.dma_addr);
417
418 /* De-alloc array of per-TFD driver data */
419 if (txq->txb) {
420 kfree(txq->txb);
421 txq->txb = NULL;
422 }
423
424 /* 0-fill queue descriptor structure */
425 memset(txq, 0, sizeof(*txq));
426}
427
428const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
429
430/*************** STATION TABLE MANAGEMENT ****
431 * mac80211 should be examined to determine if sta_info is duplicating
432 * the functionality provided here
433 */
434
435/**************************************************************/
436
437#if 0 /* temporary disable till we add real remove station */
438/**
439 * iwl4965_remove_station - Remove driver's knowledge of station.
440 *
441 * NOTE: This does not remove station from device's station table.
442 */
443static u8 iwl4965_remove_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
444{
445 int index = IWL_INVALID_STATION;
446 int i;
447 unsigned long flags;
448
449 spin_lock_irqsave(&priv->sta_lock, flags);
450
451 if (is_ap)
452 index = IWL_AP_ID;
453 else if (is_broadcast_ether_addr(addr))
454 index = priv->hw_setting.bcast_sta_id;
455 else
456 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
457 if (priv->stations[i].used &&
458 !compare_ether_addr(priv->stations[i].sta.sta.addr,
459 addr)) {
460 index = i;
461 break;
462 }
463
464 if (unlikely(index == IWL_INVALID_STATION))
465 goto out;
466
467 if (priv->stations[index].used) {
468 priv->stations[index].used = 0;
469 priv->num_stations--;
470 }
471
472 BUG_ON(priv->num_stations < 0);
473
474out:
475 spin_unlock_irqrestore(&priv->sta_lock, flags);
476 return 0;
477}
478#endif
479
480/**
481 * iwl4965_clear_stations_table - Clear the driver's station table
482 *
483 * NOTE: This does not clear or otherwise alter the device's station table.
484 */
485static void iwl4965_clear_stations_table(struct iwl4965_priv *priv)
486{
487 unsigned long flags;
488
489 spin_lock_irqsave(&priv->sta_lock, flags);
490
491 priv->num_stations = 0;
492 memset(priv->stations, 0, sizeof(priv->stations));
493
494 spin_unlock_irqrestore(&priv->sta_lock, flags);
495}
496
497/**
498 * iwl4965_add_station_flags - Add station to tables in driver and device
499 */
500u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr, int is_ap, u8 flags)
501{
502 int i;
503 int index = IWL_INVALID_STATION;
504 struct iwl4965_station_entry *station;
505 unsigned long flags_spin;
506 DECLARE_MAC_BUF(mac);
507
508 spin_lock_irqsave(&priv->sta_lock, flags_spin);
509 if (is_ap)
510 index = IWL_AP_ID;
511 else if (is_broadcast_ether_addr(addr))
512 index = priv->hw_setting.bcast_sta_id;
513 else
514 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
515 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
516 addr)) {
517 index = i;
518 break;
519 }
520
521 if (!priv->stations[i].used &&
522 index == IWL_INVALID_STATION)
523 index = i;
524 }
525
526
527 /* These two conditions have the same outcome, but keep them separate
528 since they have different meanings */
529 if (unlikely(index == IWL_INVALID_STATION)) {
530 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
531 return index;
532 }
533
534 if (priv->stations[index].used &&
535 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
536 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
537 return index;
538 }
539
540
541 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
542 station = &priv->stations[index];
543 station->used = 1;
544 priv->num_stations++;
545
546 /* Set up the REPLY_ADD_STA command to send to device */
547 memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
548 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
549 station->sta.mode = 0;
550 station->sta.sta.sta_id = index;
551 station->sta.station_flags = 0;
552
553#ifdef CONFIG_IWL4965_HT
554 /* BCAST station and IBSS stations do not work in HT mode */
555 if (index != priv->hw_setting.bcast_sta_id &&
556 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
557 iwl4965_set_ht_add_station(priv, index);
558#endif /*CONFIG_IWL4965_HT*/
559
560 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
561
562 /* Add station to device's station table */
563 iwl4965_send_add_station(priv, &station->sta, flags);
564 return index;
565
566}
567
568/*************** DRIVER STATUS FUNCTIONS *****/
569
570static inline int iwl4965_is_ready(struct iwl4965_priv *priv)
571{
572 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
573 * set but EXIT_PENDING is not */
574 return test_bit(STATUS_READY, &priv->status) &&
575 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
576 !test_bit(STATUS_EXIT_PENDING, &priv->status);
577}
578
579static inline int iwl4965_is_alive(struct iwl4965_priv *priv)
580{
581 return test_bit(STATUS_ALIVE, &priv->status);
582}
583
584static inline int iwl4965_is_init(struct iwl4965_priv *priv)
585{
586 return test_bit(STATUS_INIT, &priv->status);
587}
588
589static inline int iwl4965_is_rfkill(struct iwl4965_priv *priv)
590{
591 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
592 test_bit(STATUS_RF_KILL_SW, &priv->status);
593}
594
595static inline int iwl4965_is_ready_rf(struct iwl4965_priv *priv)
596{
597
598 if (iwl4965_is_rfkill(priv))
599 return 0;
600
601 return iwl4965_is_ready(priv);
602}
603
604/*************** HOST COMMAND QUEUE FUNCTIONS *****/
605
606#define IWL_CMD(x) case x : return #x
607
608static const char *get_cmd_string(u8 cmd)
609{
610 switch (cmd) {
611 IWL_CMD(REPLY_ALIVE);
612 IWL_CMD(REPLY_ERROR);
613 IWL_CMD(REPLY_RXON);
614 IWL_CMD(REPLY_RXON_ASSOC);
615 IWL_CMD(REPLY_QOS_PARAM);
616 IWL_CMD(REPLY_RXON_TIMING);
617 IWL_CMD(REPLY_ADD_STA);
618 IWL_CMD(REPLY_REMOVE_STA);
619 IWL_CMD(REPLY_REMOVE_ALL_STA);
620 IWL_CMD(REPLY_TX);
621 IWL_CMD(REPLY_RATE_SCALE);
622 IWL_CMD(REPLY_LEDS_CMD);
623 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
624 IWL_CMD(RADAR_NOTIFICATION);
625 IWL_CMD(REPLY_QUIET_CMD);
626 IWL_CMD(REPLY_CHANNEL_SWITCH);
627 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
628 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
629 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
630 IWL_CMD(POWER_TABLE_CMD);
631 IWL_CMD(PM_SLEEP_NOTIFICATION);
632 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
633 IWL_CMD(REPLY_SCAN_CMD);
634 IWL_CMD(REPLY_SCAN_ABORT_CMD);
635 IWL_CMD(SCAN_START_NOTIFICATION);
636 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
637 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
638 IWL_CMD(BEACON_NOTIFICATION);
639 IWL_CMD(REPLY_TX_BEACON);
640 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
641 IWL_CMD(QUIET_NOTIFICATION);
642 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
643 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
644 IWL_CMD(REPLY_BT_CONFIG);
645 IWL_CMD(REPLY_STATISTICS_CMD);
646 IWL_CMD(STATISTICS_NOTIFICATION);
647 IWL_CMD(REPLY_CARD_STATE_CMD);
648 IWL_CMD(CARD_STATE_NOTIFICATION);
649 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
650 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
651 IWL_CMD(SENSITIVITY_CMD);
652 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
653 IWL_CMD(REPLY_RX_PHY_CMD);
654 IWL_CMD(REPLY_RX_MPDU_CMD);
655 IWL_CMD(REPLY_4965_RX);
656 IWL_CMD(REPLY_COMPRESSED_BA);
657 default:
658 return "UNKNOWN";
659
660 }
661}
662
663#define HOST_COMPLETE_TIMEOUT (HZ / 2)
664
665/**
666 * iwl4965_enqueue_hcmd - enqueue a uCode command
667 * @priv: device private data point
668 * @cmd: a point to the ucode command structure
669 *
670 * The function returns < 0 values to indicate that the operation
671 * failed. On success, it returns the index (> 0) of the command in
672 * the command queue.
673 */
674static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
675{
676 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
677 struct iwl4965_queue *q = &txq->q;
678 struct iwl4965_tfd_frame *tfd;
679 u32 *control_flags;
680 struct iwl4965_cmd *out_cmd;
681 u32 idx;
682 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
683 dma_addr_t phys_addr;
684 int ret;
685 unsigned long flags;
686
687 /* If any of the command structures end up being larger than
688 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
689 * we will need to increase the size of the TFD entries */
690 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
691 !(cmd->meta.flags & CMD_SIZE_HUGE));
692
693 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
694 IWL_ERROR("No space for Tx\n");
695 return -ENOSPC;
696 }
697
698 spin_lock_irqsave(&priv->hcmd_lock, flags);
699
700 tfd = &txq->bd[q->write_ptr];
701 memset(tfd, 0, sizeof(*tfd));
702
703 control_flags = (u32 *) tfd;
704
705 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
706 out_cmd = &txq->cmd[idx];
707
708 out_cmd->hdr.cmd = cmd->id;
709 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
710 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
711
712 /* At this point, the out_cmd now has all of the incoming cmd
713 * information */
714
715 out_cmd->hdr.flags = 0;
716 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
717 INDEX_TO_SEQ(q->write_ptr));
718 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
719 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
720
721 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
722 offsetof(struct iwl4965_cmd, hdr);
723 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
724
725 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
726 "%d bytes at %d[%d]:%d\n",
727 get_cmd_string(out_cmd->hdr.cmd),
728 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
729 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
730
731 txq->need_update = 1;
732
733 /* Set up entry in queue's byte count circular buffer */
734 ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
735
736 /* Increment and update queue's write index */
737 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
738 iwl4965_tx_queue_update_write_ptr(priv, txq);
739
740 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
741 return ret ? ret : idx;
742}
743
744static int iwl4965_send_cmd_async(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
745{
746 int ret;
747
748 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
749
750 /* An asynchronous command can not expect an SKB to be set. */
751 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
752
753 /* An asynchronous command MUST have a callback. */
754 BUG_ON(!cmd->meta.u.callback);
755
756 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
757 return -EBUSY;
758
759 ret = iwl4965_enqueue_hcmd(priv, cmd);
760 if (ret < 0) {
761 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
762 get_cmd_string(cmd->id), ret);
763 return ret;
764 }
765 return 0;
766}
767
768static int iwl4965_send_cmd_sync(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
769{
770 int cmd_idx;
771 int ret;
772 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
773
774 BUG_ON(cmd->meta.flags & CMD_ASYNC);
775
776 /* A synchronous command can not have a callback set. */
777 BUG_ON(cmd->meta.u.callback != NULL);
778
779 if (atomic_xchg(&entry, 1)) {
780 IWL_ERROR("Error sending %s: Already sending a host command\n",
781 get_cmd_string(cmd->id));
782 return -EBUSY;
783 }
784
785 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
786
787 if (cmd->meta.flags & CMD_WANT_SKB)
788 cmd->meta.source = &cmd->meta;
789
790 cmd_idx = iwl4965_enqueue_hcmd(priv, cmd);
791 if (cmd_idx < 0) {
792 ret = cmd_idx;
793 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
794 get_cmd_string(cmd->id), ret);
795 goto out;
796 }
797
798 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
799 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
800 HOST_COMPLETE_TIMEOUT);
801 if (!ret) {
802 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
803 IWL_ERROR("Error sending %s: time out after %dms.\n",
804 get_cmd_string(cmd->id),
805 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
806
807 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
808 ret = -ETIMEDOUT;
809 goto cancel;
810 }
811 }
812
813 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
814 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
815 get_cmd_string(cmd->id));
816 ret = -ECANCELED;
817 goto fail;
818 }
819 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
820 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
821 get_cmd_string(cmd->id));
822 ret = -EIO;
823 goto fail;
824 }
825 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
826 IWL_ERROR("Error: Response NULL in '%s'\n",
827 get_cmd_string(cmd->id));
828 ret = -EIO;
829 goto out;
830 }
831
832 ret = 0;
833 goto out;
834
835cancel:
836 if (cmd->meta.flags & CMD_WANT_SKB) {
837 struct iwl4965_cmd *qcmd;
838
839 /* Cancel the CMD_WANT_SKB flag for the cmd in the
840 * TX cmd queue. Otherwise in case the cmd comes
841 * in later, it will possibly set an invalid
842 * address (cmd->meta.source). */
843 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
844 qcmd->meta.flags &= ~CMD_WANT_SKB;
845 }
846fail:
847 if (cmd->meta.u.skb) {
848 dev_kfree_skb_any(cmd->meta.u.skb);
849 cmd->meta.u.skb = NULL;
850 }
851out:
852 atomic_set(&entry, 0);
853 return ret;
854}
855
856int iwl4965_send_cmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
857{
858 if (cmd->meta.flags & CMD_ASYNC)
859 return iwl4965_send_cmd_async(priv, cmd);
860
861 return iwl4965_send_cmd_sync(priv, cmd);
862}
863
864int iwl4965_send_cmd_pdu(struct iwl4965_priv *priv, u8 id, u16 len, const void *data)
865{
866 struct iwl4965_host_cmd cmd = {
867 .id = id,
868 .len = len,
869 .data = data,
870 };
871
872 return iwl4965_send_cmd_sync(priv, &cmd);
873}
874
875static int __must_check iwl4965_send_cmd_u32(struct iwl4965_priv *priv, u8 id, u32 val)
876{
877 struct iwl4965_host_cmd cmd = {
878 .id = id,
879 .len = sizeof(val),
880 .data = &val,
881 };
882
883 return iwl4965_send_cmd_sync(priv, &cmd);
884}
885
886int iwl4965_send_statistics_request(struct iwl4965_priv *priv)
887{
888 return iwl4965_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
889}
890
891/**
892 * iwl4965_rxon_add_station - add station into station table.
893 *
894 * there is only one AP station with id= IWL_AP_ID
895 * NOTE: mutex must be held before calling this function
896 */
897static int iwl4965_rxon_add_station(struct iwl4965_priv *priv,
898 const u8 *addr, int is_ap)
899{
900 u8 sta_id;
901
902 /* Add station to device's station table */
903 sta_id = iwl4965_add_station_flags(priv, addr, is_ap, 0);
904
905 /* Set up default rate scaling table in device's station table */
906 iwl4965_add_station(priv, addr, is_ap);
907
908 return sta_id;
909}
910
911/**
912 * iwl4965_set_rxon_channel - Set the phymode and channel values in staging RXON
913 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
914 * @channel: Any channel valid for the requested phymode
915
916 * In addition to setting the staging RXON, priv->phymode is also set.
917 *
918 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
919 * in the staging RXON flag structure based on the phymode
920 */
921static int iwl4965_set_rxon_channel(struct iwl4965_priv *priv, u8 phymode,
922 u16 channel)
923{
924 if (!iwl4965_get_channel_info(priv, phymode, channel)) {
925 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
926 channel, phymode);
927 return -EINVAL;
928 }
929
930 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
931 (priv->phymode == phymode))
932 return 0;
933
934 priv->staging_rxon.channel = cpu_to_le16(channel);
935 if (phymode == MODE_IEEE80211A)
936 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
937 else
938 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
939
940 priv->phymode = phymode;
941
942 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
943
944 return 0;
945}
946
947/**
948 * iwl4965_check_rxon_cmd - validate RXON structure is valid
949 *
950 * NOTE: This is really only useful during development and can eventually
951 * be #ifdef'd out once the driver is stable and folks aren't actively
952 * making changes
953 */
954static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon)
955{
956 int error = 0;
957 int counter = 1;
958
959 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
960 error |= le32_to_cpu(rxon->flags &
961 (RXON_FLG_TGJ_NARROW_BAND_MSK |
962 RXON_FLG_RADAR_DETECT_MSK));
963 if (error)
964 IWL_WARNING("check 24G fields %d | %d\n",
965 counter++, error);
966 } else {
967 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
968 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
969 if (error)
970 IWL_WARNING("check 52 fields %d | %d\n",
971 counter++, error);
972 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
973 if (error)
974 IWL_WARNING("check 52 CCK %d | %d\n",
975 counter++, error);
976 }
977 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
978 if (error)
979 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
980
981 /* make sure basic rates 6Mbps and 1Mbps are supported */
982 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
983 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
984 if (error)
985 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
986
987 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
988 if (error)
989 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
990
991 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
992 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
993 if (error)
994 IWL_WARNING("check CCK and short slot %d | %d\n",
995 counter++, error);
996
997 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
998 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
999 if (error)
1000 IWL_WARNING("check CCK & auto detect %d | %d\n",
1001 counter++, error);
1002
1003 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
1004 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
1005 if (error)
1006 IWL_WARNING("check TGG and auto detect %d | %d\n",
1007 counter++, error);
1008
1009 if (error)
1010 IWL_WARNING("Tuning to channel %d\n",
1011 le16_to_cpu(rxon->channel));
1012
1013 if (error) {
1014 IWL_ERROR("Not a valid iwl4965_rxon_assoc_cmd field values\n");
1015 return -1;
1016 }
1017 return 0;
1018}
1019
1020/**
1021 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
1022 * @priv: staging_rxon is compared to active_rxon
1023 *
1024 * If the RXON structure is changing enough to require a new tune,
1025 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
1026 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
1027 */
1028static int iwl4965_full_rxon_required(struct iwl4965_priv *priv)
1029{
1030
1031 /* These items are only settable from the full RXON command */
1032 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
1033 compare_ether_addr(priv->staging_rxon.bssid_addr,
1034 priv->active_rxon.bssid_addr) ||
1035 compare_ether_addr(priv->staging_rxon.node_addr,
1036 priv->active_rxon.node_addr) ||
1037 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
1038 priv->active_rxon.wlap_bssid_addr) ||
1039 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
1040 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
1041 (priv->staging_rxon.air_propagation !=
1042 priv->active_rxon.air_propagation) ||
1043 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
1044 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
1045 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
1046 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
1047 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
1048 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
1049 return 1;
1050
1051 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
1052 * be updated with the RXON_ASSOC command -- however only some
1053 * flag transitions are allowed using RXON_ASSOC */
1054
1055 /* Check if we are not switching bands */
1056 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1057 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1058 return 1;
1059
1060 /* Check if we are switching association toggle */
1061 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1062 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1063 return 1;
1064
1065 return 0;
1066}
1067
1068static int iwl4965_send_rxon_assoc(struct iwl4965_priv *priv)
1069{
1070 int rc = 0;
1071 struct iwl4965_rx_packet *res = NULL;
1072 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1073 struct iwl4965_host_cmd cmd = {
1074 .id = REPLY_RXON_ASSOC,
1075 .len = sizeof(rxon_assoc),
1076 .meta.flags = CMD_WANT_SKB,
1077 .data = &rxon_assoc,
1078 };
1079 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
1080 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
1081
1082 if ((rxon1->flags == rxon2->flags) &&
1083 (rxon1->filter_flags == rxon2->filter_flags) &&
1084 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1085 (rxon1->ofdm_ht_single_stream_basic_rates ==
1086 rxon2->ofdm_ht_single_stream_basic_rates) &&
1087 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1088 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1089 (rxon1->rx_chain == rxon2->rx_chain) &&
1090 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1091 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1092 return 0;
1093 }
1094
1095 rxon_assoc.flags = priv->staging_rxon.flags;
1096 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1097 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1098 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1099 rxon_assoc.reserved = 0;
1100 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1101 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1102 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1103 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1104 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1105
1106 rc = iwl4965_send_cmd_sync(priv, &cmd);
1107 if (rc)
1108 return rc;
1109
1110 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1111 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1112 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1113 rc = -EIO;
1114 }
1115
1116 priv->alloc_rxb_skb--;
1117 dev_kfree_skb_any(cmd.meta.u.skb);
1118
1119 return rc;
1120}
1121
1122/**
1123 * iwl4965_commit_rxon - commit staging_rxon to hardware
1124 *
1125 * The RXON command in staging_rxon is committed to the hardware and
1126 * the active_rxon structure is updated with the new data. This
1127 * function correctly transitions out of the RXON_ASSOC_MSK state if
1128 * a HW tune is required based on the RXON structure changes.
1129 */
1130static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1131{
1132 /* cast away the const for active_rxon in this function */
1133 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1134 DECLARE_MAC_BUF(mac);
1135 int rc = 0;
1136
1137 if (!iwl4965_is_alive(priv))
1138 return -1;
1139
1140 /* always get timestamp with Rx frame */
1141 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1142
1143 rc = iwl4965_check_rxon_cmd(&priv->staging_rxon);
1144 if (rc) {
1145 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
1146 return -EINVAL;
1147 }
1148
1149 /* If we don't need to send a full RXON, we can use
1150 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
1151 * and other flags for the current radio configuration. */
1152 if (!iwl4965_full_rxon_required(priv)) {
1153 rc = iwl4965_send_rxon_assoc(priv);
1154 if (rc) {
1155 IWL_ERROR("Error setting RXON_ASSOC "
1156 "configuration (%d).\n", rc);
1157 return rc;
1158 }
1159
1160 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1161
1162 return 0;
1163 }
1164
1165 /* station table will be cleared */
1166 priv->assoc_station_added = 0;
1167
1168#ifdef CONFIG_IWL4965_SENSITIVITY
1169 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1170 if (!priv->error_recovering)
1171 priv->start_calib = 0;
1172
1173 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1174#endif /* CONFIG_IWL4965_SENSITIVITY */
1175
1176 /* If we are currently associated and the new config requires
1177 * an RXON_ASSOC and the new config wants the associated mask enabled,
1178 * we must clear the associated from the active configuration
1179 * before we apply the new config */
1180 if (iwl4965_is_associated(priv) &&
1181 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1182 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1183 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1184
1185 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1186 sizeof(struct iwl4965_rxon_cmd),
1187 &priv->active_rxon);
1188
1189 /* If the mask clearing failed then we set
1190 * active_rxon back to what it was previously */
1191 if (rc) {
1192 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1193 IWL_ERROR("Error clearing ASSOC_MSK on current "
1194 "configuration (%d).\n", rc);
1195 return rc;
1196 }
1197 }
1198
1199 IWL_DEBUG_INFO("Sending RXON\n"
1200 "* with%s RXON_FILTER_ASSOC_MSK\n"
1201 "* channel = %d\n"
1202 "* bssid = %s\n",
1203 ((priv->staging_rxon.filter_flags &
1204 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1205 le16_to_cpu(priv->staging_rxon.channel),
1206 print_mac(mac, priv->staging_rxon.bssid_addr));
1207
1208 /* Apply the new configuration */
1209 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1210 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon);
1211 if (rc) {
1212 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1213 return rc;
1214 }
1215
1216 iwl4965_clear_stations_table(priv);
1217
1218#ifdef CONFIG_IWL4965_SENSITIVITY
1219 if (!priv->error_recovering)
1220 priv->start_calib = 0;
1221
1222 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1223 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1224#endif /* CONFIG_IWL4965_SENSITIVITY */
1225
1226 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1227
1228 /* If we issue a new RXON command which required a tune then we must
1229 * send a new TXPOWER command or we won't be able to Tx any frames */
1230 rc = iwl4965_hw_reg_send_txpower(priv);
1231 if (rc) {
1232 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1233 return rc;
1234 }
1235
1236 /* Add the broadcast address so we can send broadcast frames */
1237 if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) ==
1238 IWL_INVALID_STATION) {
1239 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1240 return -EIO;
1241 }
1242
1243 /* If we have set the ASSOC_MSK and we are in BSS mode then
1244 * add the IWL_AP_ID to the station rate table */
1245 if (iwl4965_is_associated(priv) &&
1246 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
1247 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1248 == IWL_INVALID_STATION) {
1249 IWL_ERROR("Error adding AP address for transmit.\n");
1250 return -EIO;
1251 }
1252 priv->assoc_station_added = 1;
1253 }
1254
1255 return 0;
1256}
1257
1258static int iwl4965_send_bt_config(struct iwl4965_priv *priv)
1259{
1260 struct iwl4965_bt_cmd bt_cmd = {
1261 .flags = 3,
1262 .lead_time = 0xAA,
1263 .max_kill = 1,
1264 .kill_ack_mask = 0,
1265 .kill_cts_mask = 0,
1266 };
1267
1268 return iwl4965_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1269 sizeof(struct iwl4965_bt_cmd), &bt_cmd);
1270}
1271
1272static int iwl4965_send_scan_abort(struct iwl4965_priv *priv)
1273{
1274 int rc = 0;
1275 struct iwl4965_rx_packet *res;
1276 struct iwl4965_host_cmd cmd = {
1277 .id = REPLY_SCAN_ABORT_CMD,
1278 .meta.flags = CMD_WANT_SKB,
1279 };
1280
1281 /* If there isn't a scan actively going on in the hardware
1282 * then we are in between scan bands and not actually
1283 * actively scanning, so don't send the abort command */
1284 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1285 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1286 return 0;
1287 }
1288
1289 rc = iwl4965_send_cmd_sync(priv, &cmd);
1290 if (rc) {
1291 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1292 return rc;
1293 }
1294
1295 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1296 if (res->u.status != CAN_ABORT_STATUS) {
1297 /* The scan abort will return 1 for success or
1298 * 2 for "failure". A failure condition can be
1299 * due to simply not being in an active scan which
1300 * can occur if we send the scan abort before we
1301 * the microcode has notified us that a scan is
1302 * completed. */
1303 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1304 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1305 clear_bit(STATUS_SCAN_HW, &priv->status);
1306 }
1307
1308 dev_kfree_skb_any(cmd.meta.u.skb);
1309
1310 return rc;
1311}
1312
1313static int iwl4965_card_state_sync_callback(struct iwl4965_priv *priv,
1314 struct iwl4965_cmd *cmd,
1315 struct sk_buff *skb)
1316{
1317 return 1;
1318}
1319
1320/*
1321 * CARD_STATE_CMD
1322 *
1323 * Use: Sets the device's internal card state to enable, disable, or halt
1324 *
1325 * When in the 'enable' state the card operates as normal.
1326 * When in the 'disable' state, the card enters into a low power mode.
1327 * When in the 'halt' state, the card is shut down and must be fully
1328 * restarted to come back on.
1329 */
1330static int iwl4965_send_card_state(struct iwl4965_priv *priv, u32 flags, u8 meta_flag)
1331{
1332 struct iwl4965_host_cmd cmd = {
1333 .id = REPLY_CARD_STATE_CMD,
1334 .len = sizeof(u32),
1335 .data = &flags,
1336 .meta.flags = meta_flag,
1337 };
1338
1339 if (meta_flag & CMD_ASYNC)
1340 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
1341
1342 return iwl4965_send_cmd(priv, &cmd);
1343}
1344
1345static int iwl4965_add_sta_sync_callback(struct iwl4965_priv *priv,
1346 struct iwl4965_cmd *cmd, struct sk_buff *skb)
1347{
1348 struct iwl4965_rx_packet *res = NULL;
1349
1350 if (!skb) {
1351 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1352 return 1;
1353 }
1354
1355 res = (struct iwl4965_rx_packet *)skb->data;
1356 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1357 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1358 res->hdr.flags);
1359 return 1;
1360 }
1361
1362 switch (res->u.add_sta.status) {
1363 case ADD_STA_SUCCESS_MSK:
1364 break;
1365 default:
1366 break;
1367 }
1368
1369 /* We didn't cache the SKB; let the caller free it */
1370 return 1;
1371}
1372
1373int iwl4965_send_add_station(struct iwl4965_priv *priv,
1374 struct iwl4965_addsta_cmd *sta, u8 flags)
1375{
1376 struct iwl4965_rx_packet *res = NULL;
1377 int rc = 0;
1378 struct iwl4965_host_cmd cmd = {
1379 .id = REPLY_ADD_STA,
1380 .len = sizeof(struct iwl4965_addsta_cmd),
1381 .meta.flags = flags,
1382 .data = sta,
1383 };
1384
1385 if (flags & CMD_ASYNC)
1386 cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
1387 else
1388 cmd.meta.flags |= CMD_WANT_SKB;
1389
1390 rc = iwl4965_send_cmd(priv, &cmd);
1391
1392 if (rc || (flags & CMD_ASYNC))
1393 return rc;
1394
1395 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1396 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1397 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1398 res->hdr.flags);
1399 rc = -EIO;
1400 }
1401
1402 if (rc == 0) {
1403 switch (res->u.add_sta.status) {
1404 case ADD_STA_SUCCESS_MSK:
1405 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1406 break;
1407 default:
1408 rc = -EIO;
1409 IWL_WARNING("REPLY_ADD_STA failed\n");
1410 break;
1411 }
1412 }
1413
1414 priv->alloc_rxb_skb--;
1415 dev_kfree_skb_any(cmd.meta.u.skb);
1416
1417 return rc;
1418}
1419
1420static int iwl4965_update_sta_key_info(struct iwl4965_priv *priv,
1421 struct ieee80211_key_conf *keyconf,
1422 u8 sta_id)
1423{
1424 unsigned long flags;
1425 __le16 key_flags = 0;
1426
1427 switch (keyconf->alg) {
1428 case ALG_CCMP:
1429 key_flags |= STA_KEY_FLG_CCMP;
1430 key_flags |= cpu_to_le16(
1431 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1432 key_flags &= ~STA_KEY_FLG_INVALID;
1433 break;
1434 case ALG_TKIP:
1435 case ALG_WEP:
1436 default:
1437 return -EINVAL;
1438 }
1439 spin_lock_irqsave(&priv->sta_lock, flags);
1440 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1441 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1442 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1443 keyconf->keylen);
1444
1445 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1446 keyconf->keylen);
1447 priv->stations[sta_id].sta.key.key_flags = key_flags;
1448 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1449 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1450
1451 spin_unlock_irqrestore(&priv->sta_lock, flags);
1452
1453 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1454 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1455 return 0;
1456}
1457
1458static int iwl4965_clear_sta_key_info(struct iwl4965_priv *priv, u8 sta_id)
1459{
1460 unsigned long flags;
1461
1462 spin_lock_irqsave(&priv->sta_lock, flags);
1463 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl4965_hw_key));
1464 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo));
1465 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1466 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1467 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1468 spin_unlock_irqrestore(&priv->sta_lock, flags);
1469
1470 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1471 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1472 return 0;
1473}
1474
1475static void iwl4965_clear_free_frames(struct iwl4965_priv *priv)
1476{
1477 struct list_head *element;
1478
1479 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1480 priv->frames_count);
1481
1482 while (!list_empty(&priv->free_frames)) {
1483 element = priv->free_frames.next;
1484 list_del(element);
1485 kfree(list_entry(element, struct iwl4965_frame, list));
1486 priv->frames_count--;
1487 }
1488
1489 if (priv->frames_count) {
1490 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1491 priv->frames_count);
1492 priv->frames_count = 0;
1493 }
1494}
1495
1496static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl4965_priv *priv)
1497{
1498 struct iwl4965_frame *frame;
1499 struct list_head *element;
1500 if (list_empty(&priv->free_frames)) {
1501 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1502 if (!frame) {
1503 IWL_ERROR("Could not allocate frame!\n");
1504 return NULL;
1505 }
1506
1507 priv->frames_count++;
1508 return frame;
1509 }
1510
1511 element = priv->free_frames.next;
1512 list_del(element);
1513 return list_entry(element, struct iwl4965_frame, list);
1514}
1515
1516static void iwl4965_free_frame(struct iwl4965_priv *priv, struct iwl4965_frame *frame)
1517{
1518 memset(frame, 0, sizeof(*frame));
1519 list_add(&frame->list, &priv->free_frames);
1520}
1521
1522unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv,
1523 struct ieee80211_hdr *hdr,
1524 const u8 *dest, int left)
1525{
1526
1527 if (!iwl4965_is_associated(priv) || !priv->ibss_beacon ||
1528 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1529 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1530 return 0;
1531
1532 if (priv->ibss_beacon->len > left)
1533 return 0;
1534
1535 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1536
1537 return priv->ibss_beacon->len;
1538}
1539
1540int iwl4965_rate_index_from_plcp(int plcp)
1541{
1542 int i = 0;
1543
1544 /* 4965 HT rate format */
1545 if (plcp & RATE_MCS_HT_MSK) {
1546 i = (plcp & 0xff);
1547
1548 if (i >= IWL_RATE_MIMO_6M_PLCP)
1549 i = i - IWL_RATE_MIMO_6M_PLCP;
1550
1551 i += IWL_FIRST_OFDM_RATE;
1552 /* skip 9M not supported in ht*/
1553 if (i >= IWL_RATE_9M_INDEX)
1554 i += 1;
1555 if ((i >= IWL_FIRST_OFDM_RATE) &&
1556 (i <= IWL_LAST_OFDM_RATE))
1557 return i;
1558
1559 /* 4965 legacy rate format, search for match in table */
1560 } else {
1561 for (i = 0; i < ARRAY_SIZE(iwl4965_rates); i++)
1562 if (iwl4965_rates[i].plcp == (plcp &0xFF))
1563 return i;
1564 }
1565 return -1;
1566}
1567
1568static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
1569{
1570 u8 i;
1571
1572 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1573 i = iwl4965_rates[i].next_ieee) {
1574 if (rate_mask & (1 << i))
1575 return iwl4965_rates[i].plcp;
1576 }
1577
1578 return IWL_RATE_INVALID;
1579}
1580
1581static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv)
1582{
1583 struct iwl4965_frame *frame;
1584 unsigned int frame_size;
1585 int rc;
1586 u8 rate;
1587
1588 frame = iwl4965_get_free_frame(priv);
1589
1590 if (!frame) {
1591 IWL_ERROR("Could not obtain free frame buffer for beacon "
1592 "command.\n");
1593 return -ENOMEM;
1594 }
1595
1596 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1597 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
1598 0xFF0);
1599 if (rate == IWL_INVALID_RATE)
1600 rate = IWL_RATE_6M_PLCP;
1601 } else {
1602 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1603 if (rate == IWL_INVALID_RATE)
1604 rate = IWL_RATE_1M_PLCP;
1605 }
1606
1607 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
1608
1609 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1610 &frame->u.cmd[0]);
1611
1612 iwl4965_free_frame(priv, frame);
1613
1614 return rc;
1615}
1616
1617/******************************************************************************
1618 *
1619 * EEPROM related functions
1620 *
1621 ******************************************************************************/
1622
1623static void get_eeprom_mac(struct iwl4965_priv *priv, u8 *mac)
1624{
1625 memcpy(mac, priv->eeprom.mac_address, 6);
1626}
1627
1628/**
1629 * iwl4965_eeprom_init - read EEPROM contents
1630 *
1631 * Load the EEPROM contents from adapter into priv->eeprom
1632 *
1633 * NOTE: This routine uses the non-debug IO access functions.
1634 */
1635int iwl4965_eeprom_init(struct iwl4965_priv *priv)
1636{
1637 u16 *e = (u16 *)&priv->eeprom;
1638 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
1639 u32 r;
1640 int sz = sizeof(priv->eeprom);
1641 int rc;
1642 int i;
1643 u16 addr;
1644
1645 /* The EEPROM structure has several padding buffers within it
1646 * and when adding new EEPROM maps is subject to programmer errors
1647 * which may be very difficult to identify without explicitly
1648 * checking the resulting size of the eeprom map. */
1649 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1650
1651 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1652 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1653 return -ENOENT;
1654 }
1655
1656 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1657 rc = iwl4965_eeprom_acquire_semaphore(priv);
1658 if (rc < 0) {
1659 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1660 return -ENOENT;
1661 }
1662
1663 /* eeprom is an array of 16bit values */
1664 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1665 _iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
1666 _iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1667
1668 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1669 i += IWL_EEPROM_ACCESS_DELAY) {
1670 r = _iwl4965_read_direct32(priv, CSR_EEPROM_REG);
1671 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1672 break;
1673 udelay(IWL_EEPROM_ACCESS_DELAY);
1674 }
1675
1676 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1677 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1678 rc = -ETIMEDOUT;
1679 goto done;
1680 }
1681 e[addr / 2] = le16_to_cpu(r >> 16);
1682 }
1683 rc = 0;
1684
1685done:
bb8c093b 1686 iwl4965_eeprom_release_semaphore(priv);
b481de9c
ZY
1687 return rc;
1688}
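/*
 * EEPROM access pattern used above, in outline: for each 16-bit word the
 * driver writes (addr << 1) to CSR_EEPROM_REG, clears the command bit,
 * then polls CSR_EEPROM_REG (up to IWL_EEPROM_ACCESS_TIMEOUT, stepping by
 * IWL_EEPROM_ACCESS_DELAY) until READ_VALID is set; the word is returned
 * in the upper 16 bits of the register and stored into priv->eeprom.
 */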
1689
1690/******************************************************************************
1691 *
1692 * Misc. internal state and helper functions
1693 *
1694 ******************************************************************************/
c8b0e6e1 1695#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
1696
1697/**
bb8c093b 1698 * iwl4965_report_frame - dump frame to syslog during debug sessions
b481de9c 1699 *
9fbab516 1700 * You may hack this function to show different aspects of received frames,
b481de9c
ZY
1701 * including selective frame dumps.
1702 * group100 parameter selects whether to show 1 out of 100 good frames.
1703 *
9fbab516
BC
1704 * TODO: This was originally written for 3945, need to audit for
1705 * proper operation with 4965.
b481de9c 1706 */
bb8c093b
CH
1707void iwl4965_report_frame(struct iwl4965_priv *priv,
1708 struct iwl4965_rx_packet *pkt,
b481de9c
ZY
1709 struct ieee80211_hdr *header, int group100)
1710{
1711 u32 to_us;
1712 u32 print_summary = 0;
1713 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1714 u32 hundred = 0;
1715 u32 dataframe = 0;
1716 u16 fc;
1717 u16 seq_ctl;
1718 u16 channel;
1719 u16 phy_flags;
1720 int rate_sym;
1721 u16 length;
1722 u16 status;
1723 u16 bcn_tmr;
1724 u32 tsf_low;
1725 u64 tsf;
1726 u8 rssi;
1727 u8 agc;
1728 u16 sig_avg;
1729 u16 noise_diff;
bb8c093b
CH
1730 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1731 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1732 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
b481de9c
ZY
1733 u8 *data = IWL_RX_DATA(pkt);
1734
1735 /* MAC header */
1736 fc = le16_to_cpu(header->frame_control);
1737 seq_ctl = le16_to_cpu(header->seq_ctrl);
1738
1739 /* metadata */
1740 channel = le16_to_cpu(rx_hdr->channel);
1741 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1742 rate_sym = rx_hdr->rate;
1743 length = le16_to_cpu(rx_hdr->len);
1744
1745 /* end-of-frame status and timestamp */
1746 status = le32_to_cpu(rx_end->status);
1747 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1748 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1749 tsf = le64_to_cpu(rx_end->timestamp);
1750
1751 /* signal statistics */
1752 rssi = rx_stats->rssi;
1753 agc = rx_stats->agc;
1754 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1755 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1756
1757 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1758
1759 /* if data frame is to us and all is good,
1760 * (optionally) print summary for only 1 out of every 100 */
1761 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1762 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1763 dataframe = 1;
1764 if (!group100)
1765 print_summary = 1; /* print each frame */
1766 else if (priv->framecnt_to_us < 100) {
1767 priv->framecnt_to_us++;
1768 print_summary = 0;
1769 } else {
1770 priv->framecnt_to_us = 0;
1771 print_summary = 1;
1772 hundred = 1;
1773 }
1774 } else {
1775 /* print summary for all other frames */
1776 print_summary = 1;
1777 }
1778
1779 if (print_summary) {
1780 char *title;
1781 u32 rate;
1782
1783 if (hundred)
1784 title = "100Frames";
1785 else if (fc & IEEE80211_FCTL_RETRY)
1786 title = "Retry";
1787 else if (ieee80211_is_assoc_response(fc))
1788 title = "AscRsp";
1789 else if (ieee80211_is_reassoc_response(fc))
1790 title = "RasRsp";
1791 else if (ieee80211_is_probe_response(fc)) {
1792 title = "PrbRsp";
1793 print_dump = 1; /* dump frame contents */
1794 } else if (ieee80211_is_beacon(fc)) {
1795 title = "Beacon";
1796 print_dump = 1; /* dump frame contents */
1797 } else if (ieee80211_is_atim(fc))
1798 title = "ATIM";
1799 else if (ieee80211_is_auth(fc))
1800 title = "Auth";
1801 else if (ieee80211_is_deauth(fc))
1802 title = "DeAuth";
1803 else if (ieee80211_is_disassoc(fc))
1804 title = "DisAssoc";
1805 else
1806 title = "Frame";
1807
bb8c093b 1808 rate = iwl4965_rate_index_from_plcp(rate_sym);
b481de9c
ZY
1809 if (rate == -1)
1810 rate = 0;
1811 else
bb8c093b 1812 rate = iwl4965_rates[rate].ieee / 2;
b481de9c
ZY
1813
1814 /* print frame summary.
1815 * MAC addresses show just the last byte (for brevity),
1816 * but you can hack it to show more, if you'd like to. */
1817 if (dataframe)
1818 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1819 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1820 title, fc, header->addr1[5],
1821 length, rssi, channel, rate);
1822 else {
1823 /* src/dst addresses assume managed mode */
1824 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1825 "src=0x%02x, rssi=%u, tim=%lu usec, "
1826 "phy=0x%02x, chnl=%d\n",
1827 title, fc, header->addr1[5],
1828 header->addr3[5], rssi,
1829 tsf_low - priv->scan_start_tsf,
1830 phy_flags, channel);
1831 }
1832 }
1833 if (print_dump)
bb8c093b 1834 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
b481de9c
ZY
1835}
1836#endif
1837
bb8c093b 1838static void iwl4965_unset_hw_setting(struct iwl4965_priv *priv)
b481de9c
ZY
1839{
1840 if (priv->hw_setting.shared_virt)
1841 pci_free_consistent(priv->pci_dev,
bb8c093b 1842 sizeof(struct iwl4965_shared),
b481de9c
ZY
1843 priv->hw_setting.shared_virt,
1844 priv->hw_setting.shared_phys);
1845}
1846
1847/**
bb8c093b 1848 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
b481de9c
ZY
1849 *
1850 * return : bitmask with a bit set for each supported rate inserted into the IE
1851 */
bb8c093b 1852static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
c7c46676 1853 u16 basic_rate, int *left)
b481de9c
ZY
1854{
1855 u16 ret_rates = 0, bit;
1856 int i;
c7c46676
TW
1857 u8 *cnt = ie;
1858 u8 *rates = ie + 1;
b481de9c
ZY
1859
1860 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1861 if (bit & supported_rate) {
1862 ret_rates |= bit;
bb8c093b 1863 rates[*cnt] = iwl4965_rates[i].ieee |
c7c46676
TW
1864 ((bit & basic_rate) ? 0x80 : 0x00);
1865 (*cnt)++;
1866 (*left)--;
1867 if ((*left <= 0) ||
1868 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
b481de9c
ZY
1869 break;
1870 }
1871 }
1872
1873 return ret_rates;
1874}
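/*
 * Example of the IE encoding above, assuming the usual rate-table values
 * (the .ieee field is the rate in 500 kbps units): with 1 Mbps and 6 Mbps
 * supported and only 1 Mbps basic, the IE body becomes a count of 2
 * followed by 0x82 (1 Mbps with the 0x80 basic-rate bit) and 0x0C
 * (6 Mbps); the returned bitmask has the bits of both inserted rates set.
 */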
1875
c8b0e6e1 1876#ifdef CONFIG_IWL4965_HT
bb8c093b 1877static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
b481de9c
ZY
1878 struct ieee80211_ht_capability *ht_cap,
1879 u8 use_wide_chan);
1880#endif
1881
1882/**
bb8c093b 1883 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
b481de9c 1884 */
bb8c093b 1885static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv,
b481de9c
ZY
1886 struct ieee80211_mgmt *frame,
1887 int left, int is_direct)
1888{
1889 int len = 0;
1890 u8 *pos = NULL;
bee488db 1891 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
b481de9c
ZY
1892
1893 /* Make sure there is enough space for the probe request,
1894 * two mandatory IEs and the data */
1895 left -= 24;
1896 if (left < 0)
1897 return 0;
1898 len += 24;
1899
1900 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
bb8c093b 1901 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c 1902 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
bb8c093b 1903 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c
ZY
1904 frame->seq_ctrl = 0;
1905
1906 /* fill in our indirect SSID IE */
1907 /* ...next IE... */
1908
1909 left -= 2;
1910 if (left < 0)
1911 return 0;
1912 len += 2;
1913 pos = &(frame->u.probe_req.variable[0]);
1914 *pos++ = WLAN_EID_SSID;
1915 *pos++ = 0;
1916
1917 /* fill in our direct SSID IE... */
1918 if (is_direct) {
1919 /* ...next IE... */
1920 left -= 2 + priv->essid_len;
1921 if (left < 0)
1922 return 0;
1923 /* ... fill it in... */
1924 *pos++ = WLAN_EID_SSID;
1925 *pos++ = priv->essid_len;
1926 memcpy(pos, priv->essid, priv->essid_len);
1927 pos += priv->essid_len;
1928 len += 2 + priv->essid_len;
1929 }
1930
1931 /* fill in supported rate */
1932 /* ...next IE... */
1933 left -= 2;
1934 if (left < 0)
1935 return 0;
c7c46676 1936
b481de9c
ZY
1937 /* ... fill it in... */
1938 *pos++ = WLAN_EID_SUPP_RATES;
1939 *pos = 0;
c7c46676 1940
bee488db 1941 /* exclude 60M rate */
1942 active_rates = priv->rates_mask;
1943 active_rates &= ~IWL_RATE_60M_MASK;
1944
1945 active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;
b481de9c 1946
c7c46676 1947 cck_rates = IWL_CCK_RATES_MASK & active_rates;
bb8c093b 1948 ret_rates = iwl4965_supported_rate_to_ie(pos, cck_rates,
bee488db 1949 active_rate_basic, &left);
c7c46676
TW
1950 active_rates &= ~ret_rates;
1951
bb8c093b 1952 ret_rates = iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1953 active_rate_basic, &left);
c7c46676
TW
1954 active_rates &= ~ret_rates;
1955
b481de9c
ZY
1956 len += 2 + *pos;
1957 pos += (*pos) + 1;
c7c46676 1958 if (active_rates == 0)
b481de9c
ZY
1959 goto fill_end;
1960
1961 /* fill in supported extended rate */
1962 /* ...next IE... */
1963 left -= 2;
1964 if (left < 0)
1965 return 0;
1966 /* ... fill it in... */
1967 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1968 *pos = 0;
bb8c093b 1969 iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1970 active_rate_basic, &left);
b481de9c
ZY
1971 if (*pos > 0)
1972 len += 2 + *pos;
1973
c8b0e6e1 1974#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
1975 if (is_direct && priv->is_ht_enabled) {
1976 u8 use_wide_chan = 1;
1977
1978 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
1979 use_wide_chan = 0;
1980 pos += (*pos) + 1;
1981 *pos++ = WLAN_EID_HT_CAPABILITY;
1982 *pos++ = sizeof(struct ieee80211_ht_capability);
bb8c093b 1983 iwl4965_set_ht_capab(NULL, (struct ieee80211_ht_capability *)pos,
b481de9c
ZY
1984 use_wide_chan);
1985 len += 2 + sizeof(struct ieee80211_ht_capability);
1986 }
c8b0e6e1 1987#endif /*CONFIG_IWL4965_HT */
b481de9c
ZY
1988
1989 fill_end:
1990 return (u16)len;
1991}
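/*
 * Probe request layout produced above: 24-byte management header, a
 * zero-length (wildcard) SSID IE, optionally a directed SSID IE, the
 * Supported Rates IE (CCK rates first, then OFDM rates that fit), an
 * Extended Supported Rates IE for any remainder, and, for a directed
 * probe with HT enabled, an HT Capability IE.  The return value is the
 * number of bytes filled in, or 0 if 'left' was too small.
 */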
1992
1993/*
1994 * QoS support
1995*/
c8b0e6e1 1996#ifdef CONFIG_IWL4965_QOS
bb8c093b
CH
1997static int iwl4965_send_qos_params_command(struct iwl4965_priv *priv,
1998 struct iwl4965_qosparam_cmd *qos)
b481de9c
ZY
1999{
2000
bb8c093b
CH
2001 return iwl4965_send_cmd_pdu(priv, REPLY_QOS_PARAM,
2002 sizeof(struct iwl4965_qosparam_cmd), qos);
b481de9c
ZY
2003}
2004
bb8c093b 2005static void iwl4965_reset_qos(struct iwl4965_priv *priv)
b481de9c
ZY
2006{
2007 u16 cw_min = 15;
2008 u16 cw_max = 1023;
2009 u8 aifs = 2;
2010 u8 is_legacy = 0;
2011 unsigned long flags;
2012 int i;
2013
2014 spin_lock_irqsave(&priv->lock, flags);
2015 priv->qos_data.qos_active = 0;
2016
2017 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
2018 if (priv->qos_data.qos_enable)
2019 priv->qos_data.qos_active = 1;
2020 if (!(priv->active_rate & 0xfff0)) {
2021 cw_min = 31;
2022 is_legacy = 1;
2023 }
2024 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2025 if (priv->qos_data.qos_enable)
2026 priv->qos_data.qos_active = 1;
2027 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
2028 cw_min = 31;
2029 is_legacy = 1;
2030 }
2031
2032 if (priv->qos_data.qos_active)
2033 aifs = 3;
2034
2035 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
2036 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
2037 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
2038 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
2039 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
2040
2041 if (priv->qos_data.qos_active) {
2042 i = 1;
2043 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
2044 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
2045 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
2046 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2047 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2048
2049 i = 2;
2050 priv->qos_data.def_qos_parm.ac[i].cw_min =
2051 cpu_to_le16((cw_min + 1) / 2 - 1);
2052 priv->qos_data.def_qos_parm.ac[i].cw_max =
2053 cpu_to_le16(cw_max);
2054 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2055 if (is_legacy)
2056 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2057 cpu_to_le16(6016);
2058 else
2059 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2060 cpu_to_le16(3008);
2061 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2062
2063 i = 3;
2064 priv->qos_data.def_qos_parm.ac[i].cw_min =
2065 cpu_to_le16((cw_min + 1) / 4 - 1);
2066 priv->qos_data.def_qos_parm.ac[i].cw_max =
2067 cpu_to_le16((cw_max + 1) / 2 - 1);
2068 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2069 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2070 if (is_legacy)
2071 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2072 cpu_to_le16(3264);
2073 else
2074 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2075 cpu_to_le16(1504);
2076 } else {
2077 for (i = 1; i < 4; i++) {
2078 priv->qos_data.def_qos_parm.ac[i].cw_min =
2079 cpu_to_le16(cw_min);
2080 priv->qos_data.def_qos_parm.ac[i].cw_max =
2081 cpu_to_le16(cw_max);
2082 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2083 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2084 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2085 }
2086 }
2087 IWL_DEBUG_QOS("set QoS to default \n");
2088
2089 spin_unlock_irqrestore(&priv->lock, flags);
2090}
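/*
 * Worked example for the QoS-active branch above with cw_min = 15 and
 * cw_max = 1023: ac[2] gets cw_min = (15 + 1) / 2 - 1 = 7, while ac[3]
 * gets cw_min = (15 + 1) / 4 - 1 = 3 and cw_max = (1023 + 1) / 2 - 1 = 511,
 * i.e. progressively smaller contention windows plus a non-zero TXOP, the
 * usual EDCA pattern for the higher-priority access categories.
 */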
2091
bb8c093b 2092static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
b481de9c
ZY
2093{
2094 unsigned long flags;
2095
b481de9c
ZY
2096 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2097 return;
2098
2099 if (!priv->qos_data.qos_enable)
2100 return;
2101
2102 spin_lock_irqsave(&priv->lock, flags);
2103 priv->qos_data.def_qos_parm.qos_flags = 0;
2104
2105 if (priv->qos_data.qos_cap.q_AP.queue_request &&
2106 !priv->qos_data.qos_cap.q_AP.txop_request)
2107 priv->qos_data.def_qos_parm.qos_flags |=
2108 QOS_PARAM_FLG_TXOP_TYPE_MSK;
b481de9c
ZY
2109 if (priv->qos_data.qos_active)
2110 priv->qos_data.def_qos_parm.qos_flags |=
2111 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2112
c8b0e6e1 2113#ifdef CONFIG_IWL4965_HT
f1f1f5c7
TW
2114 if (priv->is_ht_enabled && priv->current_assoc_ht.is_ht)
2115 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
c8b0e6e1 2116#endif /* CONFIG_IWL4965_HT */
f1f1f5c7 2117
b481de9c
ZY
2118 spin_unlock_irqrestore(&priv->lock, flags);
2119
bb8c093b 2120 if (force || iwl4965_is_associated(priv)) {
f1f1f5c7
TW
2121 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2122 priv->qos_data.qos_active,
2123 priv->qos_data.def_qos_parm.qos_flags);
b481de9c 2124
bb8c093b 2125 iwl4965_send_qos_params_command(priv,
b481de9c
ZY
2126 &(priv->qos_data.def_qos_parm));
2127 }
2128}
2129
c8b0e6e1 2130#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
2131/*
2132 * Power management (not Tx power!) functions
2133 */
2134#define MSEC_TO_USEC 1024
2135
2136#define NOSLP __constant_cpu_to_le16(0), 0, 0
2137#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2138#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2139#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2140 __constant_cpu_to_le32(X1), \
2141 __constant_cpu_to_le32(X2), \
2142 __constant_cpu_to_le32(X3), \
2143 __constant_cpu_to_le32(X4)}
2144
2145
2146/* default power management (not Tx power) table values */
2147/* for tim 0-10 */
bb8c093b 2148static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
b481de9c
ZY
2149 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2150 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2151 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2152 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2153 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2154 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2155};
2156
2157/* for tim > 10 */
bb8c093b 2158static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
b481de9c
ZY
2159 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2160 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2161 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2162 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2163 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2164 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2165 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2166 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2167 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2168 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2169};
2170
bb8c093b 2171int iwl4965_power_init_handle(struct iwl4965_priv *priv)
b481de9c
ZY
2172{
2173 int rc = 0, i;
bb8c093b
CH
2174 struct iwl4965_power_mgr *pow_data;
2175 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
b481de9c
ZY
2176 u16 pci_pm;
2177
2178 IWL_DEBUG_POWER("Initialize power \n");
2179
2180 pow_data = &(priv->power_data);
2181
2182 memset(pow_data, 0, sizeof(*pow_data));
2183
2184 pow_data->active_index = IWL_POWER_RANGE_0;
2185 pow_data->dtim_val = 0xffff;
2186
2187 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2188 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2189
2190 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2191 if (rc != 0)
2192 return 0;
2193 else {
bb8c093b 2194 struct iwl4965_powertable_cmd *cmd;
b481de9c
ZY
2195
2196 IWL_DEBUG_POWER("adjust power command flags\n");
2197
2198 for (i = 0; i < IWL_POWER_AC; i++) {
2199 cmd = &pow_data->pwr_range_0[i].cmd;
2200
2201 if (pci_pm & 0x1)
2202 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2203 else
2204 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2205 }
2206 }
2207 return rc;
2208}
2209
bb8c093b
CH
2210static int iwl4965_update_power_cmd(struct iwl4965_priv *priv,
2211 struct iwl4965_powertable_cmd *cmd, u32 mode)
b481de9c
ZY
2212{
2213 int rc = 0, i;
2214 u8 skip;
2215 u32 max_sleep = 0;
bb8c093b 2216 struct iwl4965_power_vec_entry *range;
b481de9c 2217 u8 period = 0;
bb8c093b 2218 struct iwl4965_power_mgr *pow_data;
b481de9c
ZY
2219
2220 if (mode > IWL_POWER_INDEX_5) {
2221 IWL_DEBUG_POWER("Error invalid power mode \n");
2222 return -1;
2223 }
2224 pow_data = &(priv->power_data);
2225
2226 if (pow_data->active_index == IWL_POWER_RANGE_0)
2227 range = &pow_data->pwr_range_0[0];
2228 else
2229 range = &pow_data->pwr_range_1[1];
2230
bb8c093b 2231 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
b481de9c
ZY
2232
2233#ifdef IWL_MAC80211_DISABLE
2234 if (priv->assoc_network != NULL) {
2235 unsigned long flags;
2236
2237 period = priv->assoc_network->tim.tim_period;
2238 }
2239#endif /*IWL_MAC80211_DISABLE */
2240 skip = range[mode].no_dtim;
2241
2242 if (period == 0) {
2243 period = 1;
2244 skip = 0;
2245 }
2246
2247 if (skip == 0) {
2248 max_sleep = period;
2249 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2250 } else {
2251 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2252 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2253 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2254 }
2255
2256 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2257 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2258 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2259 }
2260
2261 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2262 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2263 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2264 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2265 le32_to_cpu(cmd->sleep_interval[0]),
2266 le32_to_cpu(cmd->sleep_interval[1]),
2267 le32_to_cpu(cmd->sleep_interval[2]),
2268 le32_to_cpu(cmd->sleep_interval[3]),
2269 le32_to_cpu(cmd->sleep_interval[4]));
2270
2271 return rc;
2272}
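/*
 * Sleep-interval clamping above, by example: with a DTIM period of 2 and a
 * last table entry of 7, DTIM skipping is allowed and
 * max_sleep = (7 / 2) * 2 = 6, so every entry of the sleep vector is
 * capped to a whole multiple of the DTIM period; when skipping is not
 * allowed, max_sleep is simply one period.
 */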
2273
bb8c093b 2274static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode)
b481de9c 2275{
9a62f73b 2276 u32 uninitialized_var(final_mode);
b481de9c 2277 int rc;
bb8c093b 2278 struct iwl4965_powertable_cmd cmd;
b481de9c
ZY
2279
2280 /* If on battery, set to 3,
01ebd063 2281 * if plugged into AC power, set to CAM ("continuously aware mode"),
b481de9c
ZY
2282 * else user level */
2283 switch (mode) {
2284 case IWL_POWER_BATTERY:
2285 final_mode = IWL_POWER_INDEX_3;
2286 break;
2287 case IWL_POWER_AC:
2288 final_mode = IWL_POWER_MODE_CAM;
2289 break;
2290 default:
2291 final_mode = mode;
2292 break;
2293 }
2294
2295 cmd.keep_alive_beacons = 0;
2296
bb8c093b 2297 iwl4965_update_power_cmd(priv, &cmd, final_mode);
b481de9c 2298
bb8c093b 2299 rc = iwl4965_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
b481de9c
ZY
2300
2301 if (final_mode == IWL_POWER_MODE_CAM)
2302 clear_bit(STATUS_POWER_PMI, &priv->status);
2303 else
2304 set_bit(STATUS_POWER_PMI, &priv->status);
2305
2306 return rc;
2307}
2308
bb8c093b 2309int iwl4965_is_network_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
2310{
2311 /* Filter incoming packets to determine if they are targeted toward
2312 * this network, discarding packets coming from ourselves */
2313 switch (priv->iw_mode) {
2314 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2315 /* packets from our adapter are dropped (echo) */
2316 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2317 return 0;
2318 /* {broad,multi}cast packets to our IBSS go through */
2319 if (is_multicast_ether_addr(header->addr1))
2320 return !compare_ether_addr(header->addr3, priv->bssid);
2321 /* packets to our adapter go through */
2322 return !compare_ether_addr(header->addr1, priv->mac_addr);
2323 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2324 /* packets from our adapter are dropped (echo) */
2325 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2326 return 0;
2327 /* {broad,multi}cast packets to our BSS go through */
2328 if (is_multicast_ether_addr(header->addr1))
2329 return !compare_ether_addr(header->addr2, priv->bssid);
2330 /* packets to our adapter go through */
2331 return !compare_ether_addr(header->addr1, priv->mac_addr);
2332 }
2333
2334 return 1;
2335}
2336
2337#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2338
bb8c093b 2339static const char *iwl4965_get_tx_fail_reason(u32 status)
b481de9c
ZY
2340{
2341 switch (status & TX_STATUS_MSK) {
2342 case TX_STATUS_SUCCESS:
2343 return "SUCCESS";
2344 TX_STATUS_ENTRY(SHORT_LIMIT);
2345 TX_STATUS_ENTRY(LONG_LIMIT);
2346 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2347 TX_STATUS_ENTRY(MGMNT_ABORT);
2348 TX_STATUS_ENTRY(NEXT_FRAG);
2349 TX_STATUS_ENTRY(LIFE_EXPIRE);
2350 TX_STATUS_ENTRY(DEST_PS);
2351 TX_STATUS_ENTRY(ABORTED);
2352 TX_STATUS_ENTRY(BT_RETRY);
2353 TX_STATUS_ENTRY(STA_INVALID);
2354 TX_STATUS_ENTRY(FRAG_DROPPED);
2355 TX_STATUS_ENTRY(TID_DISABLE);
2356 TX_STATUS_ENTRY(FRAME_FLUSHED);
2357 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2358 TX_STATUS_ENTRY(TX_LOCKED);
2359 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2360 }
2361
2362 return "UNKNOWN";
2363}
2364
2365/**
bb8c093b 2366 * iwl4965_scan_cancel - Cancel any currently executing HW scan
b481de9c
ZY
2367 *
2368 * NOTE: priv->mutex is not required before calling this function
2369 */
bb8c093b 2370static int iwl4965_scan_cancel(struct iwl4965_priv *priv)
b481de9c
ZY
2371{
2372 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2373 clear_bit(STATUS_SCANNING, &priv->status);
2374 return 0;
2375 }
2376
2377 if (test_bit(STATUS_SCANNING, &priv->status)) {
2378 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2379 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2380 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2381 queue_work(priv->workqueue, &priv->abort_scan);
2382
2383 } else
2384 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2385
2386 return test_bit(STATUS_SCANNING, &priv->status);
2387 }
2388
2389 return 0;
2390}
2391
2392/**
bb8c093b 2393 * iwl4965_scan_cancel_timeout - Cancel any currently executing HW scan
b481de9c
ZY
2394 * @ms: amount of time to wait (in milliseconds) for scan to abort
2395 *
2396 * NOTE: priv->mutex must be held before calling this function
2397 */
bb8c093b 2398static int iwl4965_scan_cancel_timeout(struct iwl4965_priv *priv, unsigned long ms)
b481de9c
ZY
2399{
2400 unsigned long now = jiffies;
2401 int ret;
2402
bb8c093b 2403 ret = iwl4965_scan_cancel(priv);
b481de9c
ZY
2404 if (ret && ms) {
2405 mutex_unlock(&priv->mutex);
2406 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2407 test_bit(STATUS_SCANNING, &priv->status))
2408 msleep(1);
2409 mutex_lock(&priv->mutex);
2410
2411 return test_bit(STATUS_SCANNING, &priv->status);
2412 }
2413
2414 return ret;
2415}
2416
bb8c093b 2417static void iwl4965_sequence_reset(struct iwl4965_priv *priv)
b481de9c
ZY
2418{
2419 /* Reset ieee stats */
2420
2421 /* We don't reset the net_device_stats (ieee->stats) on
2422 * re-association */
2423
2424 priv->last_seq_num = -1;
2425 priv->last_frag_num = -1;
2426 priv->last_packet_time = 0;
2427
bb8c093b 2428 iwl4965_scan_cancel(priv);
b481de9c
ZY
2429}
2430
2431#define MAX_UCODE_BEACON_INTERVAL 4096
2432#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2433
bb8c093b 2434static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
b481de9c
ZY
2435{
2436 u16 new_val = 0;
2437 u16 beacon_factor = 0;
2438
2439 beacon_factor =
2440 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2441 / MAX_UCODE_BEACON_INTERVAL;
2442 new_val = beacon_val / beacon_factor;
2443
2444 return cpu_to_le16(new_val);
2445}
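/*
 * Example of the adjustment above: for beacon_val = 8000,
 * beacon_factor = (8000 + 4096) / 4096 = 2, so the value handed to the
 * uCode becomes 8000 / 2 = 4000, i.e. the interval is divided down until
 * it fits in MAX_UCODE_BEACON_INTERVAL; values below 4096 pass through
 * unchanged.
 */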
2446
bb8c093b 2447static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv)
b481de9c
ZY
2448{
2449 u64 interval_tm_unit;
2450 u64 tsf, result;
2451 unsigned long flags;
2452 struct ieee80211_conf *conf = NULL;
2453 u16 beacon_int = 0;
2454
2455 conf = ieee80211_get_hw_conf(priv->hw);
2456
2457 spin_lock_irqsave(&priv->lock, flags);
2458 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2459 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2460
2461 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2462
2463 tsf = priv->timestamp1;
2464 tsf = ((tsf << 32) | priv->timestamp0);
2465
2466 beacon_int = priv->beacon_int;
2467 spin_unlock_irqrestore(&priv->lock, flags);
2468
2469 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2470 if (beacon_int == 0) {
2471 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2472 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2473 } else {
2474 priv->rxon_timing.beacon_interval =
2475 cpu_to_le16(beacon_int);
2476 priv->rxon_timing.beacon_interval =
bb8c093b 2477 iwl4965_adjust_beacon_interval(
b481de9c
ZY
2478 le16_to_cpu(priv->rxon_timing.beacon_interval));
2479 }
2480
2481 priv->rxon_timing.atim_window = 0;
2482 } else {
2483 priv->rxon_timing.beacon_interval =
bb8c093b 2484 iwl4965_adjust_beacon_interval(conf->beacon_int);
b481de9c
ZY
2485 /* TODO: we need to get atim_window from upper stack
2486 * for now we set to 0 */
2487 priv->rxon_timing.atim_window = 0;
2488 }
2489
2490 interval_tm_unit =
2491 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2492 result = do_div(tsf, interval_tm_unit);
2493 priv->rxon_timing.beacon_init_val =
2494 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2495
2496 IWL_DEBUG_ASSOC
2497 ("beacon interval %d beacon timer %d beacon tim %d\n",
2498 le16_to_cpu(priv->rxon_timing.beacon_interval),
2499 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2500 le16_to_cpu(priv->rxon_timing.atim_window));
2501}
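/*
 * The beacon_init_val computed above is the time remaining until the next
 * expected beacon: the 64-bit TSF is divided by the beacon interval in
 * usec (interval * 1024) and the uCode timer is loaded with the interval
 * minus that remainder.
 */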
2502
bb8c093b 2503static int iwl4965_scan_initiate(struct iwl4965_priv *priv)
b481de9c
ZY
2504{
2505 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2506 IWL_ERROR("APs don't scan.\n");
2507 return 0;
2508 }
2509
bb8c093b 2510 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
2511 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2512 return -EIO;
2513 }
2514
2515 if (test_bit(STATUS_SCANNING, &priv->status)) {
2516 IWL_DEBUG_SCAN("Scan already in progress.\n");
2517 return -EAGAIN;
2518 }
2519
2520 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2521 IWL_DEBUG_SCAN("Scan request while abort pending. "
2522 "Queuing.\n");
2523 return -EAGAIN;
2524 }
2525
2526 IWL_DEBUG_INFO("Starting scan...\n");
2527 priv->scan_bands = 2;
2528 set_bit(STATUS_SCANNING, &priv->status);
2529 priv->scan_start = jiffies;
2530 priv->scan_pass_start = priv->scan_start;
2531
2532 queue_work(priv->workqueue, &priv->request_scan);
2533
2534 return 0;
2535}
2536
bb8c093b 2537static int iwl4965_set_rxon_hwcrypto(struct iwl4965_priv *priv, int hw_decrypt)
b481de9c 2538{
bb8c093b 2539 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
b481de9c
ZY
2540
2541 if (hw_decrypt)
2542 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2543 else
2544 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2545
2546 return 0;
2547}
2548
bb8c093b 2549static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode)
b481de9c
ZY
2550{
2551 if (phymode == MODE_IEEE80211A) {
2552 priv->staging_rxon.flags &=
2553 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2554 | RXON_FLG_CCK_MSK);
2555 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2556 } else {
bb8c093b 2557 /* Copied from iwl4965_bg_post_associate() */
b481de9c
ZY
2558 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2559 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2560 else
2561 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2562
2563 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2564 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2565
2566 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2567 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2568 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2569 }
2570}
2571
2572/*
01ebd063 2573 * initialize rxon structure with default values from eeprom
b481de9c 2574 */
bb8c093b 2575static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
b481de9c 2576{
bb8c093b 2577 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
2578
2579 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2580
2581 switch (priv->iw_mode) {
2582 case IEEE80211_IF_TYPE_AP:
2583 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2584 break;
2585
2586 case IEEE80211_IF_TYPE_STA:
2587 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2588 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2589 break;
2590
2591 case IEEE80211_IF_TYPE_IBSS:
2592 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2593 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2594 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2595 RXON_FILTER_ACCEPT_GRP_MSK;
2596 break;
2597
2598 case IEEE80211_IF_TYPE_MNTR:
2599 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2600 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2601 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2602 break;
2603 }
2604
2605#if 0
2606 /* TODO: Figure out when short_preamble would be set and cache from
2607 * that */
2608 if (!hw_to_local(priv->hw)->short_preamble)
2609 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2610 else
2611 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2612#endif
2613
bb8c093b 2614 ch_info = iwl4965_get_channel_info(priv, priv->phymode,
b481de9c
ZY
2615 le16_to_cpu(priv->staging_rxon.channel));
2616
2617 if (!ch_info)
2618 ch_info = &priv->channel_info[0];
2619
2620 /*
2621 * in some cases all of the A-band channels are non-IBSS;
2622 * in that case, force a B/G channel instead
2623 */
2624 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2625 !(is_channel_ibss(ch_info)))
2626 ch_info = &priv->channel_info[0];
2627
2628 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2629 if (is_channel_a_band(ch_info))
2630 priv->phymode = MODE_IEEE80211A;
2631 else
2632 priv->phymode = MODE_IEEE80211G;
2633
bb8c093b 2634 iwl4965_set_flags_for_phymode(priv, priv->phymode);
b481de9c
ZY
2635
2636 priv->staging_rxon.ofdm_basic_rates =
2637 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2638 priv->staging_rxon.cck_basic_rates =
2639 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2640
2641 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2642 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2643 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2644 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2645 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2646 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2647 iwl4965_set_rxon_chain(priv);
2648}
2649
bb8c093b 2650static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode)
b481de9c 2651{
bb8c093b 2652 if (!iwl4965_is_ready_rf(priv))
b481de9c
ZY
2653 return -EAGAIN;
2654
2655 if (mode == IEEE80211_IF_TYPE_IBSS) {
bb8c093b 2656 const struct iwl4965_channel_info *ch_info;
b481de9c 2657
bb8c093b 2658 ch_info = iwl4965_get_channel_info(priv,
b481de9c
ZY
2659 priv->phymode,
2660 le16_to_cpu(priv->staging_rxon.channel));
2661
2662 if (!ch_info || !is_channel_ibss(ch_info)) {
2663 IWL_ERROR("channel %d not IBSS channel\n",
2664 le16_to_cpu(priv->staging_rxon.channel));
2665 return -EINVAL;
2666 }
2667 }
2668
2669 cancel_delayed_work(&priv->scan_check);
bb8c093b 2670 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
2671 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2672 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2673 return -EAGAIN;
2674 }
2675
2676 priv->iw_mode = mode;
2677
bb8c093b 2678 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
2679 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2680
bb8c093b 2681 iwl4965_clear_stations_table(priv);
b481de9c 2682
bb8c093b 2683 iwl4965_commit_rxon(priv);
b481de9c
ZY
2684
2685 return 0;
2686}
2687
bb8c093b 2688static void iwl4965_build_tx_cmd_hwcrypto(struct iwl4965_priv *priv,
b481de9c 2689 struct ieee80211_tx_control *ctl,
bb8c093b 2690 struct iwl4965_cmd *cmd,
b481de9c
ZY
2691 struct sk_buff *skb_frag,
2692 int last_frag)
2693{
bb8c093b 2694 struct iwl4965_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
b481de9c
ZY
2695
2696 switch (keyinfo->alg) {
2697 case ALG_CCMP:
2698 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2699 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2700 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2701 break;
2702
2703 case ALG_TKIP:
2704#if 0
2705 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2706
2707 if (last_frag)
2708 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2709 8);
2710 else
2711 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2712#endif
2713 break;
2714
2715 case ALG_WEP:
2716 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2717 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2718
2719 if (keyinfo->keylen == 13)
2720 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2721
2722 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2723
2724 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2725 "with key %d\n", ctl->key_idx);
2726 break;
2727
b481de9c
ZY
2728 default:
2729 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2730 break;
2731 }
2732}
2733
2734/*
2735 * handle build REPLY_TX command notification.
2736 */
bb8c093b
CH
2737static void iwl4965_build_tx_cmd_basic(struct iwl4965_priv *priv,
2738 struct iwl4965_cmd *cmd,
b481de9c
ZY
2739 struct ieee80211_tx_control *ctrl,
2740 struct ieee80211_hdr *hdr,
2741 int is_unicast, u8 std_id)
2742{
2743 __le16 *qc;
2744 u16 fc = le16_to_cpu(hdr->frame_control);
2745 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2746
2747 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2748 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2749 tx_flags |= TX_CMD_FLG_ACK_MSK;
2750 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2751 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2752 if (ieee80211_is_probe_response(fc) &&
2753 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2754 tx_flags |= TX_CMD_FLG_TSF_MSK;
2755 } else {
2756 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2757 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2758 }
2759
2760 cmd->cmd.tx.sta_id = std_id;
2761 if (ieee80211_get_morefrag(hdr))
2762 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2763
2764 qc = ieee80211_get_qos_ctrl(hdr);
2765 if (qc) {
2766 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2767 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2768 } else
2769 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2770
2771 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2772 tx_flags |= TX_CMD_FLG_RTS_MSK;
2773 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2774 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2775 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2776 tx_flags |= TX_CMD_FLG_CTS_MSK;
2777 }
2778
2779 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2780 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2781
2782 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2783 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2784 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2785 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
bc434dd2 2786 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 2787 else
bc434dd2 2788 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
b481de9c
ZY
2789 } else
2790 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2791
2792 cmd->cmd.tx.driver_txop = 0;
2793 cmd->cmd.tx.tx_flags = tx_flags;
2794 cmd->cmd.tx.next_frame_len = 0;
2795}
2796
6440adb5
BC
2797/**
2798 * iwl4965_get_sta_id - Find station's index within station table
2799 *
2800 * If new IBSS station, create new entry in station table
2801 */
9fbab516
BC
2802static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2803 struct ieee80211_hdr *hdr)
b481de9c
ZY
2804{
2805 int sta_id;
2806 u16 fc = le16_to_cpu(hdr->frame_control);
0795af57 2807 DECLARE_MAC_BUF(mac);
b481de9c 2808
6440adb5 2809 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
2810 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2811 is_multicast_ether_addr(hdr->addr1))
2812 return priv->hw_setting.bcast_sta_id;
2813
2814 switch (priv->iw_mode) {
2815
6440adb5
BC
2816 /* If we are a client station in a BSS network, use the special
2817 * AP station entry (that's the only station we communicate with) */
b481de9c
ZY
2818 case IEEE80211_IF_TYPE_STA:
2819 return IWL_AP_ID;
2820
2821 /* If we are an AP, then find the station, or use BCAST */
2822 case IEEE80211_IF_TYPE_AP:
bb8c093b 2823 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2824 if (sta_id != IWL_INVALID_STATION)
2825 return sta_id;
2826 return priv->hw_setting.bcast_sta_id;
2827
6440adb5
BC
2828 /* If this frame is going out to an IBSS network, find the station,
2829 * or create a new station table entry */
b481de9c 2830 case IEEE80211_IF_TYPE_IBSS:
bb8c093b 2831 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2832 if (sta_id != IWL_INVALID_STATION)
2833 return sta_id;
2834
6440adb5 2835 /* Create new station table entry */
bb8c093b 2836 sta_id = iwl4965_add_station_flags(priv, hdr->addr1, 0, CMD_ASYNC);
b481de9c
ZY
2837
2838 if (sta_id != IWL_INVALID_STATION)
2839 return sta_id;
2840
0795af57 2841 IWL_DEBUG_DROP("Station %s not in station map. "
b481de9c 2842 "Defaulting to broadcast...\n",
0795af57 2843 print_mac(mac, hdr->addr1));
bb8c093b 2844 iwl4965_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
b481de9c
ZY
2845 return priv->hw_setting.bcast_sta_id;
2846
2847 default:
01ebd063 2848 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
b481de9c
ZY
2849 return priv->hw_setting.bcast_sta_id;
2850 }
2851}
2852
2853/*
2854 * start REPLY_TX command process
2855 */
bb8c093b 2856static int iwl4965_tx_skb(struct iwl4965_priv *priv,
b481de9c
ZY
2857 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2858{
2859 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bb8c093b 2860 struct iwl4965_tfd_frame *tfd;
b481de9c
ZY
2861 u32 *control_flags;
2862 int txq_id = ctl->queue;
bb8c093b
CH
2863 struct iwl4965_tx_queue *txq = NULL;
2864 struct iwl4965_queue *q = NULL;
b481de9c
ZY
2865 dma_addr_t phys_addr;
2866 dma_addr_t txcmd_phys;
bb8c093b 2867 struct iwl4965_cmd *out_cmd = NULL;
b481de9c
ZY
2868 u16 len, idx, len_org;
2869 u8 id, hdr_len, unicast;
2870 u8 sta_id;
2871 u16 seq_number = 0;
2872 u16 fc;
2873 __le16 *qc;
2874 u8 wait_write_ptr = 0;
2875 unsigned long flags;
2876 int rc;
2877
2878 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 2879 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
2880 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2881 goto drop_unlock;
2882 }
2883
2884 if (!priv->interface_id) {
2885 IWL_DEBUG_DROP("Dropping - !priv->interface_id\n");
2886 goto drop_unlock;
2887 }
2888
2889 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2890 IWL_ERROR("ERROR: No TX rate available.\n");
2891 goto drop_unlock;
2892 }
2893
2894 unicast = !is_multicast_ether_addr(hdr->addr1);
2895 id = 0;
2896
2897 fc = le16_to_cpu(hdr->frame_control);
2898
c8b0e6e1 2899#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
2900 if (ieee80211_is_auth(fc))
2901 IWL_DEBUG_TX("Sending AUTH frame\n");
2902 else if (ieee80211_is_assoc_request(fc))
2903 IWL_DEBUG_TX("Sending ASSOC frame\n");
2904 else if (ieee80211_is_reassoc_request(fc))
2905 IWL_DEBUG_TX("Sending REASSOC frame\n");
2906#endif
2907
7878a5a4
MA
2908 /* drop all data frame if we are not associated */
2909 if (!iwl4965_is_associated(priv) && !priv->assoc_id &&
b481de9c 2910 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
bb8c093b 2911 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n");
b481de9c
ZY
2912 goto drop_unlock;
2913 }
2914
2915 spin_unlock_irqrestore(&priv->lock, flags);
2916
2917 hdr_len = ieee80211_get_hdrlen(fc);
6440adb5
BC
2918
2919 /* Find (or create) index into station table for destination station */
bb8c093b 2920 sta_id = iwl4965_get_sta_id(priv, hdr);
b481de9c 2921 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
2922 DECLARE_MAC_BUF(mac);
2923
2924 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2925 print_mac(mac, hdr->addr1));
b481de9c
ZY
2926 goto drop;
2927 }
2928
2929 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2930
2931 qc = ieee80211_get_qos_ctrl(hdr);
2932 if (qc) {
2933 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2934 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2935 IEEE80211_SCTL_SEQ;
2936 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2937 (hdr->seq_ctrl &
2938 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2939 seq_number += 0x10;
c8b0e6e1
CH
2940#ifdef CONFIG_IWL4965_HT
2941#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
2942 /* aggregation is on for this <sta,tid> */
2943 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2944 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
c8b0e6e1
CH
2945#endif /* CONFIG_IWL4965_HT_AGG */
2946#endif /* CONFIG_IWL4965_HT */
b481de9c 2947 }
6440adb5
BC
2948
2949 /* Descriptor for chosen Tx queue */
b481de9c
ZY
2950 txq = &priv->txq[txq_id];
2951 q = &txq->q;
2952
2953 spin_lock_irqsave(&priv->lock, flags);
2954
6440adb5 2955 /* Set up first empty TFD within this queue's circular TFD buffer */
fc4b6853 2956 tfd = &txq->bd[q->write_ptr];
b481de9c
ZY
2957 memset(tfd, 0, sizeof(*tfd));
2958 control_flags = (u32 *) tfd;
fc4b6853 2959 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 2960
6440adb5 2961 /* Set up driver data for this TFD */
bb8c093b 2962 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
fc4b6853
TW
2963 txq->txb[q->write_ptr].skb[0] = skb;
2964 memcpy(&(txq->txb[q->write_ptr].status.control),
b481de9c 2965 ctl, sizeof(struct ieee80211_tx_control));
6440adb5
BC
2966
2967 /* Set up first empty entry in queue's array of Tx/cmd buffers */
b481de9c
ZY
2968 out_cmd = &txq->cmd[idx];
2969 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2970 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
6440adb5
BC
2971
2972 /*
2973 * Set up the Tx-command (not MAC!) header.
2974 * Store the chosen Tx queue and TFD index within the sequence field;
2975 * after Tx, uCode's Tx response will return this value so driver can
2976 * locate the frame within the tx queue and do post-tx processing.
2977 */
b481de9c
ZY
2978 out_cmd->hdr.cmd = REPLY_TX;
2979 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 2980 INDEX_TO_SEQ(q->write_ptr)));
6440adb5
BC
2981
2982 /* Copy MAC header from skb into command buffer */
b481de9c
ZY
2983 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2984
6440adb5
BC
2985 /*
2986 * Use the first empty entry in this queue's command buffer array
2987 * to contain the Tx command and MAC header concatenated together
2988 * (payload data will be in another buffer).
2989 * Size of this varies, due to varying MAC header length.
2990 * If end is not dword aligned, we'll have 2 extra bytes at the end
2991 * of the MAC header (device reads on dword boundaries).
2992 * We'll tell device about this padding later.
2993 */
b481de9c 2994 len = priv->hw_setting.tx_cmd_len +
bb8c093b 2995 sizeof(struct iwl4965_cmd_header) + hdr_len;
b481de9c
ZY
2996
2997 len_org = len;
2998 len = (len + 3) & ~3;
2999
3000 if (len_org != len)
3001 len_org = 1;
3002 else
3003 len_org = 0;
3004
6440adb5
BC
3005 /* Physical address of this Tx command's header (not MAC header!),
3006 * within command buffer array. */
bb8c093b
CH
3007 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl4965_cmd) * idx +
3008 offsetof(struct iwl4965_cmd, hdr);
b481de9c 3009
6440adb5
BC
3010 /* Add buffer containing Tx command and MAC(!) header to TFD's
3011 * first entry */
bb8c093b 3012 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
b481de9c
ZY
3013
3014 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
bb8c093b 3015 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
b481de9c 3016
6440adb5
BC
3017 /* Set up TFD's 2nd entry to point directly to remainder of skb,
3018 * if any (802.11 null frames have no payload). */
b481de9c
ZY
3019 len = skb->len - hdr_len;
3020 if (len) {
3021 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
3022 len, PCI_DMA_TODEVICE);
bb8c093b 3023 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
b481de9c
ZY
3024 }
3025
6440adb5 3026 /* Tell 4965 about any 2-byte padding after MAC header */
b481de9c
ZY
3027 if (len_org)
3028 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
3029
6440adb5 3030 /* Total # bytes to be transmitted */
b481de9c
ZY
3031 len = (u16)skb->len;
3032 out_cmd->cmd.tx.len = cpu_to_le16(len);
3033
3034 /* TODO need this for burst mode later on */
bb8c093b 3035 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
b481de9c
ZY
3036
3037 /* set is_hcca to 0; it probably will never be implemented */
bb8c093b 3038 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
b481de9c
ZY
3039
3040 iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys,
3041 hdr, hdr_len, ctl, NULL);
3042
3043 if (!ieee80211_get_morefrag(hdr)) {
3044 txq->need_update = 1;
3045 if (qc) {
3046 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
3047 priv->stations[sta_id].tid[tid].seq_number = seq_number;
3048 }
3049 } else {
3050 wait_write_ptr = 1;
3051 txq->need_update = 0;
3052 }
3053
bb8c093b 3054 iwl4965_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
b481de9c
ZY
3055 sizeof(out_cmd->cmd.tx));
3056
bb8c093b 3057 iwl4965_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
b481de9c
ZY
3058 ieee80211_get_hdrlen(fc));
3059
6440adb5 3060 /* Set up entry for this TFD in Tx byte-count array */
b481de9c
ZY
3061 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
3062
6440adb5 3063 /* Tell device the write index *just past* this latest filled TFD */
bb8c093b
CH
3064 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
3065 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3066 spin_unlock_irqrestore(&priv->lock, flags);
3067
3068 if (rc)
3069 return rc;
3070
bb8c093b 3071 if ((iwl4965_queue_space(q) < q->high_mark)
b481de9c
ZY
3072 && priv->mac80211_registered) {
3073 if (wait_write_ptr) {
3074 spin_lock_irqsave(&priv->lock, flags);
3075 txq->need_update = 1;
bb8c093b 3076 iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3077 spin_unlock_irqrestore(&priv->lock, flags);
3078 }
3079
3080 ieee80211_stop_queue(priv->hw, ctl->queue);
3081 }
3082
3083 return 0;
3084
3085drop_unlock:
3086 spin_unlock_irqrestore(&priv->lock, flags);
3087drop:
3088 return -1;
3089}
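/*
 * Summary of the Tx path above: the frame is dropped early under RF-kill,
 * without an interface, without a valid Tx rate, or (for data frames) when
 * not associated.  Otherwise a station index is resolved, the next free
 * TFD in the target queue is claimed, the Tx command plus MAC header are
 * copied into the queue's command buffer (padded to a dword boundary), the
 * payload is DMA-mapped as the TFD's second entry, and the queue write
 * pointer is advanced; the mac80211 queue is stopped once the ring nears
 * its high-water mark.
 */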
3090
bb8c093b 3091static void iwl4965_set_rate(struct iwl4965_priv *priv)
b481de9c
ZY
3092{
3093 const struct ieee80211_hw_mode *hw = NULL;
3094 struct ieee80211_rate *rate;
3095 int i;
3096
bb8c093b 3097 hw = iwl4965_get_hw_mode(priv, priv->phymode);
c4ba9621
SA
3098 if (!hw) {
3099 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
3100 return;
3101 }
b481de9c
ZY
3102
3103 priv->active_rate = 0;
3104 priv->active_rate_basic = 0;
3105
3106 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3107 hw->mode == MODE_IEEE80211A ?
3108 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3109
3110 for (i = 0; i < hw->num_rates; i++) {
3111 rate = &(hw->rates[i]);
3112 if ((rate->val < IWL_RATE_COUNT) &&
3113 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3114 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
bb8c093b 3115 rate->val, iwl4965_rates[rate->val].plcp,
b481de9c
ZY
3116 (rate->flags & IEEE80211_RATE_BASIC) ?
3117 "*" : "");
3118 priv->active_rate |= (1 << rate->val);
3119 if (rate->flags & IEEE80211_RATE_BASIC)
3120 priv->active_rate_basic |= (1 << rate->val);
3121 } else
3122 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
bb8c093b 3123 rate->val, iwl4965_rates[rate->val].plcp);
b481de9c
ZY
3124 }
3125
3126 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3127 priv->active_rate, priv->active_rate_basic);
3128
3129 /*
3130 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
3131 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
3132 * OFDM
3133 */
3134 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3135 priv->staging_rxon.cck_basic_rates =
3136 ((priv->active_rate_basic &
3137 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3138 else
3139 priv->staging_rxon.cck_basic_rates =
3140 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3141
3142 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3143 priv->staging_rxon.ofdm_basic_rates =
3144 ((priv->active_rate_basic &
3145 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3146 IWL_FIRST_OFDM_RATE) & 0xFF;
3147 else
3148 priv->staging_rxon.ofdm_basic_rates =
3149 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3150}
3151
bb8c093b 3152static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
b481de9c
ZY
3153{
3154 unsigned long flags;
3155
3156 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3157 return;
3158
3159 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3160 disable_radio ? "OFF" : "ON");
3161
3162 if (disable_radio) {
bb8c093b 3163 iwl4965_scan_cancel(priv);
b481de9c
ZY
3164 /* FIXME: This is a workaround for AP */
3165 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3166 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3167 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
3168 CSR_UCODE_SW_BIT_RFKILL);
3169 spin_unlock_irqrestore(&priv->lock, flags);
bb8c093b 3170 iwl4965_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
b481de9c
ZY
3171 set_bit(STATUS_RF_KILL_SW, &priv->status);
3172 }
3173 return;
3174 }
3175
3176 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3177 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
3178
3179 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3180 spin_unlock_irqrestore(&priv->lock, flags);
3181
3182 /* wake up ucode */
3183 msleep(10);
3184
3185 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
3186 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
3187 if (!iwl4965_grab_nic_access(priv))
3188 iwl4965_release_nic_access(priv);
b481de9c
ZY
3189 spin_unlock_irqrestore(&priv->lock, flags);
3190
3191 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3192 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3193 "disabled by HW switch\n");
3194 return;
3195 }
3196
3197 queue_work(priv->workqueue, &priv->restart);
3198 return;
3199}
3200
bb8c093b 3201void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
b481de9c
ZY
3202 u32 decrypt_res, struct ieee80211_rx_status *stats)
3203{
3204 u16 fc =
3205 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3206
3207 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3208 return;
3209
3210 if (!(fc & IEEE80211_FCTL_PROTECTED))
3211 return;
3212
3213 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3214 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3215 case RX_RES_STATUS_SEC_TYPE_TKIP:
3216 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3217 RX_RES_STATUS_BAD_ICV_MIC)
3218 stats->flag |= RX_FLAG_MMIC_ERROR;
3219 case RX_RES_STATUS_SEC_TYPE_WEP:
3220 case RX_RES_STATUS_SEC_TYPE_CCMP:
3221 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3222 RX_RES_STATUS_DECRYPT_OK) {
3223 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
3224 stats->flag |= RX_FLAG_DECRYPTED;
3225 }
3226 break;
3227
3228 default:
3229 break;
3230 }
3231}
3232
bb8c093b
CH
3233void iwl4965_handle_data_packet_monitor(struct iwl4965_priv *priv,
3234 struct iwl4965_rx_mem_buffer *rxb,
b481de9c
ZY
3235 void *data, short len,
3236 struct ieee80211_rx_status *stats,
3237 u16 phy_flags)
3238{
bb8c093b 3239 struct iwl4965_rt_rx_hdr *iwl4965_rt;
b481de9c
ZY
3240
3241 /* First cache any information we need before we overwrite
3242 * the information provided in the skb from the hardware */
3243 s8 signal = stats->ssi;
3244 s8 noise = 0;
3245 int rate = stats->rate;
3246 u64 tsf = stats->mactime;
3247 __le16 phy_flags_hw = cpu_to_le16(phy_flags);
3248
3249 /* We received data from the HW, so stop the watchdog */
bb8c093b 3250 if (len > IWL_RX_BUF_SIZE - sizeof(*iwl4965_rt)) {
b481de9c
ZY
3251 IWL_DEBUG_DROP("Dropping too large packet in monitor\n");
3252 return;
3253 }
3254
3255 /* copy the frame data to write after where the radiotap header goes */
bb8c093b
CH
3256 iwl4965_rt = (void *)rxb->skb->data;
3257 memmove(iwl4965_rt->payload, data, len);
b481de9c 3258
bb8c093b
CH
3259 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3260 iwl4965_rt->rt_hdr.it_pad = 0; /* always good to zero */
b481de9c
ZY
3261
3262 /* total header + data */
bb8c093b 3263 iwl4965_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl4965_rt));
b481de9c
ZY
3264
3265 /* Set the size of the skb to the size of the frame */
bb8c093b 3266 skb_put(rxb->skb, sizeof(*iwl4965_rt) + len);
b481de9c
ZY
3267
3268 /* Big bitfield of all the fields we provide in radiotap */
bb8c093b 3269 iwl4965_rt->rt_hdr.it_present =
b481de9c
ZY
3270 cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3271 (1 << IEEE80211_RADIOTAP_FLAGS) |
3272 (1 << IEEE80211_RADIOTAP_RATE) |
3273 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3274 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3275 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3276 (1 << IEEE80211_RADIOTAP_ANTENNA));
3277
3278 /* Zero the flags, we'll add to them as we go */
bb8c093b 3279 iwl4965_rt->rt_flags = 0;
b481de9c 3280
bb8c093b 3281 iwl4965_rt->rt_tsf = cpu_to_le64(tsf);
b481de9c
ZY
3282
3283 /* Convert to dBm */
bb8c093b
CH
3284 iwl4965_rt->rt_dbmsignal = signal;
3285 iwl4965_rt->rt_dbmnoise = noise;
b481de9c
ZY
3286
3287 /* Convert the channel frequency and set the flags */
bb8c093b 3288 iwl4965_rt->rt_channelMHz = cpu_to_le16(stats->freq);
b481de9c 3289 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
bb8c093b 3290 iwl4965_rt->rt_chbitmask =
b481de9c
ZY
3291 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
3292 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
bb8c093b 3293 iwl4965_rt->rt_chbitmask =
b481de9c
ZY
3294 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
3295 else /* 802.11g */
bb8c093b 3296 iwl4965_rt->rt_chbitmask =
b481de9c
ZY
3297 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
3298
bb8c093b 3299 rate = iwl4965_rate_index_from_plcp(rate);
b481de9c 3300 if (rate == -1)
bb8c093b 3301 iwl4965_rt->rt_rate = 0;
b481de9c 3302 else
bb8c093b 3303 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
b481de9c
ZY
3304
3305 /* antenna number */
bb8c093b 3306 iwl4965_rt->rt_antenna =
b481de9c
ZY
3307 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3308
3309 /* set the preamble flag if we have it */
3310 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
bb8c093b 3311 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
b481de9c
ZY
3312
3313 IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
3314
3315 stats->flag |= RX_FLAG_RADIOTAP;
3316 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3317 rxb->skb = NULL;
3318}
3319
3320
3321#define IWL_PACKET_RETRY_TIME HZ
3322
bb8c093b 3323int iwl4965_is_duplicate_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
3324{
3325 u16 sc = le16_to_cpu(header->seq_ctrl);
3326 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3327 u16 frag = sc & IEEE80211_SCTL_FRAG;
3328 u16 *last_seq, *last_frag;
3329 unsigned long *last_time;
3330
3331 switch (priv->iw_mode) {
3332 case IEEE80211_IF_TYPE_IBSS:{
3333 struct list_head *p;
bb8c093b 3334 struct iwl4965_ibss_seq *entry = NULL;
b481de9c
ZY
3335 u8 *mac = header->addr2;
3336 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3337
3338 __list_for_each(p, &priv->ibss_mac_hash[index]) {
bb8c093b 3339 entry = list_entry(p, struct iwl4965_ibss_seq, list);
b481de9c
ZY
3340 if (!compare_ether_addr(entry->mac, mac))
3341 break;
3342 }
3343 if (p == &priv->ibss_mac_hash[index]) {
3344 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3345 if (!entry) {
bc434dd2 3346 IWL_ERROR("Cannot malloc new mac entry\n");
b481de9c
ZY
3347 return 0;
3348 }
3349 memcpy(entry->mac, mac, ETH_ALEN);
3350 entry->seq_num = seq;
3351 entry->frag_num = frag;
3352 entry->packet_time = jiffies;
bc434dd2 3353 list_add(&entry->list, &priv->ibss_mac_hash[index]);
b481de9c
ZY
3354 return 0;
3355 }
3356 last_seq = &entry->seq_num;
3357 last_frag = &entry->frag_num;
3358 last_time = &entry->packet_time;
3359 break;
3360 }
3361 case IEEE80211_IF_TYPE_STA:
3362 last_seq = &priv->last_seq_num;
3363 last_frag = &priv->last_frag_num;
3364 last_time = &priv->last_packet_time;
3365 break;
3366 default:
3367 return 0;
3368 }
3369 if ((*last_seq == seq) &&
3370 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3371 if (*last_frag == frag)
3372 goto drop;
3373 if (*last_frag + 1 != frag)
3374 /* out-of-order fragment */
3375 goto drop;
3376 } else
3377 *last_seq = seq;
3378
3379 *last_frag = frag;
3380 *last_time = jiffies;
3381 return 0;
3382
3383 drop:
3384 return 1;
3385}
3386
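/*
 * Illustrative note (editorial addition, not in the original source): a
 * worked example of how iwl4965_is_duplicate_packet() above splits the
 * 802.11 sequence-control field, assuming the standard mac80211 masks
 * IEEE80211_SCTL_SEQ == 0xFFF0 and IEEE80211_SCTL_FRAG == 0x000F:
 *
 *	sc   = 0x0125
 *	seq  = (0x0125 & 0xFFF0) >> 4  = 0x012 = 18	(sequence number)
 *	frag =  0x0125 & 0x000F        = 0x5   = 5	(fragment number)
 *
 * A later frame carrying the same seq and frag that arrives within
 * IWL_PACKET_RETRY_TIME (HZ jiffies, i.e. one second) is treated as a
 * retransmission and dropped.
 */
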
c8b0e6e1 3387#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
3388
3389#include "iwl-spectrum.h"
3390
3391#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3392#define BEACON_TIME_MASK_HIGH 0xFF000000
3393#define TIME_UNIT 1024
3394
3395/*
3396 * extended beacon time format
 3397 * the time in usec is packed into a 32-bit value in 8:24 format:
 3398 * the high byte is the beacon count
 3399 * the lower 3 bytes are the time in usec within one beacon interval
3400 */
3401
bb8c093b 3402static u32 iwl4965_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
3403{
3404 u32 quot;
3405 u32 rem;
3406 u32 interval = beacon_interval * 1024;
3407
3408 if (!interval || !usec)
3409 return 0;
3410
3411 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3412 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3413
3414 return (quot << 24) + rem;
3415}
3416
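/*
 * Illustrative note (editorial addition, not in the original source): a
 * minimal worked example of the 8:24 encoding performed by
 * iwl4965_usecs_to_beacons() above, assuming a 100-TU beacon interval
 * (100 * 1024 = 102400 usec):
 *
 *	usec = 200000
 *	quot = 200000 / 102400 = 1		(beacon count, high byte)
 *	rem  = 200000 % 102400 = 97600		(usec into interval, low 24 bits)
 *	return (1 << 24) + 97600 = 0x01017D40
 */
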
3417/* base is usually what we get from ucode with each received frame,
 3418 * the same as the HW timer counter counting down
3419 */
3420
bb8c093b 3421static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
3422{
3423 u32 base_low = base & BEACON_TIME_MASK_LOW;
3424 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3425 u32 interval = beacon_interval * TIME_UNIT;
3426 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3427 (addon & BEACON_TIME_MASK_HIGH);
3428
3429 if (base_low > addon_low)
3430 res += base_low - addon_low;
3431 else if (base_low < addon_low) {
3432 res += interval + base_low - addon_low;
3433 res += (1 << 24);
3434 } else
3435 res += (1 << 24);
3436
3437 return cpu_to_le32(res);
3438}
3439
bb8c093b 3440static int iwl4965_get_measurement(struct iwl4965_priv *priv,
b481de9c
ZY
3441 struct ieee80211_measurement_params *params,
3442 u8 type)
3443{
bb8c093b
CH
3444 struct iwl4965_spectrum_cmd spectrum;
3445 struct iwl4965_rx_packet *res;
3446 struct iwl4965_host_cmd cmd = {
b481de9c
ZY
3447 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3448 .data = (void *)&spectrum,
3449 .meta.flags = CMD_WANT_SKB,
3450 };
3451 u32 add_time = le64_to_cpu(params->start_time);
3452 int rc;
3453 int spectrum_resp_status;
3454 int duration = le16_to_cpu(params->duration);
3455
bb8c093b 3456 if (iwl4965_is_associated(priv))
b481de9c 3457 add_time =
bb8c093b 3458 iwl4965_usecs_to_beacons(
b481de9c
ZY
3459 le64_to_cpu(params->start_time) - priv->last_tsf,
3460 le16_to_cpu(priv->rxon_timing.beacon_interval));
3461
3462 memset(&spectrum, 0, sizeof(spectrum));
3463
3464 spectrum.channel_count = cpu_to_le16(1);
3465 spectrum.flags =
3466 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3467 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3468 cmd.len = sizeof(spectrum);
3469 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3470
bb8c093b 3471 if (iwl4965_is_associated(priv))
b481de9c 3472 spectrum.start_time =
bb8c093b 3473 iwl4965_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
3474 add_time,
3475 le16_to_cpu(priv->rxon_timing.beacon_interval));
3476 else
3477 spectrum.start_time = 0;
3478
3479 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3480 spectrum.channels[0].channel = params->channel;
3481 spectrum.channels[0].type = type;
3482 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3483 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3484 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3485
bb8c093b 3486 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
3487 if (rc)
3488 return rc;
3489
bb8c093b 3490 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
b481de9c
ZY
3491 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3492 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
3493 rc = -EIO;
3494 }
3495
3496 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3497 switch (spectrum_resp_status) {
3498 case 0: /* Command will be handled */
3499 if (res->u.spectrum.id != 0xff) {
3500 IWL_DEBUG_INFO
3501 ("Replaced existing measurement: %d\n",
3502 res->u.spectrum.id);
3503 priv->measurement_status &= ~MEASUREMENT_READY;
3504 }
3505 priv->measurement_status |= MEASUREMENT_ACTIVE;
3506 rc = 0;
3507 break;
3508
3509 case 1: /* Command will not be handled */
3510 rc = -EAGAIN;
3511 break;
3512 }
3513
3514 dev_kfree_skb_any(cmd.meta.u.skb);
3515
3516 return rc;
3517}
3518#endif
3519
bb8c093b
CH
3520static void iwl4965_txstatus_to_ieee(struct iwl4965_priv *priv,
3521 struct iwl4965_tx_info *tx_sta)
b481de9c
ZY
3522{
3523
3524 tx_sta->status.ack_signal = 0;
3525 tx_sta->status.excessive_retries = 0;
3526 tx_sta->status.queue_length = 0;
3527 tx_sta->status.queue_number = 0;
3528
3529 if (in_interrupt())
3530 ieee80211_tx_status_irqsafe(priv->hw,
3531 tx_sta->skb[0], &(tx_sta->status));
3532 else
3533 ieee80211_tx_status(priv->hw,
3534 tx_sta->skb[0], &(tx_sta->status));
3535
3536 tx_sta->skb[0] = NULL;
3537}
3538
3539/**
6440adb5 3540 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
b481de9c 3541 *
6440adb5
BC
3542 * When FW advances 'R' index, all entries between old and new 'R' index
 3543 * need to be reclaimed. As a result, some free space forms. If there is
3544 * enough free space (> low mark), wake the stack that feeds us.
b481de9c 3545 */
bb8c093b 3546int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
b481de9c 3547{
bb8c093b
CH
3548 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
3549 struct iwl4965_queue *q = &txq->q;
b481de9c
ZY
3550 int nfreed = 0;
3551
3552 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3553 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3554 "is out of range [0-%d] %d %d.\n", txq_id,
fc4b6853 3555 index, q->n_bd, q->write_ptr, q->read_ptr);
b481de9c
ZY
3556 return 0;
3557 }
3558
bb8c093b 3559 for (index = iwl4965_queue_inc_wrap(index, q->n_bd);
fc4b6853 3560 q->read_ptr != index;
bb8c093b 3561 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd)) {
b481de9c 3562 if (txq_id != IWL_CMD_QUEUE_NUM) {
bb8c093b 3563 iwl4965_txstatus_to_ieee(priv,
fc4b6853 3564 &(txq->txb[txq->q.read_ptr]));
bb8c093b 3565 iwl4965_hw_txq_free_tfd(priv, txq);
b481de9c
ZY
3566 } else if (nfreed > 1) {
3567 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
fc4b6853 3568 q->write_ptr, q->read_ptr);
b481de9c
ZY
3569 queue_work(priv->workqueue, &priv->restart);
3570 }
3571 nfreed++;
3572 }
3573
bb8c093b 3574 if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
b481de9c
ZY
3575 (txq_id != IWL_CMD_QUEUE_NUM) &&
3576 priv->mac80211_registered)
3577 ieee80211_wake_queue(priv->hw, txq_id);
3578
3579
3580 return nfreed;
3581}
3582
bb8c093b 3583static int iwl4965_is_tx_success(u32 status)
b481de9c
ZY
3584{
3585 status &= TX_STATUS_MSK;
3586 return (status == TX_STATUS_SUCCESS)
3587 || (status == TX_STATUS_DIRECT_DONE);
3588}
3589
3590/******************************************************************************
3591 *
3592 * Generic RX handler implementations
3593 *
3594 ******************************************************************************/
c8b0e6e1
CH
3595#ifdef CONFIG_IWL4965_HT
3596#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3597
bb8c093b 3598static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv,
b481de9c
ZY
3599 struct ieee80211_hdr *hdr)
3600{
3601 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
3602 return IWL_AP_ID;
3603 else {
3604 u8 *da = ieee80211_get_DA(hdr);
bb8c093b 3605 return iwl4965_hw_find_station(priv, da);
b481de9c
ZY
3606 }
3607}
3608
bb8c093b
CH
3609static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
3610 struct iwl4965_priv *priv, int txq_id, int idx)
b481de9c
ZY
3611{
3612 if (priv->txq[txq_id].txb[idx].skb[0])
3613 return (struct ieee80211_hdr *)priv->txq[txq_id].
3614 txb[idx].skb[0]->data;
3615 return NULL;
3616}
3617
bb8c093b 3618static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
b481de9c
ZY
3619{
3620 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
3621 tx_resp->frame_count);
3622 return le32_to_cpu(*scd_ssn) & MAX_SN;
3623
3624}
6440adb5
BC
3625
3626/**
 3627 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
3628 */
bb8c093b
CH
3629static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3630 struct iwl4965_ht_agg *agg,
3631 struct iwl4965_tx_resp *tx_resp,
b481de9c
ZY
3632 u16 start_idx)
3633{
3634 u32 status;
3635 __le32 *frame_status = &tx_resp->status;
3636 struct ieee80211_tx_status *tx_status = NULL;
3637 struct ieee80211_hdr *hdr = NULL;
3638 int i, sh;
3639 int txq_id, idx;
3640 u16 seq;
3641
3642 if (agg->wait_for_ba)
6440adb5 3643 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
b481de9c
ZY
3644
3645 agg->frame_count = tx_resp->frame_count;
3646 agg->start_idx = start_idx;
3647 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3648 agg->bitmap0 = agg->bitmap1 = 0;
3649
6440adb5 3650 /* # frames attempted by Tx command */
b481de9c 3651 if (agg->frame_count == 1) {
6440adb5 3652 /* Only one frame was attempted; no block-ack will arrive */
bb8c093b 3653 struct iwl4965_tx_queue *txq ;
b481de9c
ZY
3654 status = le32_to_cpu(frame_status[0]);
3655
3656 txq_id = agg->txq_id;
3657 txq = &priv->txq[txq_id];
3658 /* FIXME: code repetition */
3659 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
3660 agg->frame_count, agg->start_idx);
3661
fc4b6853 3662 tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status);
b481de9c
ZY
3663 tx_status->retry_count = tx_resp->failure_frame;
3664 tx_status->queue_number = status & 0xff;
3665 tx_status->queue_length = tx_resp->bt_kill_count;
3666 tx_status->queue_length |= tx_resp->failure_rts;
3667
bb8c093b 3668 tx_status->flags = iwl4965_is_tx_success(status)?
b481de9c
ZY
3669 IEEE80211_TX_STATUS_ACK : 0;
3670 tx_status->control.tx_rate =
bb8c093b 3671 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3672 /* FIXME: code repetition end */
3673
3674 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3675 status & 0xff, tx_resp->failure_frame);
3676 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
bb8c093b 3677 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
b481de9c
ZY
3678
3679 agg->wait_for_ba = 0;
3680 } else {
6440adb5 3681 /* Two or more frames were attempted; expect block-ack */
b481de9c
ZY
3682 u64 bitmap = 0;
3683 int start = agg->start_idx;
3684
6440adb5 3685 /* Construct bit-map of pending frames within Tx window */
b481de9c
ZY
3686 for (i = 0; i < agg->frame_count; i++) {
3687 u16 sc;
3688 status = le32_to_cpu(frame_status[i]);
3689 seq = status >> 16;
3690 idx = SEQ_TO_INDEX(seq);
3691 txq_id = SEQ_TO_QUEUE(seq);
3692
3693 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3694 AGG_TX_STATE_ABORT_MSK))
3695 continue;
3696
3697 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3698 agg->frame_count, txq_id, idx);
3699
bb8c093b 3700 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
b481de9c
ZY
3701
3702 sc = le16_to_cpu(hdr->seq_ctrl);
3703 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3704 IWL_ERROR("BUG_ON idx doesn't match seq control"
3705 " idx=%d, seq_idx=%d, seq=%d\n",
3706 idx, SEQ_TO_SN(sc),
3707 hdr->seq_ctrl);
3708 return -1;
3709 }
3710
3711 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3712 i, idx, SEQ_TO_SN(sc));
3713
3714 sh = idx - start;
3715 if (sh > 64) {
3716 sh = (start - idx) + 0xff;
3717 bitmap = bitmap << sh;
3718 sh = 0;
3719 start = idx;
3720 } else if (sh < -64)
3721 sh = 0xff - (start - idx);
3722 else if (sh < 0) {
3723 sh = start - idx;
3724 start = idx;
3725 bitmap = bitmap << sh;
3726 sh = 0;
3727 }
3728 bitmap |= (1 << sh);
3729 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3730 start, (u32)(bitmap & 0xFFFFFFFF));
3731 }
3732
3733 agg->bitmap0 = bitmap & 0xFFFFFFFF;
3734 agg->bitmap1 = bitmap >> 32;
3735 agg->start_idx = start;
3736 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3737 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
3738 agg->frame_count, agg->start_idx,
3739 agg->bitmap0);
3740
3741 if (bitmap)
3742 agg->wait_for_ba = 1;
3743 }
3744 return 0;
3745}
3746#endif
3747#endif
3748
6440adb5
BC
3749/**
3750 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3751 */
bb8c093b
CH
3752static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3753 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3754{
bb8c093b 3755 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3756 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3757 int txq_id = SEQ_TO_QUEUE(sequence);
3758 int index = SEQ_TO_INDEX(sequence);
bb8c093b 3759 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
b481de9c 3760 struct ieee80211_tx_status *tx_status;
bb8c093b 3761 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
b481de9c 3762 u32 status = le32_to_cpu(tx_resp->status);
c8b0e6e1
CH
3763#ifdef CONFIG_IWL4965_HT
3764#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
3765 int tid, sta_id;
3766#endif
3767#endif
3768
3769 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3770 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3771 "is out of range [0-%d] %d %d\n", txq_id,
fc4b6853
TW
3772 index, txq->q.n_bd, txq->q.write_ptr,
3773 txq->q.read_ptr);
b481de9c
ZY
3774 return;
3775 }
3776
c8b0e6e1
CH
3777#ifdef CONFIG_IWL4965_HT
3778#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3779 if (txq->sched_retry) {
bb8c093b 3780 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
b481de9c 3781 struct ieee80211_hdr *hdr =
bb8c093b
CH
3782 iwl4965_tx_queue_get_hdr(priv, txq_id, index);
3783 struct iwl4965_ht_agg *agg = NULL;
b481de9c
ZY
3784 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3785
3786 if (qc == NULL) {
3787 IWL_ERROR("BUG_ON qc is null!!!!\n");
3788 return;
3789 }
3790
3791 tid = le16_to_cpu(*qc) & 0xf;
3792
bb8c093b 3793 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
b481de9c
ZY
3794 if (unlikely(sta_id == IWL_INVALID_STATION)) {
3795 IWL_ERROR("Station not known for\n");
3796 return;
3797 }
3798
3799 agg = &priv->stations[sta_id].tid[tid].agg;
3800
3801 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
3802
3803 if ((tx_resp->frame_count == 1) &&
bb8c093b 3804 !iwl4965_is_tx_success(status)) {
b481de9c
ZY
3805 /* TODO: send BAR */
3806 }
3807
fc4b6853 3808 if ((txq->q.read_ptr != (scd_ssn & 0xff))) {
bb8c093b 3809 index = iwl4965_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
b481de9c
ZY
3810 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3811 "%d index %d\n", scd_ssn , index);
bb8c093b 3812 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
3813 }
3814 } else {
c8b0e6e1
CH
3815#endif /* CONFIG_IWL4965_HT_AGG */
3816#endif /* CONFIG_IWL4965_HT */
fc4b6853 3817 tx_status = &(txq->txb[txq->q.read_ptr].status);
b481de9c
ZY
3818
3819 tx_status->retry_count = tx_resp->failure_frame;
3820 tx_status->queue_number = status;
3821 tx_status->queue_length = tx_resp->bt_kill_count;
3822 tx_status->queue_length |= tx_resp->failure_rts;
3823
3824 tx_status->flags =
bb8c093b 3825 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
b481de9c
ZY
3826
3827 tx_status->control.tx_rate =
bb8c093b 3828 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3829
3830 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
bb8c093b 3831 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
b481de9c
ZY
3832 status, le32_to_cpu(tx_resp->rate_n_flags),
3833 tx_resp->failure_frame);
3834
3835 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3836 if (index != -1)
bb8c093b 3837 iwl4965_tx_queue_reclaim(priv, txq_id, index);
c8b0e6e1
CH
3838#ifdef CONFIG_IWL4965_HT
3839#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3840 }
c8b0e6e1
CH
3841#endif /* CONFIG_IWL4965_HT_AGG */
3842#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
3843
3844 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3845 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3846}
3847
3848
bb8c093b
CH
3849static void iwl4965_rx_reply_alive(struct iwl4965_priv *priv,
3850 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3851{
bb8c093b
CH
3852 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3853 struct iwl4965_alive_resp *palive;
b481de9c
ZY
3854 struct delayed_work *pwork;
3855
3856 palive = &pkt->u.alive_frame;
3857
3858 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3859 "0x%01X 0x%01X\n",
3860 palive->is_valid, palive->ver_type,
3861 palive->ver_subtype);
3862
3863 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3864 IWL_DEBUG_INFO("Initialization Alive received.\n");
3865 memcpy(&priv->card_alive_init,
3866 &pkt->u.alive_frame,
bb8c093b 3867 sizeof(struct iwl4965_init_alive_resp));
b481de9c
ZY
3868 pwork = &priv->init_alive_start;
3869 } else {
3870 IWL_DEBUG_INFO("Runtime Alive received.\n");
3871 memcpy(&priv->card_alive, &pkt->u.alive_frame,
bb8c093b 3872 sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
3873 pwork = &priv->alive_start;
3874 }
3875
3876 /* We delay the ALIVE response by 5ms to
3877 * give the HW RF Kill time to activate... */
3878 if (palive->is_valid == UCODE_VALID_OK)
3879 queue_delayed_work(priv->workqueue, pwork,
3880 msecs_to_jiffies(5));
3881 else
3882 IWL_WARNING("uCode did not respond OK.\n");
3883}
3884
bb8c093b
CH
3885static void iwl4965_rx_reply_add_sta(struct iwl4965_priv *priv,
3886 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3887{
bb8c093b 3888 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3889
3890 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3891 return;
3892}
3893
bb8c093b
CH
3894static void iwl4965_rx_reply_error(struct iwl4965_priv *priv,
3895 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3896{
bb8c093b 3897 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3898
3899 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3900 "seq 0x%04X ser 0x%08X\n",
3901 le32_to_cpu(pkt->u.err_resp.error_type),
3902 get_cmd_string(pkt->u.err_resp.cmd_id),
3903 pkt->u.err_resp.cmd_id,
3904 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3905 le32_to_cpu(pkt->u.err_resp.error_info));
3906}
3907
3908#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3909
bb8c093b 3910static void iwl4965_rx_csa(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3911{
bb8c093b
CH
3912 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3913 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
3914 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
3915 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3916 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3917 rxon->channel = csa->channel;
3918 priv->staging_rxon.channel = csa->channel;
3919}
3920
bb8c093b
CH
3921static void iwl4965_rx_spectrum_measure_notif(struct iwl4965_priv *priv,
3922 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3923{
c8b0e6e1 3924#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
bb8c093b
CH
3925 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3926 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
3927
3928 if (!report->state) {
3929 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3930 "Spectrum Measure Notification: Start\n");
3931 return;
3932 }
3933
3934 memcpy(&priv->measure_report, report, sizeof(*report));
3935 priv->measurement_status |= MEASUREMENT_READY;
3936#endif
3937}
3938
bb8c093b
CH
3939static void iwl4965_rx_pm_sleep_notif(struct iwl4965_priv *priv,
3940 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3941{
c8b0e6e1 3942#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3943 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3944 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
3945 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3946 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3947#endif
3948}
3949
bb8c093b
CH
3950static void iwl4965_rx_pm_debug_statistics_notif(struct iwl4965_priv *priv,
3951 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3952{
bb8c093b 3953 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3954 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3955 "notification for %s:\n",
3956 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
bb8c093b 3957 iwl4965_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
b481de9c
ZY
3958}
3959
bb8c093b 3960static void iwl4965_bg_beacon_update(struct work_struct *work)
b481de9c 3961{
bb8c093b
CH
3962 struct iwl4965_priv *priv =
3963 container_of(work, struct iwl4965_priv, beacon_update);
b481de9c
ZY
3964 struct sk_buff *beacon;
3965
 3966 	/* Pull updated AP beacon from mac80211; this will fail if not in AP mode */
3967 beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL);
3968
3969 if (!beacon) {
3970 IWL_ERROR("update beacon failed\n");
3971 return;
3972 }
3973
3974 mutex_lock(&priv->mutex);
3975 /* new beacon skb is allocated every time; dispose previous.*/
3976 if (priv->ibss_beacon)
3977 dev_kfree_skb(priv->ibss_beacon);
3978
3979 priv->ibss_beacon = beacon;
3980 mutex_unlock(&priv->mutex);
3981
bb8c093b 3982 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
3983}
3984
bb8c093b
CH
3985static void iwl4965_rx_beacon_notif(struct iwl4965_priv *priv,
3986 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3987{
c8b0e6e1 3988#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3989 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3990 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3991 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
b481de9c
ZY
3992
3993 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3994 "tsf %d %d rate %d\n",
3995 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3996 beacon->beacon_notify_hdr.failure_frame,
3997 le32_to_cpu(beacon->ibss_mgr_status),
3998 le32_to_cpu(beacon->high_tsf),
3999 le32_to_cpu(beacon->low_tsf), rate);
4000#endif
4001
4002 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
4003 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
4004 queue_work(priv->workqueue, &priv->beacon_update);
4005}
4006
4007/* Service response to REPLY_SCAN_CMD (0x80) */
bb8c093b
CH
4008static void iwl4965_rx_reply_scan(struct iwl4965_priv *priv,
4009 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4010{
c8b0e6e1 4011#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
4012 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4013 struct iwl4965_scanreq_notification *notif =
4014 (struct iwl4965_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
4015
4016 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
4017#endif
4018}
4019
4020/* Service SCAN_START_NOTIFICATION (0x82) */
bb8c093b
CH
4021static void iwl4965_rx_scan_start_notif(struct iwl4965_priv *priv,
4022 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4023{
bb8c093b
CH
4024 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4025 struct iwl4965_scanstart_notification *notif =
4026 (struct iwl4965_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
4027 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
4028 IWL_DEBUG_SCAN("Scan start: "
4029 "%d [802.11%s] "
4030 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
4031 notif->channel,
4032 notif->band ? "bg" : "a",
4033 notif->tsf_high,
4034 notif->tsf_low, notif->status, notif->beacon_timer);
4035}
4036
4037/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
bb8c093b
CH
4038static void iwl4965_rx_scan_results_notif(struct iwl4965_priv *priv,
4039 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4040{
bb8c093b
CH
4041 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4042 struct iwl4965_scanresults_notification *notif =
4043 (struct iwl4965_scanresults_notification *)pkt->u.raw;
b481de9c
ZY
4044
4045 IWL_DEBUG_SCAN("Scan ch.res: "
4046 "%d [802.11%s] "
4047 "(TSF: 0x%08X:%08X) - %d "
4048 "elapsed=%lu usec (%dms since last)\n",
4049 notif->channel,
4050 notif->band ? "bg" : "a",
4051 le32_to_cpu(notif->tsf_high),
4052 le32_to_cpu(notif->tsf_low),
4053 le32_to_cpu(notif->statistics[0]),
4054 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
4055 jiffies_to_msecs(elapsed_jiffies
4056 (priv->last_scan_jiffies, jiffies)));
4057
4058 priv->last_scan_jiffies = jiffies;
7878a5a4 4059 priv->next_scan_jiffies = 0;
b481de9c
ZY
4060}
4061
4062/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
bb8c093b
CH
4063static void iwl4965_rx_scan_complete_notif(struct iwl4965_priv *priv,
4064 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4065{
bb8c093b
CH
4066 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4067 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
b481de9c
ZY
4068
4069 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
4070 scan_notif->scanned_channels,
4071 scan_notif->tsf_low,
4072 scan_notif->tsf_high, scan_notif->status);
4073
4074 /* The HW is no longer scanning */
4075 clear_bit(STATUS_SCAN_HW, &priv->status);
4076
4077 /* The scan completion notification came in, so kill that timer... */
4078 cancel_delayed_work(&priv->scan_check);
4079
4080 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
4081 (priv->scan_bands == 2) ? "2.4" : "5.2",
4082 jiffies_to_msecs(elapsed_jiffies
4083 (priv->scan_pass_start, jiffies)));
4084
4085 /* Remove this scanned band from the list
4086 * of pending bands to scan */
4087 priv->scan_bands--;
4088
4089 /* If a request to abort was given, or the scan did not succeed
4090 * then we reset the scan state machine and terminate,
4091 * re-queuing another scan if one has been requested */
4092 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4093 IWL_DEBUG_INFO("Aborted scan completed.\n");
4094 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
4095 } else {
4096 /* If there are more bands on this scan pass reschedule */
4097 if (priv->scan_bands > 0)
4098 goto reschedule;
4099 }
4100
4101 priv->last_scan_jiffies = jiffies;
7878a5a4 4102 priv->next_scan_jiffies = 0;
b481de9c
ZY
4103 IWL_DEBUG_INFO("Setting scan to off\n");
4104
4105 clear_bit(STATUS_SCANNING, &priv->status);
4106
4107 IWL_DEBUG_INFO("Scan took %dms\n",
4108 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
4109
4110 queue_work(priv->workqueue, &priv->scan_completed);
4111
4112 return;
4113
4114reschedule:
4115 priv->scan_pass_start = jiffies;
4116 queue_work(priv->workqueue, &priv->request_scan);
4117}
4118
4119/* Handle notification from uCode that card's power state is changing
4120 * due to software, hardware, or critical temperature RFKILL */
bb8c093b
CH
4121static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv,
4122 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4123{
bb8c093b 4124 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
4125 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4126 unsigned long status = priv->status;
4127
4128 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
4129 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4130 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
4131
4132 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4133 RF_CARD_DISABLED)) {
4134
bb8c093b 4135 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
4136 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4137
bb8c093b
CH
4138 if (!iwl4965_grab_nic_access(priv)) {
4139 iwl4965_write_direct32(
b481de9c
ZY
4140 priv, HBUS_TARG_MBX_C,
4141 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4142
bb8c093b 4143 iwl4965_release_nic_access(priv);
b481de9c
ZY
4144 }
4145
4146 if (!(flags & RXON_CARD_DISABLED)) {
bb8c093b 4147 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 4148 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
bb8c093b
CH
4149 if (!iwl4965_grab_nic_access(priv)) {
4150 iwl4965_write_direct32(
b481de9c
ZY
4151 priv, HBUS_TARG_MBX_C,
4152 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4153
bb8c093b 4154 iwl4965_release_nic_access(priv);
b481de9c
ZY
4155 }
4156 }
4157
4158 if (flags & RF_CARD_DISABLED) {
bb8c093b 4159 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c 4160 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
bb8c093b
CH
4161 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
4162 if (!iwl4965_grab_nic_access(priv))
4163 iwl4965_release_nic_access(priv);
b481de9c
ZY
4164 }
4165 }
4166
4167 if (flags & HW_CARD_DISABLED)
4168 set_bit(STATUS_RF_KILL_HW, &priv->status);
4169 else
4170 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4171
4172
4173 if (flags & SW_CARD_DISABLED)
4174 set_bit(STATUS_RF_KILL_SW, &priv->status);
4175 else
4176 clear_bit(STATUS_RF_KILL_SW, &priv->status);
4177
4178 if (!(flags & RXON_CARD_DISABLED))
bb8c093b 4179 iwl4965_scan_cancel(priv);
b481de9c
ZY
4180
4181 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
4182 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
4183 (test_bit(STATUS_RF_KILL_SW, &status) !=
4184 test_bit(STATUS_RF_KILL_SW, &priv->status)))
4185 queue_work(priv->workqueue, &priv->rf_kill);
4186 else
4187 wake_up_interruptible(&priv->wait_command_queue);
4188}
4189
4190/**
bb8c093b 4191 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
4192 *
4193 * Setup the RX handlers for each of the reply types sent from the uCode
4194 * to the host.
4195 *
4196 * This function chains into the hardware specific files for them to setup
4197 * any hardware specific handlers as well.
4198 */
bb8c093b 4199static void iwl4965_setup_rx_handlers(struct iwl4965_priv *priv)
b481de9c 4200{
bb8c093b
CH
4201 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
4202 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
4203 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
4204 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
b481de9c 4205 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
4206 iwl4965_rx_spectrum_measure_notif;
4207 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl4965_rx_pm_sleep_notif;
b481de9c 4208 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
4209 iwl4965_rx_pm_debug_statistics_notif;
4210 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
b481de9c 4211
9fbab516
BC
4212 /*
4213 * The same handler is used for both the REPLY to a discrete
4214 * statistics request from the host as well as for the periodic
4215 * statistics notifications (after received beacons) from the uCode.
b481de9c 4216 */
bb8c093b
CH
4217 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
4218 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
b481de9c 4219
bb8c093b
CH
4220 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
4221 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
b481de9c 4222 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 4223 iwl4965_rx_scan_results_notif;
b481de9c 4224 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
4225 iwl4965_rx_scan_complete_notif;
4226 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
4227 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
b481de9c 4228
9fbab516 4229 /* Set up hardware specific Rx handlers */
bb8c093b 4230 iwl4965_hw_rx_handler_setup(priv);
b481de9c
ZY
4231}
4232
4233/**
bb8c093b 4234 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
4235 * @rxb: Rx buffer to reclaim
4236 *
4237 * If an Rx buffer has an async callback associated with it the callback
4238 * will be executed. The attached skb (if present) will only be freed
4239 * if the callback returns 1
4240 */
bb8c093b
CH
4241static void iwl4965_tx_cmd_complete(struct iwl4965_priv *priv,
4242 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4243{
bb8c093b 4244 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4245 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4246 int txq_id = SEQ_TO_QUEUE(sequence);
4247 int index = SEQ_TO_INDEX(sequence);
4248 int huge = sequence & SEQ_HUGE_FRAME;
4249 int cmd_index;
bb8c093b 4250 struct iwl4965_cmd *cmd;
b481de9c
ZY
4251
4252 /* If a Tx command is being handled and it isn't in the actual
 4253 	 * command queue then a command routing bug has been introduced
4254 * in the queue management code. */
4255 if (txq_id != IWL_CMD_QUEUE_NUM)
4256 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
4257 txq_id, pkt->hdr.cmd);
4258 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
4259
4260 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
4261 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
4262
4263 /* Input error checking is done when commands are added to queue. */
4264 if (cmd->meta.flags & CMD_WANT_SKB) {
4265 cmd->meta.source->u.skb = rxb->skb;
4266 rxb->skb = NULL;
4267 } else if (cmd->meta.u.callback &&
4268 !cmd->meta.u.callback(priv, cmd, rxb->skb))
4269 rxb->skb = NULL;
4270
bb8c093b 4271 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
4272
4273 if (!(cmd->meta.flags & CMD_ASYNC)) {
4274 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4275 wake_up_interruptible(&priv->wait_command_queue);
4276 }
4277}
4278
4279/************************** RX-FUNCTIONS ****************************/
4280/*
4281 * Rx theory of operation
4282 *
9fbab516
BC
4283 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
4284 * each of which point to Receive Buffers to be filled by 4965. These get
4285 * used not only for Rx frames, but for any command response or notification
4286 * from the 4965. The driver and 4965 manage the Rx buffers by means
4287 * of indexes into the circular buffer.
b481de9c
ZY
4288 *
4289 * Rx Queue Indexes
4290 * The host/firmware share two index registers for managing the Rx buffers.
4291 *
4292 * The READ index maps to the first position that the firmware may be writing
4293 * to -- the driver can read up to (but not including) this position and get
4294 * good data.
4295 * The READ index is managed by the firmware once the card is enabled.
4296 *
4297 * The WRITE index maps to the last position the driver has read from -- the
 4298 * position preceding WRITE is the last slot in which the firmware can place a packet.
4299 *
4300 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4301 * WRITE = READ.
4302 *
9fbab516 4303 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
4304 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4305 *
9fbab516 4306 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
4307 * and fire the RX interrupt. The driver can then query the READ index and
4308 * process as many packets as possible, moving the WRITE index forward as it
4309 * resets the Rx queue buffers with new memory.
4310 *
4311 * The management in the driver is as follows:
4312 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
4313 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 4314 * to replenish the iwl->rxq->rx_free.
bb8c093b 4315 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
4316 * iwl->rxq is replenished and the READ INDEX is updated (updating the
4317 * 'processed' and 'read' driver indexes as well)
4318 * + A received packet is processed and handed to the kernel network stack,
4319 * detached from the iwl->rxq. The driver 'processed' index is updated.
4320 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
4321 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
4322 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
4323 * were enough free buffers and RX_STALLED is set it is cleared.
4324 *
4325 *
4326 * Driver sequence:
4327 *
9fbab516
BC
4328 * iwl4965_rx_queue_alloc() Allocates rx_free
4329 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 4330 * iwl4965_rx_queue_restock
9fbab516 4331 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
4332 * queue, updates firmware pointers, and updates
4333 * the WRITE index. If insufficient rx_free buffers
bb8c093b 4334 * are available, schedules iwl4965_rx_replenish
b481de9c
ZY
4335 *
4336 * -- enable interrupts --
9fbab516 4337 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
b481de9c
ZY
4338 * READ INDEX, detaching the SKB from the pool.
4339 * Moves the packet buffer from queue to rx_used.
bb8c093b 4340 * Calls iwl4965_rx_queue_restock to refill any empty
b481de9c
ZY
4341 * slots.
4342 * ...
4343 *
4344 */
4345
4346/**
bb8c093b 4347 * iwl4965_rx_queue_space - Return number of free slots available in queue.
b481de9c 4348 */
bb8c093b 4349static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
b481de9c
ZY
4350{
4351 int s = q->read - q->write;
4352 if (s <= 0)
4353 s += RX_QUEUE_SIZE;
 4354 	/* keep some headroom so a full queue is never mistaken for an empty one */
4355 s -= 2;
4356 if (s < 0)
4357 s = 0;
4358 return s;
4359}
4360
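/*
 * Illustrative note (editorial addition, not in the original source): a
 * worked example of iwl4965_rx_queue_space() above, assuming for the sake
 * of the numbers that RX_QUEUE_SIZE == 256 (as defined elsewhere in the
 * driver):
 *
 *	read = 10, write = 200
 *	s = 10 - 200 = -190  ->  -190 + 256 = 66  ->  66 - 2 = 64 free slots
 *
 * The "- 2" is the safety margin mentioned in the comment above, so a
 * completely full queue is never reported the same way as an empty one.
 */
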
4361/**
bb8c093b 4362 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
b481de9c 4363 */
bb8c093b 4364int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_rx_queue *q)
b481de9c
ZY
4365{
4366 u32 reg = 0;
4367 int rc = 0;
4368 unsigned long flags;
4369
4370 spin_lock_irqsave(&q->lock, flags);
4371
4372 if (q->need_update == 0)
4373 goto exit_unlock;
4374
6440adb5 4375 /* If power-saving is in use, make sure device is awake */
b481de9c 4376 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
bb8c093b 4377 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4378
4379 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
bb8c093b 4380 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4381 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4382 goto exit_unlock;
4383 }
4384
bb8c093b 4385 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4386 if (rc)
4387 goto exit_unlock;
4388
6440adb5 4389 /* Device expects a multiple of 8 */
bb8c093b 4390 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
b481de9c 4391 q->write & ~0x7);
bb8c093b 4392 iwl4965_release_nic_access(priv);
6440adb5
BC
4393
4394 /* Else device is assumed to be awake */
b481de9c 4395 } else
6440adb5 4396 /* Device expects a multiple of 8 */
bb8c093b 4397 iwl4965_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
b481de9c
ZY
4398
4399
4400 q->need_update = 0;
4401
4402 exit_unlock:
4403 spin_unlock_irqrestore(&q->lock, flags);
4404 return rc;
4405}
4406
4407/**
9fbab516 4408 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 4409 */
bb8c093b 4410static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl4965_priv *priv,
b481de9c
ZY
4411 dma_addr_t dma_addr)
4412{
4413 return cpu_to_le32((u32)(dma_addr >> 8));
4414}
4415
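/*
 * Illustrative note (editorial addition, not in the original source):
 * iwl4965_dma_addr2rbd_ptr() above stores the DMA address shifted right
 * by 8, i.e. in 256-byte units, so for example a buffer at DMA address
 * 0x12345600 is written into the RBD as cpu_to_le32(0x00123456).
 */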
4416
4417/**
bb8c093b 4418 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 4419 *
9fbab516 4420 * If there are slots in the RX queue that need to be restocked,
b481de9c 4421 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 4422 * as we can, pulling from rx_free.
b481de9c
ZY
4423 *
4424 * This moves the 'write' index forward to catch up with 'processed', and
4425 * also updates the memory address in the firmware to reference the new
4426 * target buffer.
4427 */
bb8c093b 4428static int iwl4965_rx_queue_restock(struct iwl4965_priv *priv)
b481de9c 4429{
bb8c093b 4430 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4431 struct list_head *element;
bb8c093b 4432 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4433 unsigned long flags;
4434 int write, rc;
4435
4436 spin_lock_irqsave(&rxq->lock, flags);
4437 write = rxq->write & ~0x7;
bb8c093b 4438 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 4439 /* Get next free Rx buffer, remove from free list */
b481de9c 4440 element = rxq->rx_free.next;
bb8c093b 4441 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
b481de9c 4442 list_del(element);
6440adb5
BC
4443
4444 /* Point to Rx buffer via next RBD in circular buffer */
bb8c093b 4445 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
b481de9c
ZY
4446 rxq->queue[rxq->write] = rxb;
4447 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4448 rxq->free_count--;
4449 }
4450 spin_unlock_irqrestore(&rxq->lock, flags);
4451 /* If the pre-allocated buffer pool is dropping low, schedule to
4452 * refill it */
4453 if (rxq->free_count <= RX_LOW_WATERMARK)
4454 queue_work(priv->workqueue, &priv->rx_replenish);
4455
4456
6440adb5
BC
4457 /* If we've added more space for the firmware to place data, tell it.
4458 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
4459 if ((write != (rxq->write & ~0x7))
4460 || (abs(rxq->write - rxq->read) > 7)) {
4461 spin_lock_irqsave(&rxq->lock, flags);
4462 rxq->need_update = 1;
4463 spin_unlock_irqrestore(&rxq->lock, flags);
bb8c093b 4464 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
4465 if (rc)
4466 return rc;
4467 }
4468
4469 return 0;
4470}
4471
4472/**
bb8c093b 4473 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
4474 *
4475 * When moving to rx_free an SKB is allocated for the slot.
4476 *
bb8c093b 4477 * Also restock the Rx queue via iwl4965_rx_queue_restock.
01ebd063 4478 * This is called as a scheduled work item (except during initialization)
b481de9c 4479 */
5c0eef96 4480static void iwl4965_rx_allocate(struct iwl4965_priv *priv)
b481de9c 4481{
bb8c093b 4482 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4483 struct list_head *element;
bb8c093b 4484 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4485 unsigned long flags;
4486 spin_lock_irqsave(&rxq->lock, flags);
4487 while (!list_empty(&rxq->rx_used)) {
4488 element = rxq->rx_used.next;
bb8c093b 4489 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
6440adb5
BC
4490
4491 /* Alloc a new receive buffer */
b481de9c
ZY
4492 rxb->skb =
4493 alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
4494 if (!rxb->skb) {
4495 if (net_ratelimit())
4496 printk(KERN_CRIT DRV_NAME
4497 ": Can not allocate SKB buffers\n");
4498 /* We don't reschedule replenish work here -- we will
4499 * call the restock method and if it still needs
4500 * more buffers it will schedule replenish */
4501 break;
4502 }
4503 priv->alloc_rxb_skb++;
4504 list_del(element);
6440adb5
BC
4505
4506 /* Get physical address of RB/SKB */
b481de9c
ZY
4507 rxb->dma_addr =
4508 pci_map_single(priv->pci_dev, rxb->skb->data,
4509 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4510 list_add_tail(&rxb->list, &rxq->rx_free);
4511 rxq->free_count++;
4512 }
4513 spin_unlock_irqrestore(&rxq->lock, flags);
5c0eef96
MA
4514}
4515
4516/*
 4517 * This should be called while priv->lock is held.
 4518 */
4519void __iwl4965_rx_replenish(void *data)
4520{
4521 struct iwl4965_priv *priv = data;
4522
4523 iwl4965_rx_allocate(priv);
4524 iwl4965_rx_queue_restock(priv);
4525}
4526
4527
4528void iwl4965_rx_replenish(void *data)
4529{
4530 struct iwl4965_priv *priv = data;
4531 unsigned long flags;
4532
4533 iwl4965_rx_allocate(priv);
b481de9c
ZY
4534
4535 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 4536 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4537 spin_unlock_irqrestore(&priv->lock, flags);
4538}
4539
4540/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
9fbab516 4541 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
b481de9c
ZY
4542 * This free routine walks the list of POOL entries and if SKB is set to
 4543 * non-NULL it is unmapped and freed.
4544 */
bb8c093b 4545static void iwl4965_rx_queue_free(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4546{
4547 int i;
4548 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4549 if (rxq->pool[i].skb != NULL) {
4550 pci_unmap_single(priv->pci_dev,
4551 rxq->pool[i].dma_addr,
4552 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4553 dev_kfree_skb(rxq->pool[i].skb);
4554 }
4555 }
4556
4557 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4558 rxq->dma_addr);
4559 rxq->bd = NULL;
4560}
4561
bb8c093b 4562int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv)
b481de9c 4563{
bb8c093b 4564 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4565 struct pci_dev *dev = priv->pci_dev;
4566 int i;
4567
4568 spin_lock_init(&rxq->lock);
4569 INIT_LIST_HEAD(&rxq->rx_free);
4570 INIT_LIST_HEAD(&rxq->rx_used);
6440adb5
BC
4571
4572 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
b481de9c
ZY
4573 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4574 if (!rxq->bd)
4575 return -ENOMEM;
6440adb5 4576
b481de9c
ZY
4577 /* Fill the rx_used queue with _all_ of the Rx buffers */
4578 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4579 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
6440adb5 4580
b481de9c
ZY
4581 /* Set us so that we have processed and used all buffers, but have
4582 * not restocked the Rx queue with fresh buffers */
4583 rxq->read = rxq->write = 0;
4584 rxq->free_count = 0;
4585 rxq->need_update = 0;
4586 return 0;
4587}
4588
bb8c093b 4589void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4590{
4591 unsigned long flags;
4592 int i;
4593 spin_lock_irqsave(&rxq->lock, flags);
4594 INIT_LIST_HEAD(&rxq->rx_free);
4595 INIT_LIST_HEAD(&rxq->rx_used);
4596 /* Fill the rx_used queue with _all_ of the Rx buffers */
4597 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4598 /* In the reset function, these buffers may have been allocated
4599 * to an SKB, so we need to unmap and free potential storage */
4600 if (rxq->pool[i].skb != NULL) {
4601 pci_unmap_single(priv->pci_dev,
4602 rxq->pool[i].dma_addr,
4603 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4604 priv->alloc_rxb_skb--;
4605 dev_kfree_skb(rxq->pool[i].skb);
4606 rxq->pool[i].skb = NULL;
4607 }
4608 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4609 }
4610
4611 /* Set us so that we have processed and used all buffers, but have
4612 * not restocked the Rx queue with fresh buffers */
4613 rxq->read = rxq->write = 0;
4614 rxq->free_count = 0;
4615 spin_unlock_irqrestore(&rxq->lock, flags);
4616}
4617
4618/* Convert linear signal-to-noise ratio into dB */
4619static u8 ratio2dB[100] = {
4620/* 0 1 2 3 4 5 6 7 8 9 */
4621 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4622 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4623 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4624 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4625 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4626 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4627 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4628 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4629 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4630 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4631};
4632
4633/* Calculates a relative dB value from a ratio of linear
4634 * (i.e. not dB) signal levels.
4635 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 4636int iwl4965_calc_db_from_ratio(int sig_ratio)
b481de9c 4637{
c899a575
AB
4638 /* 1000:1 or higher just report as 60 dB */
4639 if (sig_ratio >= 1000)
b481de9c
ZY
4640 return 60;
4641
c899a575 4642 /* 100:1 or higher, divide by 10 and use table,
b481de9c 4643 * add 20 dB to make up for divide by 10 */
c899a575 4644 if (sig_ratio >= 100)
b481de9c
ZY
4645 return (20 + (int)ratio2dB[sig_ratio/10]);
4646
4647 /* We shouldn't see this */
4648 if (sig_ratio < 1)
4649 return 0;
4650
4651 /* Use table for ratios 1:1 - 99:1 */
4652 return (int)ratio2dB[sig_ratio];
4653}
4654
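/*
 * Illustrative note (editorial addition, not in the original source):
 * worked examples of iwl4965_calc_db_from_ratio() above, using the
 * ratio2dB[] table:
 *
 *	sig_ratio = 25    ->  ratio2dB[25]      = 28 dB
 *	sig_ratio = 250   ->  20 + ratio2dB[25] = 48 dB
 *	sig_ratio = 2000  ->  clamped to          60 dB
 */
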
4655#define PERFECT_RSSI (-20) /* dBm */
4656#define WORST_RSSI (-95) /* dBm */
4657#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4658
4659/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4660 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4661 * about formulas used below. */
bb8c093b 4662int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
4663{
4664 int sig_qual;
4665 int degradation = PERFECT_RSSI - rssi_dbm;
4666
4667 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4668 * as indicator; formula is (signal dbm - noise dbm).
4669 * SNR at or above 40 is a great signal (100%).
4670 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4671 * Weakest usable signal is usually 10 - 15 dB SNR. */
4672 if (noise_dbm) {
4673 if (rssi_dbm - noise_dbm >= 40)
4674 return 100;
4675 else if (rssi_dbm < noise_dbm)
4676 return 0;
4677 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4678
4679 /* Else use just the signal level.
4680 * This formula is a least squares fit of data points collected and
4681 * compared with a reference system that had a percentage (%) display
4682 * for signal quality. */
4683 } else
4684 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4685 (15 * RSSI_RANGE + 62 * degradation)) /
4686 (RSSI_RANGE * RSSI_RANGE);
4687
4688 if (sig_qual > 100)
4689 sig_qual = 100;
4690 else if (sig_qual < 1)
4691 sig_qual = 0;
4692
4693 return sig_qual;
4694}
4695
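/*
 * Illustrative note (editorial addition, not in the original source):
 * worked examples of iwl4965_calc_sig_qual() above:
 *
 *	With a noise measurement: rssi = -60 dBm, noise = -85 dBm
 *	    SNR = 25 dB  ->  sig_qual = (25 * 5) / 2 = 62%
 *
 *	Without a noise measurement: rssi = -60 dBm
 *	    degradation = -20 - (-60) = 40, RSSI_RANGE = 75
 *	    sig_qual = (100*75*75 - 40*(15*75 + 62*40)) / (75*75) = 74%
 */
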
4696/**
9fbab516 4697 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
4698 *
4699 * Uses the priv->rx_handlers callback function array to invoke
4700 * the appropriate handlers, including command responses,
4701 * frame-received notifications, and other notifications.
4702 */
bb8c093b 4703static void iwl4965_rx_handle(struct iwl4965_priv *priv)
b481de9c 4704{
bb8c093b
CH
4705 struct iwl4965_rx_mem_buffer *rxb;
4706 struct iwl4965_rx_packet *pkt;
4707 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4708 u32 r, i;
4709 int reclaim;
4710 unsigned long flags;
5c0eef96
MA
4711 u8 fill_rx = 0;
4712 u32 count = 0;
b481de9c 4713
6440adb5
BC
4714 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4715 * buffer that the driver may process (last buffer filled by ucode). */
bb8c093b 4716 r = iwl4965_hw_get_rx_read(priv);
b481de9c
ZY
4717 i = rxq->read;
4718
4719 /* Rx interrupt, but nothing sent from uCode */
4720 if (i == r)
4721 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4722
5c0eef96
MA
4723 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
4724 fill_rx = 1;
4725
b481de9c
ZY
4726 while (i != r) {
4727 rxb = rxq->queue[i];
4728
9fbab516 4729 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
4730 * then a bug has been introduced in the queue refilling
4731 * routines -- catch it here */
4732 BUG_ON(rxb == NULL);
4733
4734 rxq->queue[i] = NULL;
4735
4736 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4737 IWL_RX_BUF_SIZE,
4738 PCI_DMA_FROMDEVICE);
bb8c093b 4739 pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4740
4741 /* Reclaim a command buffer only if this packet is a response
4742 * to a (driver-originated) command.
4743 * If the packet (e.g. Rx frame) originated from uCode,
4744 * there is no command buffer to reclaim.
4745 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4746 * but apparently a few don't get set; catch them here. */
4747 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4748 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4749 (pkt->hdr.cmd != REPLY_4965_RX) &&
cfe01709 4750 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
b481de9c
ZY
4751 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4752 (pkt->hdr.cmd != REPLY_TX);
4753
4754 /* Based on type of command response or notification,
4755 * handle those that need handling via function in
bb8c093b 4756 * rx_handlers table. See iwl4965_setup_rx_handlers() */
b481de9c
ZY
4757 if (priv->rx_handlers[pkt->hdr.cmd]) {
4758 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4759 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4760 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4761 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4762 } else {
4763 /* No handling needed */
4764 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4765 "r %d i %d No handler needed for %s, 0x%02x\n",
4766 r, i, get_cmd_string(pkt->hdr.cmd),
4767 pkt->hdr.cmd);
4768 }
4769
4770 if (reclaim) {
9fbab516
BC
4771 /* Invoke any callbacks, transfer the skb to caller, and
4772 * fire off the (possibly) blocking iwl4965_send_cmd()
b481de9c
ZY
4773 * as we reclaim the driver command queue */
4774 if (rxb && rxb->skb)
bb8c093b 4775 iwl4965_tx_cmd_complete(priv, rxb);
b481de9c
ZY
4776 else
4777 IWL_WARNING("Claim null rxb?\n");
4778 }
4779
4780 /* For now we just don't re-use anything. We can tweak this
4781 * later to try and re-use notification packets and SKBs that
4782 * fail to Rx correctly */
4783 if (rxb->skb != NULL) {
4784 priv->alloc_rxb_skb--;
4785 dev_kfree_skb_any(rxb->skb);
4786 rxb->skb = NULL;
4787 }
4788
4789 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
4790 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4791 spin_lock_irqsave(&rxq->lock, flags);
4792 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4793 spin_unlock_irqrestore(&rxq->lock, flags);
4794 i = (i + 1) & RX_QUEUE_MASK;
5c0eef96
MA
4795 /* If there are a lot of unused frames,
 4796 		 * restock the Rx queue so ucode won't assert. */
4797 if (fill_rx) {
4798 count++;
4799 if (count >= 8) {
4800 priv->rxq.read = i;
4801 __iwl4965_rx_replenish(priv);
4802 count = 0;
4803 }
4804 }
b481de9c
ZY
4805 }
4806
4807 /* Backtrack one entry */
4808 priv->rxq.read = i;
bb8c093b 4809 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4810}
4811
6440adb5
BC
4812/**
4813 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4814 */
bb8c093b
CH
4815static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
4816 struct iwl4965_tx_queue *txq)
b481de9c
ZY
4817{
4818 u32 reg = 0;
4819 int rc = 0;
4820 int txq_id = txq->q.id;
4821
4822 if (txq->need_update == 0)
4823 return rc;
4824
4825 /* if we're trying to save power */
4826 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4827 /* wake up nic if it's powered down ...
4828 * uCode will wake up, and interrupt us again, so next
4829 * time we'll skip this part. */
bb8c093b 4830 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4831
4832 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4833 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
bb8c093b 4834 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4835 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4836 return rc;
4837 }
4838
4839 /* restore this queue's parameters in nic hardware. */
bb8c093b 4840 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4841 if (rc)
4842 return rc;
bb8c093b 4843 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
fc4b6853 4844 txq->q.write_ptr | (txq_id << 8));
bb8c093b 4845 iwl4965_release_nic_access(priv);
b481de9c
ZY
4846
4847 /* else not in power-save mode, uCode will never sleep when we're
4848 * trying to tx (during RFKILL, we're not trying to tx). */
4849 } else
bb8c093b 4850 iwl4965_write32(priv, HBUS_TARG_WRPTR,
fc4b6853 4851 txq->q.write_ptr | (txq_id << 8));
b481de9c
ZY
4852
4853 txq->need_update = 0;
4854
4855 return rc;
4856}
4857
c8b0e6e1 4858#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 4859static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
b481de9c 4860{
0795af57
JP
4861 DECLARE_MAC_BUF(mac);
4862
b481de9c 4863 IWL_DEBUG_RADIO("RX CONFIG:\n");
bb8c093b 4864 iwl4965_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
4865 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4866 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4867 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4868 le32_to_cpu(rxon->filter_flags));
4869 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4870 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4871 rxon->ofdm_basic_rates);
4872 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
0795af57
JP
4873 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
4874 print_mac(mac, rxon->node_addr));
4875 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4876 print_mac(mac, rxon->bssid_addr));
b481de9c
ZY
4877 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4878}
4879#endif
4880
bb8c093b 4881static void iwl4965_enable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4882{
4883 IWL_DEBUG_ISR("Enabling interrupts\n");
4884 set_bit(STATUS_INT_ENABLED, &priv->status);
bb8c093b 4885 iwl4965_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
4886}
4887
bb8c093b 4888static inline void iwl4965_disable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4889{
4890 clear_bit(STATUS_INT_ENABLED, &priv->status);
4891
4892 /* disable interrupts from uCode/NIC to host */
bb8c093b 4893 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
4894
4895 /* acknowledge/clear/reset any interrupts still pending
4896 * from uCode or flow handler (Rx/Tx DMA) */
bb8c093b
CH
4897 iwl4965_write32(priv, CSR_INT, 0xffffffff);
4898 iwl4965_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
4899 IWL_DEBUG_ISR("Disabled interrupts\n");
4900}
4901
4902static const char *desc_lookup(int i)
4903{
4904 switch (i) {
4905 case 1:
4906 return "FAIL";
4907 case 2:
4908 return "BAD_PARAM";
4909 case 3:
4910 return "BAD_CHECKSUM";
4911 case 4:
4912 return "NMI_INTERRUPT";
4913 case 5:
4914 return "SYSASSERT";
4915 case 6:
4916 return "FATAL_ERROR";
4917 }
4918
4919 return "UNKNOWN";
4920}
4921
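/*
 * For illustration: the same mapping as desc_lookup() above expressed as
 * a table, with index 0 doubling as the "UNKNOWN" fallback.  A sketch
 * only; the example_* names are hypothetical and not part of this driver.
 */
static const char *example_error_desc[] = {
	"UNKNOWN", "FAIL", "BAD_PARAM", "BAD_CHECKSUM",
	"NMI_INTERRUPT", "SYSASSERT", "FATAL_ERROR",
};

static inline const char *example_desc_lookup(int i)
{
	return example_error_desc[(i >= 1 && i <= 6) ? i : 0];
}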
4922#define ERROR_START_OFFSET (1 * sizeof(u32))
4923#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4924
bb8c093b 4925static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
b481de9c
ZY
4926{
4927 u32 data2, line;
4928 u32 desc, time, count, base, data1;
4929 u32 blink1, blink2, ilink1, ilink2;
4930 int rc;
4931
4932 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4933
bb8c093b 4934 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4935 IWL_ERROR("Invalid error log pointer 0x%08X\n", base);
4936 return;
4937 }
4938
bb8c093b 4939 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4940 if (rc) {
4941 IWL_WARNING("Can not read from adapter at this time.\n");
4942 return;
4943 }
4944
bb8c093b 4945 count = iwl4965_read_targ_mem(priv, base);
b481de9c
ZY
4946
4947 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4948 IWL_ERROR("Start IWL Error Log Dump:\n");
4949 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4950 priv->status, priv->config, count);
4951 }
4952
bb8c093b
CH
4953 desc = iwl4965_read_targ_mem(priv, base + 1 * sizeof(u32));
4954 blink1 = iwl4965_read_targ_mem(priv, base + 3 * sizeof(u32));
4955 blink2 = iwl4965_read_targ_mem(priv, base + 4 * sizeof(u32));
4956 ilink1 = iwl4965_read_targ_mem(priv, base + 5 * sizeof(u32));
4957 ilink2 = iwl4965_read_targ_mem(priv, base + 6 * sizeof(u32));
4958 data1 = iwl4965_read_targ_mem(priv, base + 7 * sizeof(u32));
4959 data2 = iwl4965_read_targ_mem(priv, base + 8 * sizeof(u32));
4960 line = iwl4965_read_targ_mem(priv, base + 9 * sizeof(u32));
4961 time = iwl4965_read_targ_mem(priv, base + 11 * sizeof(u32));
b481de9c
ZY
4962
4963 IWL_ERROR("Desc Time "
4964 "data1 data2 line\n");
4965 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4966 desc_lookup(desc), desc, time, data1, data2, line);
4967 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4968 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4969 ilink1, ilink2);
4970
bb8c093b 4971 iwl4965_release_nic_access(priv);
b481de9c
ZY
4972}
4973
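/*
 * For illustration: the SRAM word offsets read from the error-event table
 * by iwl4965_dump_nic_error_log() above, gathered in one place.  This
 * simply mirrors the reads above; the enum is a sketch, not a structure
 * taken from this driver's headers.
 */
enum example_error_log_word {
	EXAMPLE_ERR_COUNT  = 0,		/* number of valid entries        */
	EXAMPLE_ERR_DESC   = 1,		/* error type, see desc_lookup()  */
	EXAMPLE_ERR_BLINK1 = 3,
	EXAMPLE_ERR_BLINK2 = 4,
	EXAMPLE_ERR_ILINK1 = 5,
	EXAMPLE_ERR_ILINK2 = 6,
	EXAMPLE_ERR_DATA1  = 7,
	EXAMPLE_ERR_DATA2  = 8,
	EXAMPLE_ERR_LINE   = 9,		/* line number reported by uCode  */
	EXAMPLE_ERR_TIME   = 11,	/* uCode timestamp                */
};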
4974#define EVENT_START_OFFSET (4 * sizeof(u32))
4975
4976/**
bb8c093b 4977 * iwl4965_print_event_log - Dump error event log to syslog
b481de9c 4978 *
bb8c093b 4979 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained!
b481de9c 4980 */
bb8c093b 4981static void iwl4965_print_event_log(struct iwl4965_priv *priv, u32 start_idx,
b481de9c
ZY
4982 u32 num_events, u32 mode)
4983{
4984 u32 i;
4985 u32 base; /* SRAM byte address of event log header */
4986 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4987 u32 ptr; /* SRAM byte address of log data */
4988 u32 ev, time, data; /* event log data */
4989
4990 if (num_events == 0)
4991 return;
4992
4993 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4994
4995 if (mode == 0)
4996 event_size = 2 * sizeof(u32);
4997 else
4998 event_size = 3 * sizeof(u32);
4999
5000 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
5001
5002 /* "time" is actually "data" for mode 0 (no timestamp).
5003 * place event id # at far right for easier visual parsing. */
5004 for (i = 0; i < num_events; i++) {
bb8c093b 5005 ev = iwl4965_read_targ_mem(priv, ptr);
b481de9c 5006 ptr += sizeof(u32);
bb8c093b 5007 time = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
5008 ptr += sizeof(u32);
5009 if (mode == 0)
5010 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
5011 else {
bb8c093b 5012 data = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
5013 ptr += sizeof(u32);
5014 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
5015 }
5016 }
5017}
5018
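/*
 * For illustration: the SRAM address computation used by
 * iwl4965_print_event_log() above.  A sketch with a hypothetical helper
 * name; all offsets are in bytes.
 */
static inline u32 example_event_addr(u32 base, u32 idx, u32 mode)
{
	/* 4-word header, then 2 u32s per entry, or 3 when timestamped */
	u32 event_size = (mode == 0) ? 2 * sizeof(u32) : 3 * sizeof(u32);

	return base + EVENT_START_OFFSET + idx * event_size;
}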
bb8c093b 5019static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv)
b481de9c
ZY
5020{
5021 int rc;
5022 u32 base; /* SRAM byte address of event log header */
5023 u32 capacity; /* event log capacity in # entries */
5024 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
5025 u32 num_wraps; /* # times uCode wrapped to top of log */
5026 u32 next_entry; /* index of next entry to be written by uCode */
5027 u32 size; /* # entries that we'll print */
5028
5029 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 5030 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
5031 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
5032 return;
5033 }
5034
bb8c093b 5035 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
5036 if (rc) {
5037 IWL_WARNING("Can not read from adapter at this time.\n");
5038 return;
5039 }
5040
5041 /* event log header */
bb8c093b
CH
5042 capacity = iwl4965_read_targ_mem(priv, base);
5043 mode = iwl4965_read_targ_mem(priv, base + (1 * sizeof(u32)));
5044 num_wraps = iwl4965_read_targ_mem(priv, base + (2 * sizeof(u32)));
5045 next_entry = iwl4965_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
5046
5047 size = num_wraps ? capacity : next_entry;
5048
5049 /* bail out if nothing in log */
5050 if (size == 0) {
583fab37 5051 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
bb8c093b 5052 iwl4965_release_nic_access(priv);
b481de9c
ZY
5053 return;
5054 }
5055
583fab37 5056 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
5057 size, num_wraps);
5058
5059 /* if uCode has wrapped back to top of log, start at the oldest entry,
5060 * i.e. the next one that uCode would fill. */
5061 if (num_wraps)
bb8c093b 5062 iwl4965_print_event_log(priv, next_entry,
b481de9c
ZY
5063 capacity - next_entry, mode);
5064
5065 /* (then/else) start at top of log */
bb8c093b 5066 iwl4965_print_event_log(priv, 0, next_entry, mode);
b481de9c 5067
bb8c093b 5068 iwl4965_release_nic_access(priv);
b481de9c
ZY
5069}
5070
5071/**
bb8c093b 5072 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 5073 */
bb8c093b 5074static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
b481de9c 5075{
bb8c093b 5076 /* Set the FW error flag -- cleared on iwl4965_down */
b481de9c
ZY
5077 set_bit(STATUS_FW_ERROR, &priv->status);
5078
5079 /* Cancel currently queued command. */
5080 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
5081
c8b0e6e1 5082#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5083 if (iwl4965_debug_level & IWL_DL_FW_ERRORS) {
5084 iwl4965_dump_nic_error_log(priv);
5085 iwl4965_dump_nic_event_log(priv);
5086 iwl4965_print_rx_config_cmd(&priv->staging_rxon);
b481de9c
ZY
5087 }
5088#endif
5089
5090 wake_up_interruptible(&priv->wait_command_queue);
5091
5092 /* Keep the restart process from trying to send host
5093 * commands by clearing the READY status bit */
5094 clear_bit(STATUS_READY, &priv->status);
5095
5096 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5097 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
5098 "Restarting adapter due to uCode error.\n");
5099
bb8c093b 5100 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5101 memcpy(&priv->recovery_rxon, &priv->active_rxon,
5102 sizeof(priv->recovery_rxon));
5103 priv->error_recovering = 1;
5104 }
5105 queue_work(priv->workqueue, &priv->restart);
5106 }
5107}
5108
bb8c093b 5109static void iwl4965_error_recovery(struct iwl4965_priv *priv)
b481de9c
ZY
5110{
5111 unsigned long flags;
5112
5113 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
5114 sizeof(priv->staging_rxon));
5115 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5116 iwl4965_commit_rxon(priv);
b481de9c 5117
bb8c093b 5118 iwl4965_rxon_add_station(priv, priv->bssid, 1);
b481de9c
ZY
5119
5120 spin_lock_irqsave(&priv->lock, flags);
5121 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
5122 priv->error_recovering = 0;
5123 spin_unlock_irqrestore(&priv->lock, flags);
5124}
5125
bb8c093b 5126static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
b481de9c
ZY
5127{
5128 u32 inta, handled = 0;
5129 u32 inta_fh;
5130 unsigned long flags;
c8b0e6e1 5131#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
5132 u32 inta_mask;
5133#endif
5134
5135 spin_lock_irqsave(&priv->lock, flags);
5136
5137 /* Ack/clear/reset pending uCode interrupts.
5138 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
5139 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
bb8c093b
CH
5140 inta = iwl4965_read32(priv, CSR_INT);
5141 iwl4965_write32(priv, CSR_INT, inta);
b481de9c
ZY
5142
5143 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
5144 * Any new interrupts that happen after this, either while we're
5145 * in this tasklet, or later, will show up in next ISR/tasklet. */
bb8c093b
CH
5146 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
5147 iwl4965_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 5148
c8b0e6e1 5149#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5150 if (iwl4965_debug_level & IWL_DL_ISR) {
9fbab516
BC
5151 /* just for debug */
5152 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
b481de9c
ZY
5153 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5154 inta, inta_mask, inta_fh);
5155 }
5156#endif
5157
5158 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
5159 * atomic, make sure that inta covers all the interrupts that
5160 * we've discovered, even if FH interrupt came in just after
5161 * reading CSR_INT. */
5162 if (inta_fh & CSR_FH_INT_RX_MASK)
5163 inta |= CSR_INT_BIT_FH_RX;
5164 if (inta_fh & CSR_FH_INT_TX_MASK)
5165 inta |= CSR_INT_BIT_FH_TX;
5166
5167 /* Now service all interrupt bits discovered above. */
5168 if (inta & CSR_INT_BIT_HW_ERR) {
5169 IWL_ERROR("Microcode HW error detected. Restarting.\n");
5170
5171 /* Tell the device to stop sending interrupts */
bb8c093b 5172 iwl4965_disable_interrupts(priv);
b481de9c 5173
bb8c093b 5174 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5175
5176 handled |= CSR_INT_BIT_HW_ERR;
5177
5178 spin_unlock_irqrestore(&priv->lock, flags);
5179
5180 return;
5181 }
5182
c8b0e6e1 5183#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5184 if (iwl4965_debug_level & (IWL_DL_ISR)) {
b481de9c
ZY
5185 /* NIC fires this, but we don't use it, redundant with WAKEUP */
5186 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
5187 IWL_DEBUG_ISR("Microcode started or stopped.\n");
5188
5189 /* Alive notification via Rx interrupt will do the real work */
5190 if (inta & CSR_INT_BIT_ALIVE)
5191 IWL_DEBUG_ISR("Alive interrupt\n");
5192 }
5193#endif
5194 /* Safely ignore these bits for debug checks below */
5195 inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
5196
9fbab516 5197 /* HW RF KILL switch toggled */
b481de9c
ZY
5198 if (inta & CSR_INT_BIT_RF_KILL) {
5199 int hw_rf_kill = 0;
bb8c093b 5200 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
5201 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5202 hw_rf_kill = 1;
5203
5204 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
5205 "RF_KILL bit toggled to %s.\n",
5206 hw_rf_kill ? "disable radio":"enable radio");
5207
5208 /* Queue restart only if RF_KILL switch was set to "kill"
5209 * when we loaded driver, and is now set to "enable".
5210 * After we're Alive, RF_KILL gets handled by
5211 * iwl_rx_card_state_notif() */
53e49093
ZY
5212 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
5213 clear_bit(STATUS_RF_KILL_HW, &priv->status);
b481de9c 5214 queue_work(priv->workqueue, &priv->restart);
53e49093 5215 }
b481de9c
ZY
5216
5217 handled |= CSR_INT_BIT_RF_KILL;
5218 }
5219
9fbab516 5220 /* Chip got too hot and stopped itself */
b481de9c
ZY
5221 if (inta & CSR_INT_BIT_CT_KILL) {
5222 IWL_ERROR("Microcode CT kill error detected.\n");
5223 handled |= CSR_INT_BIT_CT_KILL;
5224 }
5225
5226 /* Error detected by uCode */
5227 if (inta & CSR_INT_BIT_SW_ERR) {
5228 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
5229 inta);
bb8c093b 5230 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5231 handled |= CSR_INT_BIT_SW_ERR;
5232 }
5233
5234 /* uCode wakes up after power-down sleep */
5235 if (inta & CSR_INT_BIT_WAKEUP) {
5236 IWL_DEBUG_ISR("Wakeup interrupt\n");
bb8c093b
CH
5237 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq);
5238 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]);
5239 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]);
5240 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]);
5241 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]);
5242 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]);
5243 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
5244
5245 handled |= CSR_INT_BIT_WAKEUP;
5246 }
5247
5248 /* All uCode command responses, including Tx command responses,
5249 * Rx "responses" (frame-received notification), and other
5250 * notifications from uCode come through here. */
5251 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 5252 iwl4965_rx_handle(priv);
b481de9c
ZY
5253 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
5254 }
5255
5256 if (inta & CSR_INT_BIT_FH_TX) {
5257 IWL_DEBUG_ISR("Tx interrupt\n");
5258 handled |= CSR_INT_BIT_FH_TX;
5259 }
5260
5261 if (inta & ~handled)
5262 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
5263
5264 if (inta & ~CSR_INI_SET_MASK) {
5265 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
5266 inta & ~CSR_INI_SET_MASK);
5267 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
5268 }
5269
5270 /* Re-enable all interrupts */
bb8c093b 5271 iwl4965_enable_interrupts(priv);
b481de9c 5272
c8b0e6e1 5273#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5274 if (iwl4965_debug_level & (IWL_DL_ISR)) {
5275 inta = iwl4965_read32(priv, CSR_INT);
5276 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
5277 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5278 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5279 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5280 }
5281#endif
5282 spin_unlock_irqrestore(&priv->lock, flags);
5283}
5284
bb8c093b 5285static irqreturn_t iwl4965_isr(int irq, void *data)
b481de9c 5286{
bb8c093b 5287 struct iwl4965_priv *priv = data;
b481de9c
ZY
5288 u32 inta, inta_mask;
5289 u32 inta_fh;
5290 if (!priv)
5291 return IRQ_NONE;
5292
5293 spin_lock(&priv->lock);
5294
5295 /* Disable (but don't clear!) interrupts here to avoid
5296 * back-to-back ISRs and sporadic interrupts from our NIC.
5297 * If we have something to service, the tasklet will re-enable ints.
5298 * If we *don't* have something, we'll re-enable before leaving here. */
bb8c093b
CH
5299 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); /* just for debug */
5300 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
5301
5302 /* Discover which interrupts are active/pending */
bb8c093b
CH
5303 inta = iwl4965_read32(priv, CSR_INT);
5304 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5305
5306 /* Ignore interrupt if there's nothing in NIC to service.
5307 * This may be due to IRQ shared with another device,
5308 * or due to sporadic interrupts thrown from our NIC. */
5309 if (!inta && !inta_fh) {
5310 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5311 goto none;
5312 }
5313
5314 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
66fbb541
ON
5315 /* Hardware disappeared. It might have already raised
5316 * an interrupt */
b481de9c 5317 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
66fbb541 5318 goto unplugged;
b481de9c
ZY
5319 }
5320
5321 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5322 inta, inta_mask, inta_fh);
5323
bb8c093b 5324 /* iwl4965_irq_tasklet() will service interrupts and re-enable them */
b481de9c 5325 tasklet_schedule(&priv->irq_tasklet);
b481de9c 5326
66fbb541
ON
5327 unplugged:
5328 spin_unlock(&priv->lock);
b481de9c
ZY
5329 return IRQ_HANDLED;
5330
5331 none:
5332 /* re-enable interrupts here since we don't have anything to service. */
bb8c093b 5333 iwl4965_enable_interrupts(priv);
b481de9c
ZY
5334 spin_unlock(&priv->lock);
5335 return IRQ_NONE;
5336}
5337
5338/************************** EEPROM BANDS ****************************
5339 *
bb8c093b 5340 * The iwl4965_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
5341 * EEPROM contents to the specific channel number supported for each
5342 * band.
5343 *
bb8c093b 5344 * For example, iwl4965_priv->eeprom.band_3_channels[4] from the band_3
b481de9c
ZY
5345 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5346 * The specific geography and calibration information for that channel
5347 * is contained in the eeprom map itself.
5348 *
5349 * During init, we copy the eeprom information and channel map
5350 * information into priv->channel_info_24/52 and priv->channel_map_24/52
5351 *
5352 * channel_map_24/52 provides the index in the channel_info array for a
5353 * given channel. We have to have two separate maps as there is channel
5354 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5355 * band_2
5356 *
5357 * A value of 0xff stored in the channel_map indicates that the channel
5358 * is not supported by the hardware at all.
5359 *
5360 * A value of 0xfe in the channel_map indicates that the channel is not
5361 * valid for Tx with the current hardware. This means that
5362 * while the system can tune and receive on a given channel, it may not
5363 * be able to associate or transmit any frames on that
5364 * channel. There is no corresponding channel information for that
5365 * entry.
5366 *
5367 *********************************************************************/
5368
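/*
 * For illustration: how a channel_map_24/52 entry would be interpreted
 * per the description above.  A hedged sketch; the helper name is
 * hypothetical and not part of this driver.
 */
static inline int example_channel_usable_for_tx(u8 map_entry)
{
	if (map_entry == 0xff)		/* not supported by the hardware    */
		return 0;
	if (map_entry == 0xfe)		/* can tune/receive, but no Tx      */
		return 0;
	return 1;			/* valid index into channel_info[]  */
}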
5369/* 2.4 GHz */
bb8c093b 5370static const u8 iwl4965_eeprom_band_1[14] = {
b481de9c
ZY
5371 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5372};
5373
5374/* 5.2 GHz bands */
9fbab516 5375static const u8 iwl4965_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
5376 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5377};
5378
9fbab516 5379static const u8 iwl4965_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
5380 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5381};
5382
bb8c093b 5383static const u8 iwl4965_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
5384 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5385};
5386
bb8c093b 5387static const u8 iwl4965_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
5388 145, 149, 153, 157, 161, 165
5389};
5390
bb8c093b 5391static u8 iwl4965_eeprom_band_6[] = { /* 2.4 FAT channel */
b481de9c
ZY
5392 1, 2, 3, 4, 5, 6, 7
5393};
5394
bb8c093b 5395static u8 iwl4965_eeprom_band_7[] = { /* 5.2 FAT channel */
b481de9c
ZY
5396 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5397};
5398
9fbab516
BC
5399static void iwl4965_init_band_reference(const struct iwl4965_priv *priv,
5400 int band,
b481de9c 5401 int *eeprom_ch_count,
bb8c093b 5402 const struct iwl4965_eeprom_channel
b481de9c
ZY
5403 **eeprom_ch_info,
5404 const u8 **eeprom_ch_index)
5405{
5406 switch (band) {
5407 case 1: /* 2.4GHz band */
bb8c093b 5408 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_1);
b481de9c 5409 *eeprom_ch_info = priv->eeprom.band_1_channels;
bb8c093b 5410 *eeprom_ch_index = iwl4965_eeprom_band_1;
b481de9c 5411 break;
9fbab516 5412 case 2: /* 4.9GHz band */
bb8c093b 5413 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_2);
b481de9c 5414 *eeprom_ch_info = priv->eeprom.band_2_channels;
bb8c093b 5415 *eeprom_ch_index = iwl4965_eeprom_band_2;
b481de9c
ZY
5416 break;
5417 case 3: /* 5.2GHz band */
bb8c093b 5418 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_3);
b481de9c 5419 *eeprom_ch_info = priv->eeprom.band_3_channels;
bb8c093b 5420 *eeprom_ch_index = iwl4965_eeprom_band_3;
b481de9c 5421 break;
9fbab516 5422 case 4: /* 5.5GHz band */
bb8c093b 5423 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_4);
b481de9c 5424 *eeprom_ch_info = priv->eeprom.band_4_channels;
bb8c093b 5425 *eeprom_ch_index = iwl4965_eeprom_band_4;
b481de9c 5426 break;
9fbab516 5427 case 5: /* 5.7GHz band */
bb8c093b 5428 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c 5429 *eeprom_ch_info = priv->eeprom.band_5_channels;
bb8c093b 5430 *eeprom_ch_index = iwl4965_eeprom_band_5;
b481de9c 5431 break;
9fbab516 5432 case 6: /* 2.4GHz FAT channels */
bb8c093b 5433 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_6);
b481de9c 5434 *eeprom_ch_info = priv->eeprom.band_24_channels;
bb8c093b 5435 *eeprom_ch_index = iwl4965_eeprom_band_6;
b481de9c 5436 break;
9fbab516 5437 case 7: /* 5 GHz FAT channels */
bb8c093b 5438 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_7);
b481de9c 5439 *eeprom_ch_info = priv->eeprom.band_52_channels;
bb8c093b 5440 *eeprom_ch_index = iwl4965_eeprom_band_7;
b481de9c
ZY
5441 break;
5442 default:
5443 BUG();
5444 return;
5445 }
5446}
5447
6440adb5
BC
5448/**
5449 * iwl4965_get_channel_info - Find driver's private channel info
5450 *
5451 * Based on band and channel number.
5452 */
bb8c093b 5453const struct iwl4965_channel_info *iwl4965_get_channel_info(const struct iwl4965_priv *priv,
b481de9c
ZY
5454 int phymode, u16 channel)
5455{
5456 int i;
5457
5458 switch (phymode) {
5459 case MODE_IEEE80211A:
5460 for (i = 14; i < priv->channel_count; i++) {
5461 if (priv->channel_info[i].channel == channel)
5462 return &priv->channel_info[i];
5463 }
5464 break;
5465
5466 case MODE_IEEE80211B:
5467 case MODE_IEEE80211G:
5468 if (channel >= 1 && channel <= 14)
5469 return &priv->channel_info[channel - 1];
5470 break;
5471
5472 }
5473
5474 return NULL;
5475}
5476
5477#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5478 ? # x " " : "")
5479
6440adb5
BC
5480/**
5481 * iwl4965_init_channel_map - Set up driver's info for all possible channels
5482 */
bb8c093b 5483static int iwl4965_init_channel_map(struct iwl4965_priv *priv)
b481de9c
ZY
5484{
5485 int eeprom_ch_count = 0;
5486 const u8 *eeprom_ch_index = NULL;
bb8c093b 5487 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 5488 int band, ch;
bb8c093b 5489 struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5490
5491 if (priv->channel_count) {
5492 IWL_DEBUG_INFO("Channel map already initialized.\n");
5493 return 0;
5494 }
5495
5496 if (priv->eeprom.version < 0x2f) {
5497 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5498 priv->eeprom.version);
5499 return -EINVAL;
5500 }
5501
5502 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5503
5504 priv->channel_count =
bb8c093b
CH
5505 ARRAY_SIZE(iwl4965_eeprom_band_1) +
5506 ARRAY_SIZE(iwl4965_eeprom_band_2) +
5507 ARRAY_SIZE(iwl4965_eeprom_band_3) +
5508 ARRAY_SIZE(iwl4965_eeprom_band_4) +
5509 ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c
ZY
5510
5511 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5512
bb8c093b 5513 priv->channel_info = kzalloc(sizeof(struct iwl4965_channel_info) *
b481de9c
ZY
5514 priv->channel_count, GFP_KERNEL);
5515 if (!priv->channel_info) {
5516 IWL_ERROR("Could not allocate channel_info\n");
5517 priv->channel_count = 0;
5518 return -ENOMEM;
5519 }
5520
5521 ch_info = priv->channel_info;
5522
5523 /* Loop through the 5 EEPROM bands adding them in order to the
5524 * channel map we maintain (that contains additional information
5525 * beyond what is just in the EEPROM) */
5526 for (band = 1; band <= 5; band++) {
5527
bb8c093b 5528 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5529 &eeprom_ch_info, &eeprom_ch_index);
5530
5531 /* Loop through each band adding each of the channels */
5532 for (ch = 0; ch < eeprom_ch_count; ch++) {
5533 ch_info->channel = eeprom_ch_index[ch];
5534 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5535 MODE_IEEE80211A;
5536
5537 /* permanently store EEPROM's channel regulatory flags
5538 * and max power in channel info database. */
5539 ch_info->eeprom = eeprom_ch_info[ch];
5540
5541 /* Copy the run-time flags so they are there even on
5542 * invalid channels */
5543 ch_info->flags = eeprom_ch_info[ch].flags;
5544
5545 if (!(is_channel_valid(ch_info))) {
5546 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5547 "No traffic\n",
5548 ch_info->channel,
5549 ch_info->flags,
5550 is_channel_a_band(ch_info) ?
5551 "5.2" : "2.4");
5552 ch_info++;
5553 continue;
5554 }
5555
5556 /* Initialize regulatory-based run-time data */
5557 ch_info->max_power_avg = ch_info->curr_txpow =
5558 eeprom_ch_info[ch].max_power_avg;
5559 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5560 ch_info->min_power = 0;
5561
5562 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5563 " %ddBm): Ad-Hoc %ssupported\n",
5564 ch_info->channel,
5565 is_channel_a_band(ch_info) ?
5566 "5.2" : "2.4",
5567 CHECK_AND_PRINT(IBSS),
5568 CHECK_AND_PRINT(ACTIVE),
5569 CHECK_AND_PRINT(RADAR),
5570 CHECK_AND_PRINT(WIDE),
5571 CHECK_AND_PRINT(NARROW),
5572 CHECK_AND_PRINT(DFS),
5573 eeprom_ch_info[ch].flags,
5574 eeprom_ch_info[ch].max_power_avg,
5575 ((eeprom_ch_info[ch].
5576 flags & EEPROM_CHANNEL_IBSS)
5577 && !(eeprom_ch_info[ch].
5578 flags & EEPROM_CHANNEL_RADAR))
5579 ? "" : "not ");
5580
5581 /* Set the user_txpower_limit to the highest power
5582 * supported by any channel */
5583 if (eeprom_ch_info[ch].max_power_avg >
5584 priv->user_txpower_limit)
5585 priv->user_txpower_limit =
5586 eeprom_ch_info[ch].max_power_avg;
5587
5588 ch_info++;
5589 }
5590 }
5591
6440adb5 5592 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
b481de9c
ZY
5593 for (band = 6; band <= 7; band++) {
5594 int phymode;
5595 u8 fat_extension_chan;
5596
bb8c093b 5597 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5598 &eeprom_ch_info, &eeprom_ch_index);
5599
6440adb5 5600 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
b481de9c 5601 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
6440adb5 5602
b481de9c
ZY
5603 /* Loop through each band adding each of the channels */
5604 for (ch = 0; ch < eeprom_ch_count; ch++) {
5605
5606 if ((band == 6) &&
5607 ((eeprom_ch_index[ch] == 5) ||
5608 (eeprom_ch_index[ch] == 6) ||
5609 (eeprom_ch_index[ch] == 7)))
5610 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5611 else
5612 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5613
6440adb5 5614 /* Set up driver's info for lower half */
b481de9c
ZY
5615 iwl4965_set_fat_chan_info(priv, phymode,
5616 eeprom_ch_index[ch],
5617 &(eeprom_ch_info[ch]),
5618 fat_extension_chan);
5619
6440adb5 5620 /* Set up driver's info for upper half */
b481de9c
ZY
5621 iwl4965_set_fat_chan_info(priv, phymode,
5622 (eeprom_ch_index[ch] + 4),
5623 &(eeprom_ch_info[ch]),
5624 HT_IE_EXT_CHANNEL_BELOW);
5625 }
5626 }
5627
5628 return 0;
5629}
5630
5631/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5632 * sending probe req. This should be set long enough to hear probe responses
5633 * from more than one AP. */
5634#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5635#define IWL_ACTIVE_DWELL_TIME_52 (10)
5636
5637/* For faster active scanning, scan will move to the next channel if fewer than
5638 * PLCP_QUIET_THRESH packets are heard on this channel within
5639 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5640 * time if it's a quiet channel (nothing responded to our probe, and there's
5641 * no other traffic).
5642 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5643#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5644#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5645
5646/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5647 * Must be set longer than active dwell time.
5648 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5649#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5650#define IWL_PASSIVE_DWELL_TIME_52 (10)
5651#define IWL_PASSIVE_DWELL_BASE (100)
5652#define IWL_CHANNEL_TUNE_TIME 5
5653
bb8c093b 5654static inline u16 iwl4965_get_active_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c
ZY
5655{
5656 if (phymode == MODE_IEEE80211A)
5657 return IWL_ACTIVE_DWELL_TIME_52;
5658 else
5659 return IWL_ACTIVE_DWELL_TIME_24;
5660}
5661
bb8c093b 5662static u16 iwl4965_get_passive_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c 5663{
bb8c093b 5664 u16 active = iwl4965_get_active_dwell_time(priv, phymode);
b481de9c
ZY
5665 u16 passive = (phymode != MODE_IEEE80211A) ?
5666 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5667 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5668
bb8c093b 5669 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5670 /* If we're associated, we clamp the maximum passive
5671 * dwell time to be 98% of the beacon interval (minus
5672 * 2 * channel tune time) */
5673 passive = priv->beacon_int;
5674 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5675 passive = IWL_PASSIVE_DWELL_BASE;
5676 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5677 }
5678
5679 if (passive <= active)
5680 passive = active + 1;
5681
5682 return passive;
5683}
5684
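/*
 * For illustration: the associated-case clamp above, worked standalone.
 * With a typical 100 ms beacon interval this yields
 * min(beacon_int, 100) * 98 / 100 - 2 * 5 = 88 ms.  The helper name is
 * hypothetical and not part of this driver.
 */
static inline u16 example_passive_dwell_assoc(u16 beacon_int, u16 active)
{
	u16 passive = beacon_int;

	if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
		passive = IWL_PASSIVE_DWELL_BASE;
	passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;

	return (passive <= active) ? active + 1 : passive;
}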
bb8c093b 5685static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
b481de9c 5686 u8 is_active, u8 direct_mask,
bb8c093b 5687 struct iwl4965_scan_channel *scan_ch)
b481de9c
ZY
5688{
5689 const struct ieee80211_channel *channels = NULL;
5690 const struct ieee80211_hw_mode *hw_mode;
bb8c093b 5691 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5692 u16 passive_dwell = 0;
5693 u16 active_dwell = 0;
5694 int added, i;
5695
bb8c093b 5696 hw_mode = iwl4965_get_hw_mode(priv, phymode);
b481de9c
ZY
5697 if (!hw_mode)
5698 return 0;
5699
5700 channels = hw_mode->channels;
5701
bb8c093b
CH
5702 active_dwell = iwl4965_get_active_dwell_time(priv, phymode);
5703 passive_dwell = iwl4965_get_passive_dwell_time(priv, phymode);
b481de9c
ZY
5704
5705 for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5706 if (channels[i].chan ==
5707 le16_to_cpu(priv->active_rxon.channel)) {
bb8c093b 5708 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5709 IWL_DEBUG_SCAN
5710 ("Skipping current channel %d\n",
5711 le16_to_cpu(priv->active_rxon.channel));
5712 continue;
5713 }
5714 } else if (priv->only_active_channel)
5715 continue;
5716
5717 scan_ch->channel = channels[i].chan;
5718
9fbab516
BC
5719 ch_info = iwl4965_get_channel_info(priv, phymode,
5720 scan_ch->channel);
b481de9c
ZY
5721 if (!is_channel_valid(ch_info)) {
5722 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5723 scan_ch->channel);
5724 continue;
5725 }
5726
5727 if (!is_active || is_channel_passive(ch_info) ||
5728 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5729 scan_ch->type = 0; /* passive */
5730 else
5731 scan_ch->type = 1; /* active */
5732
5733 if (scan_ch->type & 1)
5734 scan_ch->type |= (direct_mask << 1);
5735
5736 if (is_channel_narrow(ch_info))
5737 scan_ch->type |= (1 << 7);
5738
5739 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5740 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5741
9fbab516 5742 /* Set txpower levels to defaults */
b481de9c
ZY
5743 scan_ch->tpc.dsp_atten = 110;
5744 /* scan_pwr_info->tpc.dsp_atten; */
5745
5746 /*scan_pwr_info->tpc.tx_gain; */
5747 if (phymode == MODE_IEEE80211A)
5748 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5749 else {
5750 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5751 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516
BC
5752 * power level:
5753 * scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
b481de9c
ZY
5754 */
5755 }
5756
5757 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5758 scan_ch->channel,
5759 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5760 (scan_ch->type & 1) ?
5761 active_dwell : passive_dwell);
5762
5763 scan_ch++;
5764 added++;
5765 }
5766
5767 IWL_DEBUG_SCAN("total channels to scan %d\n", added);
5768 return added;
5769}
5770
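/*
 * For illustration: how the scan channel "type" bit field is assembled in
 * iwl4965_get_channels_for_scan() above.  A sketch only; the helper name
 * is hypothetical.
 */
static inline u8 example_scan_channel_type(int active, u8 direct_mask,
					   int narrow)
{
	u8 type = active ? 1 : 0;		/* bit 0: active vs. passive */

	if (type & 1)
		type |= direct_mask << 1;	/* direct-probe SSID bits    */
	if (narrow)
		type |= 1 << 7;			/* narrow channel            */
	return type;
}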
bb8c093b 5771static void iwl4965_reset_channel_flag(struct iwl4965_priv *priv)
b481de9c
ZY
5772{
5773 int i, j;
5774 for (i = 0; i < 3; i++) {
5775 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5776 for (j = 0; j < hw_mode->num_channels; j++)
5777 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5778 }
5779}
5780
bb8c093b 5781static void iwl4965_init_hw_rates(struct iwl4965_priv *priv,
b481de9c
ZY
5782 struct ieee80211_rate *rates)
5783{
5784 int i;
5785
5786 for (i = 0; i < IWL_RATE_COUNT; i++) {
bb8c093b 5787 rates[i].rate = iwl4965_rates[i].ieee * 5;
b481de9c
ZY
5788 rates[i].val = i; /* Rate scaling will work on indexes */
5789 rates[i].val2 = i;
5790 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5791 /* Only OFDM have the bits-per-symbol set */
5792 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5793 rates[i].flags |= IEEE80211_RATE_OFDM;
5794 else {
5795 /*
5796 * If CCK 1M then set rate flag to CCK else CCK_2
5797 * which is CCK | PREAMBLE2
5798 */
bb8c093b 5799 rates[i].flags |= (iwl4965_rates[i].plcp == 10) ?
b481de9c
ZY
5800 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5801 }
5802
5803 /* Set up which ones are basic rates... */
5804 if (IWL_BASIC_RATES_MASK & (1 << i))
5805 rates[i].flags |= IEEE80211_RATE_BASIC;
5806 }
b481de9c
ZY
5807}
5808
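/*
 * For illustration: the rate conversion used in iwl4965_init_hw_rates()
 * above.  The assumption here is that the per-rate "ieee" value is in
 * 500 kbps units, so multiplying by 5 gives mac80211's 100 kbps units
 * (e.g. 54 Mbps: ieee = 108, rate = 540).  Hypothetical helper.
 */
static inline int example_rate_in_100kbps(int ieee_500kbps_units)
{
	return ieee_500kbps_units * 5;
}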
5809/**
bb8c093b 5810 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
b481de9c 5811 */
bb8c093b 5812static int iwl4965_init_geos(struct iwl4965_priv *priv)
b481de9c 5813{
bb8c093b 5814 struct iwl4965_channel_info *ch;
b481de9c
ZY
5815 struct ieee80211_hw_mode *modes;
5816 struct ieee80211_channel *channels;
5817 struct ieee80211_channel *geo_ch;
5818 struct ieee80211_rate *rates;
5819 int i = 0;
5820 enum {
5821 A = 0,
5822 B = 1,
5823 G = 2,
5824 A_11N = 3,
5825 G_11N = 4,
5826 };
5827 int mode_count = 5;
5828
5829 if (priv->modes) {
5830 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5831 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5832 return 0;
5833 }
5834
5835 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5836 GFP_KERNEL);
5837 if (!modes)
5838 return -ENOMEM;
5839
5840 channels = kzalloc(sizeof(struct ieee80211_channel) *
5841 priv->channel_count, GFP_KERNEL);
5842 if (!channels) {
5843 kfree(modes);
5844 return -ENOMEM;
5845 }
5846
5847 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5848 GFP_KERNEL);
5849 if (!rates) {
5850 kfree(modes);
5851 kfree(channels);
5852 return -ENOMEM;
5853 }
5854
5855 /* 0 = 802.11a
5856 * 1 = 802.11b
5857 * 2 = 802.11g
5858 */
5859
5860 /* 5.2GHz channels start after the 2.4GHz channels */
5861 modes[A].mode = MODE_IEEE80211A;
bb8c093b 5862 modes[A].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)];
b481de9c
ZY
5863 modes[A].rates = rates;
5864 modes[A].num_rates = 8; /* just OFDM */
5865 modes[A].rates = &rates[4];
5866 modes[A].num_channels = 0;
5867
5868 modes[B].mode = MODE_IEEE80211B;
5869 modes[B].channels = channels;
5870 modes[B].rates = rates;
5871 modes[B].num_rates = 4; /* just CCK */
5872 modes[B].num_channels = 0;
5873
5874 modes[G].mode = MODE_IEEE80211G;
5875 modes[G].channels = channels;
5876 modes[G].rates = rates;
5877 modes[G].num_rates = 12; /* OFDM & CCK */
5878 modes[G].num_channels = 0;
5879
5880 modes[G_11N].mode = MODE_IEEE80211G;
5881 modes[G_11N].channels = channels;
5882 modes[G_11N].num_rates = 13; /* OFDM & CCK */
5883 modes[G_11N].rates = rates;
5884 modes[G_11N].num_channels = 0;
5885
5886 modes[A_11N].mode = MODE_IEEE80211A;
bb8c093b 5887 modes[A_11N].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)];
b481de9c
ZY
5888 modes[A_11N].rates = &rates[4];
5889 modes[A_11N].num_rates = 9; /* just OFDM */
5890 modes[A_11N].num_channels = 0;
5891
5892 priv->ieee_channels = channels;
5893 priv->ieee_rates = rates;
5894
bb8c093b 5895 iwl4965_init_hw_rates(priv, rates);
b481de9c
ZY
5896
5897 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5898 ch = &priv->channel_info[i];
5899
5900 if (!is_channel_valid(ch)) {
5901 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5902 "skipping.\n",
5903 ch->channel, is_channel_a_band(ch) ?
5904 "5.2" : "2.4");
5905 continue;
5906 }
5907
5908 if (is_channel_a_band(ch)) {
5909 geo_ch = &modes[A].channels[modes[A].num_channels++];
5910 modes[A_11N].num_channels++;
5911 } else {
5912 geo_ch = &modes[B].channels[modes[B].num_channels++];
5913 modes[G].num_channels++;
5914 modes[G_11N].num_channels++;
5915 }
5916
5917 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5918 geo_ch->chan = ch->channel;
5919 geo_ch->power_level = ch->max_power_avg;
5920 geo_ch->antenna_max = 0xff;
5921
5922 if (is_channel_valid(ch)) {
5923 geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5924 if (ch->flags & EEPROM_CHANNEL_IBSS)
5925 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5926
5927 if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5928 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5929
5930 if (ch->flags & EEPROM_CHANNEL_RADAR)
5931 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5932
5933 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5934 priv->max_channel_txpower_limit =
5935 ch->max_power_avg;
5936 }
5937
5938 geo_ch->val = geo_ch->flag;
5939 }
5940
5941 if ((modes[A].num_channels == 0) && priv->is_abg) {
5942 printk(KERN_INFO DRV_NAME
5943 ": Incorrectly detected BG card as ABG. Please send "
5944 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5945 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5946 priv->is_abg = 0;
5947 }
5948
5949 printk(KERN_INFO DRV_NAME
5950 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5951 modes[G].num_channels, modes[A].num_channels);
5952
5953 /*
5954 * NOTE: We register these in preference of order -- the
5955 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5956 * a phymode based on rates or AP capabilities but seems to
5957 * configure it purely on if the channel being configured
5958 * is supported by a mode -- and the first match is taken
5959 */
5960
5961 if (modes[G].num_channels)
5962 ieee80211_register_hwmode(priv->hw, &modes[G]);
5963 if (modes[B].num_channels)
5964 ieee80211_register_hwmode(priv->hw, &modes[B]);
5965 if (modes[A].num_channels)
5966 ieee80211_register_hwmode(priv->hw, &modes[A]);
5967
5968 priv->modes = modes;
5969 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5970
5971 return 0;
5972}
5973
5974/******************************************************************************
5975 *
5976 * uCode download functions
5977 *
5978 ******************************************************************************/
5979
bb8c093b 5980static void iwl4965_dealloc_ucode_pci(struct iwl4965_priv *priv)
b481de9c
ZY
5981{
5982 if (priv->ucode_code.v_addr != NULL) {
5983 pci_free_consistent(priv->pci_dev,
5984 priv->ucode_code.len,
5985 priv->ucode_code.v_addr,
5986 priv->ucode_code.p_addr);
5987 priv->ucode_code.v_addr = NULL;
5988 }
5989 if (priv->ucode_data.v_addr != NULL) {
5990 pci_free_consistent(priv->pci_dev,
5991 priv->ucode_data.len,
5992 priv->ucode_data.v_addr,
5993 priv->ucode_data.p_addr);
5994 priv->ucode_data.v_addr = NULL;
5995 }
5996 if (priv->ucode_data_backup.v_addr != NULL) {
5997 pci_free_consistent(priv->pci_dev,
5998 priv->ucode_data_backup.len,
5999 priv->ucode_data_backup.v_addr,
6000 priv->ucode_data_backup.p_addr);
6001 priv->ucode_data_backup.v_addr = NULL;
6002 }
6003 if (priv->ucode_init.v_addr != NULL) {
6004 pci_free_consistent(priv->pci_dev,
6005 priv->ucode_init.len,
6006 priv->ucode_init.v_addr,
6007 priv->ucode_init.p_addr);
6008 priv->ucode_init.v_addr = NULL;
6009 }
6010 if (priv->ucode_init_data.v_addr != NULL) {
6011 pci_free_consistent(priv->pci_dev,
6012 priv->ucode_init_data.len,
6013 priv->ucode_init_data.v_addr,
6014 priv->ucode_init_data.p_addr);
6015 priv->ucode_init_data.v_addr = NULL;
6016 }
6017 if (priv->ucode_boot.v_addr != NULL) {
6018 pci_free_consistent(priv->pci_dev,
6019 priv->ucode_boot.len,
6020 priv->ucode_boot.v_addr,
6021 priv->ucode_boot.p_addr);
6022 priv->ucode_boot.v_addr = NULL;
6023 }
6024}
6025
6026/**
bb8c093b 6027 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
6028 * looking at all data.
6029 */
9fbab516
BC
6030static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 * image,
6031 u32 len)
b481de9c
ZY
6032{
6033 u32 val;
6034 u32 save_len = len;
6035 int rc = 0;
6036 u32 errcnt;
6037
6038 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
6039
bb8c093b 6040 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6041 if (rc)
6042 return rc;
6043
bb8c093b 6044 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
b481de9c
ZY
6045
6046 errcnt = 0;
6047 for (; len > 0; len -= sizeof(u32), image++) {
6048 /* read data comes through single port, auto-incr addr */
6049 /* NOTE: Use the debugless read so we don't flood kernel log
6050 * if IWL_DL_IO is set */
bb8c093b 6051 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
6052 if (val != le32_to_cpu(*image)) {
6053 IWL_ERROR("uCode INST section is invalid at "
6054 "offset 0x%x, is 0x%x, s/b 0x%x\n",
6055 save_len - len, val, le32_to_cpu(*image));
6056 rc = -EIO;
6057 errcnt++;
6058 if (errcnt >= 20)
6059 break;
6060 }
6061 }
6062
bb8c093b 6063 iwl4965_release_nic_access(priv);
b481de9c
ZY
6064
6065 if (!errcnt)
6066 IWL_DEBUG_INFO
6067 ("ucode image in INSTRUCTION memory is good\n");
6068
6069 return rc;
6070}
6071
6072
6073/**
bb8c093b 6074 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
6075 * using sample data 100 bytes apart. If these sample points are good,
6076 * it's a pretty good bet that everything between them is good, too.
6077 */
bb8c093b 6078static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
6079{
6080 u32 val;
6081 int rc = 0;
6082 u32 errcnt = 0;
6083 u32 i;
6084
6085 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
6086
bb8c093b 6087 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6088 if (rc)
6089 return rc;
6090
6091 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
6092 /* read data comes through single port, auto-incr addr */
6093 /* NOTE: Use the debugless read so we don't flood kernel log
6094 * if IWL_DL_IO is set */
bb8c093b 6095 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR,
b481de9c 6096 i + RTC_INST_LOWER_BOUND);
bb8c093b 6097 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
6098 if (val != le32_to_cpu(*image)) {
6099#if 0 /* Enable this if you want to see details */
6100 IWL_ERROR("uCode INST section is invalid at "
6101 "offset 0x%x, is 0x%x, s/b 0x%x\n",
6102 i, val, *image);
6103#endif
6104 rc = -EIO;
6105 errcnt++;
6106 if (errcnt >= 3)
6107 break;
6108 }
6109 }
6110
bb8c093b 6111 iwl4965_release_nic_access(priv);
b481de9c
ZY
6112
6113 return rc;
6114}
6115
6116
6117/**
bb8c093b 6118 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
6119 * and verify its contents
6120 */
bb8c093b 6121static int iwl4965_verify_ucode(struct iwl4965_priv *priv)
b481de9c
ZY
6122{
6123 __le32 *image;
6124 u32 len;
6125 int rc = 0;
6126
6127 /* Try bootstrap */
6128 image = (__le32 *)priv->ucode_boot.v_addr;
6129 len = priv->ucode_boot.len;
bb8c093b 6130 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6131 if (rc == 0) {
6132 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
6133 return 0;
6134 }
6135
6136 /* Try initialize */
6137 image = (__le32 *)priv->ucode_init.v_addr;
6138 len = priv->ucode_init.len;
bb8c093b 6139 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6140 if (rc == 0) {
6141 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
6142 return 0;
6143 }
6144
6145 /* Try runtime/protocol */
6146 image = (__le32 *)priv->ucode_code.v_addr;
6147 len = priv->ucode_code.len;
bb8c093b 6148 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6149 if (rc == 0) {
6150 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
6151 return 0;
6152 }
6153
6154 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
6155
9fbab516
BC
6156 /* Since nothing seems to match, show first several data entries in
6157 * instruction SRAM, so maybe visual inspection will give a clue.
6158 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
6159 image = (__le32 *)priv->ucode_boot.v_addr;
6160 len = priv->ucode_boot.len;
bb8c093b 6161 rc = iwl4965_verify_inst_full(priv, image, len);
b481de9c
ZY
6162
6163 return rc;
6164}
6165
6166
6167/* check contents of special bootstrap uCode SRAM */
bb8c093b 6168static int iwl4965_verify_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6169{
6170 __le32 *image = priv->ucode_boot.v_addr;
6171 u32 len = priv->ucode_boot.len;
6172 u32 reg;
6173 u32 val;
6174
6175 IWL_DEBUG_INFO("Begin verify bsm\n");
6176
6177 /* verify BSM SRAM contents */
bb8c093b 6178 val = iwl4965_read_prph(priv, BSM_WR_DWCOUNT_REG);
b481de9c
ZY
6179 for (reg = BSM_SRAM_LOWER_BOUND;
6180 reg < BSM_SRAM_LOWER_BOUND + len;
6181 reg += sizeof(u32), image++) {
bb8c093b 6182 val = iwl4965_read_prph(priv, reg);
b481de9c
ZY
6183 if (val != le32_to_cpu(*image)) {
6184 IWL_ERROR("BSM uCode verification failed at "
6185 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
6186 BSM_SRAM_LOWER_BOUND,
6187 reg - BSM_SRAM_LOWER_BOUND, len,
6188 val, le32_to_cpu(*image));
6189 return -EIO;
6190 }
6191 }
6192
6193 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
6194
6195 return 0;
6196}
6197
6198/**
bb8c093b 6199 * iwl4965_load_bsm - Load bootstrap instructions
b481de9c
ZY
6200 *
6201 * BSM operation:
6202 *
6203 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
6204 * in special SRAM that does not power down during RFKILL. When powering back
6205 * up after power-saving sleeps (or during initial uCode load), the BSM loads
6206 * the bootstrap program into the on-board processor, and starts it.
6207 *
6208 * The bootstrap program loads (via DMA) instructions and data for a new
6209 * program from host DRAM locations indicated by the host driver in the
6210 * BSM_DRAM_* registers. Once the new program is loaded, it starts
6211 * automatically.
6212 *
6213 * When initializing the NIC, the host driver points the BSM to the
6214 * "initialize" uCode image. This uCode sets up some internal data, then
6215 * notifies host via "initialize alive" that it is complete.
6216 *
6217 * The host then replaces the BSM_DRAM_* pointer values to point to the
6218 * normal runtime uCode instructions and a backup uCode data cache buffer
6219 * (filled initially with starting data values for the on-board processor),
6220 * then triggers the "initialize" uCode to load and launch the runtime uCode,
6221 * which begins normal operation.
6222 *
6223 * When doing a power-save shutdown, runtime uCode saves data SRAM into
6224 * the backup data cache in DRAM before SRAM is powered down.
6225 *
6226 * When powering back up, the BSM loads the bootstrap program. This reloads
6227 * the runtime uCode instructions and the backup data cache into SRAM,
6228 * and re-launches the runtime uCode from where it left off.
6229 */
bb8c093b 6230static int iwl4965_load_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6231{
6232 __le32 *image = priv->ucode_boot.v_addr;
6233 u32 len = priv->ucode_boot.len;
6234 dma_addr_t pinst;
6235 dma_addr_t pdata;
6236 u32 inst_len;
6237 u32 data_len;
6238 int rc;
6239 int i;
6240 u32 done;
6241 u32 reg_offset;
6242
6243 IWL_DEBUG_INFO("Begin load bsm\n");
6244
6245 /* make sure bootstrap program is no larger than BSM's SRAM size */
6246 if (len > IWL_MAX_BSM_SIZE)
6247 return -EINVAL;
6248
6249 /* Tell bootstrap uCode where to find the "Initialize" uCode
9fbab516 6250 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
bb8c093b 6251 * NOTE: iwl4965_initialize_alive_start() will replace these values,
b481de9c
ZY
6252 * after the "initialize" uCode has run, to point to
6253 * runtime/protocol instructions and backup data cache. */
6254 pinst = priv->ucode_init.p_addr >> 4;
6255 pdata = priv->ucode_init_data.p_addr >> 4;
6256 inst_len = priv->ucode_init.len;
6257 data_len = priv->ucode_init_data.len;
6258
bb8c093b 6259 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6260 if (rc)
6261 return rc;
6262
bb8c093b
CH
6263 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6264 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6265 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
6266 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
b481de9c
ZY
6267
6268 /* Fill BSM memory with bootstrap instructions */
6269 for (reg_offset = BSM_SRAM_LOWER_BOUND;
6270 reg_offset < BSM_SRAM_LOWER_BOUND + len;
6271 reg_offset += sizeof(u32), image++)
bb8c093b 6272 _iwl4965_write_prph(priv, reg_offset,
b481de9c
ZY
6273 le32_to_cpu(*image));
6274
bb8c093b 6275 rc = iwl4965_verify_bsm(priv);
b481de9c 6276 if (rc) {
bb8c093b 6277 iwl4965_release_nic_access(priv);
b481de9c
ZY
6278 return rc;
6279 }
6280
6281 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
bb8c093b
CH
6282 iwl4965_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
6283 iwl4965_write_prph(priv, BSM_WR_MEM_DST_REG,
b481de9c 6284 RTC_INST_LOWER_BOUND);
bb8c093b 6285 iwl4965_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
b481de9c
ZY
6286
6287 /* Load bootstrap code into instruction SRAM now,
6288 * to prepare to load "initialize" uCode */
bb8c093b 6289 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6290 BSM_WR_CTRL_REG_BIT_START);
6291
6292 /* Wait for load of bootstrap uCode to finish */
6293 for (i = 0; i < 100; i++) {
bb8c093b 6294 done = iwl4965_read_prph(priv, BSM_WR_CTRL_REG);
b481de9c
ZY
6295 if (!(done & BSM_WR_CTRL_REG_BIT_START))
6296 break;
6297 udelay(10);
6298 }
6299 if (i < 100)
6300 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
6301 else {
6302 IWL_ERROR("BSM write did not complete!\n");
6303 return -EIO;
6304 }
6305
6306 /* Enable future boot loads whenever power management unit triggers it
6307 * (e.g. when powering back up after power-save shutdown) */
bb8c093b 6308 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6309 BSM_WR_CTRL_REG_BIT_START_EN);
6310
bb8c093b 6311 iwl4965_release_nic_access(priv);
b481de9c
ZY
6312
6313 return 0;
6314}
6315
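/*
 * For illustration: the BSM_DRAM_*_PTR registers take host DRAM physical
 * address bits 35:4 on the 4965 (see the comment above), so a 16-byte
 * aligned DMA address is programmed right-shifted by four.  Hypothetical
 * helper, not part of this driver.
 */
static inline u32 example_bsm_dram_ptr(dma_addr_t dma_addr)
{
	/* assumes dma_addr is 16-byte aligned and below 2^36 */
	return (u32)(dma_addr >> 4);
}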
bb8c093b 6316static void iwl4965_nic_start(struct iwl4965_priv *priv)
b481de9c
ZY
6317{
6318 /* Remove all resets to allow NIC to operate */
bb8c093b 6319 iwl4965_write32(priv, CSR_RESET, 0);
b481de9c
ZY
6320}
6321
90e759d1
TW
6322static int iwl4965_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
6323{
6324 desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
6325 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
6326}
6327
b481de9c 6328/**
bb8c093b 6329 * iwl4965_read_ucode - Read uCode images from disk file.
b481de9c
ZY
6330 *
6331 * Copy into buffers for card to fetch via bus-mastering
6332 */
bb8c093b 6333static int iwl4965_read_ucode(struct iwl4965_priv *priv)
b481de9c 6334{
bb8c093b 6335 struct iwl4965_ucode *ucode;
90e759d1 6336 int ret;
b481de9c
ZY
6337 const struct firmware *ucode_raw;
6338 const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode";
6339 u8 *src;
6340 size_t len;
6341 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
6342
6343 /* Ask kernel firmware_class module to get the boot firmware off disk.
6344 * request_firmware() is synchronous, file is in memory on return. */
90e759d1
TW
6345 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
6346 if (ret < 0) {
6347 IWL_ERROR("%s firmware file req failed: Reason %d\n",
6348 name, ret);
b481de9c
ZY
6349 goto error;
6350 }
6351
6352 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
6353 name, ucode_raw->size);
6354
6355 /* Make sure that we got at least our header! */
6356 if (ucode_raw->size < sizeof(*ucode)) {
6357 IWL_ERROR("File size way too small!\n");
90e759d1 6358 ret = -EINVAL;
b481de9c
ZY
6359 goto err_release;
6360 }
6361
6362 /* Data from ucode file: header followed by uCode images */
6363 ucode = (void *)ucode_raw->data;
6364
6365 ver = le32_to_cpu(ucode->ver);
6366 inst_size = le32_to_cpu(ucode->inst_size);
6367 data_size = le32_to_cpu(ucode->data_size);
6368 init_size = le32_to_cpu(ucode->init_size);
6369 init_data_size = le32_to_cpu(ucode->init_data_size);
6370 boot_size = le32_to_cpu(ucode->boot_size);
6371
6372 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
6373 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
6374 inst_size);
6375 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
6376 data_size);
6377 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
6378 init_size);
6379 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
6380 init_data_size);
6381 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
6382 boot_size);
6383
6384 /* Verify size of file vs. image size info in file's header */
6385 if (ucode_raw->size < sizeof(*ucode) +
6386 inst_size + data_size + init_size +
6387 init_data_size + boot_size) {
6388
6389 IWL_DEBUG_INFO("uCode file size %d too small\n",
6390 (int)ucode_raw->size);
90e759d1 6391 ret = -EINVAL;
b481de9c
ZY
6392 goto err_release;
6393 }
6394
6395 /* Verify that uCode images will fit in card's SRAM */
6396 if (inst_size > IWL_MAX_INST_SIZE) {
90e759d1
TW
6397 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
6398 inst_size);
6399 ret = -EINVAL;
b481de9c
ZY
6400 goto err_release;
6401 }
6402
6403 if (data_size > IWL_MAX_DATA_SIZE) {
90e759d1
TW
6404 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
6405 data_size);
6406 ret = -EINVAL;
b481de9c
ZY
6407 goto err_release;
6408 }
6409 if (init_size > IWL_MAX_INST_SIZE) {
6410 IWL_DEBUG_INFO
90e759d1
TW
6411 ("uCode init instr len %d too large to fit in\n",
6412 init_size);
6413 ret = -EINVAL;
b481de9c
ZY
6414 goto err_release;
6415 }
6416 if (init_data_size > IWL_MAX_DATA_SIZE) {
6417 IWL_DEBUG_INFO
90e759d1
TW
6418 ("uCode init data len %d too large to fit in\n",
6419 init_data_size);
6420 ret = -EINVAL;
b481de9c
ZY
6421 goto err_release;
6422 }
6423 if (boot_size > IWL_MAX_BSM_SIZE) {
6424 IWL_DEBUG_INFO
90e759d1
TW
6425 ("uCode boot instr len %d too large to fit in\n",
6426 boot_size);
6427 ret = -EINVAL;
b481de9c
ZY
6428 goto err_release;
6429 }
6430
6431 /* Allocate ucode buffers for card's bus-master loading ... */
6432
6433 /* Runtime instructions and 2 copies of data:
6434 * 1) unmodified from disk
6435 * 2) backup cache for save/restore during power-downs */
6436 priv->ucode_code.len = inst_size;
90e759d1 6437 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
6438
6439 priv->ucode_data.len = data_size;
90e759d1 6440 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
6441
6442 priv->ucode_data_backup.len = data_size;
90e759d1 6443 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c
ZY
6444
6445 /* Initialization instructions and data */
90e759d1
TW
6446 if (init_size && init_data_size) {
6447 priv->ucode_init.len = init_size;
6448 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
6449
6450 priv->ucode_init_data.len = init_data_size;
6451 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
6452
6453 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
6454 goto err_pci_alloc;
6455 }
b481de9c
ZY
6456
6457 /* Bootstrap (instructions only, no data) */
90e759d1
TW
6458 if (boot_size) {
6459 priv->ucode_boot.len = boot_size;
6460 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 6461
90e759d1
TW
6462 if (!priv->ucode_boot.v_addr)
6463 goto err_pci_alloc;
6464 }
b481de9c
ZY
6465
6466 /* Copy images into buffers for card's bus-master reads ... */
6467
6468 /* Runtime instructions (first block of data in file) */
6469 src = &ucode->data[0];
6470 len = priv->ucode_code.len;
90e759d1 6471 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
6472 memcpy(priv->ucode_code.v_addr, src, len);
6473 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
6474 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
6475
6476 /* Runtime data (2nd block)
bb8c093b 6477 * NOTE: Copy into backup buffer will be done in iwl4965_up() */
b481de9c
ZY
6478 src = &ucode->data[inst_size];
6479 len = priv->ucode_data.len;
90e759d1 6480 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
6481 memcpy(priv->ucode_data.v_addr, src, len);
6482 memcpy(priv->ucode_data_backup.v_addr, src, len);
6483
6484 /* Initialization instructions (3rd block) */
6485 if (init_size) {
6486 src = &ucode->data[inst_size + data_size];
6487 len = priv->ucode_init.len;
90e759d1
TW
6488 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
6489 len);
b481de9c
ZY
6490 memcpy(priv->ucode_init.v_addr, src, len);
6491 }
6492
6493 /* Initialization data (4th block) */
6494 if (init_data_size) {
6495 src = &ucode->data[inst_size + data_size + init_size];
6496 len = priv->ucode_init_data.len;
90e759d1
TW
6497 IWL_DEBUG_INFO("Copying (but not loading) init data len %Zd\n",
6498 len);
b481de9c
ZY
6499 memcpy(priv->ucode_init_data.v_addr, src, len);
6500 }
6501
6502 /* Bootstrap instructions (5th block) */
6503 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
6504 len = priv->ucode_boot.len;
90e759d1 6505 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %Zd\n", len);
b481de9c
ZY
6506 memcpy(priv->ucode_boot.v_addr, src, len);
6507
 6508 /* We have our copies now, allow the OS to release its copy */
6509 release_firmware(ucode_raw);
6510 return 0;
6511
6512 err_pci_alloc:
6513 IWL_ERROR("failed to allocate pci memory\n");
90e759d1 6514 ret = -ENOMEM;
bb8c093b 6515 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
6516
6517 err_release:
6518 release_firmware(ucode_raw);
6519
6520 error:
90e759d1 6521 return ret;
b481de9c
ZY
6522}
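
/*
 * Illustrative sketch (not driver code): the firmware file parsed above
 * is a flat concatenation of five blocks in a fixed order - runtime
 * instructions, runtime data, init instructions, init data, and
 * bootstrap instructions - so each memcpy() above indexes ucode->data[]
 * at a running offset.  The helper below is a minimal, standalone
 * restatement of that offset arithmetic; the names are hypothetical.
 */
#include <stddef.h>

struct ucode_offsets {
	size_t inst, data, init, init_data, boot;
};

static void ucode_layout(size_t inst_size, size_t data_size,
			 size_t init_size, size_t init_data_size,
			 struct ucode_offsets *off)
{
	off->inst      = 0;                               /* runtime instructions */
	off->data      = off->inst + inst_size;           /* runtime data */
	off->init      = off->data + data_size;           /* init instructions */
	off->init_data = off->init + init_size;           /* init data */
	off->boot      = off->init_data + init_data_size; /* bootstrap instructions */
}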
6523
6524
6525/**
bb8c093b 6526 * iwl4965_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
6527 *
6528 * Tell initialization uCode where to find runtime uCode.
6529 *
6530 * BSM registers initially contain pointers to initialization uCode.
6531 * We need to replace them to load runtime uCode inst and data,
6532 * and to save runtime data when powering down.
6533 */
bb8c093b 6534static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv)
b481de9c
ZY
6535{
6536 dma_addr_t pinst;
6537 dma_addr_t pdata;
6538 int rc = 0;
6539 unsigned long flags;
6540
6541 /* bits 35:4 for 4965 */
6542 pinst = priv->ucode_code.p_addr >> 4;
6543 pdata = priv->ucode_data_backup.p_addr >> 4;
6544
6545 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 6546 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6547 if (rc) {
6548 spin_unlock_irqrestore(&priv->lock, flags);
6549 return rc;
6550 }
6551
6552 /* Tell bootstrap uCode where to find image to load */
bb8c093b
CH
6553 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6554 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6555 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
6556 priv->ucode_data.len);
6557
 6558 /* Inst bytecount must be set up last; bit 31 signals uCode
 6559 * that all new ptr/size info is in place */
bb8c093b 6560 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
6561 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6562
bb8c093b 6563 iwl4965_release_nic_access(priv);
b481de9c
ZY
6564
6565 spin_unlock_irqrestore(&priv->lock, flags);
6566
6567 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6568
6569 return rc;
6570}
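
/*
 * Illustrative sketch (not driver code): iwl4965_set_ucode_ptrs() above
 * programs the BSM DRAM pointer registers with physical addresses
 * shifted right by 4 (bits 35:4), and must write the instruction
 * bytecount register last, with the load flag set (bit 31 per the
 * comment above), to tell the uCode that all new pointer/size info is
 * in place.  A standalone restatement, with hypothetical names:
 */
#include <stdint.h>

#define SKETCH_DRAM_INST_LOAD	(1u << 31)	/* assumed flag position, per comment above */

static uint32_t sketch_bsm_dram_ptr(uint64_t phys_addr)
{
	return (uint32_t)(phys_addr >> 4);	/* bits 35:4 of the DMA address */
}

static uint32_t sketch_bsm_inst_bytecount(uint32_t inst_len)
{
	/* final write: length plus the "new pointers are valid" flag */
	return inst_len | SKETCH_DRAM_INST_LOAD;
}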
6571
6572/**
bb8c093b 6573 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
6574 *
6575 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6576 *
6577 * The 4965 "initialize" ALIVE reply contains calibration data for:
6578 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6579 * (3945 does not contain this data).
6580 *
6581 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6582*/
bb8c093b 6583static void iwl4965_init_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6584{
6585 /* Check alive response for "valid" sign from uCode */
6586 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6587 /* We had an error bringing up the hardware, so take it
6588 * all the way back down so we can try again */
6589 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6590 goto restart;
6591 }
6592
6593 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6594 * This is a paranoid check, because we would not have gotten the
6595 * "initialize" alive if code weren't properly loaded. */
bb8c093b 6596 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6597 /* Runtime instruction load was bad;
6598 * take it all the way back down so we can try again */
6599 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6600 goto restart;
6601 }
6602
6603 /* Calculate temperature */
6604 priv->temperature = iwl4965_get_temperature(priv);
6605
6606 /* Send pointers to protocol/runtime uCode image ... init code will
6607 * load and launch runtime uCode, which will send us another "Alive"
6608 * notification. */
6609 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 6610 if (iwl4965_set_ucode_ptrs(priv)) {
b481de9c
ZY
6611 /* Runtime instruction load won't happen;
6612 * take it all the way back down so we can try again */
6613 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6614 goto restart;
6615 }
6616 return;
6617
6618 restart:
6619 queue_work(priv->workqueue, &priv->restart);
6620}
6621
6622
6623/**
bb8c093b 6624 * iwl4965_alive_start - called after REPLY_ALIVE notification received
b481de9c 6625 * from protocol/runtime uCode (initialization uCode's
bb8c093b 6626 * Alive gets handled by iwl4965_init_alive_start()).
b481de9c 6627 */
bb8c093b 6628static void iwl4965_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6629{
6630 int rc = 0;
6631
6632 IWL_DEBUG_INFO("Runtime Alive received.\n");
6633
6634 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
6635 /* We had an error bringing up the hardware, so take it
6636 * all the way back down so we can try again */
6637 IWL_DEBUG_INFO("Alive failed.\n");
6638 goto restart;
6639 }
6640
6641 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
6642 * This is a paranoid check, because we would not have gotten the
6643 * "runtime" alive if code weren't properly loaded. */
bb8c093b 6644 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6645 /* Runtime instruction load was bad;
6646 * take it all the way back down so we can try again */
6647 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
6648 goto restart;
6649 }
6650
bb8c093b 6651 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6652
6653 rc = iwl4965_alive_notify(priv);
6654 if (rc) {
6655 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
6656 rc);
6657 goto restart;
6658 }
6659
9fbab516 6660 /* After the ALIVE response, we can send host commands to 4965 uCode */
b481de9c
ZY
6661 set_bit(STATUS_ALIVE, &priv->status);
6662
6663 /* Clear out the uCode error bit if it is set */
6664 clear_bit(STATUS_FW_ERROR, &priv->status);
6665
bb8c093b 6666 rc = iwl4965_init_channel_map(priv);
b481de9c
ZY
6667 if (rc) {
6668 IWL_ERROR("initializing regulatory failed: %d\n", rc);
6669 return;
6670 }
6671
bb8c093b 6672 iwl4965_init_geos(priv);
b481de9c 6673
bb8c093b 6674 if (iwl4965_is_rfkill(priv))
b481de9c
ZY
6675 return;
6676
6677 if (!priv->mac80211_registered) {
6678 /* Unlock so any user space entry points can call back into
6679 * the driver without a deadlock... */
6680 mutex_unlock(&priv->mutex);
bb8c093b 6681 iwl4965_rate_control_register(priv->hw);
b481de9c
ZY
6682 rc = ieee80211_register_hw(priv->hw);
6683 priv->hw->conf.beacon_int = 100;
6684 mutex_lock(&priv->mutex);
6685
6686 if (rc) {
bb8c093b 6687 iwl4965_rate_control_unregister(priv->hw);
b481de9c
ZY
6688 IWL_ERROR("Failed to register network "
6689 "device (error %d)\n", rc);
6690 return;
6691 }
6692
6693 priv->mac80211_registered = 1;
6694
bb8c093b 6695 iwl4965_reset_channel_flag(priv);
b481de9c
ZY
6696 } else
6697 ieee80211_start_queues(priv->hw);
6698
6699 priv->active_rate = priv->rates_mask;
6700 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
6701
bb8c093b 6702 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 6703
bb8c093b
CH
6704 if (iwl4965_is_associated(priv)) {
6705 struct iwl4965_rxon_cmd *active_rxon =
6706 (struct iwl4965_rxon_cmd *)(&priv->active_rxon);
b481de9c
ZY
6707
6708 memcpy(&priv->staging_rxon, &priv->active_rxon,
6709 sizeof(priv->staging_rxon));
6710 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6711 } else {
6712 /* Initialize our rx_config data */
bb8c093b 6713 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
6714 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
6715 }
6716
9fbab516 6717 /* Configure Bluetooth device coexistence support */
bb8c093b 6718 iwl4965_send_bt_config(priv);
b481de9c
ZY
6719
6720 /* Configure the adapter for unassociated operation */
bb8c093b 6721 iwl4965_commit_rxon(priv);
b481de9c
ZY
6722
6723 /* At this point, the NIC is initialized and operational */
6724 priv->notif_missed_beacons = 0;
6725 set_bit(STATUS_READY, &priv->status);
6726
6727 iwl4965_rf_kill_ct_config(priv);
6728 IWL_DEBUG_INFO("ALIVE processing complete.\n");
6729
6730 if (priv->error_recovering)
bb8c093b 6731 iwl4965_error_recovery(priv);
b481de9c
ZY
6732
6733 return;
6734
6735 restart:
6736 queue_work(priv->workqueue, &priv->restart);
6737}
6738
bb8c093b 6739static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv);
b481de9c 6740
bb8c093b 6741static void __iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6742{
6743 unsigned long flags;
6744 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
6745 struct ieee80211_conf *conf = NULL;
6746
6747 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
6748
6749 conf = ieee80211_get_hw_conf(priv->hw);
6750
6751 if (!exit_pending)
6752 set_bit(STATUS_EXIT_PENDING, &priv->status);
6753
bb8c093b 6754 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6755
6756 /* Unblock any waiting calls */
6757 wake_up_interruptible_all(&priv->wait_command_queue);
6758
b481de9c
ZY
6759 /* Wipe out the EXIT_PENDING status bit if we are not actually
6760 * exiting the module */
6761 if (!exit_pending)
6762 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6763
6764 /* stop and reset the on-board processor */
bb8c093b 6765 iwl4965_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
6766
6767 /* tell the device to stop sending interrupts */
bb8c093b 6768 iwl4965_disable_interrupts(priv);
b481de9c
ZY
6769
6770 if (priv->mac80211_registered)
6771 ieee80211_stop_queues(priv->hw);
6772
bb8c093b 6773 /* If we have not previously called iwl4965_init() then
b481de9c 6774 * clear all bits but the RF Kill and SUSPEND bits and return */
bb8c093b 6775 if (!iwl4965_is_init(priv)) {
b481de9c
ZY
6776 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6777 STATUS_RF_KILL_HW |
6778 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6779 STATUS_RF_KILL_SW |
6780 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6781 STATUS_IN_SUSPEND;
6782 goto exit;
6783 }
6784
6785 /* ...otherwise clear out all the status bits but the RF Kill and
6786 * SUSPEND bits and continue taking the NIC down. */
6787 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6788 STATUS_RF_KILL_HW |
6789 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6790 STATUS_RF_KILL_SW |
6791 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6792 STATUS_IN_SUSPEND |
6793 test_bit(STATUS_FW_ERROR, &priv->status) <<
6794 STATUS_FW_ERROR;
6795
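 /*
 * Illustrative note (not driver code): the two assignments above
 * rebuild priv->status so that only the RF-kill, suspend and (in this
 * full-teardown path) firmware-error bits survive the shutdown.
 * Written as an explicit mask over the same STATUS_* bit positions
 * used by the driver:
 *
 *	status &= (1UL << STATUS_RF_KILL_HW) |
 *		  (1UL << STATUS_RF_KILL_SW) |
 *		  (1UL << STATUS_IN_SUSPEND) |
 *		  (1UL << STATUS_FW_ERROR);
 */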
6796 spin_lock_irqsave(&priv->lock, flags);
9fbab516
BC
6797 iwl4965_clear_bit(priv, CSR_GP_CNTRL,
6798 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
6799 spin_unlock_irqrestore(&priv->lock, flags);
6800
bb8c093b
CH
6801 iwl4965_hw_txq_ctx_stop(priv);
6802 iwl4965_hw_rxq_stop(priv);
b481de9c
ZY
6803
6804 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
6805 if (!iwl4965_grab_nic_access(priv)) {
6806 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 6807 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 6808 iwl4965_release_nic_access(priv);
b481de9c
ZY
6809 }
6810 spin_unlock_irqrestore(&priv->lock, flags);
6811
6812 udelay(5);
6813
bb8c093b
CH
6814 iwl4965_hw_nic_stop_master(priv);
6815 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6816 iwl4965_hw_nic_reset(priv);
b481de9c
ZY
6817
6818 exit:
bb8c093b 6819 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
6820
6821 if (priv->ibss_beacon)
6822 dev_kfree_skb(priv->ibss_beacon);
6823 priv->ibss_beacon = NULL;
6824
6825 /* clear out any free frames */
bb8c093b 6826 iwl4965_clear_free_frames(priv);
b481de9c
ZY
6827}
6828
bb8c093b 6829static void iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6830{
6831 mutex_lock(&priv->mutex);
bb8c093b 6832 __iwl4965_down(priv);
b481de9c 6833 mutex_unlock(&priv->mutex);
b24d22b1 6834
bb8c093b 6835 iwl4965_cancel_deferred_work(priv);
b481de9c
ZY
6836}
6837
6838#define MAX_HW_RESTARTS 5
6839
bb8c093b 6840static int __iwl4965_up(struct iwl4965_priv *priv)
b481de9c 6841{
0795af57 6842 DECLARE_MAC_BUF(mac);
b481de9c
ZY
6843 int rc, i;
6844 u32 hw_rf_kill = 0;
6845
6846 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6847 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6848 return -EIO;
6849 }
6850
6851 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6852 IWL_WARNING("Radio disabled by SW RF kill (module "
6853 "parameter)\n");
6854 return 0;
6855 }
6856
a781cf94
RC
6857 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
6858 IWL_ERROR("ucode not available for device bringup\n");
6859 return -EIO;
6860 }
6861
bb8c093b 6862 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 6863
bb8c093b 6864 rc = iwl4965_hw_nic_init(priv);
b481de9c
ZY
6865 if (rc) {
 6866 IWL_ERROR("Unable to init nic\n");
6867 return rc;
6868 }
6869
6870 /* make sure rfkill handshake bits are cleared */
bb8c093b
CH
6871 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6872 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
6873 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6874
6875 /* clear (again), then enable host interrupts */
bb8c093b
CH
6876 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
6877 iwl4965_enable_interrupts(priv);
b481de9c
ZY
6878
6879 /* really make sure rfkill handshake bits are cleared */
bb8c093b
CH
6880 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6881 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
6882
6883 /* Copy original ucode data image from disk into backup cache.
6884 * This will be used to initialize the on-board processor's
6885 * data SRAM for a clean start when the runtime program first loads. */
6886 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
6887 priv->ucode_data.len);
6888
6889 /* If platform's RF_KILL switch is set to KILL,
6890 * wait for BIT_INT_RF_KILL interrupt before loading uCode
6891 * and getting things started */
bb8c093b 6892 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
6893 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
6894 hw_rf_kill = 1;
6895
6896 if (test_bit(STATUS_RF_KILL_HW, &priv->status) || hw_rf_kill) {
6897 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
6898 return 0;
6899 }
6900
6901 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6902
bb8c093b 6903 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6904
6905 /* load bootstrap state machine,
6906 * load bootstrap program into processor's memory,
6907 * prepare to load the "initialize" uCode */
bb8c093b 6908 rc = iwl4965_load_bsm(priv);
b481de9c
ZY
6909
6910 if (rc) {
6911 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6912 continue;
6913 }
6914
6915 /* start card; "initialize" will load runtime ucode */
bb8c093b 6916 iwl4965_nic_start(priv);
b481de9c 6917
9fbab516 6918 /* MAC Address location in EEPROM is same for 3945/4965 */
b481de9c 6919 get_eeprom_mac(priv, priv->mac_addr);
0795af57
JP
6920 IWL_DEBUG_INFO("MAC address: %s\n",
6921 print_mac(mac, priv->mac_addr));
b481de9c
ZY
6922
6923 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
6924
6925 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6926
6927 return 0;
6928 }
6929
6930 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 6931 __iwl4965_down(priv);
b481de9c
ZY
6932
6933 /* tried to restart and config the device for as long as our
6934 * patience could withstand */
6935 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6936 return -EIO;
6937}
6938
6939
6940/*****************************************************************************
6941 *
6942 * Workqueue callbacks
6943 *
6944 *****************************************************************************/
6945
bb8c093b 6946static void iwl4965_bg_init_alive_start(struct work_struct *data)
b481de9c 6947{
bb8c093b
CH
6948 struct iwl4965_priv *priv =
6949 container_of(data, struct iwl4965_priv, init_alive_start.work);
b481de9c
ZY
6950
6951 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6952 return;
6953
6954 mutex_lock(&priv->mutex);
bb8c093b 6955 iwl4965_init_alive_start(priv);
b481de9c
ZY
6956 mutex_unlock(&priv->mutex);
6957}
6958
bb8c093b 6959static void iwl4965_bg_alive_start(struct work_struct *data)
b481de9c 6960{
bb8c093b
CH
6961 struct iwl4965_priv *priv =
6962 container_of(data, struct iwl4965_priv, alive_start.work);
b481de9c
ZY
6963
6964 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6965 return;
6966
6967 mutex_lock(&priv->mutex);
bb8c093b 6968 iwl4965_alive_start(priv);
b481de9c
ZY
6969 mutex_unlock(&priv->mutex);
6970}
6971
bb8c093b 6972static void iwl4965_bg_rf_kill(struct work_struct *work)
b481de9c 6973{
bb8c093b 6974 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, rf_kill);
b481de9c
ZY
6975
6976 wake_up_interruptible(&priv->wait_command_queue);
6977
6978 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6979 return;
6980
6981 mutex_lock(&priv->mutex);
6982
bb8c093b 6983 if (!iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6984 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6985 "HW and/or SW RF Kill no longer active, restarting "
6986 "device\n");
6987 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6988 queue_work(priv->workqueue, &priv->restart);
6989 } else {
6990
6991 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6992 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6993 "disabled by SW switch\n");
6994 else
6995 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6996 "Kill switch must be turned off for "
6997 "wireless networking to work.\n");
6998 }
6999 mutex_unlock(&priv->mutex);
7000}
7001
7002#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
7003
bb8c093b 7004static void iwl4965_bg_scan_check(struct work_struct *data)
b481de9c 7005{
bb8c093b
CH
7006 struct iwl4965_priv *priv =
7007 container_of(data, struct iwl4965_priv, scan_check.work);
b481de9c
ZY
7008
7009 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7010 return;
7011
7012 mutex_lock(&priv->mutex);
7013 if (test_bit(STATUS_SCANNING, &priv->status) ||
7014 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
7015 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
7016 "Scan completion watchdog resetting adapter (%dms)\n",
7017 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
052c4b9f 7018
b481de9c 7019 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 7020 iwl4965_send_scan_abort(priv);
b481de9c
ZY
7021 }
7022 mutex_unlock(&priv->mutex);
7023}
7024
bb8c093b 7025static void iwl4965_bg_request_scan(struct work_struct *data)
b481de9c 7026{
bb8c093b
CH
7027 struct iwl4965_priv *priv =
7028 container_of(data, struct iwl4965_priv, request_scan);
7029 struct iwl4965_host_cmd cmd = {
b481de9c 7030 .id = REPLY_SCAN_CMD,
bb8c093b 7031 .len = sizeof(struct iwl4965_scan_cmd),
b481de9c
ZY
7032 .meta.flags = CMD_SIZE_HUGE,
7033 };
7034 int rc = 0;
bb8c093b 7035 struct iwl4965_scan_cmd *scan;
b481de9c
ZY
7036 struct ieee80211_conf *conf = NULL;
7037 u8 direct_mask;
7038 int phymode;
7039
7040 conf = ieee80211_get_hw_conf(priv->hw);
7041
7042 mutex_lock(&priv->mutex);
7043
bb8c093b 7044 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
7045 IWL_WARNING("request scan called when driver not ready.\n");
7046 goto done;
7047 }
7048
7049 /* Make sure the scan wasn't cancelled before this queued work
7050 * was given the chance to run... */
7051 if (!test_bit(STATUS_SCANNING, &priv->status))
7052 goto done;
7053
7054 /* This should never be called or scheduled if there is currently
7055 * a scan active in the hardware. */
7056 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
7057 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
7058 "Ignoring second request.\n");
7059 rc = -EIO;
7060 goto done;
7061 }
7062
7063 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
7064 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
7065 goto done;
7066 }
7067
7068 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
7069 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
7070 goto done;
7071 }
7072
bb8c093b 7073 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
7074 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
7075 goto done;
7076 }
7077
7078 if (!test_bit(STATUS_READY, &priv->status)) {
7079 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
7080 goto done;
7081 }
7082
7083 if (!priv->scan_bands) {
7084 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
7085 goto done;
7086 }
7087
7088 if (!priv->scan) {
bb8c093b 7089 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
b481de9c
ZY
7090 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
7091 if (!priv->scan) {
7092 rc = -ENOMEM;
7093 goto done;
7094 }
7095 }
7096 scan = priv->scan;
bb8c093b 7097 memset(scan, 0, sizeof(struct iwl4965_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
7098
7099 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
7100 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
7101
bb8c093b 7102 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
7103 u16 interval = 0;
7104 u32 extra;
7105 u32 suspend_time = 100;
7106 u32 scan_suspend_time = 100;
7107 unsigned long flags;
7108
7109 IWL_DEBUG_INFO("Scanning while associated...\n");
7110
7111 spin_lock_irqsave(&priv->lock, flags);
7112 interval = priv->beacon_int;
7113 spin_unlock_irqrestore(&priv->lock, flags);
7114
7115 scan->suspend_time = 0;
052c4b9f 7116 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
7117 if (!interval)
7118 interval = suspend_time;
7119
7120 extra = (suspend_time / interval) << 22;
7121 scan_suspend_time = (extra |
7122 ((suspend_time % interval) * 1024));
7123 scan->suspend_time = cpu_to_le32(scan_suspend_time);
7124 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
7125 scan_suspend_time, interval);
7126 }
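
 /*
 * Illustrative note (not driver code): the packed value computed above
 * splits the suspend time into whole beacon intervals (stored from
 * bit 22 up) and the remainder scaled by 1024 (low bits), e.g.:
 *
 *	suspend_time = 100, beacon interval = 100
 *	  -> extra          = (100 / 100) << 22   = 0x00400000
 *	  -> remainder      = (100 % 100) * 1024  = 0
 *	  -> scan->suspend_time                   = 0x00400000
 */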
7127
7128 /* We should add the ability for user to lock to PASSIVE ONLY */
7129 if (priv->one_direct_scan) {
7130 IWL_DEBUG_SCAN
7131 ("Kicking off one direct scan for '%s'\n",
bb8c093b 7132 iwl4965_escape_essid(priv->direct_ssid,
b481de9c
ZY
7133 priv->direct_ssid_len));
7134 scan->direct_scan[0].id = WLAN_EID_SSID;
7135 scan->direct_scan[0].len = priv->direct_ssid_len;
7136 memcpy(scan->direct_scan[0].ssid,
7137 priv->direct_ssid, priv->direct_ssid_len);
7138 direct_mask = 1;
bb8c093b 7139 } else if (!iwl4965_is_associated(priv) && priv->essid_len) {
b481de9c
ZY
7140 scan->direct_scan[0].id = WLAN_EID_SSID;
7141 scan->direct_scan[0].len = priv->essid_len;
7142 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
7143 direct_mask = 1;
7144 } else
7145 direct_mask = 0;
7146
7147 /* We don't build a direct scan probe request; the uCode will do
7148 * that based on the direct_mask added to each channel entry */
7149 scan->tx_cmd.len = cpu_to_le16(
bb8c093b 7150 iwl4965_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
b481de9c
ZY
7151 IWL_MAX_SCAN_SIZE - sizeof(scan), 0));
7152 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
7153 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
7154 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
7155
7156 /* flags + rate selection */
7157
7158 scan->tx_cmd.tx_flags |= cpu_to_le32(0x200);
7159
7160 switch (priv->scan_bands) {
7161 case 2:
7162 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
7163 scan->tx_cmd.rate_n_flags =
bb8c093b 7164 iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
b481de9c
ZY
7165 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
7166
7167 scan->good_CRC_th = 0;
7168 phymode = MODE_IEEE80211G;
7169 break;
7170
7171 case 1:
7172 scan->tx_cmd.rate_n_flags =
bb8c093b 7173 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
b481de9c
ZY
7174 RATE_MCS_ANT_B_MSK);
7175 scan->good_CRC_th = IWL_GOOD_CRC_TH;
7176 phymode = MODE_IEEE80211A;
7177 break;
7178
7179 default:
7180 IWL_WARNING("Invalid scan band count\n");
7181 goto done;
7182 }
7183
7184 /* select Rx chains */
7185
7186 /* Force use of chains B and C (0x6) for scan Rx.
7187 * Avoid A (0x1) because of its off-channel reception on A-band.
7188 * MIMO is not used here, but value is required to make uCode happy. */
7189 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
7190 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
7191 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
7192 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
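
 /*
 * Illustrative note (not driver code): in these chain bitmaps bit 0 is
 * chain A, bit 1 is chain B, bit 2 is chain C (per the comment above:
 * A = 0x1, B|C = 0x6, A|B|C = 0x7), so all three chains are marked
 * valid while only B and C are forced for scan Rx.
 */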
7193
7194 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
7195 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
7196
7197 if (direct_mask)
7198 IWL_DEBUG_SCAN
7199 ("Initiating direct scan for %s.\n",
bb8c093b 7200 iwl4965_escape_essid(priv->essid, priv->essid_len));
b481de9c
ZY
7201 else
7202 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
7203
7204 scan->channel_count =
bb8c093b 7205 iwl4965_get_channels_for_scan(
b481de9c
ZY
7206 priv, phymode, 1, /* active */
7207 direct_mask,
7208 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
7209
7210 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 7211 scan->channel_count * sizeof(struct iwl4965_scan_channel);
b481de9c
ZY
7212 cmd.data = scan;
7213 scan->len = cpu_to_le16(cmd.len);
7214
7215 set_bit(STATUS_SCAN_HW, &priv->status);
bb8c093b 7216 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
7217 if (rc)
7218 goto done;
7219
7220 queue_delayed_work(priv->workqueue, &priv->scan_check,
7221 IWL_SCAN_CHECK_WATCHDOG);
7222
7223 mutex_unlock(&priv->mutex);
7224 return;
7225
7226 done:
01ebd063 7227 /* inform mac80211 scan aborted */
b481de9c
ZY
7228 queue_work(priv->workqueue, &priv->scan_completed);
7229 mutex_unlock(&priv->mutex);
7230}
7231
bb8c093b 7232static void iwl4965_bg_up(struct work_struct *data)
b481de9c 7233{
bb8c093b 7234 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, up);
b481de9c
ZY
7235
7236 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7237 return;
7238
7239 mutex_lock(&priv->mutex);
bb8c093b 7240 __iwl4965_up(priv);
b481de9c
ZY
7241 mutex_unlock(&priv->mutex);
7242}
7243
bb8c093b 7244static void iwl4965_bg_restart(struct work_struct *data)
b481de9c 7245{
bb8c093b 7246 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, restart);
b481de9c
ZY
7247
7248 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7249 return;
7250
bb8c093b 7251 iwl4965_down(priv);
b481de9c
ZY
7252 queue_work(priv->workqueue, &priv->up);
7253}
7254
bb8c093b 7255static void iwl4965_bg_rx_replenish(struct work_struct *data)
b481de9c 7256{
bb8c093b
CH
7257 struct iwl4965_priv *priv =
7258 container_of(data, struct iwl4965_priv, rx_replenish);
b481de9c
ZY
7259
7260 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7261 return;
7262
7263 mutex_lock(&priv->mutex);
bb8c093b 7264 iwl4965_rx_replenish(priv);
b481de9c
ZY
7265 mutex_unlock(&priv->mutex);
7266}
7267
7878a5a4
MA
7268#define IWL_DELAY_NEXT_SCAN (HZ*2)
7269
bb8c093b 7270static void iwl4965_bg_post_associate(struct work_struct *data)
b481de9c 7271{
bb8c093b 7272 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv,
b481de9c
ZY
7273 post_associate.work);
7274
7275 int rc = 0;
7276 struct ieee80211_conf *conf = NULL;
0795af57 7277 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7278
7279 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7280 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
7281 return;
7282 }
7283
0795af57
JP
7284 IWL_DEBUG_ASSOC("Associated as %d to: %s\n",
7285 priv->assoc_id,
7286 print_mac(mac, priv->active_rxon.bssid_addr));
b481de9c
ZY
7287
7288
7289 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7290 return;
7291
7292 mutex_lock(&priv->mutex);
7293
948c171c
MA
7294 if (!priv->interface_id || !priv->is_open) {
7295 mutex_unlock(&priv->mutex);
7296 return;
7297 }
bb8c093b 7298 iwl4965_scan_cancel_timeout(priv, 200);
052c4b9f 7299
b481de9c
ZY
7300 conf = ieee80211_get_hw_conf(priv->hw);
7301
7302 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7303 iwl4965_commit_rxon(priv);
b481de9c 7304
bb8c093b
CH
7305 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7306 iwl4965_setup_rxon_timing(priv);
7307 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7308 sizeof(priv->rxon_timing), &priv->rxon_timing);
7309 if (rc)
7310 IWL_WARNING("REPLY_RXON_TIMING failed - "
7311 "Attempting to continue.\n");
7312
7313 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7314
c8b0e6e1 7315#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
7316 if (priv->is_ht_enabled && priv->current_assoc_ht.is_ht)
7317 iwl4965_set_rxon_ht(priv, &priv->current_assoc_ht);
7318 else {
7319 priv->active_rate_ht[0] = 0;
7320 priv->active_rate_ht[1] = 0;
7321 priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ;
7322 }
c8b0e6e1 7323#endif /* CONFIG_IWL4965_HT*/
b481de9c
ZY
7324 iwl4965_set_rxon_chain(priv);
7325 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7326
7327 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
7328 priv->assoc_id, priv->beacon_int);
7329
7330 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7331 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7332 else
7333 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7334
7335 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7336 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
7337 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
7338 else
7339 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7340
7341 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7342 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7343
7344 }
7345
bb8c093b 7346 iwl4965_commit_rxon(priv);
b481de9c
ZY
7347
7348 switch (priv->iw_mode) {
7349 case IEEE80211_IF_TYPE_STA:
bb8c093b 7350 iwl4965_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
7351 break;
7352
7353 case IEEE80211_IF_TYPE_IBSS:
7354
7355 /* clear out the station table */
bb8c093b 7356 iwl4965_clear_stations_table(priv);
b481de9c 7357
bb8c093b
CH
7358 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
7359 iwl4965_rxon_add_station(priv, priv->bssid, 0);
7360 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
7361 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7362
7363 break;
7364
7365 default:
7366 IWL_ERROR("%s Should not be called in %d mode\n",
7367 __FUNCTION__, priv->iw_mode);
7368 break;
7369 }
7370
bb8c093b 7371 iwl4965_sequence_reset(priv);
b481de9c 7372
c8b0e6e1 7373#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c
ZY
7374 /* Enable Rx differential gain and sensitivity calibrations */
7375 iwl4965_chain_noise_reset(priv);
7376 priv->start_calib = 1;
c8b0e6e1 7377#endif /* CONFIG_IWL4965_SENSITIVITY */
b481de9c
ZY
7378
7379 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7380 priv->assoc_station_added = 1;
7381
c8b0e6e1 7382#ifdef CONFIG_IWL4965_QOS
bb8c093b 7383 iwl4965_activate_qos(priv, 0);
c8b0e6e1 7384#endif /* CONFIG_IWL4965_QOS */
7878a5a4
MA
7385 /* we have just associated, don't start scan too early */
7386 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
b481de9c
ZY
7387 mutex_unlock(&priv->mutex);
7388}
7389
bb8c093b 7390static void iwl4965_bg_abort_scan(struct work_struct *work)
b481de9c 7391{
bb8c093b 7392 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, abort_scan);
b481de9c 7393
bb8c093b 7394 if (!iwl4965_is_ready(priv))
b481de9c
ZY
7395 return;
7396
7397 mutex_lock(&priv->mutex);
7398
7399 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 7400 iwl4965_send_scan_abort(priv);
b481de9c
ZY
7401
7402 mutex_unlock(&priv->mutex);
7403}
7404
bb8c093b 7405static void iwl4965_bg_scan_completed(struct work_struct *work)
b481de9c 7406{
bb8c093b
CH
7407 struct iwl4965_priv *priv =
7408 container_of(work, struct iwl4965_priv, scan_completed);
b481de9c
ZY
7409
7410 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
7411
7412 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7413 return;
7414
7415 ieee80211_scan_completed(priv->hw);
7416
7417 /* Since setting the TXPOWER may have been deferred while
7418 * performing the scan, fire one off */
7419 mutex_lock(&priv->mutex);
bb8c093b 7420 iwl4965_hw_reg_send_txpower(priv);
b481de9c
ZY
7421 mutex_unlock(&priv->mutex);
7422}
7423
7424/*****************************************************************************
7425 *
7426 * mac80211 entry point functions
7427 *
7428 *****************************************************************************/
7429
bb8c093b 7430static int iwl4965_mac_start(struct ieee80211_hw *hw)
b481de9c 7431{
bb8c093b 7432 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7433
7434 IWL_DEBUG_MAC80211("enter\n");
7435
7436 /* we should be verifying the device is ready to be opened */
7437 mutex_lock(&priv->mutex);
7438
7439 priv->is_open = 1;
7440
bb8c093b 7441 if (!iwl4965_is_rfkill(priv))
b481de9c
ZY
7442 ieee80211_start_queues(priv->hw);
7443
7444 mutex_unlock(&priv->mutex);
7445 IWL_DEBUG_MAC80211("leave\n");
7446 return 0;
7447}
7448
bb8c093b 7449static void iwl4965_mac_stop(struct ieee80211_hw *hw)
b481de9c 7450{
bb8c093b 7451 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7452
7453 IWL_DEBUG_MAC80211("enter\n");
948c171c
MA
7454
7455
7456 mutex_lock(&priv->mutex);
7457 /* stop mac, cancel any scan request and clear
7458 * RXON_FILTER_ASSOC_MSK BIT
7459 */
b481de9c 7460 priv->is_open = 0;
bb8c093b 7461 iwl4965_scan_cancel_timeout(priv, 100);
948c171c
MA
7462 cancel_delayed_work(&priv->post_associate);
7463 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7464 iwl4965_commit_rxon(priv);
948c171c
MA
7465 mutex_unlock(&priv->mutex);
7466
b481de9c 7467 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7468}
7469
bb8c093b 7470static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
7471 struct ieee80211_tx_control *ctl)
7472{
bb8c093b 7473 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7474
7475 IWL_DEBUG_MAC80211("enter\n");
7476
7477 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
7478 IWL_DEBUG_MAC80211("leave - monitor\n");
7479 return -1;
7480 }
7481
7482 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7483 ctl->tx_rate);
7484
bb8c093b 7485 if (iwl4965_tx_skb(priv, skb, ctl))
b481de9c
ZY
7486 dev_kfree_skb_any(skb);
7487
7488 IWL_DEBUG_MAC80211("leave\n");
7489 return 0;
7490}
7491
bb8c093b 7492static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7493 struct ieee80211_if_init_conf *conf)
7494{
bb8c093b 7495 struct iwl4965_priv *priv = hw->priv;
b481de9c 7496 unsigned long flags;
0795af57 7497 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7498
7499 IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type);
b481de9c
ZY
7500
7501 if (priv->interface_id) {
7502 IWL_DEBUG_MAC80211("leave - interface_id != 0\n");
7503 return 0;
7504 }
7505
7506 spin_lock_irqsave(&priv->lock, flags);
7507 priv->interface_id = conf->if_id;
7508
7509 spin_unlock_irqrestore(&priv->lock, flags);
7510
7511 mutex_lock(&priv->mutex);
864792e3
TW
7512
7513 if (conf->mac_addr) {
7514 IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr));
7515 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
7516 }
bb8c093b 7517 iwl4965_set_mode(priv, conf->type);
b481de9c
ZY
7518
7519 IWL_DEBUG_MAC80211("leave\n");
7520 mutex_unlock(&priv->mutex);
7521
7522 return 0;
7523}
7524
7525/**
bb8c093b 7526 * iwl4965_mac_config - mac80211 config callback
b481de9c
ZY
7527 *
7528 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
7529 * be set inappropriately and the driver currently sets the hardware up to
7530 * use it whenever needed.
7531 */
bb8c093b 7532static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
b481de9c 7533{
bb8c093b
CH
7534 struct iwl4965_priv *priv = hw->priv;
7535 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
7536 unsigned long flags;
7537
7538 mutex_lock(&priv->mutex);
7539 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);
7540
bb8c093b 7541 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
7542 IWL_DEBUG_MAC80211("leave - not ready\n");
7543 mutex_unlock(&priv->mutex);
7544 return -EIO;
7545 }
7546
7547 /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only
01ebd063 7548 * what is exposed through include/ declarations */
bb8c093b 7549 if (unlikely(!iwl4965_param_disable_hw_scan &&
b481de9c
ZY
7550 test_bit(STATUS_SCANNING, &priv->status))) {
7551 IWL_DEBUG_MAC80211("leave - scanning\n");
7552 mutex_unlock(&priv->mutex);
7553 return 0;
7554 }
7555
7556 spin_lock_irqsave(&priv->lock, flags);
7557
bb8c093b 7558 ch_info = iwl4965_get_channel_info(priv, conf->phymode, conf->channel);
b481de9c
ZY
7559 if (!is_channel_valid(ch_info)) {
7560 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7561 conf->channel, conf->phymode);
7562 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7563 spin_unlock_irqrestore(&priv->lock, flags);
7564 mutex_unlock(&priv->mutex);
7565 return -EINVAL;
7566 }
7567
c8b0e6e1 7568#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
 7569 /* if we are switching from ht to 2.4, clear flags
7570 * from any ht related info since 2.4 does not
7571 * support ht */
7572 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel)
7573#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7574 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
7575#endif
7576 )
7577 priv->staging_rxon.flags = 0;
c8b0e6e1 7578#endif /* CONFIG_IWL4965_HT */
b481de9c 7579
bb8c093b 7580 iwl4965_set_rxon_channel(priv, conf->phymode, conf->channel);
b481de9c 7581
bb8c093b 7582 iwl4965_set_flags_for_phymode(priv, conf->phymode);
b481de9c
ZY
7583
7584 /* The list of supported rates and rate mask can be different
7585 * for each phymode; since the phymode may have changed, reset
7586 * the rate mask to what mac80211 lists */
bb8c093b 7587 iwl4965_set_rate(priv);
b481de9c
ZY
7588
7589 spin_unlock_irqrestore(&priv->lock, flags);
7590
7591#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7592 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 7593 iwl4965_hw_channel_switch(priv, conf->channel);
b481de9c
ZY
7594 mutex_unlock(&priv->mutex);
7595 return 0;
7596 }
7597#endif
7598
bb8c093b 7599 iwl4965_radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
7600
7601 if (!conf->radio_enabled) {
7602 IWL_DEBUG_MAC80211("leave - radio disabled\n");
7603 mutex_unlock(&priv->mutex);
7604 return 0;
7605 }
7606
bb8c093b 7607 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
7608 IWL_DEBUG_MAC80211("leave - RF kill\n");
7609 mutex_unlock(&priv->mutex);
7610 return -EIO;
7611 }
7612
bb8c093b 7613 iwl4965_set_rate(priv);
b481de9c
ZY
7614
7615 if (memcmp(&priv->active_rxon,
7616 &priv->staging_rxon, sizeof(priv->staging_rxon)))
bb8c093b 7617 iwl4965_commit_rxon(priv);
b481de9c
ZY
7618 else
7619 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
7620
7621 IWL_DEBUG_MAC80211("leave\n");
7622
7623 mutex_unlock(&priv->mutex);
7624
7625 return 0;
7626}
7627
bb8c093b 7628static void iwl4965_config_ap(struct iwl4965_priv *priv)
b481de9c
ZY
7629{
7630 int rc = 0;
7631
7632 if (priv->status & STATUS_EXIT_PENDING)
7633 return;
7634
7635 /* The following should be done only at AP bring up */
7636 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7637
7638 /* RXON - unassoc (to set timing command) */
7639 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7640 iwl4965_commit_rxon(priv);
b481de9c
ZY
7641
7642 /* RXON Timing */
bb8c093b
CH
7643 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7644 iwl4965_setup_rxon_timing(priv);
7645 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7646 sizeof(priv->rxon_timing), &priv->rxon_timing);
7647 if (rc)
7648 IWL_WARNING("REPLY_RXON_TIMING failed - "
7649 "Attempting to continue.\n");
7650
7651 iwl4965_set_rxon_chain(priv);
7652
7653 /* FIXME: what should be the assoc_id for AP? */
7654 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7655 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7656 priv->staging_rxon.flags |=
7657 RXON_FLG_SHORT_PREAMBLE_MSK;
7658 else
7659 priv->staging_rxon.flags &=
7660 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7661
7662 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7663 if (priv->assoc_capability &
7664 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7665 priv->staging_rxon.flags |=
7666 RXON_FLG_SHORT_SLOT_MSK;
7667 else
7668 priv->staging_rxon.flags &=
7669 ~RXON_FLG_SHORT_SLOT_MSK;
7670
7671 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7672 priv->staging_rxon.flags &=
7673 ~RXON_FLG_SHORT_SLOT_MSK;
7674 }
7675 /* restore RXON assoc */
7676 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 7677 iwl4965_commit_rxon(priv);
c8b0e6e1 7678#ifdef CONFIG_IWL4965_QOS
bb8c093b 7679 iwl4965_activate_qos(priv, 1);
b481de9c 7680#endif
bb8c093b 7681 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
e1493deb 7682 }
bb8c093b 7683 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7684
7685 /* FIXME - we need to add code here to detect a totally new
7686 * configuration, reset the AP, unassoc, rxon timing, assoc,
7687 * clear sta table, add BCAST sta... */
7688}
7689
bb8c093b 7690static int iwl4965_mac_config_interface(struct ieee80211_hw *hw, int if_id,
b481de9c
ZY
7691 struct ieee80211_if_conf *conf)
7692{
bb8c093b 7693 struct iwl4965_priv *priv = hw->priv;
0795af57 7694 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7695 unsigned long flags;
7696 int rc;
7697
7698 if (conf == NULL)
7699 return -EIO;
7700
7701 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7702 (!conf->beacon || !conf->ssid_len)) {
7703 IWL_DEBUG_MAC80211
7704 ("Leaving in AP mode because HostAPD is not ready.\n");
7705 return 0;
7706 }
7707
7708 mutex_lock(&priv->mutex);
7709
7710 IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id);
7711 if (conf->bssid)
0795af57
JP
7712 IWL_DEBUG_MAC80211("bssid: %s\n",
7713 print_mac(mac, conf->bssid));
b481de9c 7714
4150c572
JB
7715/*
7716 * very dubious code was here; the probe filtering flag is never set:
7717 *
b481de9c
ZY
7718 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7719 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572
JB
7720 */
7721 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
b481de9c
ZY
7722 IWL_DEBUG_MAC80211("leave - scanning\n");
7723 mutex_unlock(&priv->mutex);
7724 return 0;
7725 }
7726
7727 if (priv->interface_id != if_id) {
7728 IWL_DEBUG_MAC80211("leave - interface_id != if_id\n");
7729 mutex_unlock(&priv->mutex);
7730 return 0;
7731 }
7732
7733 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7734 if (!conf->bssid) {
7735 conf->bssid = priv->mac_addr;
7736 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
0795af57
JP
7737 IWL_DEBUG_MAC80211("bssid was set to: %s\n",
7738 print_mac(mac, conf->bssid));
b481de9c
ZY
7739 }
7740 if (priv->ibss_beacon)
7741 dev_kfree_skb(priv->ibss_beacon);
7742
7743 priv->ibss_beacon = conf->beacon;
7744 }
7745
7746 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
7747 !is_multicast_ether_addr(conf->bssid)) {
7748 /* If there is currently a HW scan going on in the background
7749 * then we need to cancel it else the RXON below will fail. */
bb8c093b 7750 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
7751 IWL_WARNING("Aborted scan still in progress "
7752 "after 100ms\n");
7753 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
7754 mutex_unlock(&priv->mutex);
7755 return -EAGAIN;
7756 }
7757 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
7758
7759 /* TODO: Audit driver for usage of these members and see
7760 * if mac80211 deprecates them (priv->bssid looks like it
7761 * shouldn't be there, but I haven't scanned the IBSS code
7762 * to verify) - jpk */
7763 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
7764
7765 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b 7766 iwl4965_config_ap(priv);
b481de9c 7767 else {
bb8c093b 7768 rc = iwl4965_commit_rxon(priv);
b481de9c 7769 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
bb8c093b 7770 iwl4965_rxon_add_station(
b481de9c
ZY
7771 priv, priv->active_rxon.bssid_addr, 1);
7772 }
7773
7774 } else {
bb8c093b 7775 iwl4965_scan_cancel_timeout(priv, 100);
b481de9c 7776 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7777 iwl4965_commit_rxon(priv);
b481de9c
ZY
7778 }
7779
7780 spin_lock_irqsave(&priv->lock, flags);
7781 if (!conf->ssid_len)
7782 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7783 else
7784 memcpy(priv->essid, conf->ssid, conf->ssid_len);
7785
7786 priv->essid_len = conf->ssid_len;
7787 spin_unlock_irqrestore(&priv->lock, flags);
7788
7789 IWL_DEBUG_MAC80211("leave\n");
7790 mutex_unlock(&priv->mutex);
7791
7792 return 0;
7793}
7794
bb8c093b 7795static void iwl4965_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
7796 unsigned int changed_flags,
7797 unsigned int *total_flags,
7798 int mc_count, struct dev_addr_list *mc_list)
7799{
7800 /*
7801 * XXX: dummy
bb8c093b 7802 * see also iwl4965_connection_init_rx_config
4150c572
JB
7803 */
7804 *total_flags = 0;
7805}
7806
bb8c093b 7807static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7808 struct ieee80211_if_init_conf *conf)
7809{
bb8c093b 7810 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7811
7812 IWL_DEBUG_MAC80211("enter\n");
7813
7814 mutex_lock(&priv->mutex);
948c171c 7815
bb8c093b 7816 iwl4965_scan_cancel_timeout(priv, 100);
948c171c
MA
7817 cancel_delayed_work(&priv->post_associate);
7818 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7819 iwl4965_commit_rxon(priv);
948c171c 7820
b481de9c
ZY
7821 if (priv->interface_id == conf->if_id) {
7822 priv->interface_id = 0;
7823 memset(priv->bssid, 0, ETH_ALEN);
7824 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7825 priv->essid_len = 0;
7826 }
7827 mutex_unlock(&priv->mutex);
7828
7829 IWL_DEBUG_MAC80211("leave\n");
7830
7831}
bb8c093b 7832static void iwl4965_mac_erp_ie_changed(struct ieee80211_hw *hw,
220173b0
TW
7833 u8 changes, int cts_protection, int preamble)
7834{
bb8c093b 7835 struct iwl4965_priv *priv = hw->priv;
220173b0
TW
7836
7837 if (changes & IEEE80211_ERP_CHANGE_PREAMBLE) {
7838 if (preamble == WLAN_ERP_PREAMBLE_SHORT)
7839 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7840 else
7841 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7842 }
7843
7844 if (changes & IEEE80211_ERP_CHANGE_PROTECTION) {
797a54c6 7845 if (cts_protection && (priv->phymode != MODE_IEEE80211A))
220173b0
TW
7846 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
7847 else
7848 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
7849 }
7850
bb8c093b
CH
7851 if (iwl4965_is_associated(priv))
7852 iwl4965_send_rxon_assoc(priv);
220173b0 7853}
b481de9c 7854
bb8c093b 7855static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
7856{
7857 int rc = 0;
7858 unsigned long flags;
bb8c093b 7859 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7860
7861 IWL_DEBUG_MAC80211("enter\n");
7862
052c4b9f 7863 mutex_lock(&priv->mutex);
b481de9c
ZY
7864 spin_lock_irqsave(&priv->lock, flags);
7865
bb8c093b 7866 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7867 rc = -EIO;
7868 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7869 goto out_unlock;
7870 }
7871
7872 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
7873 rc = -EIO;
7874 IWL_ERROR("ERROR: APs don't scan\n");
7875 goto out_unlock;
7876 }
7877
7878a5a4
MA
7878 /* we don't schedule scan within next_scan_jiffies period */
7879 if (priv->next_scan_jiffies &&
7880 time_after(priv->next_scan_jiffies, jiffies)) {
7881 rc = -EAGAIN;
7882 goto out_unlock;
7883 }
b481de9c 7884 /* if we just finished scan ask for delay */
7878a5a4
MA
7885 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
7886 IWL_DELAY_NEXT_SCAN, jiffies)) {
b481de9c
ZY
7887 rc = -EAGAIN;
7888 goto out_unlock;
7889 }
7890 if (len) {
7878a5a4 7891 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
bb8c093b 7892 iwl4965_escape_essid(ssid, len), (int)len);
b481de9c
ZY
7893
7894 priv->one_direct_scan = 1;
7895 priv->direct_ssid_len = (u8)
7896 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
7897 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
948c171c
MA
7898 } else
7899 priv->one_direct_scan = 0;
b481de9c 7900
bb8c093b 7901 rc = iwl4965_scan_initiate(priv);
b481de9c
ZY
7902
7903 IWL_DEBUG_MAC80211("leave\n");
7904
7905out_unlock:
7906 spin_unlock_irqrestore(&priv->lock, flags);
052c4b9f 7907 mutex_unlock(&priv->mutex);
b481de9c
ZY
7908
7909 return rc;
7910}
7911
bb8c093b 7912static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
b481de9c
ZY
7913 const u8 *local_addr, const u8 *addr,
7914 struct ieee80211_key_conf *key)
7915{
bb8c093b 7916 struct iwl4965_priv *priv = hw->priv;
0795af57 7917 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7918 int rc = 0;
7919 u8 sta_id;
7920
7921 IWL_DEBUG_MAC80211("enter\n");
7922
bb8c093b 7923 if (!iwl4965_param_hwcrypto) {
b481de9c
ZY
7924 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7925 return -EOPNOTSUPP;
7926 }
7927
7928 if (is_zero_ether_addr(addr))
7929 /* only support pairwise keys */
7930 return -EOPNOTSUPP;
7931
bb8c093b 7932 sta_id = iwl4965_hw_find_station(priv, addr);
b481de9c 7933 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
7934 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7935 print_mac(mac, addr));
b481de9c
ZY
7936 return -EINVAL;
7937 }
7938
7939 mutex_lock(&priv->mutex);
7940
bb8c093b 7941 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 7942
b481de9c
ZY
7943 switch (cmd) {
7944 case SET_KEY:
bb8c093b 7945 rc = iwl4965_update_sta_key_info(priv, key, sta_id);
b481de9c 7946 if (!rc) {
bb8c093b
CH
7947 iwl4965_set_rxon_hwcrypto(priv, 1);
7948 iwl4965_commit_rxon(priv);
b481de9c
ZY
7949 key->hw_key_idx = sta_id;
7950 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7951 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7952 }
7953 break;
7954 case DISABLE_KEY:
bb8c093b 7955 rc = iwl4965_clear_sta_key_info(priv, sta_id);
b481de9c 7956 if (!rc) {
bb8c093b
CH
7957 iwl4965_set_rxon_hwcrypto(priv, 0);
7958 iwl4965_commit_rxon(priv);
b481de9c
ZY
7959 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7960 }
7961 break;
7962 default:
7963 rc = -EINVAL;
7964 }
7965
7966 IWL_DEBUG_MAC80211("leave\n");
7967 mutex_unlock(&priv->mutex);
7968
7969 return rc;
7970}
7971
bb8c093b 7972static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
b481de9c
ZY
7973 const struct ieee80211_tx_queue_params *params)
7974{
bb8c093b 7975 struct iwl4965_priv *priv = hw->priv;
c8b0e6e1 7976#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7977 unsigned long flags;
7978 int q;
0054b34d 7979#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
7980
7981 IWL_DEBUG_MAC80211("enter\n");
7982
bb8c093b 7983 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7984 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7985 return -EIO;
7986 }
7987
7988 if (queue >= AC_NUM) {
7989 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7990 return 0;
7991 }
7992
c8b0e6e1 7993#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7994 if (!priv->qos_data.qos_enable) {
7995 priv->qos_data.qos_active = 0;
7996 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7997 return 0;
7998 }
7999 q = AC_NUM - 1 - queue;
8000
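 /*
 * Illustrative note (not driver code): the reversal above maps a
 * mac80211 queue index onto the order expected by the QoS command's
 * ac[] array (apparently the reverse of mac80211's ordering); with
 * AC_NUM == 4 this gives queue 0 -> ac 3, 1 -> ac 2, 2 -> ac 1,
 * 3 -> ac 0.
 */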
8001 spin_lock_irqsave(&priv->lock, flags);
8002
8003 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
8004 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
8005 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
8006 priv->qos_data.def_qos_parm.ac[q].edca_txop =
8007 cpu_to_le16((params->burst_time * 100));
8008
8009 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
8010 priv->qos_data.qos_active = 1;
8011
8012 spin_unlock_irqrestore(&priv->lock, flags);
8013
8014 mutex_lock(&priv->mutex);
8015 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b
CH
8016 iwl4965_activate_qos(priv, 1);
8017 else if (priv->assoc_id && iwl4965_is_associated(priv))
8018 iwl4965_activate_qos(priv, 0);
b481de9c
ZY
8019
8020 mutex_unlock(&priv->mutex);
8021
c8b0e6e1 8022#endif /*CONFIG_IWL4965_QOS */
b481de9c
ZY
8023
8024 IWL_DEBUG_MAC80211("leave\n");
8025 return 0;
8026}
8027
bb8c093b 8028static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
8029 struct ieee80211_tx_queue_stats *stats)
8030{
bb8c093b 8031 struct iwl4965_priv *priv = hw->priv;
b481de9c 8032 int i, avail;
bb8c093b
CH
8033 struct iwl4965_tx_queue *txq;
8034 struct iwl4965_queue *q;
b481de9c
ZY
8035 unsigned long flags;
8036
8037 IWL_DEBUG_MAC80211("enter\n");
8038
bb8c093b 8039 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8040 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8041 return -EIO;
8042 }
8043
8044 spin_lock_irqsave(&priv->lock, flags);
8045
8046 for (i = 0; i < AC_NUM; i++) {
8047 txq = &priv->txq[i];
8048 q = &txq->q;
bb8c093b 8049 avail = iwl4965_queue_space(q);
b481de9c
ZY
8050
8051 stats->data[i].len = q->n_window - avail;
8052 stats->data[i].limit = q->n_window - q->high_mark;
8053 stats->data[i].count = q->n_window;
8054
8055 }
8056 spin_unlock_irqrestore(&priv->lock, flags);
8057
8058 IWL_DEBUG_MAC80211("leave\n");
8059
8060 return 0;
8061}
8062
bb8c093b 8063static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
b481de9c
ZY
8064 struct ieee80211_low_level_stats *stats)
8065{
8066 IWL_DEBUG_MAC80211("enter\n");
8067 IWL_DEBUG_MAC80211("leave\n");
8068
8069 return 0;
8070}
8071
bb8c093b 8072static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
b481de9c
ZY
8073{
8074 IWL_DEBUG_MAC80211("enter\n");
8075 IWL_DEBUG_MAC80211("leave\n");
8076
8077 return 0;
8078}
8079
bb8c093b 8080static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 8081{
bb8c093b 8082 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8083 unsigned long flags;
8084
8085 mutex_lock(&priv->mutex);
8086 IWL_DEBUG_MAC80211("enter\n");
8087
8088 priv->lq_mngr.lq_ready = 0;
c8b0e6e1 8089#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
8090 spin_lock_irqsave(&priv->lock, flags);
8091 memset(&priv->current_assoc_ht, 0, sizeof(struct sta_ht_info));
8092 spin_unlock_irqrestore(&priv->lock, flags);
c8b0e6e1 8093#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
8094/* if (priv->lq_mngr.agg_ctrl.granted_ba)
8095 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/
8096
bb8c093b 8097 memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl4965_agg_control));
b481de9c
ZY
8098 priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10;
8099 priv->lq_mngr.agg_ctrl.ba_timeout = 5000;
8100 priv->lq_mngr.agg_ctrl.auto_agg = 1;
8101
8102 if (priv->lq_mngr.agg_ctrl.auto_agg)
8103 priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED;
c8b0e6e1
CH
8104#endif /*CONFIG_IWL4965_HT_AGG */
8105#endif /* CONFIG_IWL4965_HT */
b481de9c 8106
c8b0e6e1 8107#ifdef CONFIG_IWL4965_QOS
bb8c093b 8108 iwl4965_reset_qos(priv);
b481de9c
ZY
8109#endif
8110
8111 cancel_delayed_work(&priv->post_associate);
8112
8113 spin_lock_irqsave(&priv->lock, flags);
8114 priv->assoc_id = 0;
8115 priv->assoc_capability = 0;
8116 priv->call_post_assoc_from_beacon = 0;
8117 priv->assoc_station_added = 0;
8118
8119 /* new association get rid of ibss beacon skb */
8120 if (priv->ibss_beacon)
8121 dev_kfree_skb(priv->ibss_beacon);
8122
8123 priv->ibss_beacon = NULL;
8124
8125 priv->beacon_int = priv->hw->conf.beacon_int;
8126 priv->timestamp1 = 0;
8127 priv->timestamp0 = 0;
8128 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
8129 priv->beacon_int = 0;
8130
8131 spin_unlock_irqrestore(&priv->lock, flags);
8132
052c4b9f 8133 /* we are restarting association process
8134 * clear RXON_FILTER_ASSOC_MSK bit
8135 */
8136 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
bb8c093b 8137 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 8138 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 8139 iwl4965_commit_rxon(priv);
052c4b9f 8140 }
8141
b481de9c
ZY
8142 /* Per mac80211.h: This is only used in IBSS mode... */
8143 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
052c4b9f 8144
b481de9c
ZY
8145 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
8146 mutex_unlock(&priv->mutex);
8147 return;
8148 }
8149
bb8c093b 8150 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8151 IWL_DEBUG_MAC80211("leave - not ready\n");
8152 mutex_unlock(&priv->mutex);
8153 return;
8154 }
8155
8156 priv->only_active_channel = 0;
8157
bb8c093b 8158 iwl4965_set_rate(priv);
b481de9c
ZY
8159
8160 mutex_unlock(&priv->mutex);
8161
8162 IWL_DEBUG_MAC80211("leave\n");
8163
8164}
8165
bb8c093b 8166static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
8167 struct ieee80211_tx_control *control)
8168{
bb8c093b 8169 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8170 unsigned long flags;
8171
8172 mutex_lock(&priv->mutex);
8173 IWL_DEBUG_MAC80211("enter\n");
8174
bb8c093b 8175 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8176 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8177 mutex_unlock(&priv->mutex);
8178 return -EIO;
8179 }
8180
8181 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
8182 IWL_DEBUG_MAC80211("leave - not IBSS\n");
8183 mutex_unlock(&priv->mutex);
8184 return -EIO;
8185 }
8186
8187 spin_lock_irqsave(&priv->lock, flags);
8188
8189 if (priv->ibss_beacon)
8190 dev_kfree_skb(priv->ibss_beacon);
8191
8192 priv->ibss_beacon = skb;
8193
8194 priv->assoc_id = 0;
8195
8196 IWL_DEBUG_MAC80211("leave\n");
8197 spin_unlock_irqrestore(&priv->lock, flags);
8198
c8b0e6e1 8199#ifdef CONFIG_IWL4965_QOS
bb8c093b 8200 iwl4965_reset_qos(priv);
8201#endif
8202
8203 queue_work(priv->workqueue, &priv->post_associate.work);
8204
8205 mutex_unlock(&priv->mutex);
8206
8207 return 0;
8208}
8209
c8b0e6e1 8210#ifdef CONFIG_IWL4965_HT
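/* Bit-field overlays used below to unpack the 802.11n HT capability and
 * HT operation fields that mac80211 hands this driver as raw u8/u16 values. */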
8211union ht_cap_info {
8212 struct {
8213 u16 advanced_coding_cap :1;
8214 u16 supported_chan_width_set :1;
8215 u16 mimo_power_save_mode :2;
8216 u16 green_field :1;
8217 u16 short_GI20 :1;
8218 u16 short_GI40 :1;
8219 u16 tx_stbc :1;
8220 u16 rx_stbc :1;
8221 u16 beam_forming :1;
8222 u16 delayed_ba :1;
8223 u16 maximal_amsdu_size :1;
8224 u16 cck_mode_at_40MHz :1;
8225 u16 psmp_support :1;
8226 u16 stbc_ctrl_frame_support :1;
8227 u16 sig_txop_protection_support :1;
8228 };
8229 u16 val;
8230} __attribute__ ((packed));
8231
8232union ht_param_info{
8233 struct {
8234 u8 max_rx_ampdu_factor :2;
8235 u8 mpdu_density :3;
8236 u8 reserved :3;
8237 };
8238 u8 val;
8239} __attribute__ ((packed));
8240
 8241union ht_extra_param_info {
8242 struct {
8243 u8 ext_chan_offset :2;
8244 u8 tx_chan_width :1;
8245 u8 rifs_mode :1;
8246 u8 controlled_access_only :1;
8247 u8 service_interval_granularity :3;
8248 };
8249 u8 val;
8250} __attribute__ ((packed));
8251
8252union ht_operation_mode{
8253 struct {
8254 u16 op_mode :2;
8255 u16 non_GF :1;
8256 u16 reserved :13;
8257 };
8258 u16 val;
8259} __attribute__ ((packed));
8260
8261
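/* Fill the driver's sta_ht_info from the HT capability / HT additional-info
 * elements supplied by mac80211; when ht_info_ap is non-NULL it seeds the
 * channel and operating-mode fields, and ht_extra (if present) overrides them. */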
8262static int sta_ht_info_init(struct ieee80211_ht_capability *ht_cap,
8263 struct ieee80211_ht_additional_info *ht_extra,
8264 struct sta_ht_info *ht_info_ap,
8265 struct sta_ht_info *ht_info)
8266{
8267 union ht_cap_info cap;
8268 union ht_operation_mode op_mode;
8269 union ht_param_info param_info;
 8270 	union ht_extra_param_info extra_param_info;
8271
8272 IWL_DEBUG_MAC80211("enter: \n");
8273
8274 if (!ht_info) {
8275 IWL_DEBUG_MAC80211("leave: ht_info is NULL\n");
8276 return -1;
8277 }
8278
8279 if (ht_cap) {
8280 cap.val = (u16) le16_to_cpu(ht_cap->capabilities_info);
8281 param_info.val = ht_cap->mac_ht_params_info;
8282 ht_info->is_ht = 1;
8283 if (cap.short_GI20)
8284 ht_info->sgf |= 0x1;
8285 if (cap.short_GI40)
8286 ht_info->sgf |= 0x2;
8287 ht_info->is_green_field = cap.green_field;
8288 ht_info->max_amsdu_size = cap.maximal_amsdu_size;
8289 ht_info->supported_chan_width = cap.supported_chan_width_set;
8290 ht_info->tx_mimo_ps_mode = cap.mimo_power_save_mode;
8291 memcpy(ht_info->supp_rates, ht_cap->supported_mcs_set, 16);
8292
8293 ht_info->ampdu_factor = param_info.max_rx_ampdu_factor;
8294 ht_info->mpdu_density = param_info.mpdu_density;
8295
8296 IWL_DEBUG_MAC80211("SISO mask 0x%X MIMO mask 0x%X \n",
8297 ht_cap->supported_mcs_set[0],
8298 ht_cap->supported_mcs_set[1]);
8299
8300 if (ht_info_ap) {
8301 ht_info->control_channel = ht_info_ap->control_channel;
8302 ht_info->extension_chan_offset =
8303 ht_info_ap->extension_chan_offset;
8304 ht_info->tx_chan_width = ht_info_ap->tx_chan_width;
8305 ht_info->operating_mode = ht_info_ap->operating_mode;
8306 }
8307
8308 if (ht_extra) {
8309 extra_param_info.val = ht_extra->ht_param;
8310 ht_info->control_channel = ht_extra->control_chan;
8311 ht_info->extension_chan_offset =
8312 extra_param_info.ext_chan_offset;
8313 ht_info->tx_chan_width = extra_param_info.tx_chan_width;
8314 op_mode.val = (u16)
8315 le16_to_cpu(ht_extra->operation_mode);
8316 ht_info->operating_mode = op_mode.op_mode;
8317 IWL_DEBUG_MAC80211("control channel %d\n",
8318 ht_extra->control_chan);
8319 }
8320 } else
8321 ht_info->is_ht = 0;
8322
8323 IWL_DEBUG_MAC80211("leave\n");
8324 return 0;
8325}
8326
bb8c093b 8327static int iwl4965_mac_conf_ht(struct ieee80211_hw *hw,
8328 struct ieee80211_ht_capability *ht_cap,
8329 struct ieee80211_ht_additional_info *ht_extra)
8330{
bb8c093b 8331 struct iwl4965_priv *priv = hw->priv;
8332 int rs;
8333
8334 IWL_DEBUG_MAC80211("enter: \n");
8335
8336 rs = sta_ht_info_init(ht_cap, ht_extra, NULL, &priv->current_assoc_ht);
8337 iwl4965_set_rxon_chain(priv);
8338
8339 if (priv && priv->assoc_id &&
8340 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
8341 unsigned long flags;
8342
8343 spin_lock_irqsave(&priv->lock, flags);
8344 if (priv->beacon_int)
8345 queue_work(priv->workqueue, &priv->post_associate.work);
8346 else
8347 priv->call_post_assoc_from_beacon = 1;
8348 spin_unlock_irqrestore(&priv->lock, flags);
8349 }
8350
8351 IWL_DEBUG_MAC80211("leave: control channel %d\n",
8352 ht_extra->control_chan);
8353 return rs;
8354
8355}
8356
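/* Build the HT capability IE this device advertises: the 4K A-MSDU size
 * (HT_IE_MAX_AMSDU_SIZE_4K), greenfield, short GI for 20/40 MHz, MIMO
 * power save disabled (0x3), MCS 0-15, and MCS 32 only when a 40 MHz
 * (wide) channel is allowed. */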
bb8c093b 8357static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8358 struct ieee80211_ht_capability *ht_cap,
8359 u8 use_wide_chan)
8360{
8361 union ht_cap_info cap;
8362 union ht_param_info param_info;
8363
8364 memset(&cap, 0, sizeof(union ht_cap_info));
8365 memset(&param_info, 0, sizeof(union ht_param_info));
8366
8367 cap.maximal_amsdu_size = HT_IE_MAX_AMSDU_SIZE_4K;
8368 cap.green_field = 1;
8369 cap.short_GI20 = 1;
8370 cap.short_GI40 = 1;
8371 cap.supported_chan_width_set = use_wide_chan;
8372 cap.mimo_power_save_mode = 0x3;
8373
8374 param_info.max_rx_ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
8375 param_info.mpdu_density = CFG_HT_MPDU_DENSITY_DEF;
8376 ht_cap->capabilities_info = (__le16) cpu_to_le16(cap.val);
8377 ht_cap->mac_ht_params_info = (u8) param_info.val;
8378
8379 ht_cap->supported_mcs_set[0] = 0xff;
8380 ht_cap->supported_mcs_set[1] = 0xff;
8381 ht_cap->supported_mcs_set[4] =
8382 (cap.supported_chan_width_set) ? 0x1: 0x0;
8383}
8384
bb8c093b 8385static void iwl4965_mac_get_ht_capab(struct ieee80211_hw *hw,
8386 struct ieee80211_ht_capability *ht_cap)
8387{
8388 u8 use_wide_channel = 1;
bb8c093b 8389 struct iwl4965_priv *priv = hw->priv;
8390
8391 IWL_DEBUG_MAC80211("enter: \n");
8392 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
8393 use_wide_channel = 0;
8394
 8395 	/* no FAT (40 MHz) Tx allowed on 2.4 GHz */
8396 if (priv->phymode != MODE_IEEE80211A)
8397 use_wide_channel = 0;
8398
bb8c093b 8399 iwl4965_set_ht_capab(hw, ht_cap, use_wide_channel);
8400 IWL_DEBUG_MAC80211("leave: \n");
8401}
c8b0e6e1 8402#endif /*CONFIG_IWL4965_HT*/
8403
8404/*****************************************************************************
8405 *
8406 * sysfs attributes
8407 *
8408 *****************************************************************************/
8409
c8b0e6e1 8410#ifdef CONFIG_IWL4965_DEBUG
8411
8412/*
8413 * The following adds a new attribute to the sysfs representation
8414 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
8415 * used for controlling the debug level.
8416 *
8417 * See the level definitions in iwl for details.
8418 */
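/* Example usage (hypothetical path; the actual directory is named after
 * DRV_NAME, e.g. /sys/bus/pci/drivers/iwl4965/):
 *   echo 0x43fff > /sys/bus/pci/drivers/iwl4965/debug_level
 *   cat /sys/bus/pci/drivers/iwl4965/debug_level
 */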
8419
8420static ssize_t show_debug_level(struct device_driver *d, char *buf)
8421{
bb8c093b 8422 return sprintf(buf, "0x%08X\n", iwl4965_debug_level);
8423}
8424static ssize_t store_debug_level(struct device_driver *d,
8425 const char *buf, size_t count)
8426{
8427 char *p = (char *)buf;
8428 u32 val;
8429
8430 val = simple_strtoul(p, &p, 0);
8431 if (p == buf)
8432 printk(KERN_INFO DRV_NAME
8433 ": %s is not in hex or decimal form.\n", buf);
8434 else
bb8c093b 8435 iwl4965_debug_level = val;
8436
8437 return strnlen(buf, count);
8438}
8439
8440static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
8441 show_debug_level, store_debug_level);
8442
c8b0e6e1 8443#endif /* CONFIG_IWL4965_DEBUG */
8444
8445static ssize_t show_rf_kill(struct device *d,
8446 struct device_attribute *attr, char *buf)
8447{
8448 /*
8449 * 0 - RF kill not enabled
8450 * 1 - SW based RF kill active (sysfs)
8451 * 2 - HW based RF kill active
8452 * 3 - Both HW and SW based RF kill active
8453 */
bb8c093b 8454 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8455 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
8456 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
8457
8458 return sprintf(buf, "%i\n", val);
8459}
8460
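/* Writing '1' asserts the software RF kill; any other first character
 * de-asserts it (see iwl4965_radio_kill_sw()). */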
8461static ssize_t store_rf_kill(struct device *d,
8462 struct device_attribute *attr,
8463 const char *buf, size_t count)
8464{
bb8c093b 8465 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8466
8467 mutex_lock(&priv->mutex);
bb8c093b 8468 iwl4965_radio_kill_sw(priv, buf[0] == '1');
8469 mutex_unlock(&priv->mutex);
8470
8471 return count;
8472}
8473
8474static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
8475
8476static ssize_t show_temperature(struct device *d,
8477 struct device_attribute *attr, char *buf)
8478{
bb8c093b 8479 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c 8480
bb8c093b 8481 if (!iwl4965_is_alive(priv))
8482 return -EAGAIN;
8483
bb8c093b 8484 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv));
8485}
8486
8487static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
8488
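/* Dump the rate-scaling window kept for the AP station entry (IWL_AP_ID). */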
8489static ssize_t show_rs_window(struct device *d,
8490 struct device_attribute *attr,
8491 char *buf)
8492{
8493 struct iwl4965_priv *priv = d->driver_data;
8494 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
8495}
8496static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
8497
8498static ssize_t show_tx_power(struct device *d,
8499 struct device_attribute *attr, char *buf)
8500{
bb8c093b 8501 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8502 return sprintf(buf, "%d\n", priv->user_txpower_limit);
8503}
8504
8505static ssize_t store_tx_power(struct device *d,
8506 struct device_attribute *attr,
8507 const char *buf, size_t count)
8508{
bb8c093b 8509 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8510 char *p = (char *)buf;
8511 u32 val;
8512
8513 val = simple_strtoul(p, &p, 10);
8514 if (p == buf)
8515 printk(KERN_INFO DRV_NAME
8516 ": %s is not in decimal form.\n", buf);
8517 else
bb8c093b 8518 iwl4965_hw_reg_set_txpower(priv, val);
8519
8520 return count;
8521}
8522
8523static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
8524
8525static ssize_t show_flags(struct device *d,
8526 struct device_attribute *attr, char *buf)
8527{
bb8c093b 8528 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8529
8530 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
8531}
8532
8533static ssize_t store_flags(struct device *d,
8534 struct device_attribute *attr,
8535 const char *buf, size_t count)
8536{
bb8c093b 8537 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8538 u32 flags = simple_strtoul(buf, NULL, 0);
8539
8540 mutex_lock(&priv->mutex);
8541 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
8542 /* Cancel any currently running scans... */
bb8c093b 8543 if (iwl4965_scan_cancel_timeout(priv, 100))
8544 IWL_WARNING("Could not cancel scan.\n");
8545 else {
8546 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
8547 flags);
8548 priv->staging_rxon.flags = cpu_to_le32(flags);
bb8c093b 8549 iwl4965_commit_rxon(priv);
8550 }
8551 }
8552 mutex_unlock(&priv->mutex);
8553
8554 return count;
8555}
8556
8557static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
8558
8559static ssize_t show_filter_flags(struct device *d,
8560 struct device_attribute *attr, char *buf)
8561{
bb8c093b 8562 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8563
8564 return sprintf(buf, "0x%04X\n",
8565 le32_to_cpu(priv->active_rxon.filter_flags));
8566}
8567
8568static ssize_t store_filter_flags(struct device *d,
8569 struct device_attribute *attr,
8570 const char *buf, size_t count)
8571{
bb8c093b 8572 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8573 u32 filter_flags = simple_strtoul(buf, NULL, 0);
8574
8575 mutex_lock(&priv->mutex);
8576 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
8577 /* Cancel any currently running scans... */
bb8c093b 8578 if (iwl4965_scan_cancel_timeout(priv, 100))
8579 IWL_WARNING("Could not cancel scan.\n");
8580 else {
8581 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
8582 "0x%04X\n", filter_flags);
8583 priv->staging_rxon.filter_flags =
8584 cpu_to_le32(filter_flags);
bb8c093b 8585 iwl4965_commit_rxon(priv);
8586 }
8587 }
8588 mutex_unlock(&priv->mutex);
8589
8590 return count;
8591}
8592
8593static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
8594 store_filter_flags);
8595
8596static ssize_t show_tune(struct device *d,
8597 struct device_attribute *attr, char *buf)
8598{
bb8c093b 8599 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8600
8601 return sprintf(buf, "0x%04X\n",
8602 (priv->phymode << 8) |
8603 le16_to_cpu(priv->active_rxon.channel));
8604}
8605
bb8c093b 8606static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode);
8607
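/* "tune" packs the PHY mode into the high byte and the channel number into
 * the low byte, matching the value printed by show_tune() above. */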
8608static ssize_t store_tune(struct device *d,
8609 struct device_attribute *attr,
8610 const char *buf, size_t count)
8611{
bb8c093b 8612 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8613 char *p = (char *)buf;
8614 u16 tune = simple_strtoul(p, &p, 0);
8615 u8 phymode = (tune >> 8) & 0xff;
8616 u16 channel = tune & 0xff;
8617
8618 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
8619
8620 mutex_lock(&priv->mutex);
8621 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
8622 (priv->phymode != phymode)) {
bb8c093b 8623 const struct iwl4965_channel_info *ch_info;
b481de9c 8624
bb8c093b 8625 ch_info = iwl4965_get_channel_info(priv, phymode, channel);
8626 if (!ch_info) {
8627 IWL_WARNING("Requested invalid phymode/channel "
8628 "combination: %d %d\n", phymode, channel);
8629 mutex_unlock(&priv->mutex);
8630 return -EINVAL;
8631 }
8632
8633 /* Cancel any currently running scans... */
bb8c093b 8634 if (iwl4965_scan_cancel_timeout(priv, 100))
8635 IWL_WARNING("Could not cancel scan.\n");
8636 else {
8637 IWL_DEBUG_INFO("Committing phymode and "
8638 "rxon.channel = %d %d\n",
8639 phymode, channel);
8640
8641 iwl4965_set_rxon_channel(priv, phymode, channel);
8642 iwl4965_set_flags_for_phymode(priv, phymode);
b481de9c 8643
8644 iwl4965_set_rate(priv);
8645 iwl4965_commit_rxon(priv);
8646 }
8647 }
8648 mutex_unlock(&priv->mutex);
8649
8650 return count;
8651}
8652
8653static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
8654
c8b0e6e1 8655#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
8656
8657static ssize_t show_measurement(struct device *d,
8658 struct device_attribute *attr, char *buf)
8659{
8660 struct iwl4965_priv *priv = dev_get_drvdata(d);
8661 struct iwl4965_spectrum_notification measure_report;
8662 u32 size = sizeof(measure_report), len = 0, ofs = 0;
8663 u8 *data = (u8 *) & measure_report;
8664 unsigned long flags;
8665
8666 spin_lock_irqsave(&priv->lock, flags);
8667 if (!(priv->measurement_status & MEASUREMENT_READY)) {
8668 spin_unlock_irqrestore(&priv->lock, flags);
8669 return 0;
8670 }
8671 memcpy(&measure_report, &priv->measure_report, size);
8672 priv->measurement_status = 0;
8673 spin_unlock_irqrestore(&priv->lock, flags);
8674
8675 while (size && (PAGE_SIZE - len)) {
8676 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8677 PAGE_SIZE - len, 1);
8678 len = strlen(buf);
8679 if (PAGE_SIZE - len)
8680 buf[len++] = '\n';
8681
8682 ofs += 16;
8683 size -= min(size, 16U);
8684 }
8685
8686 return len;
8687}
8688
8689static ssize_t store_measurement(struct device *d,
8690 struct device_attribute *attr,
8691 const char *buf, size_t count)
8692{
bb8c093b 8693 struct iwl4965_priv *priv = dev_get_drvdata(d);
8694 struct ieee80211_measurement_params params = {
8695 .channel = le16_to_cpu(priv->active_rxon.channel),
8696 .start_time = cpu_to_le64(priv->last_tsf),
8697 .duration = cpu_to_le16(1),
8698 };
8699 u8 type = IWL_MEASURE_BASIC;
8700 u8 buffer[32];
8701 u8 channel;
8702
8703 if (count) {
8704 char *p = buffer;
8705 strncpy(buffer, buf, min(sizeof(buffer), count));
8706 channel = simple_strtoul(p, NULL, 0);
8707 if (channel)
8708 params.channel = channel;
8709
8710 p = buffer;
8711 while (*p && *p != ' ')
8712 p++;
8713 if (*p)
8714 type = simple_strtoul(p + 1, NULL, 0);
8715 }
8716
8717 IWL_DEBUG_INFO("Invoking measurement of type %d on "
8718 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 8719 iwl4965_get_measurement(priv, &params, type);
8720
8721 return count;
8722}
8723
8724static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
8725 show_measurement, store_measurement);
c8b0e6e1 8726#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */
8727
8728static ssize_t store_retry_rate(struct device *d,
8729 struct device_attribute *attr,
8730 const char *buf, size_t count)
8731{
bb8c093b 8732 struct iwl4965_priv *priv = dev_get_drvdata(d);
8733
8734 priv->retry_rate = simple_strtoul(buf, NULL, 0);
8735 if (priv->retry_rate <= 0)
8736 priv->retry_rate = 1;
8737
8738 return count;
8739}
8740
8741static ssize_t show_retry_rate(struct device *d,
8742 struct device_attribute *attr, char *buf)
8743{
bb8c093b 8744 struct iwl4965_priv *priv = dev_get_drvdata(d);
8745 return sprintf(buf, "%d", priv->retry_rate);
8746}
8747
8748static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
8749 store_retry_rate);
8750
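/* Accepted power levels are 1..IWL_POWER_LIMIT; out-of-range values (and
 * IWL_POWER_AC itself) fall back to IWL_POWER_AC, i.e. no power saving. */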
8751static ssize_t store_power_level(struct device *d,
8752 struct device_attribute *attr,
8753 const char *buf, size_t count)
8754{
bb8c093b 8755 struct iwl4965_priv *priv = dev_get_drvdata(d);
8756 int rc;
8757 int mode;
8758
8759 mode = simple_strtoul(buf, NULL, 0);
8760 mutex_lock(&priv->mutex);
8761
bb8c093b 8762 if (!iwl4965_is_ready(priv)) {
8763 rc = -EAGAIN;
8764 goto out;
8765 }
8766
8767 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
8768 mode = IWL_POWER_AC;
8769 else
8770 mode |= IWL_POWER_ENABLED;
8771
8772 if (mode != priv->power_mode) {
bb8c093b 8773 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
8774 if (rc) {
8775 IWL_DEBUG_MAC80211("failed setting power mode.\n");
8776 goto out;
8777 }
8778 priv->power_mode = mode;
8779 }
8780
8781 rc = count;
8782
8783 out:
8784 mutex_unlock(&priv->mutex);
8785 return rc;
8786}
8787
8788#define MAX_WX_STRING 80
8789
 8790 /* Values are in microseconds */
8791static const s32 timeout_duration[] = {
8792 350000,
8793 250000,
8794 75000,
8795 37000,
8796 25000,
8797};
8798static const s32 period_duration[] = {
8799 400000,
8800 700000,
8801 1000000,
8802 1000000,
8803 1000000
8804};
8805
8806static ssize_t show_power_level(struct device *d,
8807 struct device_attribute *attr, char *buf)
8808{
bb8c093b 8809 struct iwl4965_priv *priv = dev_get_drvdata(d);
8810 int level = IWL_POWER_LEVEL(priv->power_mode);
8811 char *p = buf;
8812
8813 p += sprintf(p, "%d ", level);
8814 switch (level) {
8815 case IWL_POWER_MODE_CAM:
8816 case IWL_POWER_AC:
8817 p += sprintf(p, "(AC)");
8818 break;
8819 case IWL_POWER_BATTERY:
8820 p += sprintf(p, "(BATTERY)");
8821 break;
8822 default:
8823 p += sprintf(p,
8824 "(Timeout %dms, Period %dms)",
8825 timeout_duration[level - 1] / 1000,
8826 period_duration[level - 1] / 1000);
8827 }
8828
8829 if (!(priv->power_mode & IWL_POWER_ENABLED))
8830 p += sprintf(p, " OFF\n");
8831 else
8832 p += sprintf(p, " \n");
8833
8834 return (p - buf + 1);
8835
8836}
8837
8838static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8839 store_power_level);
8840
8841static ssize_t show_channels(struct device *d,
8842 struct device_attribute *attr, char *buf)
8843{
bb8c093b 8844 struct iwl4965_priv *priv = dev_get_drvdata(d);
8845 int len = 0, i;
8846 struct ieee80211_channel *channels = NULL;
8847 const struct ieee80211_hw_mode *hw_mode = NULL;
8848 int count = 0;
8849
bb8c093b 8850 if (!iwl4965_is_ready(priv))
8851 return -EAGAIN;
8852
bb8c093b 8853 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211G);
b481de9c 8854 if (!hw_mode)
bb8c093b 8855 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211B);
8856 if (hw_mode) {
8857 channels = hw_mode->channels;
8858 count = hw_mode->num_channels;
8859 }
8860
8861 len +=
8862 sprintf(&buf[len],
8863 "Displaying %d channels in 2.4GHz band "
8864 "(802.11bg):\n", count);
8865
8866 for (i = 0; i < count; i++)
8867 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8868 channels[i].chan,
8869 channels[i].power_level,
8870 channels[i].
8871 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8872 " (IEEE 802.11h required)" : "",
8873 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8874 || (channels[i].
8875 flag &
8876 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8877 ", IBSS",
8878 channels[i].
8879 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8880 "active/passive" : "passive only");
8881
bb8c093b 8882 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211A);
8883 if (hw_mode) {
8884 channels = hw_mode->channels;
8885 count = hw_mode->num_channels;
8886 } else {
8887 channels = NULL;
8888 count = 0;
8889 }
8890
8891 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8892 "(802.11a):\n", count);
8893
8894 for (i = 0; i < count; i++)
8895 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8896 channels[i].chan,
8897 channels[i].power_level,
8898 channels[i].
8899 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8900 " (IEEE 802.11h required)" : "",
8901 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8902 || (channels[i].
8903 flag &
8904 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8905 ", IBSS",
8906 channels[i].
8907 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8908 "active/passive" : "passive only");
8909
8910 return len;
8911}
8912
8913static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8914
8915static ssize_t show_statistics(struct device *d,
8916 struct device_attribute *attr, char *buf)
8917{
8918 struct iwl4965_priv *priv = dev_get_drvdata(d);
8919 u32 size = sizeof(struct iwl4965_notif_statistics);
8920 u32 len = 0, ofs = 0;
8921 u8 *data = (u8 *) & priv->statistics;
8922 int rc = 0;
8923
bb8c093b 8924 if (!iwl4965_is_alive(priv))
8925 return -EAGAIN;
8926
8927 mutex_lock(&priv->mutex);
bb8c093b 8928 rc = iwl4965_send_statistics_request(priv);
8929 mutex_unlock(&priv->mutex);
8930
8931 if (rc) {
8932 len = sprintf(buf,
8933 "Error sending statistics request: 0x%08X\n", rc);
8934 return len;
8935 }
8936
8937 while (size && (PAGE_SIZE - len)) {
8938 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8939 PAGE_SIZE - len, 1);
8940 len = strlen(buf);
8941 if (PAGE_SIZE - len)
8942 buf[len++] = '\n';
8943
8944 ofs += 16;
8945 size -= min(size, 16U);
8946 }
8947
8948 return len;
8949}
8950
8951static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8952
8953static ssize_t show_antenna(struct device *d,
8954 struct device_attribute *attr, char *buf)
8955{
bb8c093b 8956 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c 8957
bb8c093b 8958 if (!iwl4965_is_alive(priv))
8959 return -EAGAIN;
8960
8961 return sprintf(buf, "%d\n", priv->antenna);
8962}
8963
8964static ssize_t store_antenna(struct device *d,
8965 struct device_attribute *attr,
8966 const char *buf, size_t count)
8967{
8968 int ant;
bb8c093b 8969 struct iwl4965_priv *priv = dev_get_drvdata(d);
8970
8971 if (count == 0)
8972 return 0;
8973
8974 if (sscanf(buf, "%1i", &ant) != 1) {
8975 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8976 return count;
8977 }
8978
8979 if ((ant >= 0) && (ant <= 2)) {
8980 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 8981 priv->antenna = (enum iwl4965_antenna)ant;
8982 } else
8983 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8984
8985
8986 return count;
8987}
8988
8989static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
8990
8991static ssize_t show_status(struct device *d,
8992 struct device_attribute *attr, char *buf)
8993{
8994 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8995 if (!iwl4965_is_alive(priv))
8996 return -EAGAIN;
8997 return sprintf(buf, "0x%08x\n", (int)priv->status);
8998}
8999
9000static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
9001
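/* Writing '1' to dump_errors / dump_events dumps the uCode error or event
 * log to the kernel log (see iwl4965_dump_nic_error_log() and
 * iwl4965_dump_nic_event_log()). */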
9002static ssize_t dump_error_log(struct device *d,
9003 struct device_attribute *attr,
9004 const char *buf, size_t count)
9005{
9006 char *p = (char *)buf;
9007
9008 if (p[0] == '1')
bb8c093b 9009 iwl4965_dump_nic_error_log((struct iwl4965_priv *)d->driver_data);
9010
9011 return strnlen(buf, count);
9012}
9013
9014static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
9015
9016static ssize_t dump_event_log(struct device *d,
9017 struct device_attribute *attr,
9018 const char *buf, size_t count)
9019{
9020 char *p = (char *)buf;
9021
9022 if (p[0] == '1')
bb8c093b 9023 iwl4965_dump_nic_event_log((struct iwl4965_priv *)d->driver_data);
9024
9025 return strnlen(buf, count);
9026}
9027
9028static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
9029
9030/*****************************************************************************
9031 *
9032 * driver setup and teardown
9033 *
9034 *****************************************************************************/
9035
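/* Create the driver workqueue and register every deferred work item
 * (up/restart, scanning, RF kill, beacon and association handling);
 * the interrupt tasklet is initialized last. */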
bb8c093b 9036static void iwl4965_setup_deferred_work(struct iwl4965_priv *priv)
9037{
9038 priv->workqueue = create_workqueue(DRV_NAME);
9039
9040 init_waitqueue_head(&priv->wait_command_queue);
9041
9042 INIT_WORK(&priv->up, iwl4965_bg_up);
9043 INIT_WORK(&priv->restart, iwl4965_bg_restart);
9044 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
9045 INIT_WORK(&priv->scan_completed, iwl4965_bg_scan_completed);
9046 INIT_WORK(&priv->request_scan, iwl4965_bg_request_scan);
9047 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
9048 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
9049 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
9050 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
9051 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
9052 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
9053 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
9054
9055 iwl4965_hw_setup_deferred_work(priv);
9056
9057 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 9058 iwl4965_irq_tasklet, (unsigned long)priv);
9059}
9060
bb8c093b 9061static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv)
b481de9c 9062{
bb8c093b 9063 iwl4965_hw_cancel_deferred_work(priv);
b481de9c 9064
3ae6a054 9065 cancel_delayed_work_sync(&priv->init_alive_start);
9066 cancel_delayed_work(&priv->scan_check);
9067 cancel_delayed_work(&priv->alive_start);
9068 cancel_delayed_work(&priv->post_associate);
9069 cancel_work_sync(&priv->beacon_update);
9070}
9071
bb8c093b 9072static struct attribute *iwl4965_sysfs_entries[] = {
9073 &dev_attr_antenna.attr,
9074 &dev_attr_channels.attr,
9075 &dev_attr_dump_errors.attr,
9076 &dev_attr_dump_events.attr,
9077 &dev_attr_flags.attr,
9078 &dev_attr_filter_flags.attr,
c8b0e6e1 9079#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
9080 &dev_attr_measurement.attr,
9081#endif
9082 &dev_attr_power_level.attr,
9083 &dev_attr_retry_rate.attr,
9084 &dev_attr_rf_kill.attr,
9085 &dev_attr_rs_window.attr,
9086 &dev_attr_statistics.attr,
9087 &dev_attr_status.attr,
9088 &dev_attr_temperature.attr,
9089 &dev_attr_tune.attr,
9090 &dev_attr_tx_power.attr,
9091
9092 NULL
9093};
9094
bb8c093b 9095static struct attribute_group iwl4965_attribute_group = {
b481de9c 9096 .name = NULL, /* put in device directory */
bb8c093b 9097 .attrs = iwl4965_sysfs_entries,
9098};
9099
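/* mac80211 callbacks implemented by this driver. Note that hw_scan is
 * cleared again in iwl4965_pci_probe() when the disable_hw_scan module
 * parameter is set. */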
9100static struct ieee80211_ops iwl4965_hw_ops = {
9101 .tx = iwl4965_mac_tx,
9102 .start = iwl4965_mac_start,
9103 .stop = iwl4965_mac_stop,
9104 .add_interface = iwl4965_mac_add_interface,
9105 .remove_interface = iwl4965_mac_remove_interface,
9106 .config = iwl4965_mac_config,
9107 .config_interface = iwl4965_mac_config_interface,
9108 .configure_filter = iwl4965_configure_filter,
9109 .set_key = iwl4965_mac_set_key,
9110 .get_stats = iwl4965_mac_get_stats,
9111 .get_tx_stats = iwl4965_mac_get_tx_stats,
9112 .conf_tx = iwl4965_mac_conf_tx,
9113 .get_tsf = iwl4965_mac_get_tsf,
9114 .reset_tsf = iwl4965_mac_reset_tsf,
9115 .beacon_update = iwl4965_mac_beacon_update,
9116 .erp_ie_changed = iwl4965_mac_erp_ie_changed,
c8b0e6e1 9117#ifdef CONFIG_IWL4965_HT
9118 .conf_ht = iwl4965_mac_conf_ht,
9119 .get_ht_capab = iwl4965_mac_get_ht_capab,
c8b0e6e1 9120#ifdef CONFIG_IWL4965_HT_AGG
9121 .ht_tx_agg_start = iwl4965_mac_ht_tx_agg_start,
9122 .ht_tx_agg_stop = iwl4965_mac_ht_tx_agg_stop,
9123 .ht_rx_agg_start = iwl4965_mac_ht_rx_agg_start,
9124 .ht_rx_agg_stop = iwl4965_mac_ht_rx_agg_stop,
9125#endif /* CONFIG_IWL4965_HT_AGG */
9126#endif /* CONFIG_IWL4965_HT */
bb8c093b 9127 .hw_scan = iwl4965_mac_hw_scan
9128};
9129
bb8c093b 9130static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9131{
9132 int err = 0;
bb8c093b 9133 struct iwl4965_priv *priv;
9134 struct ieee80211_hw *hw;
9135 int i;
9136
9137 /* Disabling hardware scan means that mac80211 will perform scans
9138 * "the hard way", rather than using device's scan. */
bb8c093b 9139 if (iwl4965_param_disable_hw_scan) {
b481de9c 9140 IWL_DEBUG_INFO("Disabling hw_scan\n");
bb8c093b 9141 iwl4965_hw_ops.hw_scan = NULL;
9142 }
9143
9144 if ((iwl4965_param_queues_num > IWL_MAX_NUM_QUEUES) ||
9145 (iwl4965_param_queues_num < IWL_MIN_NUM_QUEUES)) {
9146 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
9147 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
9148 err = -EINVAL;
9149 goto out;
9150 }
9151
9152 /* mac80211 allocates memory for this device instance, including
9153 * space for this driver's private structure */
bb8c093b 9154 hw = ieee80211_alloc_hw(sizeof(struct iwl4965_priv), &iwl4965_hw_ops);
9155 if (hw == NULL) {
9156 IWL_ERROR("Can not allocate network device\n");
9157 err = -ENOMEM;
9158 goto out;
9159 }
9160 SET_IEEE80211_DEV(hw, &pdev->dev);
9161
9162 hw->rate_control_algorithm = "iwl-4965-rs";
9163
9164 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
9165 priv = hw->priv;
9166 priv->hw = hw;
9167
9168 priv->pci_dev = pdev;
bb8c093b 9169 priv->antenna = (enum iwl4965_antenna)iwl4965_param_antenna;
c8b0e6e1 9170#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9171 iwl4965_debug_level = iwl4965_param_debug;
9172 atomic_set(&priv->restrict_refcnt, 0);
9173#endif
9174 priv->retry_rate = 1;
9175
9176 priv->ibss_beacon = NULL;
9177
9178 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
9179 * the range of signal quality values that we'll provide.
9180 * Negative values for level/noise indicate that we'll provide dBm.
9181 * For WE, at least, non-0 values here *enable* display of values
9182 * in app (iwconfig). */
9183 hw->max_rssi = -20; /* signal level, negative indicates dBm */
9184 hw->max_noise = -20; /* noise level, negative indicates dBm */
9185 hw->max_signal = 100; /* link quality indication (%) */
9186
9187 /* Tell mac80211 our Tx characteristics */
9188 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
9189
6440adb5 9190 /* Default value; 4 EDCA QOS priorities */
b481de9c 9191 hw->queues = 4;
9192#ifdef CONFIG_IWL4965_HT
9193#ifdef CONFIG_IWL4965_HT_AGG
6440adb5 9194 /* Enhanced value; more queues, to support 11n aggregation */
b481de9c 9195 hw->queues = 16;
9196#endif /* CONFIG_IWL4965_HT_AGG */
9197#endif /* CONFIG_IWL4965_HT */
9198
9199 spin_lock_init(&priv->lock);
9200 spin_lock_init(&priv->power_data.lock);
9201 spin_lock_init(&priv->sta_lock);
9202 spin_lock_init(&priv->hcmd_lock);
9203 spin_lock_init(&priv->lq_mngr.lock);
9204
9205 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
9206 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
9207
9208 INIT_LIST_HEAD(&priv->free_frames);
9209
9210 mutex_init(&priv->mutex);
9211 if (pci_enable_device(pdev)) {
9212 err = -ENODEV;
9213 goto out_ieee80211_free_hw;
9214 }
9215
9216 pci_set_master(pdev);
9217
6440adb5 9218 /* Clear the driver's (not device's) station table */
bb8c093b 9219 iwl4965_clear_stations_table(priv);
9220
9221 priv->data_retry_limit = -1;
9222 priv->ieee_channels = NULL;
9223 priv->ieee_rates = NULL;
9224 priv->phymode = -1;
9225
9226 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9227 if (!err)
9228 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
9229 if (err) {
9230 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
9231 goto out_pci_disable_device;
9232 }
9233
9234 pci_set_drvdata(pdev, priv);
9235 err = pci_request_regions(pdev, DRV_NAME);
9236 if (err)
9237 goto out_pci_disable_device;
6440adb5 9238
9239 /* We disable the RETRY_TIMEOUT register (0x41) to keep
9240 * PCI Tx retries from interfering with C3 CPU state */
9241 pci_write_config_byte(pdev, 0x41, 0x00);
6440adb5 9242
9243 priv->hw_base = pci_iomap(pdev, 0, 0);
9244 if (!priv->hw_base) {
9245 err = -ENODEV;
9246 goto out_pci_release_regions;
9247 }
9248
9249 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
9250 (unsigned long long) pci_resource_len(pdev, 0));
9251 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
9252
9253 /* Initialize module parameter values here */
9254
6440adb5 9255 /* Disable radio (SW RF KILL) via parameter when loading driver */
bb8c093b 9256 if (iwl4965_param_disable) {
9257 set_bit(STATUS_RF_KILL_SW, &priv->status);
9258 IWL_DEBUG_INFO("Radio disabled.\n");
9259 }
9260
9261 priv->iw_mode = IEEE80211_IF_TYPE_STA;
9262
9263 priv->ps_mode = 0;
9264 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
9265 priv->is_ht_enabled = 1;
9266 priv->channel_width = IWL_CHANNEL_WIDTH_40MHZ;
9267 priv->valid_antenna = 0x7; /* assume all 3 connected */
9268 priv->ps_mode = IWL_MIMO_PS_NONE;
b481de9c 9269
6440adb5 9270 /* Choose which receivers/antennas to use */
9271 iwl4965_set_rxon_chain(priv);
9272
9273 printk(KERN_INFO DRV_NAME
9274 ": Detected Intel Wireless WiFi Link 4965AGN\n");
9275
9276 /* Device-specific setup */
bb8c093b 9277 if (iwl4965_hw_set_hw_setting(priv)) {
9278 IWL_ERROR("failed to set hw settings\n");
9279 mutex_unlock(&priv->mutex);
9280 goto out_iounmap;
9281 }
9282
c8b0e6e1 9283#ifdef CONFIG_IWL4965_QOS
bb8c093b 9284 if (iwl4965_param_qos_enable)
9285 priv->qos_data.qos_enable = 1;
9286
bb8c093b 9287 iwl4965_reset_qos(priv);
9288
9289 priv->qos_data.qos_active = 0;
9290 priv->qos_data.qos_cap.val = 0;
c8b0e6e1 9291#endif /* CONFIG_IWL4965_QOS */
b481de9c 9292
9293 iwl4965_set_rxon_channel(priv, MODE_IEEE80211G, 6);
9294 iwl4965_setup_deferred_work(priv);
9295 iwl4965_setup_rx_handlers(priv);
9296
9297 priv->rates_mask = IWL_RATES_MASK;
9298 /* If power management is turned on, default to AC mode */
9299 priv->power_mode = IWL_POWER_AC;
9300 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
9301
bb8c093b 9302 iwl4965_disable_interrupts(priv);
49df2b33 9303
9304 pci_enable_msi(pdev);
9305
bb8c093b 9306 err = request_irq(pdev->irq, iwl4965_isr, IRQF_SHARED, DRV_NAME, priv);
9307 if (err) {
9308 IWL_ERROR("Error allocating IRQ %d\n", pdev->irq);
9309 goto out_disable_msi;
9310 }
9311
9312 mutex_lock(&priv->mutex);
9313
bb8c093b 9314 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
9315 if (err) {
9316 IWL_ERROR("failed to create sysfs device attributes\n");
9317 mutex_unlock(&priv->mutex);
9318 goto out_release_irq;
9319 }
9320
9321 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
9322 * ucode filename and max sizes are card-specific. */
bb8c093b 9323 err = iwl4965_read_ucode(priv);
9324 if (err) {
9325 IWL_ERROR("Could not read microcode: %d\n", err);
9326 mutex_unlock(&priv->mutex);
9327 goto out_pci_alloc;
9328 }
9329
9330 mutex_unlock(&priv->mutex);
9331
01ebd063 9332 IWL_DEBUG_INFO("Queueing UP work.\n");
9333
9334 queue_work(priv->workqueue, &priv->up);
9335
9336 return 0;
9337
9338 out_pci_alloc:
bb8c093b 9339 iwl4965_dealloc_ucode_pci(priv);
b481de9c 9340
bb8c093b 9341 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
9342
9343 out_release_irq:
9344 free_irq(pdev->irq, priv);
9345
9346 out_disable_msi:
9347 pci_disable_msi(pdev);
9348 destroy_workqueue(priv->workqueue);
9349 priv->workqueue = NULL;
bb8c093b 9350 iwl4965_unset_hw_setting(priv);
9351
9352 out_iounmap:
9353 pci_iounmap(pdev, priv->hw_base);
9354 out_pci_release_regions:
9355 pci_release_regions(pdev);
9356 out_pci_disable_device:
9357 pci_disable_device(pdev);
9358 pci_set_drvdata(pdev, NULL);
9359 out_ieee80211_free_hw:
9360 ieee80211_free_hw(priv->hw);
9361 out:
9362 return err;
9363}
9364
bb8c093b 9365static void iwl4965_pci_remove(struct pci_dev *pdev)
b481de9c 9366{
bb8c093b 9367 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
9368 struct list_head *p, *q;
9369 int i;
9370
9371 if (!priv)
9372 return;
9373
9374 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
9375
b481de9c 9376 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 9377
bb8c093b 9378 iwl4965_down(priv);
9379
9380 /* Free MAC hash list for ADHOC */
9381 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
9382 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
9383 list_del(p);
bb8c093b 9384 kfree(list_entry(p, struct iwl4965_ibss_seq, list));
9385 }
9386 }
9387
bb8c093b 9388 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c 9389
bb8c093b 9390 iwl4965_dealloc_ucode_pci(priv);
9391
9392 if (priv->rxq.bd)
9393 iwl4965_rx_queue_free(priv, &priv->rxq);
9394 iwl4965_hw_txq_ctx_free(priv);
b481de9c 9395
9396 iwl4965_unset_hw_setting(priv);
9397 iwl4965_clear_stations_table(priv);
9398
9399 if (priv->mac80211_registered) {
9400 ieee80211_unregister_hw(priv->hw);
bb8c093b 9401 iwl4965_rate_control_unregister(priv->hw);
9402 }
9403
9404 /*netif_stop_queue(dev); */
9405 flush_workqueue(priv->workqueue);
9406
bb8c093b 9407 /* ieee80211_unregister_hw calls iwl4965_mac_stop, which flushes
9408 * priv->workqueue... so we can't take down the workqueue
9409 * until now... */
9410 destroy_workqueue(priv->workqueue);
9411 priv->workqueue = NULL;
9412
9413 free_irq(pdev->irq, priv);
9414 pci_disable_msi(pdev);
9415 pci_iounmap(pdev, priv->hw_base);
9416 pci_release_regions(pdev);
9417 pci_disable_device(pdev);
9418 pci_set_drvdata(pdev, NULL);
9419
9420 kfree(priv->channel_info);
9421
9422 kfree(priv->ieee_channels);
9423 kfree(priv->ieee_rates);
9424
9425 if (priv->ibss_beacon)
9426 dev_kfree_skb(priv->ibss_beacon);
9427
9428 ieee80211_free_hw(priv->hw);
9429}
9430
9431#ifdef CONFIG_PM
9432
bb8c093b 9433static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 9434{
bb8c093b 9435 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9436
9437 set_bit(STATUS_IN_SUSPEND, &priv->status);
9438
9439 /* Take down the device; powers it off, etc. */
bb8c093b 9440 iwl4965_down(priv);
9441
9442 if (priv->mac80211_registered)
9443 ieee80211_stop_queues(priv->hw);
9444
9445 pci_save_state(pdev);
9446 pci_disable_device(pdev);
9447 pci_set_power_state(pdev, PCI_D3hot);
9448
9449 return 0;
9450}
9451
bb8c093b 9452static void iwl4965_resume(struct iwl4965_priv *priv)
9453{
9454 unsigned long flags;
9455
 9456 	/* The following is a temporary workaround due to the
9457 * suspend / resume not fully initializing the NIC correctly.
9458 * Without all of the following, resume will not attempt to take
9459 * down the NIC (it shouldn't really need to) and will just try
9460 * and bring the NIC back up. However that fails during the
9461 * ucode verification process. This then causes iwl4965_down to be
9462 * called *after* iwl4965_hw_nic_init() has succeeded -- which
9463 * then lets the next init sequence succeed. So, we've
9464 * replicated all of that NIC init code here... */
9465
bb8c093b 9466 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 9467
bb8c093b 9468 iwl4965_hw_nic_init(priv);
b481de9c 9469
9470 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9471 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 9472 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
9473 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
9474 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9475 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9476
9477 /* tell the device to stop sending interrupts */
bb8c093b 9478 iwl4965_disable_interrupts(priv);
9479
9480 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 9481 iwl4965_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c 9482
9483 if (!iwl4965_grab_nic_access(priv)) {
9484 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
ac17a947 9485 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 9486 iwl4965_release_nic_access(priv);
9487 }
9488 spin_unlock_irqrestore(&priv->lock, flags);
9489
9490 udelay(5);
9491
bb8c093b 9492 iwl4965_hw_nic_reset(priv);
9493
9494 /* Bring the device back up */
9495 clear_bit(STATUS_IN_SUSPEND, &priv->status);
9496 queue_work(priv->workqueue, &priv->up);
9497}
9498
bb8c093b 9499static int iwl4965_pci_resume(struct pci_dev *pdev)
b481de9c 9500{
bb8c093b 9501 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
9502 int err;
9503
9504 printk(KERN_INFO "Coming out of suspend...\n");
9505
9506 pci_set_power_state(pdev, PCI_D0);
9507 err = pci_enable_device(pdev);
9508 pci_restore_state(pdev);
9509
9510 /*
9511 * Suspend/Resume resets the PCI configuration space, so we have to
9512 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
9513 * from interfering with C3 CPU state. pci_restore_state won't help
9514 * here since it only restores the first 64 bytes pci config header.
9515 */
9516 pci_write_config_byte(pdev, 0x41, 0x00);
9517
bb8c093b 9518 iwl4965_resume(priv);
9519
9520 return 0;
9521}
9522
9523#endif /* CONFIG_PM */
9524
9525/*****************************************************************************
9526 *
9527 * driver and module entry point
9528 *
9529 *****************************************************************************/
9530
bb8c093b 9531static struct pci_driver iwl4965_driver = {
b481de9c 9532 .name = DRV_NAME,
9533 .id_table = iwl4965_hw_card_ids,
9534 .probe = iwl4965_pci_probe,
9535 .remove = __devexit_p(iwl4965_pci_remove),
b481de9c 9536#ifdef CONFIG_PM
9537 .suspend = iwl4965_pci_suspend,
9538 .resume = iwl4965_pci_resume,
9539#endif
9540};
9541
bb8c093b 9542static int __init iwl4965_init(void)
9543{
9544
9545 int ret;
9546 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
9547 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
bb8c093b 9548 ret = pci_register_driver(&iwl4965_driver);
9549 if (ret) {
9550 IWL_ERROR("Unable to initialize PCI module\n");
9551 return ret;
9552 }
c8b0e6e1 9553#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9554 ret = driver_create_file(&iwl4965_driver.driver, &driver_attr_debug_level);
9555 if (ret) {
9556 IWL_ERROR("Unable to create driver sysfs file\n");
bb8c093b 9557 pci_unregister_driver(&iwl4965_driver);
9558 return ret;
9559 }
9560#endif
9561
9562 return ret;
9563}
9564
bb8c093b 9565static void __exit iwl4965_exit(void)
b481de9c 9566{
c8b0e6e1 9567#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9568 driver_remove_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c 9569#endif
bb8c093b 9570 pci_unregister_driver(&iwl4965_driver);
9571}
9572
bb8c093b 9573module_param_named(antenna, iwl4965_param_antenna, int, 0444);
b481de9c 9574MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
bb8c093b 9575module_param_named(disable, iwl4965_param_disable, int, 0444);
b481de9c 9576MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
bb8c093b 9577module_param_named(hwcrypto, iwl4965_param_hwcrypto, int, 0444);
9578MODULE_PARM_DESC(hwcrypto,
9579 "using hardware crypto engine (default 0 [software])\n");
bb8c093b 9580module_param_named(debug, iwl4965_param_debug, int, 0444);
b481de9c 9581MODULE_PARM_DESC(debug, "debug output mask");
bb8c093b 9582module_param_named(disable_hw_scan, iwl4965_param_disable_hw_scan, int, 0444);
9583MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
9584
bb8c093b 9585module_param_named(queues_num, iwl4965_param_queues_num, int, 0444);
9586MODULE_PARM_DESC(queues_num, "number of hw queues.");
9587
9588/* QoS */
bb8c093b 9589module_param_named(qos_enable, iwl4965_param_qos_enable, int, 0444);
9590MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
9591
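/* Example (hypothetical, assuming the module is built as iwl4965.ko):
 *   modprobe iwl4965 disable_hw_scan=1 debug=0x43fff
 */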
9592module_exit(iwl4965_exit);
9593module_init(iwl4965_init);