/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/mac80211.h>

#include <asm/div64.h>

#include "iwl-eeprom.h"
#include "iwl-4965.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-sta.h"

static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv,
					     struct iwl4965_tx_queue *txq);

/******************************************************************************
 *
 * module boilerplate
 *
 ******************************************************************************/

/*
 * module name, copyright, version, etc.
 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
 */

#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link 4965AGN driver for Linux"

#ifdef CONFIG_IWLWIFI_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
#define VS "s"
#else
#define VS
#endif

#define DRV_VERSION     IWLWIFI_VERSION VD VS


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");

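/**
 * ieee80211_get_qos_ctrl - get a pointer to a frame's QoS control field
 *
 * Returns the QoS control field of a QoS data frame, or NULL if @hdr is
 * not a QoS data frame.
 */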
__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
{
	u16 fc = le16_to_cpu(hdr->frame_control);
	int hdr_len = ieee80211_get_hdrlen(fc);

	if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
		return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
	return NULL;
}

static const struct ieee80211_supported_band *iwl4965_get_hw_mode(
		struct iwl_priv *priv, enum ieee80211_band band)
{
	return priv->hw->wiphy->bands[band];
}

static int iwl4965_is_empty_essid(const char *essid, int essid_len)
{
	/* Single white space is for Linksys APs */
	if (essid_len == 1 && essid[0] == ' ')
		return 1;

	/* Otherwise, if the entire essid is 0, we assume it is hidden */
	while (essid_len) {
		essid_len--;
		if (essid[essid_len] != '\0')
			return 0;
	}

	return 1;
}

static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
{
	static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
	const char *s = essid;
	char *d = escaped;

	if (iwl4965_is_empty_essid(essid, essid_len)) {
		memcpy(escaped, "<hidden>", sizeof("<hidden>"));
		return escaped;
	}

	essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
	while (essid_len--) {
		if (*s == '\0') {
			*d++ = '\\';
			*d++ = '0';
			s++;
		} else
			*d++ = *s++;
	}
	*d = '\0';
	return escaped;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done IRQ), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 * The 4965 operates with up to 17 queues:  One receive queue, one transmit
 * queue (#4) for sending commands to the device firmware, and 15 other
 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

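/**
 * iwl4965_queue_space - return the number of available slots in a queue
 *
 * Keeps a reserve of two entries so that a completely full queue is never
 * confused with an empty one.
 */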
int iwl4965_queue_space(const struct iwl4965_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}


static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
{
	return q->write_ptr > q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
{
	/* This is for scan command, the big buffer at end of command array */
	if (is_huge)
		return q->n_window;	/* must be power of 2 */

	/* Otherwise, use normal size buffers */
	return index & (q->n_window - 1);
}

/**
 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl4965_queue_init(struct iwl_priv *priv, struct iwl4965_queue *q,
			      int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl4965_tx_queue_alloc(struct iwl_priv *priv,
				  struct iwl4965_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->bd = pci_alloc_consistent(dev,
			sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);

	if (!txq->bd) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	if (txq->txb) {
		kfree(txq->txb);
		txq->txb = NULL;
	}

	return -ENOMEM;
}

/**
 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl4965_tx_queue_init(struct iwl_priv *priv,
			  struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
{
	struct pci_dev *dev = priv->pci_dev;
	int len;
	int rc = 0;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd) * slots_num;
	if (txq_id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;
	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
	if (!txq->cmd)
		return -ENOMEM;

	/* Alloc driver data array and TFD circular buffer */
	rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
	if (rc) {
		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

		return -ENOMEM;
	}
	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl4965_hw_tx_queue_init(priv, txq);

	return 0;
}

/**
 * iwl4965_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
{
	struct iwl4965_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl4965_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
				    txq->q.n_bd, txq->bd, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	if (txq->txb) {
		kfree(txq->txb);
		txq->txb = NULL;
	}

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

/*************** STATION TABLE MANAGEMENT ****
 * mac80211 should be examined to determine if sta_info is duplicating
 * the functionality provided here
 */

/**************************************************************/

#if 0 /* temporary disable till we add real remove station */
/**
 * iwl4965_remove_station - Remove driver's knowledge of station.
 *
 * NOTE: This does not remove station from device's station table.
 */
static u8 iwl4965_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
{
	int index = IWL_INVALID_STATION;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);

	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_setting.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
			if (priv->stations[i].used &&
			    !compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

	if (unlikely(index == IWL_INVALID_STATION))
		goto out;

	if (priv->stations[index].used) {
		priv->stations[index].used = 0;
		priv->num_stations--;
	}

	BUG_ON(priv->num_stations < 0);

out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return 0;
}
#endif

/**
 * iwl4965_add_station_flags - Add station to tables in driver and device
 */
u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr,
			     int is_ap, u8 flags, void *ht_data)
{
	int i;
	int index = IWL_INVALID_STATION;
	struct iwl4965_station_entry *station;
	unsigned long flags_spin;
	DECLARE_MAC_BUF(mac);

	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_setting.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
			if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

			if (!priv->stations[i].used &&
			    index == IWL_INVALID_STATION)
				index = i;
		}


	/* These two conditions have the same outcome, but keep them separate
	   since they have different meanings */
	if (unlikely(index == IWL_INVALID_STATION)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return index;
	}

	if (priv->stations[index].used &&
	    !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return index;
	}


	IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
	station = &priv->stations[index];
	station->used = 1;
	priv->num_stations++;

	/* Set up the REPLY_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = index;
	station->sta.station_flags = 0;

#ifdef CONFIG_IWL4965_HT
	/* BCAST station and IBSS stations do not work in HT mode */
	if (index != priv->hw_setting.bcast_sta_id &&
	    priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
		iwl4965_set_ht_add_station(priv, index,
					   (struct ieee80211_ht_info *) ht_data);
#endif /*CONFIG_IWL4965_HT*/

	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	/* Add station to device's station table */
	iwl4965_send_add_station(priv, &station->sta, flags);
	return index;

}



/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl4965_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl4965_queue *q = &txq->q;
	struct iwl4965_tfd_frame *tfd;
	u32 *control_flags;
	struct iwl_cmd *out_cmd;
	u32 idx;
	u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
	dma_addr_t phys_addr;
	int ret;
	unsigned long flags;

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	control_flags = (u32 *) tfd;

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = &txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);

	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
			offsetof(struct iwl_cmd, hdr);
	iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl4965_tx_queue_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

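/**
 * iwl4965_set_rxon_hwcrypto - enable or disable hardware decryption
 *
 * Updates the filter flags in the staging RXON so the device either
 * decrypts received frames itself or leaves them for software decryption.
 */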
static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
{
	struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;

}

int iwl4965_send_statistics_request(struct iwl_priv *priv)
{
	u32 flags = 0;
	return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
				sizeof(flags), &flags);
}

/**
 * iwl4965_rxon_add_station - add station into station table.
 *
 * there is only one AP station with id= IWL_AP_ID
 * NOTE: mutex must be held before calling this function
 */
static int iwl4965_rxon_add_station(struct iwl_priv *priv,
				    const u8 *addr, int is_ap)
{
	u8 sta_id;

	/* Add station to device's station table */
#ifdef CONFIG_IWL4965_HT
	struct ieee80211_conf *conf = &priv->hw->conf;
	struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;

	if ((is_ap) &&
	    (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
	    (priv->iw_mode == IEEE80211_IF_TYPE_STA))
		sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
						   0, cur_ht_config);
	else
#endif /* CONFIG_IWL4965_HT */
		sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
						   0, NULL);

	/* Set up default rate scaling table in device's station table */
	iwl4965_add_station(priv, addr, is_ap);

	return sta_id;
}

/**
 * iwl4965_check_rxon_cmd - validate RXON structure is valid
 *
 * NOTE: This is really only useful during development and can eventually
 * be #ifdef'd out once the driver is stable and folks aren't actively
 * making changes
 */
static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon)
{
	int error = 0;
	int counter = 1;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		error |= le32_to_cpu(rxon->flags &
				(RXON_FLG_TGJ_NARROW_BAND_MSK |
				 RXON_FLG_RADAR_DETECT_MSK));
		if (error)
			IWL_WARNING("check 24G fields %d | %d\n",
				    counter++, error);
	} else {
		error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
				0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
		if (error)
			IWL_WARNING("check 52 fields %d | %d\n",
				    counter++, error);
		error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
		if (error)
			IWL_WARNING("check 52 CCK %d | %d\n",
				    counter++, error);
	}
	error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
	if (error)
		IWL_WARNING("check mac addr %d | %d\n", counter++, error);

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
		  ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
	if (error)
		IWL_WARNING("check basic rate %d | %d\n", counter++, error);

	error |= (le16_to_cpu(rxon->assoc_id) > 2007);
	if (error)
		IWL_WARNING("check assoc id %d | %d\n", counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
	if (error)
		IWL_WARNING("check CCK and short slot %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
	if (error)
		IWL_WARNING("check CCK & auto detect %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
	if (error)
		IWL_WARNING("check TGG and auto detect %d | %d\n",
			    counter++, error);

	if (error)
		IWL_WARNING("Tuning to channel %d\n",
			    le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERROR("Not a valid iwl4965_rxon_assoc_cmd field values\n");
		return -1;
	}
	return 0;
}

/**
 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
static int iwl4965_full_rxon_required(struct iwl_priv *priv)
{

	/* These items are only settable from the full RXON command */
	if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
	    compare_ether_addr(priv->staging_rxon.bssid_addr,
			       priv->active_rxon.bssid_addr) ||
	    compare_ether_addr(priv->staging_rxon.node_addr,
			       priv->active_rxon.node_addr) ||
	    compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
			       priv->active_rxon.wlap_bssid_addr) ||
	    (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
	    (priv->staging_rxon.channel != priv->active_rxon.channel) ||
	    (priv->staging_rxon.air_propagation !=
	     priv->active_rxon.air_propagation) ||
	    (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
	    (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
	    (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
	    (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
		return 1;

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
	    (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
		return 1;

	/* Check if we are switching association toggle */
	if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
	    (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
		return 1;

	return 0;
}

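/**
 * iwl4965_send_rxon_assoc - send RXON_ASSOC if the staging config changed
 *
 * Compares the RXON_ASSOC-settable fields of staging_rxon and active_rxon,
 * and issues a synchronous REPLY_RXON_ASSOC command only when they differ.
 */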
static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
{
	int rc = 0;
	struct iwl4965_rx_packet *res = NULL;
	struct iwl4965_rxon_assoc_cmd rxon_assoc;
	struct iwl_host_cmd cmd = {
		.id = REPLY_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.meta.flags = CMD_WANT_SKB,
		.data = &rxon_assoc,
	};
	const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
	const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_ht_single_stream_basic_rates ==
	     rxon2->ofdm_ht_single_stream_basic_rates) &&
	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
	    (rxon1->rx_chain == rxon2->rx_chain) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = priv->staging_rxon.flags;
	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
	rxon_assoc.reserved = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
	    priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
	    priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;

	rc = iwl_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
		rc = -EIO;
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

/**
 * iwl4965_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data.  This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
static int iwl4965_commit_rxon(struct iwl_priv *priv)
{
	/* cast away the const for active_rxon in this function */
	struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
	DECLARE_MAC_BUF(mac);
	int rc = 0;

	if (!iwl_is_alive(priv))
		return -1;

	/* always get timestamp with Rx frame */
	priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

	rc = iwl4965_check_rxon_cmd(&priv->staging_rxon);
	if (rc) {
		IWL_ERROR("Invalid RXON configuration.  Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl4965_full_rxon_required(priv)) {
		rc = iwl4965_send_rxon_assoc(priv);
		if (rc) {
			IWL_ERROR("Error setting RXON_ASSOC "
				  "configuration (%d).\n", rc);
			return rc;
		}

		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));

		return 0;
	}

	/* station table will be cleared */
	priv->assoc_station_added = 0;

#ifdef CONFIG_IWL4965_SENSITIVITY
	priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
	if (!priv->error_recovering)
		priv->start_calib = 0;

	iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
#endif /* CONFIG_IWL4965_SENSITIVITY */

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_is_associated(priv) &&
	    (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
		IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
				      sizeof(struct iwl4965_rxon_cmd),
				      &priv->active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERROR("Error clearing ASSOC_MSK on current "
				  "configuration (%d).\n", rc);
			return rc;
		}
	}

	IWL_DEBUG_INFO("Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %s\n",
		       ((priv->staging_rxon.filter_flags &
			 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
		       le16_to_cpu(priv->staging_rxon.channel),
		       print_mac(mac, priv->staging_rxon.bssid_addr));

	iwl4965_set_rxon_hwcrypto(priv, priv->cfg->mod_params->hw_crypto);
	/* Apply the new configuration */
	rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon);
	if (rc) {
		IWL_ERROR("Error setting new configuration (%d).\n", rc);
		return rc;
	}

	iwlcore_clear_stations_table(priv);

#ifdef CONFIG_IWL4965_SENSITIVITY
	if (!priv->error_recovering)
		priv->start_calib = 0;

	priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
	iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
#endif /* CONFIG_IWL4965_SENSITIVITY */

	memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = iwl4965_hw_reg_send_txpower(priv);
	if (rc) {
		IWL_ERROR("Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Add the broadcast address so we can send broadcast frames */
	if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) ==
	    IWL_INVALID_STATION) {
		IWL_ERROR("Error adding BROADCAST address for transmit.\n");
		return -EIO;
	}

	/* If we have set the ASSOC_MSK and we are in BSS mode then
	 * add the IWL_AP_ID to the station rate table */
	if (iwl_is_associated(priv) &&
	    (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
		if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
		    == IWL_INVALID_STATION) {
			IWL_ERROR("Error adding AP address for transmit.\n");
			return -EIO;
		}
		priv->assoc_station_added = 1;
		if (priv->default_wep_key &&
		    iwl_send_static_wepkey_cmd(priv, 0))
			IWL_ERROR("Could not send WEP static key.\n");
	}

	return 0;
}

static int iwl4965_send_bt_config(struct iwl_priv *priv)
{
	struct iwl4965_bt_cmd bt_cmd = {
		.flags = 3,
		.lead_time = 0xAA,
		.max_kill = 1,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
				sizeof(struct iwl4965_bt_cmd), &bt_cmd);
}

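/**
 * iwl4965_send_scan_abort - send REPLY_SCAN_ABORT_CMD to the uCode
 *
 * The abort command is only sent when a scan is actually active in the
 * hardware; otherwise the STATUS_SCAN_ABORTING bit is simply cleared.
 */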
static int iwl4965_send_scan_abort(struct iwl_priv *priv)
{
	int rc = 0;
	struct iwl4965_rx_packet *res;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.meta.flags = CMD_WANT_SKB,
	};

	/* If there isn't a scan actively going on in the hardware
	 * then we are in between scan bands and not actually
	 * actively scanning, so don't send the abort command */
	if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return 0;
	}

	rc = iwl_send_cmd_sync(priv, &cmd);
	if (rc) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return rc;
	}

	res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
	if (res->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		clear_bit(STATUS_SCAN_HW, &priv->status);
	}

	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

static int iwl4965_card_state_sync_callback(struct iwl_priv *priv,
					    struct iwl_cmd *cmd,
					    struct sk_buff *skb)
{
	return 1;
}

/*
 * CARD_STATE_CMD
 *
 * Use: Sets the device's internal card state to enable, disable, or halt
 *
 * When in the 'enable' state the card operates as normal.
 * When in the 'disable' state, the card enters into a low power mode.
 * When in the 'halt' state, the card is shut down and must be fully
 * restarted to come back on.
 */
static int iwl4965_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_CARD_STATE_CMD,
		.len = sizeof(u32),
		.data = &flags,
		.meta.flags = meta_flag,
	};

	if (meta_flag & CMD_ASYNC)
		cmd.meta.u.callback = iwl4965_card_state_sync_callback;

	return iwl_send_cmd(priv, &cmd);
}

static int iwl4965_add_sta_sync_callback(struct iwl_priv *priv,
					 struct iwl_cmd *cmd, struct sk_buff *skb)
{
	struct iwl4965_rx_packet *res = NULL;

	if (!skb) {
		IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
		return 1;
	}

	res = (struct iwl4965_rx_packet *)skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
			  res->hdr.flags);
		return 1;
	}

	switch (res->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		break;
	default:
		break;
	}

	/* We didn't cache the SKB; let the caller free it */
	return 1;
}

int iwl4965_send_add_station(struct iwl_priv *priv,
			     struct iwl4965_addsta_cmd *sta, u8 flags)
{
	struct iwl4965_rx_packet *res = NULL;
	int rc = 0;
	struct iwl_host_cmd cmd = {
		.id = REPLY_ADD_STA,
		.len = sizeof(struct iwl4965_addsta_cmd),
		.meta.flags = flags,
		.data = sta,
	};

	if (flags & CMD_ASYNC)
		cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
	else
		cmd.meta.flags |= CMD_WANT_SKB;

	rc = iwl_send_cmd(priv, &cmd);

	if (rc || (flags & CMD_ASYNC))
		return rc;

	res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
			  res->hdr.flags);
		rc = -EIO;
	}

	if (rc == 0) {
		switch (res->u.add_sta.status) {
		case ADD_STA_SUCCESS_MSK:
			IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
			break;
		default:
			rc = -EIO;
			IWL_WARNING("REPLY_ADD_STA failed\n");
			break;
		}
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

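/**
 * iwl4965_set_ccmp_dynamic_key_info - program a CCMP key mapping key
 *
 * Stores the key material in the driver's station table, reserves a slot
 * in the uCode key table and pushes the updated station entry to the
 * device with an asynchronous REPLY_ADD_STA command.
 */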
static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
					     struct ieee80211_key_conf *keyconf,
					     u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);

	if (sta_id == priv->hw_setting.bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = keyconf->keyidx;

	key_flags &= ~STA_KEY_FLG_INVALID;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.alg = keyconf->alg;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
	       keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
	       keyconf->keylen);

	priv->stations[sta_id].sta.key.key_offset =
			iwl_get_free_ucode_key_index(priv);
	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
	return iwl4965_send_add_station(priv,
				&priv->stations[sta_id].sta, CMD_ASYNC);
}

static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
					     struct ieee80211_key_conf *keyconf,
					     u8 sta_id)
{
	unsigned long flags;
	int ret = 0;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
	keyconf->hw_key_idx = keyconf->keyidx;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].keyinfo.alg = keyconf->alg;
	priv->stations[sta_id].keyinfo.conf = keyconf;
	priv->stations[sta_id].keyinfo.keylen = 16;

	/* This copy is actually not needed: we get the key with each TX */
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}

static int iwl4965_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
	unsigned long flags;

	priv->key_mapping_key = 0;

	spin_lock_irqsave(&priv->sta_lock, flags);
	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
				&priv->ucode_key_table))
		IWL_ERROR("index %d not used in uCode key table.\n",
			  priv->stations[sta_id].sta.key.key_offset);
	memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl4965_hw_key));
	memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo));
	priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
	return 0;
}

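/**
 * iwl4965_set_dynamic_key - set up a pairwise (key mapping) key
 *
 * Dispatches to the CCMP, TKIP or WEP helper according to the key
 * algorithm requested by mac80211.
 */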
static int iwl4965_set_dynamic_key(struct iwl_priv *priv,
				   struct ieee80211_key_conf *key, u8 sta_id)
{
	int ret;

	priv->key_mapping_key = 1;

	switch (key->alg) {
	case ALG_CCMP:
		ret = iwl4965_set_ccmp_dynamic_key_info(priv, key, sta_id);
		break;
	case ALG_TKIP:
		ret = iwl4965_set_tkip_dynamic_key_info(priv, key, sta_id);
		break;
	case ALG_WEP:
		ret = iwl_set_wep_dynamic_key_info(priv, key, sta_id);
		break;
	default:
		IWL_ERROR("Unknown alg: %s alg = %d\n", __func__, key->alg);
		ret = -EINVAL;
	}

	return ret;
}

static void iwl4965_clear_free_frames(struct iwl_priv *priv)
{
	struct list_head *element;

	IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct iwl4965_frame, list));
		priv->frames_count--;
	}

	if (priv->frames_count) {
		IWL_WARNING("%d frames still in use.  Did we lose one?\n",
			    priv->frames_count);
		priv->frames_count = 0;
	}
}

static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
{
	struct iwl4965_frame *frame;
	struct list_head *element;
	if (list_empty(&priv->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
			IWL_ERROR("Could not allocate frame!\n");
			return NULL;
		}

		priv->frames_count++;
		return frame;
	}

	element = priv->free_frames.next;
	list_del(element);
	return list_entry(element, struct iwl4965_frame, list);
}

static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl4965_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}

unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
				       struct ieee80211_hdr *hdr,
				       const u8 *dest, int left)
{

	if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
	    ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
	     (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
		return 0;

	if (priv->ibss_beacon->len > left)
		return 0;

	memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);

	return priv->ibss_beacon->len;
}

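/**
 * iwl4965_rate_get_lowest_plcp - find PLCP code of the lowest enabled rate
 *
 * Walks the rate table starting at 1 Mbps and returns the PLCP value of
 * the first rate set in @rate_mask, or IWL_RATE_INVALID if none is set.
 */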
static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
{
	u8 i;

	for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
	     i = iwl4965_rates[i].next_ieee) {
		if (rate_mask & (1 << i))
			return iwl4965_rates[i].plcp;
	}

	return IWL_RATE_INVALID;
}

static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl4965_frame *frame;
	unsigned int frame_size;
	int rc;
	u8 rate;

	frame = iwl4965_get_free_frame(priv);

	if (!frame) {
		IWL_ERROR("Could not obtain free frame buffer for beacon "
			  "command.\n");
		return -ENOMEM;
	}

	if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
		rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
						    0xFF0);
		if (rate == IWL_INVALID_RATE)
			rate = IWL_RATE_6M_PLCP;
	} else {
		rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
		if (rate == IWL_INVALID_RATE)
			rate = IWL_RATE_1M_PLCP;
	}

	frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);

	rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
			      &frame->u.cmd[0]);

	iwl4965_free_frame(priv, frame);

	return rc;
}

/******************************************************************************
 *
 * Misc. internal state and helper functions
 *
 ******************************************************************************/

static void iwl4965_unset_hw_setting(struct iwl_priv *priv)
{
	if (priv->hw_setting.shared_virt)
		pci_free_consistent(priv->pci_dev,
				    sizeof(struct iwl4965_shared),
				    priv->hw_setting.shared_virt,
				    priv->hw_setting.shared_phys);
}

/**
 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
 *
 * return : set the bit for each supported rate inserted in ie
 */
static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
					u16 basic_rate, int *left)
{
	u16 ret_rates = 0, bit;
	int i;
	u8 *cnt = ie;
	u8 *rates = ie + 1;

	for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
		if (bit & supported_rate) {
			ret_rates |= bit;
			rates[*cnt] = iwl4965_rates[i].ieee |
				((bit & basic_rate) ? 0x80 : 0x00);
			(*cnt)++;
			(*left)--;
			if ((*left <= 0) ||
			    (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
				break;
		}
	}

	return ret_rates;
}

/**
 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
 */
static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
				  enum ieee80211_band band,
				  struct ieee80211_mgmt *frame,
				  int left, int is_direct)
{
	int len = 0;
	u8 *pos = NULL;
	u16 active_rates, ret_rates, cck_rates, active_rate_basic;
#ifdef CONFIG_IWL4965_HT
	const struct ieee80211_supported_band *sband =
						iwl4965_get_hw_mode(priv, band);
#endif /* CONFIG_IWL4965_HT */

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;
	len += 24;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN);
	memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
	memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	/* fill in our indirect SSID IE */
	/* ...next IE... */

	left -= 2;
	if (left < 0)
		return 0;
	len += 2;
	pos = &(frame->u.probe_req.variable[0]);
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	/* fill in our direct SSID IE... */
	if (is_direct) {
		/* ...next IE... */
		left -= 2 + priv->essid_len;
		if (left < 0)
			return 0;
		/* ... fill it in... */
		*pos++ = WLAN_EID_SSID;
		*pos++ = priv->essid_len;
		memcpy(pos, priv->essid, priv->essid_len);
		pos += priv->essid_len;
		len += 2 + priv->essid_len;
	}

	/* fill in supported rate */
	/* ...next IE... */
	left -= 2;
	if (left < 0)
		return 0;

	/* ... fill it in... */
	*pos++ = WLAN_EID_SUPP_RATES;
	*pos = 0;

	/* exclude 60M rate */
	active_rates = priv->rates_mask;
	active_rates &= ~IWL_RATE_60M_MASK;

	active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;

	cck_rates = IWL_CCK_RATES_MASK & active_rates;
	ret_rates = iwl4965_supported_rate_to_ie(pos, cck_rates,
						 active_rate_basic, &left);
	active_rates &= ~ret_rates;

	ret_rates = iwl4965_supported_rate_to_ie(pos, active_rates,
						 active_rate_basic, &left);
	active_rates &= ~ret_rates;

	len += 2 + *pos;
	pos += (*pos) + 1;
	if (active_rates == 0)
		goto fill_end;

	/* fill in supported extended rate */
	/* ...next IE... */
	left -= 2;
	if (left < 0)
		return 0;
	/* ... fill it in... */
	*pos++ = WLAN_EID_EXT_SUPP_RATES;
	*pos = 0;
	iwl4965_supported_rate_to_ie(pos, active_rates,
				     active_rate_basic, &left);
	if (*pos > 0)
		len += 2 + *pos;

#ifdef CONFIG_IWL4965_HT
	if (sband && sband->ht_info.ht_supported) {
		struct ieee80211_ht_cap *ht_cap;
		pos += (*pos) + 1;
		*pos++ = WLAN_EID_HT_CAPABILITY;
		*pos++ = sizeof(struct ieee80211_ht_cap);
		ht_cap = (struct ieee80211_ht_cap *)pos;
		ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap);
		memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16);
		ht_cap->ampdu_params_info = (sband->ht_info.ampdu_factor &
					     IEEE80211_HT_CAP_AMPDU_FACTOR) |
					    ((sband->ht_info.ampdu_density << 2) &
					     IEEE80211_HT_CAP_AMPDU_DENSITY);
		len += 2 + sizeof(struct ieee80211_ht_cap);
	}
#endif /*CONFIG_IWL4965_HT */

 fill_end:
	return (u16)len;
}

/*
 * QoS support
 */
static int iwl4965_send_qos_params_command(struct iwl_priv *priv,
					   struct iwl4965_qosparam_cmd *qos)
{

	return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
				sizeof(struct iwl4965_qosparam_cmd), qos);
}

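/**
 * iwl4965_activate_qos - send the current default QoS parameters to the uCode
 *
 * Builds the QoS flags from the current configuration and sends the
 * REPLY_QOS_PARAM command if @force is set or we are already associated.
 */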
static void iwl4965_activate_qos(struct iwl_priv *priv, u8 force)
{
	unsigned long flags;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!priv->qos_data.qos_enable)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	priv->qos_data.def_qos_parm.qos_flags = 0;

	if (priv->qos_data.qos_cap.q_AP.queue_request &&
	    !priv->qos_data.qos_cap.q_AP.txop_request)
		priv->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_TXOP_TYPE_MSK;
	if (priv->qos_data.qos_active)
		priv->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

#ifdef CONFIG_IWL4965_HT
	if (priv->current_ht_config.is_ht)
		priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
#endif /* CONFIG_IWL4965_HT */

	spin_unlock_irqrestore(&priv->lock, flags);

	if (force || iwl_is_associated(priv)) {
		IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
			      priv->qos_data.qos_active,
			      priv->qos_data.def_qos_parm.qos_flags);

		iwl4965_send_qos_params_command(priv,
				&(priv->qos_data.def_qos_parm));
	}
}

/*
 * Power management (not Tx power!) functions
 */
#define MSEC_TO_USEC 1024

#define NOSLP __constant_cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
				     __constant_cpu_to_le32(X1), \
				     __constant_cpu_to_le32(X2), \
				     __constant_cpu_to_le32(X3), \
				     __constant_cpu_to_le32(X4)}


/* default power management (not Tx power) table values */
/* for tim 0-10 */
static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
	{{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
	{{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
};

/* for tim > 10 */
static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
	{{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
	  SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
	  SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
	  SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
	  SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};

int iwl4965_power_init_handle(struct iwl_priv *priv)
{
	int rc = 0, i;
	struct iwl4965_power_mgr *pow_data;
	int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
	u16 pci_pm;

	IWL_DEBUG_POWER("Initialize power \n");

	pow_data = &(priv->power_data);

	memset(pow_data, 0, sizeof(*pow_data));

	pow_data->active_index = IWL_POWER_RANGE_0;
	pow_data->dtim_val = 0xffff;

	memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
	memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);

	rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
	if (rc != 0)
		return 0;
	else {
		struct iwl4965_powertable_cmd *cmd;

		IWL_DEBUG_POWER("adjust power command flags\n");

		for (i = 0; i < IWL_POWER_AC; i++) {
			cmd = &pow_data->pwr_range_0[i].cmd;

			if (pci_pm & 0x1)
				cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
			else
				cmd->flags |= IWL_POWER_PCI_PM_MSK;
		}
	}
	return rc;
}

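/**
 * iwl4965_update_power_cmd - fill a power table command for a power mode
 *
 * Copies the table entry for @mode into @cmd and clamps the sleep interval
 * vector to the beacon/DTIM period when sleeping over DTIM is not allowed.
 */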
c79dd5b5 1635static int iwl4965_update_power_cmd(struct iwl_priv *priv,
bb8c093b 1636 struct iwl4965_powertable_cmd *cmd, u32 mode)
b481de9c
ZY
1637{
1638 int rc = 0, i;
1639 u8 skip;
1640 u32 max_sleep = 0;
bb8c093b 1641 struct iwl4965_power_vec_entry *range;
b481de9c 1642 u8 period = 0;
bb8c093b 1643 struct iwl4965_power_mgr *pow_data;
b481de9c
ZY
1644
1645 if (mode > IWL_POWER_INDEX_5) {
1646 IWL_DEBUG_POWER("Error invalid power mode \n");
1647 return -1;
1648 }
1649 pow_data = &(priv->power_data);
1650
1651 if (pow_data->active_index == IWL_POWER_RANGE_0)
1652 range = &pow_data->pwr_range_0[0];
1653 else
1654 range = &pow_data->pwr_range_1[1];
1655
bb8c093b 1656 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
b481de9c
ZY
1657
1658#ifdef IWL_MAC80211_DISABLE
1659 if (priv->assoc_network != NULL) {
1660 unsigned long flags;
1661
1662 period = priv->assoc_network->tim.tim_period;
1663 }
1664#endif /*IWL_MAC80211_DISABLE */
1665 skip = range[mode].no_dtim;
1666
1667 if (period == 0) {
1668 period = 1;
1669 skip = 0;
1670 }
1671
1672 if (skip == 0) {
1673 max_sleep = period;
1674 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1675 } else {
1676 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1677 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1678 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1679 }
1680
1681 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
1682 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1683 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1684 }
1685
1686 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1687 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1688 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1689 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1690 le32_to_cpu(cmd->sleep_interval[0]),
1691 le32_to_cpu(cmd->sleep_interval[1]),
1692 le32_to_cpu(cmd->sleep_interval[2]),
1693 le32_to_cpu(cmd->sleep_interval[3]),
1694 le32_to_cpu(cmd->sleep_interval[4]));
1695
1696 return rc;
1697}
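/*
 * For example, with sleep-over-DTIM allowed (no_dtim set), a DTIM period
 * of 3 and a last sleep_interval entry of 10 beacon intervals, max_sleep
 * becomes (10 / 3) * 3 = 9, and every vector entry above 9 is clamped to
 * 9 so the device never sleeps across a DTIM beacon it has to receive.
 */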
1698
c79dd5b5 1699static int iwl4965_send_power_mode(struct iwl_priv *priv, u32 mode)
b481de9c 1700{
9a62f73b 1701 u32 uninitialized_var(final_mode);
b481de9c 1702 int rc;
bb8c093b 1703 struct iwl4965_powertable_cmd cmd;
b481de9c
ZY
1704
1705 /* If on battery, set to 3,
01ebd063 1706 * if plugged into AC power, set to CAM ("continuously aware mode"),
b481de9c
ZY
1707 * else user level */
1708 switch (mode) {
1709 case IWL_POWER_BATTERY:
1710 final_mode = IWL_POWER_INDEX_3;
1711 break;
1712 case IWL_POWER_AC:
1713 final_mode = IWL_POWER_MODE_CAM;
1714 break;
1715 default:
1716 final_mode = mode;
1717 break;
1718 }
1719
1720 cmd.keep_alive_beacons = 0;
1721
bb8c093b 1722 iwl4965_update_power_cmd(priv, &cmd, final_mode);
b481de9c 1723
857485c0 1724 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
b481de9c
ZY
1725
1726 if (final_mode == IWL_POWER_MODE_CAM)
1727 clear_bit(STATUS_POWER_PMI, &priv->status);
1728 else
1729 set_bit(STATUS_POWER_PMI, &priv->status);
1730
1731 return rc;
1732}
1733
c79dd5b5 1734int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
1735{
1736 /* Filter incoming packets to determine if they are targeted toward
1737 * this network, discarding packets coming from ourselves */
1738 switch (priv->iw_mode) {
1739 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
1740 /* packets from our adapter are dropped (echo) */
1741 if (!compare_ether_addr(header->addr2, priv->mac_addr))
1742 return 0;
1743 /* {broad,multi}cast packets to our IBSS go through */
1744 if (is_multicast_ether_addr(header->addr1))
1745 return !compare_ether_addr(header->addr3, priv->bssid);
1746 /* packets to our adapter go through */
1747 return !compare_ether_addr(header->addr1, priv->mac_addr);
1748 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
1749 /* packets from our adapter are dropped (echo) */
1750 if (!compare_ether_addr(header->addr3, priv->mac_addr))
1751 return 0;
1752 /* {broad,multi}cast packets to our BSS go through */
1753 if (is_multicast_ether_addr(header->addr1))
1754 return !compare_ether_addr(header->addr2, priv->bssid);
1755 /* packets to our adapter go through */
1756 return !compare_ether_addr(header->addr1, priv->mac_addr);
69dc5d9d
TW
1757 default:
1758 break;
b481de9c
ZY
1759 }
1760
1761 return 1;
1762}
1763
1764#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
1765
bb8c093b 1766static const char *iwl4965_get_tx_fail_reason(u32 status)
b481de9c
ZY
1767{
1768 switch (status & TX_STATUS_MSK) {
1769 case TX_STATUS_SUCCESS:
1770 return "SUCCESS";
1771 TX_STATUS_ENTRY(SHORT_LIMIT);
1772 TX_STATUS_ENTRY(LONG_LIMIT);
1773 TX_STATUS_ENTRY(FIFO_UNDERRUN);
1774 TX_STATUS_ENTRY(MGMNT_ABORT);
1775 TX_STATUS_ENTRY(NEXT_FRAG);
1776 TX_STATUS_ENTRY(LIFE_EXPIRE);
1777 TX_STATUS_ENTRY(DEST_PS);
1778 TX_STATUS_ENTRY(ABORTED);
1779 TX_STATUS_ENTRY(BT_RETRY);
1780 TX_STATUS_ENTRY(STA_INVALID);
1781 TX_STATUS_ENTRY(FRAG_DROPPED);
1782 TX_STATUS_ENTRY(TID_DISABLE);
1783 TX_STATUS_ENTRY(FRAME_FLUSHED);
1784 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
1785 TX_STATUS_ENTRY(TX_LOCKED);
1786 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
1787 }
1788
1789 return "UNKNOWN";
1790}
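/*
 * TX_STATUS_ENTRY() uses token pasting and stringification, so e.g.
 * TX_STATUS_ENTRY(ABORTED) expands to
 *	case TX_STATUS_FAIL_ABORTED: return "ABORTED"
 * keeping the failure-reason strings in step with the status names.
 */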
1791
1792/**
bb8c093b 1793 * iwl4965_scan_cancel - Cancel any currently executing HW scan
b481de9c
ZY
1794 *
1795 * NOTE: priv->mutex is not required before calling this function
1796 */
c79dd5b5 1797static int iwl4965_scan_cancel(struct iwl_priv *priv)
b481de9c
ZY
1798{
1799 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1800 clear_bit(STATUS_SCANNING, &priv->status);
1801 return 0;
1802 }
1803
1804 if (test_bit(STATUS_SCANNING, &priv->status)) {
1805 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1806 IWL_DEBUG_SCAN("Queuing scan abort.\n");
1807 set_bit(STATUS_SCAN_ABORTING, &priv->status);
1808 queue_work(priv->workqueue, &priv->abort_scan);
1809
1810 } else
1811 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
1812
1813 return test_bit(STATUS_SCANNING, &priv->status);
1814 }
1815
1816 return 0;
1817}
1818
1819/**
bb8c093b 1820 * iwl4965_scan_cancel_timeout - Cancel any currently executing HW scan
b481de9c
ZY
1821 * @ms: amount of time to wait (in milliseconds) for scan to abort
1822 *
1823 * NOTE: priv->mutex must be held before calling this function
1824 */
c79dd5b5 1825static int iwl4965_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
b481de9c
ZY
1826{
1827 unsigned long now = jiffies;
1828 int ret;
1829
bb8c093b 1830 ret = iwl4965_scan_cancel(priv);
b481de9c
ZY
1831 if (ret && ms) {
1832 mutex_unlock(&priv->mutex);
1833 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
1834 test_bit(STATUS_SCANNING, &priv->status))
1835 msleep(1);
1836 mutex_lock(&priv->mutex);
1837
1838 return test_bit(STATUS_SCANNING, &priv->status);
1839 }
1840
1841 return ret;
1842}
1843
c79dd5b5 1844static void iwl4965_sequence_reset(struct iwl_priv *priv)
b481de9c
ZY
1845{
1846 /* Reset ieee stats */
1847
1848 /* We don't reset the net_device_stats (ieee->stats) on
1849 * re-association */
1850
1851 priv->last_seq_num = -1;
1852 priv->last_frag_num = -1;
1853 priv->last_packet_time = 0;
1854
bb8c093b 1855 iwl4965_scan_cancel(priv);
b481de9c
ZY
1856}
1857
1858#define MAX_UCODE_BEACON_INTERVAL 4096
1859#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
1860
bb8c093b 1861static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
b481de9c
ZY
1862{
1863 u16 new_val = 0;
1864 u16 beacon_factor = 0;
1865
1866 beacon_factor =
1867 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
1868 / MAX_UCODE_BEACON_INTERVAL;
1869 new_val = beacon_val / beacon_factor;
1870
1871 return cpu_to_le16(new_val);
1872}
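/*
 * For example, a beacon interval of 8192 TU exceeds MAX_UCODE_BEACON_INTERVAL:
 * beacon_factor = (8192 + 4096) / 4096 = 3, so uCode is handed 8192 / 3 = 2730 TU,
 * while a typical 100 TU interval passes through unchanged (factor of 1).
 */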
1873
c79dd5b5 1874static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
b481de9c
ZY
1875{
1876 u64 interval_tm_unit;
1877 u64 tsf, result;
1878 unsigned long flags;
1879 struct ieee80211_conf *conf = NULL;
1880 u16 beacon_int = 0;
1881
1882 conf = ieee80211_get_hw_conf(priv->hw);
1883
1884 spin_lock_irqsave(&priv->lock, flags);
3109ece1
TW
1885 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp >> 32);
1886 priv->rxon_timing.timestamp.dw[0] =
1887 cpu_to_le32(priv->timestamp & 0xFFFFFFFF);
b481de9c
ZY
1888
1889 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
1890
3109ece1 1891 tsf = priv->timestamp;
b481de9c
ZY
1892
1893 beacon_int = priv->beacon_int;
1894 spin_unlock_irqrestore(&priv->lock, flags);
1895
1896 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
1897 if (beacon_int == 0) {
1898 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
1899 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
1900 } else {
1901 priv->rxon_timing.beacon_interval =
1902 cpu_to_le16(beacon_int);
1903 priv->rxon_timing.beacon_interval =
bb8c093b 1904 iwl4965_adjust_beacon_interval(
b481de9c
ZY
1905 le16_to_cpu(priv->rxon_timing.beacon_interval));
1906 }
1907
1908 priv->rxon_timing.atim_window = 0;
1909 } else {
1910 priv->rxon_timing.beacon_interval =
bb8c093b 1911 iwl4965_adjust_beacon_interval(conf->beacon_int);
b481de9c
ZY
1912 /* TODO: we need to get atim_window from upper stack
 1913 * for now we set it to 0 */
1914 priv->rxon_timing.atim_window = 0;
1915 }
1916
1917 interval_tm_unit =
1918 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
1919 result = do_div(tsf, interval_tm_unit);
1920 priv->rxon_timing.beacon_init_val =
1921 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
1922
1923 IWL_DEBUG_ASSOC
1924 ("beacon interval %d beacon timer %d beacon tim %d\n",
1925 le16_to_cpu(priv->rxon_timing.beacon_interval),
1926 le32_to_cpu(priv->rxon_timing.beacon_init_val),
1927 le16_to_cpu(priv->rxon_timing.atim_window));
1928}
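/*
 * For example, with a 100 TU (102400 usec) beacon interval and a TSF whose
 * remainder modulo 102400 is 30000 usec, beacon_init_val is programmed to
 * 102400 - 30000 = 72400 usec, the time left until the next expected beacon.
 */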
1929
c79dd5b5 1930static int iwl4965_scan_initiate(struct iwl_priv *priv)
b481de9c
ZY
1931{
1932 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1933 IWL_ERROR("APs don't scan.\n");
1934 return 0;
1935 }
1936
fee1247a 1937 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
1938 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
1939 return -EIO;
1940 }
1941
1942 if (test_bit(STATUS_SCANNING, &priv->status)) {
1943 IWL_DEBUG_SCAN("Scan already in progress.\n");
1944 return -EAGAIN;
1945 }
1946
1947 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1948 IWL_DEBUG_SCAN("Scan request while abort pending. "
1949 "Queuing.\n");
1950 return -EAGAIN;
1951 }
1952
1953 IWL_DEBUG_INFO("Starting scan...\n");
1954 priv->scan_bands = 2;
1955 set_bit(STATUS_SCANNING, &priv->status);
1956 priv->scan_start = jiffies;
1957 priv->scan_pass_start = priv->scan_start;
1958
1959 queue_work(priv->workqueue, &priv->request_scan);
1960
1961 return 0;
1962}
1963
b481de9c 1964
c79dd5b5 1965static void iwl4965_set_flags_for_phymode(struct iwl_priv *priv,
8318d78a 1966 enum ieee80211_band band)
b481de9c 1967{
8318d78a 1968 if (band == IEEE80211_BAND_5GHZ) {
b481de9c
ZY
1969 priv->staging_rxon.flags &=
1970 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
1971 | RXON_FLG_CCK_MSK);
1972 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1973 } else {
bb8c093b 1974 /* Copied from iwl4965_bg_post_associate() */
b481de9c
ZY
1975 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
1976 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1977 else
1978 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1979
1980 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
1981 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1982
1983 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1984 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
1985 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
1986 }
1987}
1988
1989/*
01ebd063 1990 * initialize rxon structure with default values from eeprom
b481de9c 1991 */
c79dd5b5 1992static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
b481de9c 1993{
bf85ea4f 1994 const struct iwl_channel_info *ch_info;
b481de9c
ZY
1995
1996 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
1997
1998 switch (priv->iw_mode) {
1999 case IEEE80211_IF_TYPE_AP:
2000 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2001 break;
2002
2003 case IEEE80211_IF_TYPE_STA:
2004 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2005 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2006 break;
2007
2008 case IEEE80211_IF_TYPE_IBSS:
2009 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2010 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2011 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2012 RXON_FILTER_ACCEPT_GRP_MSK;
2013 break;
2014
2015 case IEEE80211_IF_TYPE_MNTR:
2016 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2017 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2018 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2019 break;
69dc5d9d
TW
2020 default:
2021 IWL_ERROR("Unsupported interface type %d\n", priv->iw_mode);
2022 break;
b481de9c
ZY
2023 }
2024
2025#if 0
2026 /* TODO: Figure out when short_preamble would be set and cache from
2027 * that */
2028 if (!hw_to_local(priv->hw)->short_preamble)
2029 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2030 else
2031 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2032#endif
2033
8622e705 2034 ch_info = iwl_get_channel_info(priv, priv->band,
b481de9c
ZY
2035 le16_to_cpu(priv->staging_rxon.channel));
2036
2037 if (!ch_info)
2038 ch_info = &priv->channel_info[0];
2039
2040 /*
 2041 * in some cases the A-band channels are all non-IBSS;
 2042 * in that case force a B/G channel
2043 */
2044 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2045 !(is_channel_ibss(ch_info)))
2046 ch_info = &priv->channel_info[0];
2047
2048 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
8318d78a 2049 priv->band = ch_info->band;
b481de9c 2050
8318d78a 2051 iwl4965_set_flags_for_phymode(priv, priv->band);
b481de9c
ZY
2052
2053 priv->staging_rxon.ofdm_basic_rates =
2054 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2055 priv->staging_rxon.cck_basic_rates =
2056 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2057
2058 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2059 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2060 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2061 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2062 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2063 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2064 iwl4965_set_rxon_chain(priv);
2065}
2066
c79dd5b5 2067static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
b481de9c 2068{
b481de9c 2069 if (mode == IEEE80211_IF_TYPE_IBSS) {
bf85ea4f 2070 const struct iwl_channel_info *ch_info;
b481de9c 2071
8622e705 2072 ch_info = iwl_get_channel_info(priv,
8318d78a 2073 priv->band,
b481de9c
ZY
2074 le16_to_cpu(priv->staging_rxon.channel));
2075
2076 if (!ch_info || !is_channel_ibss(ch_info)) {
2077 IWL_ERROR("channel %d not IBSS channel\n",
2078 le16_to_cpu(priv->staging_rxon.channel));
2079 return -EINVAL;
2080 }
2081 }
2082
b481de9c
ZY
2083 priv->iw_mode = mode;
2084
bb8c093b 2085 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
2086 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2087
bf85ea4f 2088 iwlcore_clear_stations_table(priv);
b481de9c 2089
fde3571f 2090 /* don't commit RXON if RF-kill is on */
fee1247a 2091 if (!iwl_is_ready_rf(priv))
fde3571f
MA
2092 return -EAGAIN;
2093
2094 cancel_delayed_work(&priv->scan_check);
2095 if (iwl4965_scan_cancel_timeout(priv, 100)) {
2096 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2097 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2098 return -EAGAIN;
2099 }
2100
bb8c093b 2101 iwl4965_commit_rxon(priv);
b481de9c
ZY
2102
2103 return 0;
2104}
2105
c79dd5b5 2106static void iwl4965_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
b481de9c 2107 struct ieee80211_tx_control *ctl,
857485c0 2108 struct iwl_cmd *cmd,
b481de9c 2109 struct sk_buff *skb_frag,
deb09c43 2110 int sta_id)
b481de9c 2111{
deb09c43 2112 struct iwl4965_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
6974e363
EG
2113 struct iwl_wep_key *wepkey;
2114 int keyidx = 0;
2115
2116 BUG_ON(ctl->key_idx > 3);
b481de9c
ZY
2117
2118 switch (keyinfo->alg) {
2119 case ALG_CCMP:
2120 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2121 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
8236e183
MS
2122 if (ctl->flags & IEEE80211_TXCTL_AMPDU)
2123 cmd->cmd.tx.tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
b481de9c
ZY
2124 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2125 break;
2126
2127 case ALG_TKIP:
b481de9c 2128 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2bc75089
EG
2129 ieee80211_get_tkip_key(keyinfo->conf, skb_frag,
2130 IEEE80211_TKIP_P2_KEY, cmd->cmd.tx.key);
2131 IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
b481de9c
ZY
2132 break;
2133
2134 case ALG_WEP:
6974e363
EG
2135 wepkey = &priv->wep_keys[ctl->key_idx];
2136 cmd->cmd.tx.sec_ctl = 0;
2137 if (priv->default_wep_key) {
2138 /* the WEP key was sent as static */
2139 keyidx = ctl->key_idx;
2140 memcpy(&cmd->cmd.tx.key[3], wepkey->key,
2141 wepkey->key_size);
2142 if (wepkey->key_size == WEP_KEY_LEN_128)
2143 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2144 } else {
0211ddda
EG
2145 /* the WEP key was sent as dynamic */
2146 keyidx = keyinfo->keyidx;
2147 memcpy(&cmd->cmd.tx.key[3], keyinfo->key,
2148 keyinfo->keylen);
2149 if (keyinfo->keylen == WEP_KEY_LEN_128)
2150 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
6974e363 2151 }
b481de9c 2152
6974e363
EG
2153 cmd->cmd.tx.sec_ctl |= (TX_CMD_SEC_WEP |
2154 (keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
b481de9c
ZY
2155
2156 IWL_DEBUG_TX("Configuring packet for WEP encryption "
6974e363 2157 "with key %d\n", keyidx);
b481de9c
ZY
2158 break;
2159
b481de9c
ZY
2160 default:
2161 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2162 break;
2163 }
2164}
2165
2166/*
 2167 * build the basic flags and timing fields of a REPLY_TX command.
2168 */
c79dd5b5 2169static void iwl4965_build_tx_cmd_basic(struct iwl_priv *priv,
857485c0 2170 struct iwl_cmd *cmd,
b481de9c
ZY
2171 struct ieee80211_tx_control *ctrl,
2172 struct ieee80211_hdr *hdr,
2173 int is_unicast, u8 std_id)
2174{
2175 __le16 *qc;
2176 u16 fc = le16_to_cpu(hdr->frame_control);
2177 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2178
2179 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2180 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2181 tx_flags |= TX_CMD_FLG_ACK_MSK;
2182 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2183 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2184 if (ieee80211_is_probe_response(fc) &&
2185 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2186 tx_flags |= TX_CMD_FLG_TSF_MSK;
2187 } else {
2188 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2189 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2190 }
2191
87e4f7df
TW
2192 if (ieee80211_is_back_request(fc))
2193 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
2194
2195
b481de9c
ZY
2196 cmd->cmd.tx.sta_id = std_id;
2197 if (ieee80211_get_morefrag(hdr))
2198 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2199
2200 qc = ieee80211_get_qos_ctrl(hdr);
2201 if (qc) {
2202 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2203 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2204 } else
2205 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2206
2207 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2208 tx_flags |= TX_CMD_FLG_RTS_MSK;
2209 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2210 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2211 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2212 tx_flags |= TX_CMD_FLG_CTS_MSK;
2213 }
2214
2215 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2216 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2217
2218 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2219 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2220 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2221 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
bc434dd2 2222 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 2223 else
bc434dd2 2224 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
ab53d8af 2225 } else {
b481de9c 2226 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
ab53d8af 2227 }
b481de9c
ZY
2228
2229 cmd->cmd.tx.driver_txop = 0;
2230 cmd->cmd.tx.tx_flags = tx_flags;
2231 cmd->cmd.tx.next_frame_len = 0;
2232}
19758bef
TW
2233static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
2234{
 2235 /* 0 - mgmt, 1 - ctl, 2 - data */
2236 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
2237 priv->tx_stats[idx].cnt++;
2238 priv->tx_stats[idx].bytes += len;
2239}
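/* e.g. a data frame (IEEE80211_FTYPE_DATA, 0x08) gives idx = 0x08 >> 2 = 2 */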
6440adb5
BC
2240/**
2241 * iwl4965_get_sta_id - Find station's index within station table
2242 *
2243 * If new IBSS station, create new entry in station table
2244 */
c79dd5b5 2245static int iwl4965_get_sta_id(struct iwl_priv *priv,
9fbab516 2246 struct ieee80211_hdr *hdr)
b481de9c
ZY
2247{
2248 int sta_id;
2249 u16 fc = le16_to_cpu(hdr->frame_control);
0795af57 2250 DECLARE_MAC_BUF(mac);
b481de9c 2251
6440adb5 2252 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
2253 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2254 is_multicast_ether_addr(hdr->addr1))
2255 return priv->hw_setting.bcast_sta_id;
2256
2257 switch (priv->iw_mode) {
2258
6440adb5
BC
2259 /* If we are a client station in a BSS network, use the special
2260 * AP station entry (that's the only station we communicate with) */
b481de9c
ZY
2261 case IEEE80211_IF_TYPE_STA:
2262 return IWL_AP_ID;
2263
2264 /* If we are an AP, then find the station, or use BCAST */
2265 case IEEE80211_IF_TYPE_AP:
bb8c093b 2266 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2267 if (sta_id != IWL_INVALID_STATION)
2268 return sta_id;
2269 return priv->hw_setting.bcast_sta_id;
2270
6440adb5
BC
2271 /* If this frame is going out to an IBSS network, find the station,
2272 * or create a new station table entry */
b481de9c 2273 case IEEE80211_IF_TYPE_IBSS:
bb8c093b 2274 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2275 if (sta_id != IWL_INVALID_STATION)
2276 return sta_id;
2277
6440adb5 2278 /* Create new station table entry */
67d62035
RR
2279 sta_id = iwl4965_add_station_flags(priv, hdr->addr1,
2280 0, CMD_ASYNC, NULL);
b481de9c
ZY
2281
2282 if (sta_id != IWL_INVALID_STATION)
2283 return sta_id;
2284
0795af57 2285 IWL_DEBUG_DROP("Station %s not in station map. "
b481de9c 2286 "Defaulting to broadcast...\n",
0795af57 2287 print_mac(mac, hdr->addr1));
0a6857e7 2288 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
b481de9c
ZY
2289 return priv->hw_setting.bcast_sta_id;
2290
2291 default:
01ebd063 2292 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
b481de9c
ZY
2293 return priv->hw_setting.bcast_sta_id;
2294 }
2295}
2296
2297/*
 2298 * start the REPLY_TX command process
2299 */
c79dd5b5 2300static int iwl4965_tx_skb(struct iwl_priv *priv,
b481de9c
ZY
2301 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2302{
2303 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bb8c093b 2304 struct iwl4965_tfd_frame *tfd;
b481de9c
ZY
2305 u32 *control_flags;
2306 int txq_id = ctl->queue;
bb8c093b
CH
2307 struct iwl4965_tx_queue *txq = NULL;
2308 struct iwl4965_queue *q = NULL;
b481de9c
ZY
2309 dma_addr_t phys_addr;
2310 dma_addr_t txcmd_phys;
87e4f7df 2311 dma_addr_t scratch_phys;
857485c0 2312 struct iwl_cmd *out_cmd = NULL;
b481de9c
ZY
2313 u16 len, idx, len_org;
2314 u8 id, hdr_len, unicast;
2315 u8 sta_id;
2316 u16 seq_number = 0;
2317 u16 fc;
2318 __le16 *qc;
2319 u8 wait_write_ptr = 0;
2320 unsigned long flags;
2321 int rc;
2322
2323 spin_lock_irqsave(&priv->lock, flags);
fee1247a 2324 if (iwl_is_rfkill(priv)) {
b481de9c
ZY
2325 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2326 goto drop_unlock;
2327 }
2328
32bfd35d
JB
2329 if (!priv->vif) {
2330 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
b481de9c
ZY
2331 goto drop_unlock;
2332 }
2333
8318d78a 2334 if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) {
b481de9c
ZY
2335 IWL_ERROR("ERROR: No TX rate available.\n");
2336 goto drop_unlock;
2337 }
2338
2339 unicast = !is_multicast_ether_addr(hdr->addr1);
2340 id = 0;
2341
2342 fc = le16_to_cpu(hdr->frame_control);
2343
0a6857e7 2344#ifdef CONFIG_IWLWIFI_DEBUG
b481de9c
ZY
2345 if (ieee80211_is_auth(fc))
2346 IWL_DEBUG_TX("Sending AUTH frame\n");
2347 else if (ieee80211_is_assoc_request(fc))
2348 IWL_DEBUG_TX("Sending ASSOC frame\n");
2349 else if (ieee80211_is_reassoc_request(fc))
2350 IWL_DEBUG_TX("Sending REASSOC frame\n");
2351#endif
2352
7878a5a4 2353 /* drop all data frames if we are not associated */
76f3915b 2354 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
3109ece1 2355 (!iwl_is_associated(priv) ||
a6477249 2356 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
76f3915b 2357 !priv->assoc_station_added)) {
3109ece1 2358 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
b481de9c
ZY
2359 goto drop_unlock;
2360 }
2361
2362 spin_unlock_irqrestore(&priv->lock, flags);
2363
2364 hdr_len = ieee80211_get_hdrlen(fc);
6440adb5
BC
2365
2366 /* Find (or create) index into station table for destination station */
bb8c093b 2367 sta_id = iwl4965_get_sta_id(priv, hdr);
b481de9c 2368 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
2369 DECLARE_MAC_BUF(mac);
2370
2371 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2372 print_mac(mac, hdr->addr1));
b481de9c
ZY
2373 goto drop;
2374 }
2375
2376 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2377
2378 qc = ieee80211_get_qos_ctrl(hdr);
2379 if (qc) {
2380 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2381 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2382 IEEE80211_SCTL_SEQ;
2383 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2384 (hdr->seq_ctrl &
2385 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2386 seq_number += 0x10;
c8b0e6e1 2387#ifdef CONFIG_IWL4965_HT
b481de9c 2388 /* aggregation is on for this <sta,tid> */
fe01b477 2389 if (ctl->flags & IEEE80211_TXCTL_AMPDU)
b481de9c 2390 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
fe01b477 2391 priv->stations[sta_id].tid[tid].tfds_in_queue++;
c8b0e6e1 2392#endif /* CONFIG_IWL4965_HT */
b481de9c 2393 }
6440adb5
BC
2394
2395 /* Descriptor for chosen Tx queue */
b481de9c
ZY
2396 txq = &priv->txq[txq_id];
2397 q = &txq->q;
2398
2399 spin_lock_irqsave(&priv->lock, flags);
2400
6440adb5 2401 /* Set up first empty TFD within this queue's circular TFD buffer */
fc4b6853 2402 tfd = &txq->bd[q->write_ptr];
b481de9c
ZY
2403 memset(tfd, 0, sizeof(*tfd));
2404 control_flags = (u32 *) tfd;
fc4b6853 2405 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 2406
6440adb5 2407 /* Set up driver data for this TFD */
bb8c093b 2408 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
fc4b6853
TW
2409 txq->txb[q->write_ptr].skb[0] = skb;
2410 memcpy(&(txq->txb[q->write_ptr].status.control),
b481de9c 2411 ctl, sizeof(struct ieee80211_tx_control));
6440adb5
BC
2412
2413 /* Set up first empty entry in queue's array of Tx/cmd buffers */
b481de9c
ZY
2414 out_cmd = &txq->cmd[idx];
2415 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2416 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
6440adb5
BC
2417
2418 /*
2419 * Set up the Tx-command (not MAC!) header.
2420 * Store the chosen Tx queue and TFD index within the sequence field;
2421 * after Tx, uCode's Tx response will return this value so driver can
2422 * locate the frame within the tx queue and do post-tx processing.
2423 */
b481de9c
ZY
2424 out_cmd->hdr.cmd = REPLY_TX;
2425 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 2426 INDEX_TO_SEQ(q->write_ptr)));
6440adb5
BC
2427
2428 /* Copy MAC header from skb into command buffer */
b481de9c
ZY
2429 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2430
6440adb5
BC
2431 /*
2432 * Use the first empty entry in this queue's command buffer array
2433 * to contain the Tx command and MAC header concatenated together
2434 * (payload data will be in another buffer).
2435 * Size of this varies, due to varying MAC header length.
2436 * If end is not dword aligned, we'll have 2 extra bytes at the end
2437 * of the MAC header (device reads on dword boundaries).
2438 * We'll tell device about this padding later.
2439 */
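	/* For example, len = 62 is rounded up to (62 + 3) & ~3 = 64; the 2-byte
	 * difference makes len_org non-zero, so TX_CMD_FLG_MH_PAD_MSK is set
	 * on this Tx command further down. */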
b481de9c 2440 len = priv->hw_setting.tx_cmd_len +
857485c0 2441 sizeof(struct iwl_cmd_header) + hdr_len;
b481de9c
ZY
2442
2443 len_org = len;
2444 len = (len + 3) & ~3;
2445
2446 if (len_org != len)
2447 len_org = 1;
2448 else
2449 len_org = 0;
2450
6440adb5
BC
2451 /* Physical address of this Tx command's header (not MAC header!),
2452 * within command buffer array. */
857485c0
TW
2453 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2454 offsetof(struct iwl_cmd, hdr);
b481de9c 2455
6440adb5
BC
2456 /* Add buffer containing Tx command and MAC(!) header to TFD's
2457 * first entry */
bb8c093b 2458 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
b481de9c
ZY
2459
2460 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
deb09c43 2461 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, sta_id);
b481de9c 2462
6440adb5
BC
2463 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2464 * if any (802.11 null frames have no payload). */
b481de9c
ZY
2465 len = skb->len - hdr_len;
2466 if (len) {
2467 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2468 len, PCI_DMA_TODEVICE);
bb8c093b 2469 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
b481de9c
ZY
2470 }
2471
6440adb5 2472 /* Tell 4965 about any 2-byte padding after MAC header */
b481de9c
ZY
2473 if (len_org)
2474 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2475
6440adb5 2476 /* Total # bytes to be transmitted */
b481de9c
ZY
2477 len = (u16)skb->len;
2478 out_cmd->cmd.tx.len = cpu_to_le16(len);
2479
2480 /* TODO need this for burst mode later on */
bb8c093b 2481 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
b481de9c
ZY
2482
2483 /* set is_hcca to 0; it probably will never be implemented */
bb8c093b 2484 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
b481de9c 2485
19758bef
TW
2486 iwl_update_tx_stats(priv, fc, len);
2487
857485c0 2488 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
87e4f7df
TW
2489 offsetof(struct iwl4965_tx_cmd, scratch);
2490 out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys);
2491 out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
2492
b481de9c
ZY
2493 if (!ieee80211_get_morefrag(hdr)) {
2494 txq->need_update = 1;
2495 if (qc) {
2496 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2497 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2498 }
2499 } else {
2500 wait_write_ptr = 1;
2501 txq->need_update = 0;
2502 }
2503
0a6857e7 2504 iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
b481de9c
ZY
2505 sizeof(out_cmd->cmd.tx));
2506
0a6857e7 2507 iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
b481de9c
ZY
2508 ieee80211_get_hdrlen(fc));
2509
6440adb5 2510 /* Set up entry for this TFD in Tx byte-count array */
b481de9c
ZY
2511 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
2512
6440adb5 2513 /* Tell device the write index *just past* this latest filled TFD */
c54b679d 2514 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
bb8c093b 2515 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
2516 spin_unlock_irqrestore(&priv->lock, flags);
2517
2518 if (rc)
2519 return rc;
2520
bb8c093b 2521 if ((iwl4965_queue_space(q) < q->high_mark)
b481de9c
ZY
2522 && priv->mac80211_registered) {
2523 if (wait_write_ptr) {
2524 spin_lock_irqsave(&priv->lock, flags);
2525 txq->need_update = 1;
bb8c093b 2526 iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
2527 spin_unlock_irqrestore(&priv->lock, flags);
2528 }
2529
2530 ieee80211_stop_queue(priv->hw, ctl->queue);
2531 }
2532
2533 return 0;
2534
2535drop_unlock:
2536 spin_unlock_irqrestore(&priv->lock, flags);
2537drop:
2538 return -1;
2539}
2540
c79dd5b5 2541static void iwl4965_set_rate(struct iwl_priv *priv)
b481de9c 2542{
8318d78a 2543 const struct ieee80211_supported_band *hw = NULL;
b481de9c
ZY
2544 struct ieee80211_rate *rate;
2545 int i;
2546
8318d78a 2547 hw = iwl4965_get_hw_mode(priv, priv->band);
c4ba9621
SA
2548 if (!hw) {
2549 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
2550 return;
2551 }
b481de9c
ZY
2552
2553 priv->active_rate = 0;
2554 priv->active_rate_basic = 0;
2555
8318d78a
JB
2556 for (i = 0; i < hw->n_bitrates; i++) {
2557 rate = &(hw->bitrates[i]);
2558 if (rate->hw_value < IWL_RATE_COUNT)
2559 priv->active_rate |= (1 << rate->hw_value);
b481de9c
ZY
2560 }
2561
2562 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
2563 priv->active_rate, priv->active_rate_basic);
2564
2565 /*
2566 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
2567 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
2568 * OFDM
2569 */
2570 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
2571 priv->staging_rxon.cck_basic_rates =
2572 ((priv->active_rate_basic &
2573 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2574 else
2575 priv->staging_rxon.cck_basic_rates =
2576 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2577
2578 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
2579 priv->staging_rxon.ofdm_basic_rates =
2580 ((priv->active_rate_basic &
2581 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2582 IWL_FIRST_OFDM_RATE) & 0xFF;
2583 else
2584 priv->staging_rxon.ofdm_basic_rates =
2585 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2586}
2587
ad97edd2 2588void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
b481de9c
ZY
2589{
2590 unsigned long flags;
2591
2592 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2593 return;
2594
2595 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2596 disable_radio ? "OFF" : "ON");
2597
2598 if (disable_radio) {
bb8c093b 2599 iwl4965_scan_cancel(priv);
b481de9c
ZY
2600 /* FIXME: This is a workaround for AP */
2601 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
2602 spin_lock_irqsave(&priv->lock, flags);
3395f6e9 2603 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
2604 CSR_UCODE_SW_BIT_RFKILL);
2605 spin_unlock_irqrestore(&priv->lock, flags);
ad97edd2
MA
2606 /* call the host command only if no hw rf-kill set */
2607 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
2608 iwl4965_send_card_state(priv,
2609 CARD_STATE_CMD_DISABLE,
2610 0);
b481de9c 2611 set_bit(STATUS_RF_KILL_SW, &priv->status);
ad97edd2
MA
2612
 2613 /* make sure mac80211 stops sending Tx frames */
2614 if (priv->mac80211_registered)
2615 ieee80211_stop_queues(priv->hw);
b481de9c
ZY
2616 }
2617 return;
2618 }
2619
2620 spin_lock_irqsave(&priv->lock, flags);
3395f6e9 2621 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
2622
2623 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2624 spin_unlock_irqrestore(&priv->lock, flags);
2625
2626 /* wake up ucode */
2627 msleep(10);
2628
2629 spin_lock_irqsave(&priv->lock, flags);
3395f6e9
TW
2630 iwl_read32(priv, CSR_UCODE_DRV_GP1);
2631 if (!iwl_grab_nic_access(priv))
2632 iwl_release_nic_access(priv);
b481de9c
ZY
2633 spin_unlock_irqrestore(&priv->lock, flags);
2634
2635 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2636 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2637 "disabled by HW switch\n");
2638 return;
2639 }
2640
2641 queue_work(priv->workqueue, &priv->restart);
2642 return;
2643}
2644
c79dd5b5 2645void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
b481de9c
ZY
2646 u32 decrypt_res, struct ieee80211_rx_status *stats)
2647{
2648 u16 fc =
2649 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2650
2651 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2652 return;
2653
2654 if (!(fc & IEEE80211_FCTL_PROTECTED))
2655 return;
2656
2657 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2658 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2659 case RX_RES_STATUS_SEC_TYPE_TKIP:
17e476b8
EG
 2660 /* The uCode got a bad phase 1 key, so it passes the packet up
 2661 * undecrypted; decryption will be done in SW. */
2662 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2663 RX_RES_STATUS_BAD_KEY_TTAK)
2664 break;
2665
b481de9c
ZY
2666 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2667 RX_RES_STATUS_BAD_ICV_MIC)
2668 stats->flag |= RX_FLAG_MMIC_ERROR;
2669 case RX_RES_STATUS_SEC_TYPE_WEP:
2670 case RX_RES_STATUS_SEC_TYPE_CCMP:
2671 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2672 RX_RES_STATUS_DECRYPT_OK) {
2673 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2674 stats->flag |= RX_FLAG_DECRYPTED;
2675 }
2676 break;
2677
2678 default:
2679 break;
2680 }
2681}
2682
b481de9c
ZY
2683
2684#define IWL_PACKET_RETRY_TIME HZ
2685
c79dd5b5 2686int iwl4965_is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
2687{
2688 u16 sc = le16_to_cpu(header->seq_ctrl);
2689 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2690 u16 frag = sc & IEEE80211_SCTL_FRAG;
2691 u16 *last_seq, *last_frag;
2692 unsigned long *last_time;
2693
2694 switch (priv->iw_mode) {
2695 case IEEE80211_IF_TYPE_IBSS:{
2696 struct list_head *p;
bb8c093b 2697 struct iwl4965_ibss_seq *entry = NULL;
b481de9c
ZY
2698 u8 *mac = header->addr2;
2699 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
2700
2701 __list_for_each(p, &priv->ibss_mac_hash[index]) {
bb8c093b 2702 entry = list_entry(p, struct iwl4965_ibss_seq, list);
b481de9c
ZY
2703 if (!compare_ether_addr(entry->mac, mac))
2704 break;
2705 }
2706 if (p == &priv->ibss_mac_hash[index]) {
2707 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2708 if (!entry) {
bc434dd2 2709 IWL_ERROR("Cannot malloc new mac entry\n");
b481de9c
ZY
2710 return 0;
2711 }
2712 memcpy(entry->mac, mac, ETH_ALEN);
2713 entry->seq_num = seq;
2714 entry->frag_num = frag;
2715 entry->packet_time = jiffies;
bc434dd2 2716 list_add(&entry->list, &priv->ibss_mac_hash[index]);
b481de9c
ZY
2717 return 0;
2718 }
2719 last_seq = &entry->seq_num;
2720 last_frag = &entry->frag_num;
2721 last_time = &entry->packet_time;
2722 break;
2723 }
2724 case IEEE80211_IF_TYPE_STA:
2725 last_seq = &priv->last_seq_num;
2726 last_frag = &priv->last_frag_num;
2727 last_time = &priv->last_packet_time;
2728 break;
2729 default:
2730 return 0;
2731 }
2732 if ((*last_seq == seq) &&
2733 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
2734 if (*last_frag == frag)
2735 goto drop;
2736 if (*last_frag + 1 != frag)
2737 /* out-of-order fragment */
2738 goto drop;
2739 } else
2740 *last_seq = seq;
2741
2742 *last_frag = frag;
2743 *last_time = jiffies;
2744 return 0;
2745
2746 drop:
2747 return 1;
2748}
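/*
 * For example, a retransmitted frame carrying the same sequence and fragment
 * number as the previous frame from that station, arriving within
 * IWL_PACKET_RETRY_TIME (HZ, i.e. one second), is reported as a duplicate;
 * a fragment that is not last_frag + 1 is likewise dropped as out of order.
 */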
2749
c8b0e6e1 2750#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
2751
2752#include "iwl-spectrum.h"
2753
2754#define BEACON_TIME_MASK_LOW 0x00FFFFFF
2755#define BEACON_TIME_MASK_HIGH 0xFF000000
2756#define TIME_UNIT 1024
2757
2758/*
2759 * extended beacon time format
2760 * time in usec will be changed into a 32-bit value in 8:24 format
 2761 * the high byte is the beacon count
 2762 * the lower 3 bytes are the time in usec within one beacon interval
2763 */
2764
bb8c093b 2765static u32 iwl4965_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
2766{
2767 u32 quot;
2768 u32 rem;
2769 u32 interval = beacon_interval * 1024;
2770
2771 if (!interval || !usec)
2772 return 0;
2773
2774 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
2775 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
2776
2777 return (quot << 24) + rem;
2778}
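/*
 * For example, with a 100 TU (102400 usec) beacon interval, 250000 usec
 * converts to quot = 2 beacons and rem = 45200 usec, i.e. the packed
 * 8:24 value (2 << 24) + 45200 = 0x0200b090.
 */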
2779
2780/* base is usually what we get from ucode with each received frame,
 2781 * the same as the HW timer counter, which counts down
2782 */
2783
bb8c093b 2784static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
2785{
2786 u32 base_low = base & BEACON_TIME_MASK_LOW;
2787 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
2788 u32 interval = beacon_interval * TIME_UNIT;
2789 u32 res = (base & BEACON_TIME_MASK_HIGH) +
2790 (addon & BEACON_TIME_MASK_HIGH);
2791
2792 if (base_low > addon_low)
2793 res += base_low - addon_low;
2794 else if (base_low < addon_low) {
2795 res += interval + base_low - addon_low;
2796 res += (1 << 24);
2797 } else
2798 res += (1 << 24);
2799
2800 return cpu_to_le32(res);
2801}
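/*
 * For example, if the usec part of base is 30000 and that of addon is 50000
 * with a 102400 usec beacon interval, the usec part of the result becomes
 * 102400 + 30000 - 50000 = 82400 and one extra beacon (1 << 24) is carried
 * into the beacon count.
 */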
2802
c79dd5b5 2803static int iwl4965_get_measurement(struct iwl_priv *priv,
b481de9c
ZY
2804 struct ieee80211_measurement_params *params,
2805 u8 type)
2806{
bb8c093b
CH
2807 struct iwl4965_spectrum_cmd spectrum;
2808 struct iwl4965_rx_packet *res;
857485c0 2809 struct iwl_host_cmd cmd = {
b481de9c
ZY
2810 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2811 .data = (void *)&spectrum,
2812 .meta.flags = CMD_WANT_SKB,
2813 };
2814 u32 add_time = le64_to_cpu(params->start_time);
2815 int rc;
2816 int spectrum_resp_status;
2817 int duration = le16_to_cpu(params->duration);
2818
3109ece1 2819 if (iwl_is_associated(priv))
b481de9c 2820 add_time =
bb8c093b 2821 iwl4965_usecs_to_beacons(
b481de9c
ZY
2822 le64_to_cpu(params->start_time) - priv->last_tsf,
2823 le16_to_cpu(priv->rxon_timing.beacon_interval));
2824
2825 memset(&spectrum, 0, sizeof(spectrum));
2826
2827 spectrum.channel_count = cpu_to_le16(1);
2828 spectrum.flags =
2829 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
2830 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
2831 cmd.len = sizeof(spectrum);
2832 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
2833
3109ece1 2834 if (iwl_is_associated(priv))
b481de9c 2835 spectrum.start_time =
bb8c093b 2836 iwl4965_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
2837 add_time,
2838 le16_to_cpu(priv->rxon_timing.beacon_interval));
2839 else
2840 spectrum.start_time = 0;
2841
2842 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
2843 spectrum.channels[0].channel = params->channel;
2844 spectrum.channels[0].type = type;
2845 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
2846 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
2847 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
2848
857485c0 2849 rc = iwl_send_cmd_sync(priv, &cmd);
b481de9c
ZY
2850 if (rc)
2851 return rc;
2852
bb8c093b 2853 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
b481de9c
ZY
2854 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
2855 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
2856 rc = -EIO;
2857 }
2858
2859 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
2860 switch (spectrum_resp_status) {
2861 case 0: /* Command will be handled */
2862 if (res->u.spectrum.id != 0xff) {
2863 IWL_DEBUG_INFO
2864 ("Replaced existing measurement: %d\n",
2865 res->u.spectrum.id);
2866 priv->measurement_status &= ~MEASUREMENT_READY;
2867 }
2868 priv->measurement_status |= MEASUREMENT_ACTIVE;
2869 rc = 0;
2870 break;
2871
2872 case 1: /* Command will not be handled */
2873 rc = -EAGAIN;
2874 break;
2875 }
2876
2877 dev_kfree_skb_any(cmd.meta.u.skb);
2878
2879 return rc;
2880}
2881#endif
2882
c79dd5b5 2883static void iwl4965_txstatus_to_ieee(struct iwl_priv *priv,
bb8c093b 2884 struct iwl4965_tx_info *tx_sta)
b481de9c
ZY
2885{
2886
2887 tx_sta->status.ack_signal = 0;
2888 tx_sta->status.excessive_retries = 0;
2889 tx_sta->status.queue_length = 0;
2890 tx_sta->status.queue_number = 0;
2891
2892 if (in_interrupt())
2893 ieee80211_tx_status_irqsafe(priv->hw,
2894 tx_sta->skb[0], &(tx_sta->status));
2895 else
2896 ieee80211_tx_status(priv->hw,
2897 tx_sta->skb[0], &(tx_sta->status));
2898
2899 tx_sta->skb[0] = NULL;
2900}
2901
2902/**
6440adb5 2903 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
b481de9c 2904 *
6440adb5
BC
2905 * When FW advances 'R' index, all entries between old and new 'R' index
 2906 * need to be reclaimed. As a result, some free space forms. If there is
2907 * enough free space (> low mark), wake the stack that feeds us.
b481de9c 2908 */
c79dd5b5 2909int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
b481de9c 2910{
bb8c093b
CH
2911 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
2912 struct iwl4965_queue *q = &txq->q;
b481de9c
ZY
2913 int nfreed = 0;
2914
2915 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
2916 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
2917 "is out of range [0-%d] %d %d.\n", txq_id,
fc4b6853 2918 index, q->n_bd, q->write_ptr, q->read_ptr);
b481de9c
ZY
2919 return 0;
2920 }
2921
c54b679d 2922 for (index = iwl_queue_inc_wrap(index, q->n_bd);
fc4b6853 2923 q->read_ptr != index;
c54b679d 2924 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
b481de9c 2925 if (txq_id != IWL_CMD_QUEUE_NUM) {
bb8c093b 2926 iwl4965_txstatus_to_ieee(priv,
fc4b6853 2927 &(txq->txb[txq->q.read_ptr]));
bb8c093b 2928 iwl4965_hw_txq_free_tfd(priv, txq);
b481de9c
ZY
2929 } else if (nfreed > 1) {
2930 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
fc4b6853 2931 q->write_ptr, q->read_ptr);
b481de9c
ZY
2932 queue_work(priv->workqueue, &priv->restart);
2933 }
2934 nfreed++;
2935 }
2936
fe01b477 2937/* if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
b481de9c
ZY
2938 (txq_id != IWL_CMD_QUEUE_NUM) &&
2939 priv->mac80211_registered)
fe01b477 2940 ieee80211_wake_queue(priv->hw, txq_id); */
b481de9c
ZY
2941
2942
2943 return nfreed;
2944}
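/*
 * For example, on a data queue with q->read_ptr at 4 and a Tx response for
 * index 7, entries 4, 5, 6 and 7 are walked in turn: each skb's status is
 * handed back to mac80211 and its TFD freed, read_ptr ends up at 8 and the
 * function returns nfreed = 4.
 */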
2945
bb8c093b 2946static int iwl4965_is_tx_success(u32 status)
b481de9c
ZY
2947{
2948 status &= TX_STATUS_MSK;
2949 return (status == TX_STATUS_SUCCESS)
2950 || (status == TX_STATUS_DIRECT_DONE);
2951}
2952
2953/******************************************************************************
2954 *
2955 * Generic RX handler implementations
2956 *
2957 ******************************************************************************/
c8b0e6e1 2958#ifdef CONFIG_IWL4965_HT
b481de9c 2959
c79dd5b5 2960static inline int iwl4965_get_ra_sta_id(struct iwl_priv *priv,
b481de9c
ZY
2961 struct ieee80211_hdr *hdr)
2962{
2963 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
2964 return IWL_AP_ID;
2965 else {
2966 u8 *da = ieee80211_get_DA(hdr);
bb8c093b 2967 return iwl4965_hw_find_station(priv, da);
b481de9c
ZY
2968 }
2969}
2970
bb8c093b 2971static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
c79dd5b5 2972 struct iwl_priv *priv, int txq_id, int idx)
b481de9c
ZY
2973{
2974 if (priv->txq[txq_id].txb[idx].skb[0])
2975 return (struct ieee80211_hdr *)priv->txq[txq_id].
2976 txb[idx].skb[0]->data;
2977 return NULL;
2978}
2979
bb8c093b 2980static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
b481de9c
ZY
2981{
2982 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
2983 tx_resp->frame_count);
2984 return le32_to_cpu(*scd_ssn) & MAX_SN;
2985
2986}
6440adb5
BC
2987
2988/**
 2989 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
2990 */
c79dd5b5 2991static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
bb8c093b 2992 struct iwl4965_ht_agg *agg,
fe01b477 2993 struct iwl4965_tx_resp_agg *tx_resp,
b481de9c
ZY
2994 u16 start_idx)
2995{
fe01b477
RR
2996 u16 status;
2997 struct agg_tx_status *frame_status = &tx_resp->status;
b481de9c
ZY
2998 struct ieee80211_tx_status *tx_status = NULL;
2999 struct ieee80211_hdr *hdr = NULL;
3000 int i, sh;
3001 int txq_id, idx;
3002 u16 seq;
3003
3004 if (agg->wait_for_ba)
6440adb5 3005 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
b481de9c
ZY
3006
3007 agg->frame_count = tx_resp->frame_count;
3008 agg->start_idx = start_idx;
3009 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
fe01b477 3010 agg->bitmap = 0;
b481de9c 3011
6440adb5 3012 /* # frames attempted by Tx command */
b481de9c 3013 if (agg->frame_count == 1) {
6440adb5 3014 /* Only one frame was attempted; no block-ack will arrive */
fe01b477
RR
3015 status = le16_to_cpu(frame_status[0].status);
3016 seq = le16_to_cpu(frame_status[0].sequence);
3017 idx = SEQ_TO_INDEX(seq);
3018 txq_id = SEQ_TO_QUEUE(seq);
b481de9c 3019
b481de9c 3020 /* FIXME: code repetition */
fe01b477
RR
3021 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
3022 agg->frame_count, agg->start_idx, idx);
b481de9c 3023
fe01b477 3024 tx_status = &(priv->txq[txq_id].txb[idx].status);
b481de9c
ZY
3025 tx_status->retry_count = tx_resp->failure_frame;
3026 tx_status->queue_number = status & 0xff;
fe01b477
RR
3027 tx_status->queue_length = tx_resp->failure_rts;
3028 tx_status->control.flags &= ~IEEE80211_TXCTL_AMPDU;
bb8c093b 3029 tx_status->flags = iwl4965_is_tx_success(status)?
b481de9c 3030 IEEE80211_TX_STATUS_ACK : 0;
4c424e4c
RR
3031 iwl4965_hwrate_to_tx_control(priv,
3032 le32_to_cpu(tx_resp->rate_n_flags),
3033 &tx_status->control);
b481de9c
ZY
3034 /* FIXME: code repetition end */
3035
3036 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3037 status & 0xff, tx_resp->failure_frame);
3038 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
bb8c093b 3039 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
b481de9c
ZY
3040
3041 agg->wait_for_ba = 0;
3042 } else {
6440adb5 3043 /* Two or more frames were attempted; expect block-ack */
b481de9c
ZY
3044 u64 bitmap = 0;
3045 int start = agg->start_idx;
3046
6440adb5 3047 /* Construct bit-map of pending frames within Tx window */
b481de9c
ZY
3048 for (i = 0; i < agg->frame_count; i++) {
3049 u16 sc;
fe01b477
RR
3050 status = le16_to_cpu(frame_status[i].status);
3051 seq = le16_to_cpu(frame_status[i].sequence);
b481de9c
ZY
3052 idx = SEQ_TO_INDEX(seq);
3053 txq_id = SEQ_TO_QUEUE(seq);
3054
3055 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3056 AGG_TX_STATE_ABORT_MSK))
3057 continue;
3058
3059 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3060 agg->frame_count, txq_id, idx);
3061
bb8c093b 3062 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
b481de9c
ZY
3063
3064 sc = le16_to_cpu(hdr->seq_ctrl);
3065 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3066 IWL_ERROR("BUG_ON idx doesn't match seq control"
3067 " idx=%d, seq_idx=%d, seq=%d\n",
3068 idx, SEQ_TO_SN(sc),
3069 hdr->seq_ctrl);
3070 return -1;
3071 }
3072
3073 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3074 i, idx, SEQ_TO_SN(sc));
3075
3076 sh = idx - start;
3077 if (sh > 64) {
3078 sh = (start - idx) + 0xff;
3079 bitmap = bitmap << sh;
3080 sh = 0;
3081 start = idx;
3082 } else if (sh < -64)
3083 sh = 0xff - (start - idx);
3084 else if (sh < 0) {
3085 sh = start - idx;
3086 start = idx;
3087 bitmap = bitmap << sh;
3088 sh = 0;
3089 }
3090 bitmap |= (1 << sh);
3091 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3092 start, (u32)(bitmap & 0xFFFFFFFF));
3093 }
3094
fe01b477 3095 agg->bitmap = bitmap;
b481de9c
ZY
3096 agg->start_idx = start;
3097 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
fe01b477 3098 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
b481de9c 3099 agg->frame_count, agg->start_idx,
06501d29 3100 (unsigned long long)agg->bitmap);
b481de9c
ZY
3101
3102 if (bitmap)
3103 agg->wait_for_ba = 1;
3104 }
3105 return 0;
3106}
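/*
 * For example, an aggregate whose attempted frames sit at TFD indexes 5, 6
 * and 8 ends up with agg->start_idx = 5 and agg->bitmap = 0x0b: one bit per
 * attempted frame, shifted by its offset from the window start, to be
 * matched later against the block-ack bitmap reported by the uCode.
 */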
3107#endif
b481de9c 3108
6440adb5
BC
3109/**
3110 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3111 */
c79dd5b5 3112static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
bb8c093b 3113 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3114{
bb8c093b 3115 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3116 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3117 int txq_id = SEQ_TO_QUEUE(sequence);
3118 int index = SEQ_TO_INDEX(sequence);
bb8c093b 3119 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
b481de9c 3120 struct ieee80211_tx_status *tx_status;
bb8c093b 3121 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
b481de9c 3122 u32 status = le32_to_cpu(tx_resp->status);
c8b0e6e1 3123#ifdef CONFIG_IWL4965_HT
fe01b477
RR
3124 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
3125 struct ieee80211_hdr *hdr;
3126 __le16 *qc;
b481de9c
ZY
3127#endif
3128
3129 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3130 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3131 "is out of range [0-%d] %d %d\n", txq_id,
fc4b6853
TW
3132 index, txq->q.n_bd, txq->q.write_ptr,
3133 txq->q.read_ptr);
b481de9c
ZY
3134 return;
3135 }
3136
c8b0e6e1 3137#ifdef CONFIG_IWL4965_HT
fe01b477
RR
3138 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, index);
3139 qc = ieee80211_get_qos_ctrl(hdr);
3140
3141 if (qc)
3142 tid = le16_to_cpu(*qc) & 0xf;
3143
3144 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
3145 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
3146 IWL_ERROR("Station not known\n");
3147 return;
3148 }
3149
b481de9c 3150 if (txq->sched_retry) {
bb8c093b 3151 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
bb8c093b 3152 struct iwl4965_ht_agg *agg = NULL;
b481de9c 3153
fe01b477 3154 if (!qc)
b481de9c 3155 return;
b481de9c
ZY
3156
3157 agg = &priv->stations[sta_id].tid[tid].agg;
3158
fe01b477
RR
3159 iwl4965_tx_status_reply_tx(priv, agg,
3160 (struct iwl4965_tx_resp_agg *)tx_resp, index);
b481de9c
ZY
3161
3162 if ((tx_resp->frame_count == 1) &&
bb8c093b 3163 !iwl4965_is_tx_success(status)) {
b481de9c
ZY
3164 /* TODO: send BAR */
3165 }
3166
fe01b477
RR
3167 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
3168 int freed;
c54b679d 3169 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
b481de9c
ZY
3170 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3171 "%d index %d\n", scd_ssn , index);
fe01b477
RR
3172 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
3173 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3174
3175 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3176 txq_id >= 0 && priv->mac80211_registered &&
3177 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
3178 ieee80211_wake_queue(priv->hw, txq_id);
3179
3180 iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
b481de9c
ZY
3181 }
3182 } else {
c8b0e6e1 3183#endif /* CONFIG_IWL4965_HT */
fc4b6853 3184 tx_status = &(txq->txb[txq->q.read_ptr].status);
b481de9c
ZY
3185
3186 tx_status->retry_count = tx_resp->failure_frame;
3187 tx_status->queue_number = status;
3188 tx_status->queue_length = tx_resp->bt_kill_count;
3189 tx_status->queue_length |= tx_resp->failure_rts;
b481de9c 3190 tx_status->flags =
bb8c093b 3191 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
4c424e4c
RR
3192 iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
3193 &tx_status->control);
b481de9c 3194
b481de9c 3195 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
bb8c093b 3196 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
b481de9c
ZY
3197 status, le32_to_cpu(tx_resp->rate_n_flags),
3198 tx_resp->failure_frame);
3199
3200 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
fe01b477
RR
3201 if (index != -1) {
3202 int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
3203#ifdef CONFIG_IWL4965_HT
3204 if (tid != MAX_TID_COUNT)
3205 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3206 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3207 (txq_id >= 0) &&
3208 priv->mac80211_registered)
3209 ieee80211_wake_queue(priv->hw, txq_id);
3210 if (tid != MAX_TID_COUNT)
3211 iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3212#endif
3213 }
c8b0e6e1 3214#ifdef CONFIG_IWL4965_HT
b481de9c 3215 }
c8b0e6e1 3216#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
3217
3218 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3219 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3220}
3221
3222
c79dd5b5 3223static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
bb8c093b 3224 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3225{
bb8c093b
CH
3226 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3227 struct iwl4965_alive_resp *palive;
b481de9c
ZY
3228 struct delayed_work *pwork;
3229
3230 palive = &pkt->u.alive_frame;
3231
3232 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3233 "0x%01X 0x%01X\n",
3234 palive->is_valid, palive->ver_type,
3235 palive->ver_subtype);
3236
3237 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3238 IWL_DEBUG_INFO("Initialization Alive received.\n");
3239 memcpy(&priv->card_alive_init,
3240 &pkt->u.alive_frame,
bb8c093b 3241 sizeof(struct iwl4965_init_alive_resp));
b481de9c
ZY
3242 pwork = &priv->init_alive_start;
3243 } else {
3244 IWL_DEBUG_INFO("Runtime Alive received.\n");
3245 memcpy(&priv->card_alive, &pkt->u.alive_frame,
bb8c093b 3246 sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
3247 pwork = &priv->alive_start;
3248 }
3249
3250 /* We delay the ALIVE response by 5ms to
3251 * give the HW RF Kill time to activate... */
3252 if (palive->is_valid == UCODE_VALID_OK)
3253 queue_delayed_work(priv->workqueue, pwork,
3254 msecs_to_jiffies(5));
3255 else
3256 IWL_WARNING("uCode did not respond OK.\n");
3257}
3258
c79dd5b5 3259static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv,
bb8c093b 3260 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3261{
bb8c093b 3262 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3263
3264 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3265 return;
3266}
3267
c79dd5b5 3268static void iwl4965_rx_reply_error(struct iwl_priv *priv,
bb8c093b 3269 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3270{
bb8c093b 3271 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3272
3273 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3274 "seq 0x%04X ser 0x%08X\n",
3275 le32_to_cpu(pkt->u.err_resp.error_type),
3276 get_cmd_string(pkt->u.err_resp.cmd_id),
3277 pkt->u.err_resp.cmd_id,
3278 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3279 le32_to_cpu(pkt->u.err_resp.error_info));
3280}
3281
3282#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3283
c79dd5b5 3284static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3285{
bb8c093b
CH
3286 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3287 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
3288 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
3289 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3290 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3291 rxon->channel = csa->channel;
3292 priv->staging_rxon.channel = csa->channel;
3293}
3294
c79dd5b5 3295static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
bb8c093b 3296 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3297{
c8b0e6e1 3298#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
bb8c093b
CH
3299 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3300 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
3301
3302 if (!report->state) {
3303 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3304 "Spectrum Measure Notification: Start\n");
3305 return;
3306 }
3307
3308 memcpy(&priv->measure_report, report, sizeof(*report));
3309 priv->measurement_status |= MEASUREMENT_READY;
3310#endif
3311}
3312
c79dd5b5 3313static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
bb8c093b 3314 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3315{
0a6857e7 3316#ifdef CONFIG_IWLWIFI_DEBUG
bb8c093b
CH
3317 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3318 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
3319 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3320 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3321#endif
3322}
3323
c79dd5b5 3324static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
bb8c093b 3325 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3326{
bb8c093b 3327 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3328 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3329 "notification for %s:\n",
3330 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
0a6857e7 3331 iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
b481de9c
ZY
3332}
3333
bb8c093b 3334static void iwl4965_bg_beacon_update(struct work_struct *work)
b481de9c 3335{
c79dd5b5
TW
3336 struct iwl_priv *priv =
3337 container_of(work, struct iwl_priv, beacon_update);
b481de9c
ZY
3338 struct sk_buff *beacon;
3339
 3340	/* Pull updated AP beacon from mac80211. Will fail if not in AP mode. */
32bfd35d 3341 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL);
b481de9c
ZY
3342
3343 if (!beacon) {
3344 IWL_ERROR("update beacon failed\n");
3345 return;
3346 }
3347
3348 mutex_lock(&priv->mutex);
3349 /* new beacon skb is allocated every time; dispose previous.*/
3350 if (priv->ibss_beacon)
3351 dev_kfree_skb(priv->ibss_beacon);
3352
3353 priv->ibss_beacon = beacon;
3354 mutex_unlock(&priv->mutex);
3355
bb8c093b 3356 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
3357}
3358
c79dd5b5 3359static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
bb8c093b 3360 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3361{
0a6857e7 3362#ifdef CONFIG_IWLWIFI_DEBUG
bb8c093b
CH
3363 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3364 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3365 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
b481de9c
ZY
3366
3367 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3368 "tsf %d %d rate %d\n",
3369 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3370 beacon->beacon_notify_hdr.failure_frame,
3371 le32_to_cpu(beacon->ibss_mgr_status),
3372 le32_to_cpu(beacon->high_tsf),
3373 le32_to_cpu(beacon->low_tsf), rate);
3374#endif
3375
3376 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3377 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3378 queue_work(priv->workqueue, &priv->beacon_update);
3379}
3380
3381/* Service response to REPLY_SCAN_CMD (0x80) */
c79dd5b5 3382static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
bb8c093b 3383 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3384{
0a6857e7 3385#ifdef CONFIG_IWLWIFI_DEBUG
bb8c093b
CH
3386 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3387 struct iwl4965_scanreq_notification *notif =
3388 (struct iwl4965_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
3389
3390 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3391#endif
3392}
3393
3394/* Service SCAN_START_NOTIFICATION (0x82) */
c79dd5b5 3395static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
bb8c093b 3396 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3397{
bb8c093b
CH
3398 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3399 struct iwl4965_scanstart_notification *notif =
3400 (struct iwl4965_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
3401 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3402 IWL_DEBUG_SCAN("Scan start: "
3403 "%d [802.11%s] "
3404 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3405 notif->channel,
3406 notif->band ? "bg" : "a",
3407 notif->tsf_high,
3408 notif->tsf_low, notif->status, notif->beacon_timer);
3409}
3410
3411/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
c79dd5b5 3412static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
bb8c093b 3413 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3414{
bb8c093b
CH
3415 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3416 struct iwl4965_scanresults_notification *notif =
3417 (struct iwl4965_scanresults_notification *)pkt->u.raw;
b481de9c
ZY
3418
3419 IWL_DEBUG_SCAN("Scan ch.res: "
3420 "%d [802.11%s] "
3421 "(TSF: 0x%08X:%08X) - %d "
3422 "elapsed=%lu usec (%dms since last)\n",
3423 notif->channel,
3424 notif->band ? "bg" : "a",
3425 le32_to_cpu(notif->tsf_high),
3426 le32_to_cpu(notif->tsf_low),
3427 le32_to_cpu(notif->statistics[0]),
3428 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3429 jiffies_to_msecs(elapsed_jiffies
3430 (priv->last_scan_jiffies, jiffies)));
3431
3432 priv->last_scan_jiffies = jiffies;
7878a5a4 3433 priv->next_scan_jiffies = 0;
b481de9c
ZY
3434}
3435
3436/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
c79dd5b5 3437static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv,
bb8c093b 3438 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3439{
bb8c093b
CH
3440 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3441 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
b481de9c
ZY
3442
3443 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3444 scan_notif->scanned_channels,
3445 scan_notif->tsf_low,
3446 scan_notif->tsf_high, scan_notif->status);
3447
3448 /* The HW is no longer scanning */
3449 clear_bit(STATUS_SCAN_HW, &priv->status);
3450
3451 /* The scan completion notification came in, so kill that timer... */
3452 cancel_delayed_work(&priv->scan_check);
3453
3454 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
3455 (priv->scan_bands == 2) ? "2.4" : "5.2",
3456 jiffies_to_msecs(elapsed_jiffies
3457 (priv->scan_pass_start, jiffies)));
3458
3459 /* Remove this scanned band from the list
3460 * of pending bands to scan */
3461 priv->scan_bands--;
3462
3463 /* If a request to abort was given, or the scan did not succeed
3464 * then we reset the scan state machine and terminate,
3465 * re-queuing another scan if one has been requested */
3466 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3467 IWL_DEBUG_INFO("Aborted scan completed.\n");
3468 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3469 } else {
3470 /* If there are more bands on this scan pass reschedule */
3471 if (priv->scan_bands > 0)
3472 goto reschedule;
3473 }
3474
3475 priv->last_scan_jiffies = jiffies;
7878a5a4 3476 priv->next_scan_jiffies = 0;
b481de9c
ZY
3477 IWL_DEBUG_INFO("Setting scan to off\n");
3478
3479 clear_bit(STATUS_SCANNING, &priv->status);
3480
3481 IWL_DEBUG_INFO("Scan took %dms\n",
3482 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
3483
3484 queue_work(priv->workqueue, &priv->scan_completed);
3485
3486 return;
3487
3488reschedule:
3489 priv->scan_pass_start = jiffies;
3490 queue_work(priv->workqueue, &priv->request_scan);
3491}
3492
3493/* Handle notification from uCode that card's power state is changing
3494 * due to software, hardware, or critical temperature RFKILL */
c79dd5b5 3495static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
bb8c093b 3496 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3497{
bb8c093b 3498 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3499 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3500 unsigned long status = priv->status;
3501
3502 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
3503 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
3504 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
3505
3506 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
3507 RF_CARD_DISABLED)) {
3508
3395f6e9 3509 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
3510 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3511
3395f6e9
TW
3512 if (!iwl_grab_nic_access(priv)) {
3513 iwl_write_direct32(
b481de9c
ZY
3514 priv, HBUS_TARG_MBX_C,
3515 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
3516
3395f6e9 3517 iwl_release_nic_access(priv);
b481de9c
ZY
3518 }
3519
3520 if (!(flags & RXON_CARD_DISABLED)) {
3395f6e9 3521 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 3522 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3395f6e9
TW
3523 if (!iwl_grab_nic_access(priv)) {
3524 iwl_write_direct32(
b481de9c
ZY
3525 priv, HBUS_TARG_MBX_C,
3526 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
3527
3395f6e9 3528 iwl_release_nic_access(priv);
b481de9c
ZY
3529 }
3530 }
3531
3532 if (flags & RF_CARD_DISABLED) {
3395f6e9 3533 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c 3534 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
3395f6e9
TW
3535 iwl_read32(priv, CSR_UCODE_DRV_GP1);
3536 if (!iwl_grab_nic_access(priv))
3537 iwl_release_nic_access(priv);
b481de9c
ZY
3538 }
3539 }
3540
3541 if (flags & HW_CARD_DISABLED)
3542 set_bit(STATUS_RF_KILL_HW, &priv->status);
3543 else
3544 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3545
3546
3547 if (flags & SW_CARD_DISABLED)
3548 set_bit(STATUS_RF_KILL_SW, &priv->status);
3549 else
3550 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3551
3552 if (!(flags & RXON_CARD_DISABLED))
bb8c093b 3553 iwl4965_scan_cancel(priv);
b481de9c
ZY
3554
3555 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
3556 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
3557 (test_bit(STATUS_RF_KILL_SW, &status) !=
3558 test_bit(STATUS_RF_KILL_SW, &priv->status)))
3559 queue_work(priv->workqueue, &priv->rf_kill);
3560 else
3561 wake_up_interruptible(&priv->wait_command_queue);
3562}
3563
3564/**
bb8c093b 3565 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
3566 *
3567 * Setup the RX handlers for each of the reply types sent from the uCode
3568 * to the host.
3569 *
3570 * This function chains into the hardware specific files for them to setup
3571 * any hardware specific handlers as well.
3572 */
c79dd5b5 3573static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
b481de9c 3574{
bb8c093b
CH
3575 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
3576 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
3577 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
3578 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
b481de9c 3579 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
3580 iwl4965_rx_spectrum_measure_notif;
3581 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl4965_rx_pm_sleep_notif;
b481de9c 3582 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
3583 iwl4965_rx_pm_debug_statistics_notif;
3584 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
b481de9c 3585
9fbab516
BC
3586 /*
3587 * The same handler is used for both the REPLY to a discrete
3588 * statistics request from the host as well as for the periodic
3589 * statistics notifications (after received beacons) from the uCode.
b481de9c 3590 */
bb8c093b
CH
3591 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
3592 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
b481de9c 3593
bb8c093b
CH
3594 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
3595 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
b481de9c 3596 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 3597 iwl4965_rx_scan_results_notif;
b481de9c 3598 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
3599 iwl4965_rx_scan_complete_notif;
3600 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
3601 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
b481de9c 3602
9fbab516 3603 /* Set up hardware specific Rx handlers */
bb8c093b 3604 iwl4965_hw_rx_handler_setup(priv);
b481de9c
ZY
3605}
3606
3607/**
bb8c093b 3608 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
3609 * @rxb: Rx buffer to reclaim
3610 *
3611 * If an Rx buffer has an async callback associated with it the callback
3612 * will be executed. The attached skb (if present) will only be freed
3613 * if the callback returns 1
3614 */
c79dd5b5 3615static void iwl4965_tx_cmd_complete(struct iwl_priv *priv,
bb8c093b 3616 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3617{
bb8c093b 3618 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
3619 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3620 int txq_id = SEQ_TO_QUEUE(sequence);
3621 int index = SEQ_TO_INDEX(sequence);
3622 int huge = sequence & SEQ_HUGE_FRAME;
3623 int cmd_index;
857485c0 3624 struct iwl_cmd *cmd;
b481de9c
ZY
3625
3626 /* If a Tx command is being handled and it isn't in the actual
 3627	 * command queue, then a command routing bug has been introduced
3628 * in the queue management code. */
3629 if (txq_id != IWL_CMD_QUEUE_NUM)
3630 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
3631 txq_id, pkt->hdr.cmd);
3632 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3633
3634 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3635 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3636
3637 /* Input error checking is done when commands are added to queue. */
3638 if (cmd->meta.flags & CMD_WANT_SKB) {
3639 cmd->meta.source->u.skb = rxb->skb;
3640 rxb->skb = NULL;
3641 } else if (cmd->meta.u.callback &&
3642 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3643 rxb->skb = NULL;
3644
bb8c093b 3645 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
3646
3647 if (!(cmd->meta.flags & CMD_ASYNC)) {
3648 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3649 wake_up_interruptible(&priv->wait_command_queue);
3650 }
3651}
3652
3653/************************** RX-FUNCTIONS ****************************/
3654/*
3655 * Rx theory of operation
3656 *
9fbab516
BC
3657 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
3658 * each of which point to Receive Buffers to be filled by 4965. These get
3659 * used not only for Rx frames, but for any command response or notification
3660 * from the 4965. The driver and 4965 manage the Rx buffers by means
3661 * of indexes into the circular buffer.
b481de9c
ZY
3662 *
3663 * Rx Queue Indexes
3664 * The host/firmware share two index registers for managing the Rx buffers.
3665 *
3666 * The READ index maps to the first position that the firmware may be writing
3667 * to -- the driver can read up to (but not including) this position and get
3668 * good data.
3669 * The READ index is managed by the firmware once the card is enabled.
3670 *
3671 * The WRITE index maps to the last position the driver has read from -- the
3672 * position preceding WRITE is the last slot the firmware can place a packet.
3673 *
3674 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3675 * WRITE = READ.
3676 *
9fbab516 3677 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
3678 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3679 *
9fbab516 3680 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
3681 * and fire the RX interrupt. The driver can then query the READ index and
3682 * process as many packets as possible, moving the WRITE index forward as it
3683 * resets the Rx queue buffers with new memory.
3684 *
3685 * The management in the driver is as follows:
3686 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
3687 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 3688 * to replenish the iwl->rxq->rx_free.
bb8c093b 3689 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
3690 * iwl->rxq is replenished and the READ INDEX is updated (updating the
3691 * 'processed' and 'read' driver indexes as well)
3692 * + A received packet is processed and handed to the kernel network stack,
3693 * detached from the iwl->rxq. The driver 'processed' index is updated.
3694 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3695 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3696 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
3697 * were enough free buffers and RX_STALLED is set it is cleared.
3698 *
3699 *
3700 * Driver sequence:
3701 *
9fbab516
BC
3702 * iwl4965_rx_queue_alloc() Allocates rx_free
3703 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 3704 * iwl4965_rx_queue_restock
9fbab516 3705 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
3706 * queue, updates firmware pointers, and updates
3707 * the WRITE index. If insufficient rx_free buffers
bb8c093b 3708 * are available, schedules iwl4965_rx_replenish
b481de9c
ZY
3709 *
3710 * -- enable interrupts --
9fbab516 3711 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
b481de9c
ZY
3712 * READ INDEX, detaching the SKB from the pool.
3713 * Moves the packet buffer from queue to rx_used.
bb8c093b 3714 * Calls iwl4965_rx_queue_restock to refill any empty
b481de9c
ZY
3715 * slots.
3716 * ...
3717 *
3718 */
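
As a quick illustration of the index rules above, here is a minimal standalone sketch (userspace, not driver code) of the read/write bookkeeping. It mirrors iwl4965_rx_queue_space() below and assumes the driver's power-of-two queue depth of 256.

#include <stdio.h>

#define RX_QUEUE_SIZE 256                  /* assumed power-of-two queue depth */
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

/* Free slots between the firmware READ index and the driver WRITE index,
 * minus the 2-slot gap that keeps "full" distinguishable from "empty". */
static int rxq_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;
	return (s < 0) ? 0 : s;
}

int main(void)
{
	/* Example: firmware read index 10, driver write index 200. */
	printf("free slots: %d\n", rxq_space(10, 200));   /* 10 - 200 + 256 - 2 = 64 */

	/* Indexes always advance modulo the queue size. */
	printf("write after wrap: %d\n", (255 + 1) & RX_QUEUE_MASK);   /* 0 */
	return 0;
}
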
3719
3720/**
bb8c093b 3721 * iwl4965_rx_queue_space - Return number of free slots available in queue.
b481de9c 3722 */
bb8c093b 3723static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
b481de9c
ZY
3724{
3725 int s = q->read - q->write;
3726 if (s <= 0)
3727 s += RX_QUEUE_SIZE;
 3728	/* keep a two-slot gap so a full queue is not mistaken for an empty one */
3729 s -= 2;
3730 if (s < 0)
3731 s = 0;
3732 return s;
3733}
3734
3735/**
bb8c093b 3736 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
b481de9c 3737 */
c79dd5b5 3738int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl4965_rx_queue *q)
b481de9c
ZY
3739{
3740 u32 reg = 0;
3741 int rc = 0;
3742 unsigned long flags;
3743
3744 spin_lock_irqsave(&q->lock, flags);
3745
3746 if (q->need_update == 0)
3747 goto exit_unlock;
3748
6440adb5 3749 /* If power-saving is in use, make sure device is awake */
b481de9c 3750 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3395f6e9 3751 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
3752
3753 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3395f6e9 3754 iwl_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
3755 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3756 goto exit_unlock;
3757 }
3758
3395f6e9 3759 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
3760 if (rc)
3761 goto exit_unlock;
3762
6440adb5 3763 /* Device expects a multiple of 8 */
3395f6e9 3764 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
b481de9c 3765 q->write & ~0x7);
3395f6e9 3766 iwl_release_nic_access(priv);
6440adb5
BC
3767
3768 /* Else device is assumed to be awake */
b481de9c 3769 } else
6440adb5 3770 /* Device expects a multiple of 8 */
3395f6e9 3771 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
b481de9c
ZY
3772
3773
3774 q->need_update = 0;
3775
3776 exit_unlock:
3777 spin_unlock_irqrestore(&q->lock, flags);
3778 return rc;
3779}
3780
3781/**
9fbab516 3782 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 3783 */
c79dd5b5 3784static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
b481de9c
ZY
3785 dma_addr_t dma_addr)
3786{
3787 return cpu_to_le32((u32)(dma_addr >> 8));
3788}
3789
3790
3791/**
bb8c093b 3792 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 3793 *
9fbab516 3794 * If there are slots in the RX queue that need to be restocked,
b481de9c 3795 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 3796 * as we can, pulling from rx_free.
b481de9c
ZY
3797 *
3798 * This moves the 'write' index forward to catch up with 'processed', and
3799 * also updates the memory address in the firmware to reference the new
3800 * target buffer.
3801 */
c79dd5b5 3802static int iwl4965_rx_queue_restock(struct iwl_priv *priv)
b481de9c 3803{
bb8c093b 3804 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 3805 struct list_head *element;
bb8c093b 3806 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
3807 unsigned long flags;
3808 int write, rc;
3809
3810 spin_lock_irqsave(&rxq->lock, flags);
3811 write = rxq->write & ~0x7;
bb8c093b 3812 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 3813 /* Get next free Rx buffer, remove from free list */
b481de9c 3814 element = rxq->rx_free.next;
bb8c093b 3815 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
b481de9c 3816 list_del(element);
6440adb5
BC
3817
3818 /* Point to Rx buffer via next RBD in circular buffer */
bb8c093b 3819 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
b481de9c
ZY
3820 rxq->queue[rxq->write] = rxb;
3821 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
3822 rxq->free_count--;
3823 }
3824 spin_unlock_irqrestore(&rxq->lock, flags);
3825 /* If the pre-allocated buffer pool is dropping low, schedule to
3826 * refill it */
3827 if (rxq->free_count <= RX_LOW_WATERMARK)
3828 queue_work(priv->workqueue, &priv->rx_replenish);
3829
3830
6440adb5
BC
3831 /* If we've added more space for the firmware to place data, tell it.
3832 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
3833 if ((write != (rxq->write & ~0x7))
3834 || (abs(rxq->write - rxq->read) > 7)) {
3835 spin_lock_irqsave(&rxq->lock, flags);
3836 rxq->need_update = 1;
3837 spin_unlock_irqrestore(&rxq->lock, flags);
bb8c093b 3838 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
3839 if (rc)
3840 return rc;
3841 }
3842
3843 return 0;
3844}
3845
3846/**
bb8c093b 3847 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
3848 *
3849 * When moving to rx_free an SKB is allocated for the slot.
3850 *
bb8c093b 3851 * Also restock the Rx queue via iwl4965_rx_queue_restock.
01ebd063 3852 * This is called as a scheduled work item (except during initialization)
b481de9c 3853 */
c79dd5b5 3854static void iwl4965_rx_allocate(struct iwl_priv *priv)
b481de9c 3855{
bb8c093b 3856 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 3857 struct list_head *element;
bb8c093b 3858 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
3859 unsigned long flags;
3860 spin_lock_irqsave(&rxq->lock, flags);
3861 while (!list_empty(&rxq->rx_used)) {
3862 element = rxq->rx_used.next;
bb8c093b 3863 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
6440adb5
BC
3864
3865 /* Alloc a new receive buffer */
b481de9c 3866 rxb->skb =
9ee1ba47
RR
3867 alloc_skb(priv->hw_setting.rx_buf_size,
3868 __GFP_NOWARN | GFP_ATOMIC);
b481de9c
ZY
3869 if (!rxb->skb) {
3870 if (net_ratelimit())
3871 printk(KERN_CRIT DRV_NAME
3872 ": Can not allocate SKB buffers\n");
3873 /* We don't reschedule replenish work here -- we will
3874 * call the restock method and if it still needs
3875 * more buffers it will schedule replenish */
3876 break;
3877 }
3878 priv->alloc_rxb_skb++;
3879 list_del(element);
6440adb5
BC
3880
3881 /* Get physical address of RB/SKB */
b481de9c
ZY
3882 rxb->dma_addr =
3883 pci_map_single(priv->pci_dev, rxb->skb->data,
9ee1ba47 3884 priv->hw_setting.rx_buf_size, PCI_DMA_FROMDEVICE);
b481de9c
ZY
3885 list_add_tail(&rxb->list, &rxq->rx_free);
3886 rxq->free_count++;
3887 }
3888 spin_unlock_irqrestore(&rxq->lock, flags);
5c0eef96
MA
3889}
3890
3891/*
3892 * this should be called while priv->lock is locked
3893*/
4fd1f841 3894static void __iwl4965_rx_replenish(void *data)
5c0eef96 3895{
c79dd5b5 3896 struct iwl_priv *priv = data;
5c0eef96
MA
3897
3898 iwl4965_rx_allocate(priv);
3899 iwl4965_rx_queue_restock(priv);
3900}
3901
3902
3903void iwl4965_rx_replenish(void *data)
3904{
c79dd5b5 3905 struct iwl_priv *priv = data;
5c0eef96
MA
3906 unsigned long flags;
3907
3908 iwl4965_rx_allocate(priv);
b481de9c
ZY
3909
3910 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3911 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
3912 spin_unlock_irqrestore(&priv->lock, flags);
3913}
3914
3915/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
9fbab516 3916 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
b481de9c
ZY
3917 * This free routine walks the list of POOL entries and if SKB is set to
3918 * non NULL it is unmapped and freed
3919 */
c79dd5b5 3920static void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
3921{
3922 int i;
3923 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3924 if (rxq->pool[i].skb != NULL) {
3925 pci_unmap_single(priv->pci_dev,
3926 rxq->pool[i].dma_addr,
9ee1ba47
RR
3927 priv->hw_setting.rx_buf_size,
3928 PCI_DMA_FROMDEVICE);
b481de9c
ZY
3929 dev_kfree_skb(rxq->pool[i].skb);
3930 }
3931 }
3932
3933 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
3934 rxq->dma_addr);
3935 rxq->bd = NULL;
3936}
3937
c79dd5b5 3938int iwl4965_rx_queue_alloc(struct iwl_priv *priv)
b481de9c 3939{
bb8c093b 3940 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
3941 struct pci_dev *dev = priv->pci_dev;
3942 int i;
3943
3944 spin_lock_init(&rxq->lock);
3945 INIT_LIST_HEAD(&rxq->rx_free);
3946 INIT_LIST_HEAD(&rxq->rx_used);
6440adb5
BC
3947
3948 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
b481de9c
ZY
3949 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
3950 if (!rxq->bd)
3951 return -ENOMEM;
6440adb5 3952
b481de9c
ZY
3953 /* Fill the rx_used queue with _all_ of the Rx buffers */
3954 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3955 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
6440adb5 3956
b481de9c
ZY
3957 /* Set us so that we have processed and used all buffers, but have
3958 * not restocked the Rx queue with fresh buffers */
3959 rxq->read = rxq->write = 0;
3960 rxq->free_count = 0;
3961 rxq->need_update = 0;
3962 return 0;
3963}
3964
c79dd5b5 3965void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
3966{
3967 unsigned long flags;
3968 int i;
3969 spin_lock_irqsave(&rxq->lock, flags);
3970 INIT_LIST_HEAD(&rxq->rx_free);
3971 INIT_LIST_HEAD(&rxq->rx_used);
3972 /* Fill the rx_used queue with _all_ of the Rx buffers */
3973 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3974 /* In the reset function, these buffers may have been allocated
3975 * to an SKB, so we need to unmap and free potential storage */
3976 if (rxq->pool[i].skb != NULL) {
3977 pci_unmap_single(priv->pci_dev,
3978 rxq->pool[i].dma_addr,
9ee1ba47
RR
3979 priv->hw_setting.rx_buf_size,
3980 PCI_DMA_FROMDEVICE);
b481de9c
ZY
3981 priv->alloc_rxb_skb--;
3982 dev_kfree_skb(rxq->pool[i].skb);
3983 rxq->pool[i].skb = NULL;
3984 }
3985 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3986 }
3987
3988 /* Set us so that we have processed and used all buffers, but have
3989 * not restocked the Rx queue with fresh buffers */
3990 rxq->read = rxq->write = 0;
3991 rxq->free_count = 0;
3992 spin_unlock_irqrestore(&rxq->lock, flags);
3993}
3994
3995/* Convert linear signal-to-noise ratio into dB */
3996static u8 ratio2dB[100] = {
3997/* 0 1 2 3 4 5 6 7 8 9 */
3998 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
3999 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4000 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4001 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4002 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4003 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4004 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4005 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4006 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4007 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4008};
4009
4010/* Calculates a relative dB value from a ratio of linear
4011 * (i.e. not dB) signal levels.
4012 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 4013int iwl4965_calc_db_from_ratio(int sig_ratio)
b481de9c 4014{
c899a575
AB
4015 /* 1000:1 or higher just report as 60 dB */
4016 if (sig_ratio >= 1000)
b481de9c
ZY
4017 return 60;
4018
c899a575 4019 /* 100:1 or higher, divide by 10 and use table,
b481de9c 4020 * add 20 dB to make up for divide by 10 */
c899a575 4021 if (sig_ratio >= 100)
b481de9c
ZY
4022 return (20 + (int)ratio2dB[sig_ratio/10]);
4023
4024 /* We shouldn't see this */
4025 if (sig_ratio < 1)
4026 return 0;
4027
4028 /* Use table for ratios 1:1 - 99:1 */
4029 return (int)ratio2dB[sig_ratio];
4030}
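
As a rough cross-check of the table above, the standalone sketch below compares a few ratios against the exact voltage-ratio formula 20*log10(ratio); the sample ratios are arbitrary. For 25:1 the driver returns ratio2dB[25] = 28, for 250:1 it returns 20 + ratio2dB[25] = 48, and 1000:1 is capped at 60, all within 1 dB of the exact values.

/* build with: cc db_check.c -lm */
#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Ratios chosen to hit each branch of iwl4965_calc_db_from_ratio(). */
	int ratios[] = { 25, 250, 1000 };
	int n = sizeof(ratios) / sizeof(ratios[0]);

	for (int i = 0; i < n; i++)
		printf("%4d:1 = %.2f dB\n", ratios[i], 20.0 * log10(ratios[i]));
	return 0;
}
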
4031
4032#define PERFECT_RSSI (-20) /* dBm */
4033#define WORST_RSSI (-95) /* dBm */
4034#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4035
4036/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4037 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4038 * about formulas used below. */
bb8c093b 4039int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
4040{
4041 int sig_qual;
4042 int degradation = PERFECT_RSSI - rssi_dbm;
4043
4044 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4045 * as indicator; formula is (signal dbm - noise dbm).
4046 * SNR at or above 40 is a great signal (100%).
4047 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4048 * Weakest usable signal is usually 10 - 15 dB SNR. */
4049 if (noise_dbm) {
4050 if (rssi_dbm - noise_dbm >= 40)
4051 return 100;
4052 else if (rssi_dbm < noise_dbm)
4053 return 0;
4054 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4055
4056 /* Else use just the signal level.
4057 * This formula is a least squares fit of data points collected and
4058 * compared with a reference system that had a percentage (%) display
4059 * for signal quality. */
4060 } else
4061 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4062 (15 * RSSI_RANGE + 62 * degradation)) /
4063 (RSSI_RANGE * RSSI_RANGE);
4064
4065 if (sig_qual > 100)
4066 sig_qual = 100;
4067 else if (sig_qual < 1)
4068 sig_qual = 0;
4069
4070 return sig_qual;
4071}
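
To make the two branches concrete, here is a small standalone sketch that reproduces the arithmetic above for one made-up reading per branch (the constants are copied from the #defines above): with a noise measurement, -60 dBm signal over -90 dBm noise gives 75%; without one, -65 dBm alone gives 68%.

#include <stdio.h>

#define PERFECT_RSSI (-20)	/* dBm, as above */
#define WORST_RSSI   (-95)	/* dBm, as above */
#define RSSI_RANGE   (PERFECT_RSSI - WORST_RSSI)	/* 75 */

int main(void)
{
	/* SNR branch: rssi = -60 dBm, noise = -90 dBm -> SNR 30 dB (< 40). */
	int rssi = -60, noise = -90;
	int snr_qual = ((rssi - noise) * 5) / 2;			/* 75 */

	/* Signal-only branch: rssi = -65 dBm -> degradation 45. */
	int degradation = PERFECT_RSSI - (-65);
	int rssi_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
			 (15 * RSSI_RANGE + 62 * degradation)) /
			(RSSI_RANGE * RSSI_RANGE);			/* 68 */

	printf("SNR branch: %d%%  signal-only branch: %d%%\n",
	       snr_qual, rssi_qual);
	return 0;
}
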
4072
4073/**
9fbab516 4074 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
4075 *
4076 * Uses the priv->rx_handlers callback function array to invoke
4077 * the appropriate handlers, including command responses,
4078 * frame-received notifications, and other notifications.
4079 */
c79dd5b5 4080static void iwl4965_rx_handle(struct iwl_priv *priv)
b481de9c 4081{
bb8c093b
CH
4082 struct iwl4965_rx_mem_buffer *rxb;
4083 struct iwl4965_rx_packet *pkt;
4084 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4085 u32 r, i;
4086 int reclaim;
4087 unsigned long flags;
5c0eef96 4088 u8 fill_rx = 0;
d68ab680 4089 u32 count = 8;
b481de9c 4090
6440adb5
BC
4091 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4092 * buffer that the driver may process (last buffer filled by ucode). */
bb8c093b 4093 r = iwl4965_hw_get_rx_read(priv);
b481de9c
ZY
4094 i = rxq->read;
4095
4096 /* Rx interrupt, but nothing sent from uCode */
4097 if (i == r)
4098 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4099
5c0eef96
MA
4100 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
4101 fill_rx = 1;
4102
b481de9c
ZY
4103 while (i != r) {
4104 rxb = rxq->queue[i];
4105
9fbab516 4106 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
4107 * then a bug has been introduced in the queue refilling
4108 * routines -- catch it here */
4109 BUG_ON(rxb == NULL);
4110
4111 rxq->queue[i] = NULL;
4112
4113 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
9ee1ba47 4114 priv->hw_setting.rx_buf_size,
b481de9c 4115 PCI_DMA_FROMDEVICE);
bb8c093b 4116 pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4117
4118 /* Reclaim a command buffer only if this packet is a response
4119 * to a (driver-originated) command.
4120 * If the packet (e.g. Rx frame) originated from uCode,
4121 * there is no command buffer to reclaim.
4122 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4123 * but apparently a few don't get set; catch them here. */
4124 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4125 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
857485c0 4126 (pkt->hdr.cmd != REPLY_RX) &&
cfe01709 4127 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
b481de9c
ZY
4128 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4129 (pkt->hdr.cmd != REPLY_TX);
4130
4131 /* Based on type of command response or notification,
4132 * handle those that need handling via function in
bb8c093b 4133 * rx_handlers table. See iwl4965_setup_rx_handlers() */
b481de9c
ZY
4134 if (priv->rx_handlers[pkt->hdr.cmd]) {
4135 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4136 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4137 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4138 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4139 } else {
4140 /* No handling needed */
4141 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4142 "r %d i %d No handler needed for %s, 0x%02x\n",
4143 r, i, get_cmd_string(pkt->hdr.cmd),
4144 pkt->hdr.cmd);
4145 }
4146
4147 if (reclaim) {
9fbab516 4148 /* Invoke any callbacks, transfer the skb to caller, and
857485c0 4149 * fire off the (possibly) blocking iwl_send_cmd()
b481de9c
ZY
4150 * as we reclaim the driver command queue */
4151 if (rxb && rxb->skb)
bb8c093b 4152 iwl4965_tx_cmd_complete(priv, rxb);
b481de9c
ZY
4153 else
4154 IWL_WARNING("Claim null rxb?\n");
4155 }
4156
4157 /* For now we just don't re-use anything. We can tweak this
4158 * later to try and re-use notification packets and SKBs that
4159 * fail to Rx correctly */
4160 if (rxb->skb != NULL) {
4161 priv->alloc_rxb_skb--;
4162 dev_kfree_skb_any(rxb->skb);
4163 rxb->skb = NULL;
4164 }
4165
4166 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
9ee1ba47
RR
4167 priv->hw_setting.rx_buf_size,
4168 PCI_DMA_FROMDEVICE);
b481de9c
ZY
4169 spin_lock_irqsave(&rxq->lock, flags);
4170 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4171 spin_unlock_irqrestore(&rxq->lock, flags);
4172 i = (i + 1) & RX_QUEUE_MASK;
5c0eef96
MA
4173 /* If there are a lot of unused frames,
 4174	 * restock the Rx queue so the uCode won't assert. */
4175 if (fill_rx) {
4176 count++;
4177 if (count >= 8) {
4178 priv->rxq.read = i;
4179 __iwl4965_rx_replenish(priv);
4180 count = 0;
4181 }
4182 }
b481de9c
ZY
4183 }
4184
4185 /* Backtrack one entry */
4186 priv->rxq.read = i;
bb8c093b 4187 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4188}
4189
6440adb5
BC
4190/**
4191 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4192 */
c79dd5b5 4193static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv,
bb8c093b 4194 struct iwl4965_tx_queue *txq)
b481de9c
ZY
4195{
4196 u32 reg = 0;
4197 int rc = 0;
4198 int txq_id = txq->q.id;
4199
4200 if (txq->need_update == 0)
4201 return rc;
4202
4203 /* if we're trying to save power */
4204 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4205 /* wake up nic if it's powered down ...
4206 * uCode will wake up, and interrupt us again, so next
4207 * time we'll skip this part. */
3395f6e9 4208 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4209
4210 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4211 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
3395f6e9 4212 iwl_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4213 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4214 return rc;
4215 }
4216
4217 /* restore this queue's parameters in nic hardware. */
3395f6e9 4218 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4219 if (rc)
4220 return rc;
3395f6e9 4221 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
fc4b6853 4222 txq->q.write_ptr | (txq_id << 8));
3395f6e9 4223 iwl_release_nic_access(priv);
b481de9c
ZY
4224
4225 /* else not in power-save mode, uCode will never sleep when we're
4226 * trying to tx (during RFKILL, we're not trying to tx). */
4227 } else
3395f6e9 4228 iwl_write32(priv, HBUS_TARG_WRPTR,
fc4b6853 4229 txq->q.write_ptr | (txq_id << 8));
b481de9c
ZY
4230
4231 txq->need_update = 0;
4232
4233 return rc;
4234}
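
The value written to HBUS_TARG_WRPTR above simply packs the TFD write index in the low byte with the Tx queue id above it; a one-line illustration (the helper name and sample numbers are only for this sketch):

/* Illustrative only: mirrors the expression above.
 * e.g. queue 4, TFD index 23 -> 23 | (4 << 8) = 0x0417 */
static inline unsigned int hbus_wrptr_value(unsigned int write_ptr, int txq_id)
{
	return write_ptr | (txq_id << 8);
}
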
4235
0a6857e7 4236#ifdef CONFIG_IWLWIFI_DEBUG
bb8c093b 4237static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
b481de9c 4238{
0795af57
JP
4239 DECLARE_MAC_BUF(mac);
4240
b481de9c 4241 IWL_DEBUG_RADIO("RX CONFIG:\n");
0a6857e7 4242 iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
4243 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4244 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4245 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4246 le32_to_cpu(rxon->filter_flags));
4247 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4248 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4249 rxon->ofdm_basic_rates);
4250 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
0795af57
JP
4251 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
4252 print_mac(mac, rxon->node_addr));
4253 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4254 print_mac(mac, rxon->bssid_addr));
b481de9c
ZY
4255 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4256}
4257#endif
4258
c79dd5b5 4259static void iwl4965_enable_interrupts(struct iwl_priv *priv)
b481de9c
ZY
4260{
4261 IWL_DEBUG_ISR("Enabling interrupts\n");
4262 set_bit(STATUS_INT_ENABLED, &priv->status);
3395f6e9 4263 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
4264}
4265
0359facc
MA
4266/* call this function to flush any scheduled tasklet */
4267static inline void iwl_synchronize_irq(struct iwl_priv *priv)
4268{
 4269	/* wait to make sure we flush any pending tasklet */
4270 synchronize_irq(priv->pci_dev->irq);
4271 tasklet_kill(&priv->irq_tasklet);
4272}
4273
c79dd5b5 4274static inline void iwl4965_disable_interrupts(struct iwl_priv *priv)
b481de9c
ZY
4275{
4276 clear_bit(STATUS_INT_ENABLED, &priv->status);
4277
4278 /* disable interrupts from uCode/NIC to host */
3395f6e9 4279 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
4280
4281 /* acknowledge/clear/reset any interrupts still pending
4282 * from uCode or flow handler (Rx/Tx DMA) */
3395f6e9
TW
4283 iwl_write32(priv, CSR_INT, 0xffffffff);
4284 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
4285 IWL_DEBUG_ISR("Disabled interrupts\n");
4286}
4287
4288static const char *desc_lookup(int i)
4289{
4290 switch (i) {
4291 case 1:
4292 return "FAIL";
4293 case 2:
4294 return "BAD_PARAM";
4295 case 3:
4296 return "BAD_CHECKSUM";
4297 case 4:
4298 return "NMI_INTERRUPT";
4299 case 5:
4300 return "SYSASSERT";
4301 case 6:
4302 return "FATAL_ERROR";
4303 }
4304
4305 return "UNKNOWN";
4306}
4307
4308#define ERROR_START_OFFSET (1 * sizeof(u32))
4309#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4310
c79dd5b5 4311static void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
b481de9c
ZY
4312{
4313 u32 data2, line;
4314 u32 desc, time, count, base, data1;
4315 u32 blink1, blink2, ilink1, ilink2;
4316 int rc;
4317
4318 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4319
57aab75a 4320 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
b481de9c
ZY
4321 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4322 return;
4323 }
4324
3395f6e9 4325 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4326 if (rc) {
4327 IWL_WARNING("Can not read from adapter at this time.\n");
4328 return;
4329 }
4330
3395f6e9 4331 count = iwl_read_targ_mem(priv, base);
b481de9c
ZY
4332
4333 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4334 IWL_ERROR("Start IWL Error Log Dump:\n");
2acae16e 4335 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
b481de9c
ZY
4336 }
4337
3395f6e9
TW
4338 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
4339 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
4340 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
4341 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
4342 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
4343 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
4344 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
4345 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
4346 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
b481de9c
ZY
4347
4348 IWL_ERROR("Desc Time "
4349 "data1 data2 line\n");
4350 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4351 desc_lookup(desc), desc, time, data1, data2, line);
4352 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4353 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4354 ilink1, ilink2);
4355
3395f6e9 4356 iwl_release_nic_access(priv);
b481de9c
ZY
4357}
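
For readability, the word offsets read above can be pictured as the following record starting at error_event_table_ptr. This struct is only a reader's sketch inferred from the reads in iwl4965_dump_nic_error_log(); the words at offsets 2 and 10 are not read here, and the real uCode definition may differ.

/* Sketch of the error table layout as read above (u32 as in <linux/types.h>). */
struct iwl4965_error_log_sketch {
	u32 count;	/* word  0: number of valid error entries   */
	u32 word2;	/* word  1 is read as "desc" below           */
	u32 blink1;	/* word  3 */
	u32 blink2;	/* word  4 */
	u32 ilink1;	/* word  5 */
	u32 ilink2;	/* word  6 */
	u32 data1;	/* word  7 */
	u32 data2;	/* word  8 */
	u32 line;	/* word  9: uCode source line               */
	u32 word10;	/* word 10: not read by the dump above      */
	u32 time;	/* word 11: timestamp                       */
};
/* Note: "desc" (error type, see desc_lookup()) sits at word 1 and
 * word 2 is skipped by the dump; the sketch lists only what is read. */
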
4358
4359#define EVENT_START_OFFSET (4 * sizeof(u32))
4360
4361/**
bb8c093b 4362 * iwl4965_print_event_log - Dump error event log to syslog
b481de9c 4363 *
3395f6e9 4364 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
b481de9c 4365 */
c79dd5b5 4366static void iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
b481de9c
ZY
4367 u32 num_events, u32 mode)
4368{
4369 u32 i;
4370 u32 base; /* SRAM byte address of event log header */
4371 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4372 u32 ptr; /* SRAM byte address of log data */
4373 u32 ev, time, data; /* event log data */
4374
4375 if (num_events == 0)
4376 return;
4377
4378 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4379
4380 if (mode == 0)
4381 event_size = 2 * sizeof(u32);
4382 else
4383 event_size = 3 * sizeof(u32);
4384
4385 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4386
4387 /* "time" is actually "data" for mode 0 (no timestamp).
4388 * place event id # at far right for easier visual parsing. */
4389 for (i = 0; i < num_events; i++) {
3395f6e9 4390 ev = iwl_read_targ_mem(priv, ptr);
b481de9c 4391 ptr += sizeof(u32);
3395f6e9 4392 time = iwl_read_targ_mem(priv, ptr);
b481de9c
ZY
4393 ptr += sizeof(u32);
4394 if (mode == 0)
4395 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4396 else {
3395f6e9 4397 data = iwl_read_targ_mem(priv, ptr);
b481de9c
ZY
4398 ptr += sizeof(u32);
4399 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4400 }
4401 }
4402}
4403
c79dd5b5 4404static void iwl4965_dump_nic_event_log(struct iwl_priv *priv)
b481de9c
ZY
4405{
4406 int rc;
4407 u32 base; /* SRAM byte address of event log header */
4408 u32 capacity; /* event log capacity in # entries */
4409 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4410 u32 num_wraps; /* # times uCode wrapped to top of log */
4411 u32 next_entry; /* index of next entry to be written by uCode */
4412 u32 size; /* # entries that we'll print */
4413
4414 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
57aab75a 4415 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
b481de9c
ZY
4416 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4417 return;
4418 }
4419
3395f6e9 4420 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4421 if (rc) {
4422 IWL_WARNING("Can not read from adapter at this time.\n");
4423 return;
4424 }
4425
4426 /* event log header */
3395f6e9
TW
4427 capacity = iwl_read_targ_mem(priv, base);
4428 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
4429 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
4430 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
4431
4432 size = num_wraps ? capacity : next_entry;
4433
4434 /* bail out if nothing in log */
4435 if (size == 0) {
583fab37 4436 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
3395f6e9 4437 iwl_release_nic_access(priv);
b481de9c
ZY
4438 return;
4439 }
4440
583fab37 4441 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
4442 size, num_wraps);
4443
4444 /* if uCode has wrapped back to top of log, start at the oldest entry,
 4445	 * i.e. the next one that uCode would fill. */
4446 if (num_wraps)
bb8c093b 4447 iwl4965_print_event_log(priv, next_entry,
b481de9c
ZY
4448 capacity - next_entry, mode);
4449
4450 /* (then/else) start at top of log */
bb8c093b 4451 iwl4965_print_event_log(priv, 0, next_entry, mode);
b481de9c 4452
3395f6e9 4453 iwl_release_nic_access(priv);
b481de9c
ZY
4454}
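
Likewise, the event log reads above imply a four-word header followed by fixed-size entries starting EVENT_START_OFFSET bytes past log_event_table_ptr; entries are 2 u32s in mode 0 and 3 u32s in mode 1. Again, this is only a sketch inferred from the code, not an authoritative firmware layout.

/* Sketch of the event log layout as read above (u32 as in <linux/types.h>). */
struct iwl4965_event_log_hdr_sketch {
	u32 capacity;	/* word 0: max number of entries             */
	u32 mode;	/* word 1: 0 = no timestamp, 1 = timestamp    */
	u32 num_wraps;	/* word 2: times the log wrapped to the top   */
	u32 next_entry;	/* word 3: index uCode will write next        */
};

struct iwl4965_event_sketch {
	u32 ev;		/* event id                                     */
	u32 time;	/* timestamp, or event data when mode == 0      */
	u32 data;	/* extra data word, present only when mode == 1 */
};
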
4455
4456/**
bb8c093b 4457 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 4458 */
c79dd5b5 4459static void iwl4965_irq_handle_error(struct iwl_priv *priv)
b481de9c 4460{
bb8c093b 4461 /* Set the FW error flag -- cleared on iwl4965_down */
b481de9c
ZY
4462 set_bit(STATUS_FW_ERROR, &priv->status);
4463
4464 /* Cancel currently queued command. */
4465 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4466
0a6857e7
TW
4467#ifdef CONFIG_IWLWIFI_DEBUG
4468 if (iwl_debug_level & IWL_DL_FW_ERRORS) {
bb8c093b
CH
4469 iwl4965_dump_nic_error_log(priv);
4470 iwl4965_dump_nic_event_log(priv);
4471 iwl4965_print_rx_config_cmd(&priv->staging_rxon);
b481de9c
ZY
4472 }
4473#endif
4474
4475 wake_up_interruptible(&priv->wait_command_queue);
4476
4477 /* Keep the restart process from trying to send host
4478 * commands by clearing the INIT status bit */
4479 clear_bit(STATUS_READY, &priv->status);
4480
4481 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4482 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4483 "Restarting adapter due to uCode error.\n");
4484
3109ece1 4485 if (iwl_is_associated(priv)) {
b481de9c
ZY
4486 memcpy(&priv->recovery_rxon, &priv->active_rxon,
4487 sizeof(priv->recovery_rxon));
4488 priv->error_recovering = 1;
4489 }
4490 queue_work(priv->workqueue, &priv->restart);
4491 }
4492}
4493
c79dd5b5 4494static void iwl4965_error_recovery(struct iwl_priv *priv)
b481de9c
ZY
4495{
4496 unsigned long flags;
4497
4498 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
4499 sizeof(priv->staging_rxon));
4500 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 4501 iwl4965_commit_rxon(priv);
b481de9c 4502
bb8c093b 4503 iwl4965_rxon_add_station(priv, priv->bssid, 1);
b481de9c
ZY
4504
4505 spin_lock_irqsave(&priv->lock, flags);
4506 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
4507 priv->error_recovering = 0;
4508 spin_unlock_irqrestore(&priv->lock, flags);
4509}
4510
c79dd5b5 4511static void iwl4965_irq_tasklet(struct iwl_priv *priv)
b481de9c
ZY
4512{
4513 u32 inta, handled = 0;
4514 u32 inta_fh;
4515 unsigned long flags;
0a6857e7 4516#ifdef CONFIG_IWLWIFI_DEBUG
b481de9c
ZY
4517 u32 inta_mask;
4518#endif
4519
4520 spin_lock_irqsave(&priv->lock, flags);
4521
4522 /* Ack/clear/reset pending uCode interrupts.
4523 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4524 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
3395f6e9
TW
4525 inta = iwl_read32(priv, CSR_INT);
4526 iwl_write32(priv, CSR_INT, inta);
b481de9c
ZY
4527
4528 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4529 * Any new interrupts that happen after this, either while we're
4530 * in this tasklet, or later, will show up in next ISR/tasklet. */
3395f6e9
TW
4531 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4532 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 4533
0a6857e7
TW
4534#ifdef CONFIG_IWLWIFI_DEBUG
4535 if (iwl_debug_level & IWL_DL_ISR) {
9fbab516 4536 /* just for debug */
3395f6e9 4537 inta_mask = iwl_read32(priv, CSR_INT_MASK);
b481de9c
ZY
4538 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4539 inta, inta_mask, inta_fh);
4540 }
4541#endif
4542
4543 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
4544 * atomic, make sure that inta covers all the interrupts that
4545 * we've discovered, even if FH interrupt came in just after
4546 * reading CSR_INT. */
6f83eaa1 4547 if (inta_fh & CSR49_FH_INT_RX_MASK)
b481de9c 4548 inta |= CSR_INT_BIT_FH_RX;
6f83eaa1 4549 if (inta_fh & CSR49_FH_INT_TX_MASK)
b481de9c
ZY
4550 inta |= CSR_INT_BIT_FH_TX;
4551
4552 /* Now service all interrupt bits discovered above. */
4553 if (inta & CSR_INT_BIT_HW_ERR) {
4554 IWL_ERROR("Microcode HW error detected. Restarting.\n");
4555
4556 /* Tell the device to stop sending interrupts */
bb8c093b 4557 iwl4965_disable_interrupts(priv);
b481de9c 4558
bb8c093b 4559 iwl4965_irq_handle_error(priv);
b481de9c
ZY
4560
4561 handled |= CSR_INT_BIT_HW_ERR;
4562
4563 spin_unlock_irqrestore(&priv->lock, flags);
4564
4565 return;
4566 }
4567
0a6857e7
TW
4568#ifdef CONFIG_IWLWIFI_DEBUG
4569 if (iwl_debug_level & (IWL_DL_ISR)) {
b481de9c 4570 /* NIC fires this, but we don't use it, redundant with WAKEUP */
25c03d8e
JP
4571 if (inta & CSR_INT_BIT_SCD)
4572 IWL_DEBUG_ISR("Scheduler finished to transmit "
4573 "the frame/frames.\n");
b481de9c
ZY
4574
4575 /* Alive notification via Rx interrupt will do the real work */
4576 if (inta & CSR_INT_BIT_ALIVE)
4577 IWL_DEBUG_ISR("Alive interrupt\n");
4578 }
4579#endif
4580 /* Safely ignore these bits for debug checks below */
25c03d8e 4581 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
b481de9c 4582
9fbab516 4583 /* HW RF KILL switch toggled */
b481de9c
ZY
4584 if (inta & CSR_INT_BIT_RF_KILL) {
4585 int hw_rf_kill = 0;
3395f6e9 4586 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
4587 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4588 hw_rf_kill = 1;
4589
4590 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
4591 "RF_KILL bit toggled to %s.\n",
4592 hw_rf_kill ? "disable radio":"enable radio");
4593
4594 /* Queue restart only if RF_KILL switch was set to "kill"
4595 * when we loaded driver, and is now set to "enable".
4596 * After we're Alive, RF_KILL gets handled by
3230455d 4597 * iwl4965_rx_card_state_notif() */
53e49093
ZY
4598 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
4599 clear_bit(STATUS_RF_KILL_HW, &priv->status);
b481de9c 4600 queue_work(priv->workqueue, &priv->restart);
53e49093 4601 }
b481de9c
ZY
4602
4603 handled |= CSR_INT_BIT_RF_KILL;
4604 }
4605
9fbab516 4606 /* Chip got too hot and stopped itself */
b481de9c
ZY
4607 if (inta & CSR_INT_BIT_CT_KILL) {
4608 IWL_ERROR("Microcode CT kill error detected.\n");
4609 handled |= CSR_INT_BIT_CT_KILL;
4610 }
4611
4612 /* Error detected by uCode */
4613 if (inta & CSR_INT_BIT_SW_ERR) {
4614 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
4615 inta);
bb8c093b 4616 iwl4965_irq_handle_error(priv);
b481de9c
ZY
4617 handled |= CSR_INT_BIT_SW_ERR;
4618 }
4619
4620 /* uCode wakes up after power-down sleep */
4621 if (inta & CSR_INT_BIT_WAKEUP) {
4622 IWL_DEBUG_ISR("Wakeup interrupt\n");
bb8c093b
CH
4623 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq);
4624 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]);
4625 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]);
4626 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]);
4627 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]);
4628 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]);
4629 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
4630
4631 handled |= CSR_INT_BIT_WAKEUP;
4632 }
4633
4634 /* All uCode command responses, including Tx command responses,
4635 * Rx "responses" (frame-received notification), and other
4636 * notifications from uCode come through here*/
4637 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 4638 iwl4965_rx_handle(priv);
b481de9c
ZY
4639 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4640 }
4641
4642 if (inta & CSR_INT_BIT_FH_TX) {
4643 IWL_DEBUG_ISR("Tx interrupt\n");
4644 handled |= CSR_INT_BIT_FH_TX;
4645 }
4646
4647 if (inta & ~handled)
4648 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
4649
4650 if (inta & ~CSR_INI_SET_MASK) {
4651 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
4652 inta & ~CSR_INI_SET_MASK);
4653 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
4654 }
4655
4656 /* Re-enable all interrupts */
0359facc
MA
 4657	/* only re-enable if disabled by irq */
4658 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4659 iwl4965_enable_interrupts(priv);
b481de9c 4660
0a6857e7
TW
4661#ifdef CONFIG_IWLWIFI_DEBUG
4662 if (iwl_debug_level & (IWL_DL_ISR)) {
3395f6e9
TW
4663 inta = iwl_read32(priv, CSR_INT);
4664 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4665 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
4666 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
4667 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4668 }
4669#endif
4670 spin_unlock_irqrestore(&priv->lock, flags);
4671}
4672
bb8c093b 4673static irqreturn_t iwl4965_isr(int irq, void *data)
b481de9c 4674{
c79dd5b5 4675 struct iwl_priv *priv = data;
b481de9c
ZY
4676 u32 inta, inta_mask;
4677 u32 inta_fh;
4678 if (!priv)
4679 return IRQ_NONE;
4680
4681 spin_lock(&priv->lock);
4682
4683 /* Disable (but don't clear!) interrupts here to avoid
4684 * back-to-back ISRs and sporadic interrupts from our NIC.
4685 * If we have something to service, the tasklet will re-enable ints.
4686 * If we *don't* have something, we'll re-enable before leaving here. */
3395f6e9
TW
4687 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
4688 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
4689
4690 /* Discover which interrupts are active/pending */
3395f6e9
TW
4691 inta = iwl_read32(priv, CSR_INT);
4692 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
4693
4694 /* Ignore interrupt if there's nothing in NIC to service.
4695 * This may be due to IRQ shared with another device,
4696 * or due to sporadic interrupts thrown from our NIC. */
4697 if (!inta && !inta_fh) {
4698 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
4699 goto none;
4700 }
4701
4702 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
66fbb541
ON
4703 /* Hardware disappeared. It might have already raised
4704 * an interrupt */
b481de9c 4705 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
66fbb541 4706 goto unplugged;
b481de9c
ZY
4707 }
4708
4709 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4710 inta, inta_mask, inta_fh);
4711
25c03d8e
JP
4712 inta &= ~CSR_INT_BIT_SCD;
4713
bb8c093b 4714 /* iwl4965_irq_tasklet() will service interrupts and re-enable them */
25c03d8e
JP
4715 if (likely(inta || inta_fh))
4716 tasklet_schedule(&priv->irq_tasklet);
b481de9c 4717
66fbb541
ON
4718 unplugged:
4719 spin_unlock(&priv->lock);
b481de9c
ZY
4720 return IRQ_HANDLED;
4721
4722 none:
4723 /* re-enable interrupts here since we don't have anything to service. */
0359facc
MA
 4724	/* only re-enable if disabled by irq */
4725 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4726 iwl4965_enable_interrupts(priv);
b481de9c
ZY
4727 spin_unlock(&priv->lock);
4728 return IRQ_NONE;
4729}
4730
b481de9c
ZY
4731/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
4732 * sending probe req. This should be set long enough to hear probe responses
4733 * from more than one AP. */
4734#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
4735#define IWL_ACTIVE_DWELL_TIME_52 (10)
4736
4737/* For faster active scanning, scan will move to the next channel if fewer than
4738 * PLCP_QUIET_THRESH packets are heard on this channel within
4739 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
4740 * time if it's a quiet channel (nothing responded to our probe, and there's
4741 * no other traffic).
4742 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
4743#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
4744#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
4745
4746/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
4747 * Must be set longer than active dwell time.
4748 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
4749#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
4750#define IWL_PASSIVE_DWELL_TIME_52 (10)
4751#define IWL_PASSIVE_DWELL_BASE (100)
4752#define IWL_CHANNEL_TUNE_TIME 5
4753
c79dd5b5 4754static inline u16 iwl4965_get_active_dwell_time(struct iwl_priv *priv,
8318d78a 4755 enum ieee80211_band band)
b481de9c 4756{
8318d78a 4757 if (band == IEEE80211_BAND_5GHZ)
b481de9c
ZY
4758 return IWL_ACTIVE_DWELL_TIME_52;
4759 else
4760 return IWL_ACTIVE_DWELL_TIME_24;
4761}
4762
c79dd5b5 4763static u16 iwl4965_get_passive_dwell_time(struct iwl_priv *priv,
8318d78a 4764 enum ieee80211_band band)
b481de9c 4765{
8318d78a
JB
4766 u16 active = iwl4965_get_active_dwell_time(priv, band);
4767 u16 passive = (band != IEEE80211_BAND_5GHZ) ?
b481de9c
ZY
4768 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4769 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
4770
3109ece1 4771 if (iwl_is_associated(priv)) {
b481de9c
ZY
4772 /* If we're associated, we clamp the maximum passive
4773 * dwell time to be 98% of the beacon interval (minus
4774 * 2 * channel tune time) */
4775 passive = priv->beacon_int;
4776 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
4777 passive = IWL_PASSIVE_DWELL_BASE;
4778 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4779 }
4780
4781 if (passive <= active)
4782 passive = active + 1;
4783
4784 return passive;
4785}
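
Worked example of the clamping above, assuming a typical 100 ms beacon interval while associated: 98% of 100 ms minus two channel-tune times gives an 88 ms passive dwell. A standalone sketch of that arithmetic:

#include <stdio.h>

#define IWL_PASSIVE_DWELL_BASE (100)	/* msec, as above */
#define IWL_CHANNEL_TUNE_TIME  5

int main(void)
{
	unsigned int passive = 100;	/* assumed beacon interval, msec */

	if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
		passive = IWL_PASSIVE_DWELL_BASE;
	passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;

	printf("passive dwell while associated: %u ms\n", passive);	/* 88 */
	return 0;
}
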
4786
c79dd5b5 4787static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
8318d78a 4788 enum ieee80211_band band,
b481de9c 4789 u8 is_active, u8 direct_mask,
bb8c093b 4790 struct iwl4965_scan_channel *scan_ch)
b481de9c
ZY
4791{
4792 const struct ieee80211_channel *channels = NULL;
8318d78a 4793 const struct ieee80211_supported_band *sband;
bf85ea4f 4794 const struct iwl_channel_info *ch_info;
b481de9c
ZY
4795 u16 passive_dwell = 0;
4796 u16 active_dwell = 0;
4797 int added, i;
4798
8318d78a
JB
4799 sband = iwl4965_get_hw_mode(priv, band);
4800 if (!sband)
b481de9c
ZY
4801 return 0;
4802
8318d78a 4803 channels = sband->channels;
b481de9c 4804
8318d78a
JB
4805 active_dwell = iwl4965_get_active_dwell_time(priv, band);
4806 passive_dwell = iwl4965_get_passive_dwell_time(priv, band);
b481de9c 4807
8318d78a 4808 for (i = 0, added = 0; i < sband->n_channels; i++) {
182e2e66
JB
4809 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4810 continue;
4811
8318d78a 4812 if (ieee80211_frequency_to_channel(channels[i].center_freq) ==
b481de9c 4813 le16_to_cpu(priv->active_rxon.channel)) {
3109ece1 4814 if (iwl_is_associated(priv)) {
b481de9c
ZY
4815 IWL_DEBUG_SCAN
4816 ("Skipping current channel %d\n",
4817 le16_to_cpu(priv->active_rxon.channel));
4818 continue;
4819 }
4820 } else if (priv->only_active_channel)
4821 continue;
4822
8318d78a 4823 scan_ch->channel = ieee80211_frequency_to_channel(channels[i].center_freq);
b481de9c 4824
8622e705 4825 ch_info = iwl_get_channel_info(priv, band,
9fbab516 4826 scan_ch->channel);
b481de9c
ZY
4827 if (!is_channel_valid(ch_info)) {
4828 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
4829 scan_ch->channel);
4830 continue;
4831 }
4832
4833 if (!is_active || is_channel_passive(ch_info) ||
8318d78a 4834 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
b481de9c
ZY
4835 scan_ch->type = 0; /* passive */
4836 else
4837 scan_ch->type = 1; /* active */
4838
4839 if (scan_ch->type & 1)
4840 scan_ch->type |= (direct_mask << 1);
4841
4842 if (is_channel_narrow(ch_info))
4843 scan_ch->type |= (1 << 7);
4844
4845 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4846 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4847
9fbab516 4848 /* Set txpower levels to defaults */
b481de9c
ZY
4849 scan_ch->tpc.dsp_atten = 110;
4850 /* scan_pwr_info->tpc.dsp_atten; */
4851
4852 /*scan_pwr_info->tpc.tx_gain; */
8318d78a 4853 if (band == IEEE80211_BAND_5GHZ)
b481de9c
ZY
4854 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
4855 else {
4856 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
4857 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516 4858 * power level:
8a1b0245 4859 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
b481de9c
ZY
4860 */
4861 }
4862
4863 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
4864 scan_ch->channel,
4865 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
4866 (scan_ch->type & 1) ?
4867 active_dwell : passive_dwell);
4868
4869 scan_ch++;
4870 added++;
4871 }
4872
 4873 IWL_DEBUG_SCAN("total channels to scan %d\n", added);
4874 return added;
4875}
4876
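/*
 * Illustrative sketch (not driver code) of the scan channel "type" bits
 * assembled in the loop above: bit 0 selects active vs. passive, the bits
 * above it carry the direct-probe SSID mask, and bit 7 marks a narrow
 * channel.  The example_ name and the u8 width are assumptions for clarity.
 */
static inline u8 example_scan_channel_type(int active, u8 direct_mask,
					   int narrow)
{
	u8 type = active ? 1 : 0;		/* bit 0: active scan */

	if (type & 1)
		type |= direct_mask << 1;	/* direct-probe SSID bits */
	if (narrow)
		type |= 1 << 7;			/* narrow-channel flag */

	return type;
}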
c79dd5b5 4877static void iwl4965_init_hw_rates(struct iwl_priv *priv,
b481de9c
ZY
4878 struct ieee80211_rate *rates)
4879{
4880 int i;
4881
4882 for (i = 0; i < IWL_RATE_COUNT; i++) {
8318d78a
JB
4883 rates[i].bitrate = iwl4965_rates[i].ieee * 5;
4884 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4885 rates[i].hw_value_short = i;
4886 rates[i].flags = 0;
4887 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
b481de9c 4888 /*
8318d78a 4889 * If CCK != 1M then set short preamble rate flag.
b481de9c 4890 */
35cdeaf4
TW
4891 rates[i].flags |=
4892 (iwl4965_rates[i].plcp == IWL_RATE_1M_PLCP) ?
4893 0 : IEEE80211_RATE_SHORT_PREAMBLE;
b481de9c 4894 }
b481de9c 4895 }
b481de9c
ZY
4896}
4897
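/*
 * Unit note (illustrative, not driver code): the .ieee field of
 * iwl4965_rates[] is the rate in IEEE 500 kbps units (so 1 Mbps = 2), while
 * mac80211 expects ieee80211_rate.bitrate in 100 kbps units, hence the
 * "* 5" above.  The example_ helper is an assumption, not a driver symbol.
 */
static inline int example_ieee_to_bitrate(int ieee_rate_500kbps)
{
	/* e.g. 1 Mbps CCK: 2 * 5 = 10; 54 Mbps OFDM: 108 * 5 = 540 */
	return ieee_rate_500kbps * 5;
}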
4898/**
bb8c093b 4899 * iwl4965_init_geos - Initialize mac80211's geo/channel info based on EEPROM data
b481de9c 4900 */
bf85ea4f 4901int iwl4965_init_geos(struct iwl_priv *priv)
b481de9c 4902{
bf85ea4f 4903 struct iwl_channel_info *ch;
8211ef78 4904 struct ieee80211_supported_band *sband;
b481de9c
ZY
4905 struct ieee80211_channel *channels;
4906 struct ieee80211_channel *geo_ch;
4907 struct ieee80211_rate *rates;
4908 int i = 0;
b481de9c 4909
8318d78a
JB
4910 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4911 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
b481de9c
ZY
4912 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4913 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4914 return 0;
4915 }
4916
b481de9c
ZY
4917 channels = kzalloc(sizeof(struct ieee80211_channel) *
4918 priv->channel_count, GFP_KERNEL);
8318d78a 4919 if (!channels)
b481de9c 4920 return -ENOMEM;
b481de9c 4921
8211ef78 4922 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
b481de9c
ZY
4923 GFP_KERNEL);
4924 if (!rates) {
b481de9c
ZY
4925 kfree(channels);
4926 return -ENOMEM;
4927 }
4928
b481de9c 4929 /* 5.2GHz channels start after the 2.4GHz channels */
8211ef78 4930 sband = &priv->bands[IEEE80211_BAND_5GHZ];
bf85ea4f 4931 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
8211ef78
TW
4932 /* just OFDM */
4933 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4934 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
8318d78a 4935
1ea87396 4936 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);
78330fdd 4937
8211ef78
TW
4938 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4939 sband->channels = channels;
4940 /* OFDM & CCK */
4941 sband->bitrates = rates;
4942 sband->n_bitrates = IWL_RATE_COUNT;
b481de9c 4943
1ea87396 4944 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);
78330fdd 4945
b481de9c
ZY
4946 priv->ieee_channels = channels;
4947 priv->ieee_rates = rates;
4948
bb8c093b 4949 iwl4965_init_hw_rates(priv, rates);
b481de9c 4950
8211ef78 4951 for (i = 0; i < priv->channel_count; i++) {
b481de9c
ZY
4952 ch = &priv->channel_info[i];
4953
8211ef78
TW
4954 /* FIXME: might be removed if scan is OK */
4955 if (!is_channel_valid(ch))
b481de9c 4956 continue;
b481de9c 4957
8211ef78
TW
4958 if (is_channel_a_band(ch))
4959 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4960 else
4961 sband = &priv->bands[IEEE80211_BAND_2GHZ];
b481de9c 4962
8211ef78
TW
4963 geo_ch = &sband->channels[sband->n_channels++];
4964
4965 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
8318d78a
JB
4966 geo_ch->max_power = ch->max_power_avg;
4967 geo_ch->max_antenna_gain = 0xff;
7b72304d 4968 geo_ch->hw_value = ch->channel;
b481de9c
ZY
4969
4970 if (is_channel_valid(ch)) {
8318d78a
JB
4971 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4972 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
b481de9c 4973
8318d78a
JB
4974 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4975 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
b481de9c
ZY
4976
4977 if (ch->flags & EEPROM_CHANNEL_RADAR)
8318d78a 4978 geo_ch->flags |= IEEE80211_CHAN_RADAR;
b481de9c
ZY
4979
4980 if (ch->max_power_avg > priv->max_channel_txpower_limit)
4981 priv->max_channel_txpower_limit =
4982 ch->max_power_avg;
8211ef78 4983 } else {
8318d78a 4984 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
8211ef78
TW
4985 }
4986
4987 /* Save flags for reg domain usage */
4988 geo_ch->orig_flags = geo_ch->flags;
4989
4990 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4991 ch->channel, geo_ch->center_freq,
4992 is_channel_a_band(ch) ? "5.2" : "2.4",
4993 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4994 "restricted" : "valid",
4995 geo_ch->flags);
b481de9c
ZY
4996 }
4997
82b9a121
TW
4998 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4999 priv->cfg->sku & IWL_SKU_A) {
b481de9c
ZY
5000 printk(KERN_INFO DRV_NAME
5001 ": Incorrectly detected BG card as ABG. Please send "
5002 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5003 priv->pci_dev->device, priv->pci_dev->subsystem_device);
82b9a121 5004 priv->cfg->sku &= ~IWL_SKU_A;
b481de9c
ZY
5005 }
5006
5007 printk(KERN_INFO DRV_NAME
5008 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
8318d78a
JB
5009 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
5010 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
b481de9c 5011
e0e0a67e
JL
5012 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
5013 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5014 &priv->bands[IEEE80211_BAND_2GHZ];
5015 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
5016 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5017 &priv->bands[IEEE80211_BAND_5GHZ];
b481de9c 5018
b481de9c
ZY
5019 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5020
5021 return 0;
5022}
5023
849e0dce
RC
5024/*
5025 * iwl4965_free_geos - undo allocations in iwl4965_init_geos
5026 */
bf85ea4f 5027void iwl4965_free_geos(struct iwl_priv *priv)
849e0dce 5028{
849e0dce
RC
5029 kfree(priv->ieee_channels);
5030 kfree(priv->ieee_rates);
5031 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
5032}
5033
b481de9c
ZY
5034/******************************************************************************
5035 *
5036 * uCode download functions
5037 *
5038 ******************************************************************************/
5039
c79dd5b5 5040static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
b481de9c 5041{
98c92211
TW
5042 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
5043 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
5044 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
5045 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
5046 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
5047 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c
ZY
5048}
5049
5050/**
bb8c093b 5051 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
5052 * looking at all data.
5053 */
c79dd5b5 5054static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
9fbab516 5055 u32 len)
b481de9c
ZY
5056{
5057 u32 val;
5058 u32 save_len = len;
5059 int rc = 0;
5060 u32 errcnt;
5061
5062 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5063
3395f6e9 5064 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
5065 if (rc)
5066 return rc;
5067
3395f6e9 5068 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
b481de9c
ZY
5069
5070 errcnt = 0;
5071 for (; len > 0; len -= sizeof(u32), image++) {
5072 /* read data comes through single port, auto-incr addr */
5073 /* NOTE: Use the debugless read so we don't flood kernel log
5074 * if IWL_DL_IO is set */
3395f6e9 5075 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
5076 if (val != le32_to_cpu(*image)) {
5077 IWL_ERROR("uCode INST section is invalid at "
5078 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5079 save_len - len, val, le32_to_cpu(*image));
5080 rc = -EIO;
5081 errcnt++;
5082 if (errcnt >= 20)
5083 break;
5084 }
5085 }
5086
3395f6e9 5087 iwl_release_nic_access(priv);
b481de9c
ZY
5088
5089 if (!errcnt)
5090 IWL_DEBUG_INFO
5091 ("ucode image in INSTRUCTION memory is good\n");
5092
5093 return rc;
5094}
5095
5096
5097/**
bb8c093b 5098 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
5099 * using sample data 100 bytes apart. If these sample points are good,
5100 * it's a pretty good bet that everything between them is good, too.
5101 */
c79dd5b5 5102static int iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
5103{
5104 u32 val;
5105 int rc = 0;
5106 u32 errcnt = 0;
5107 u32 i;
5108
5109 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5110
3395f6e9 5111 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
5112 if (rc)
5113 return rc;
5114
5115 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
5116 /* read data comes through single port, auto-incr addr */
5117 /* NOTE: Use the debugless read so we don't flood kernel log
5118 * if IWL_DL_IO is set */
3395f6e9 5119 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
b481de9c 5120 i + RTC_INST_LOWER_BOUND);
3395f6e9 5121 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
5122 if (val != le32_to_cpu(*image)) {
5123#if 0 /* Enable this if you want to see details */
5124 IWL_ERROR("uCode INST section is invalid at "
5125 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5126 i, val, *image);
5127#endif
5128 rc = -EIO;
5129 errcnt++;
5130 if (errcnt >= 3)
5131 break;
5132 }
5133 }
5134
3395f6e9 5135 iwl_release_nic_access(priv);
b481de9c
ZY
5136
5137 return rc;
5138}
5139
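/*
 * Note on the trade-off above (illustrative figures): sampling one 32-bit
 * word every 100 bytes means a hypothetical 96 KB instruction image is
 * checked with roughly 983 reads instead of the ~24576 word compares a full
 * verification would need, at the cost of only spot coverage.
 */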
5140
5141/**
bb8c093b 5142 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
5143 * and verify its contents
5144 */
c79dd5b5 5145static int iwl4965_verify_ucode(struct iwl_priv *priv)
b481de9c
ZY
5146{
5147 __le32 *image;
5148 u32 len;
5149 int rc = 0;
5150
5151 /* Try bootstrap */
5152 image = (__le32 *)priv->ucode_boot.v_addr;
5153 len = priv->ucode_boot.len;
bb8c093b 5154 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
5155 if (rc == 0) {
5156 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
5157 return 0;
5158 }
5159
5160 /* Try initialize */
5161 image = (__le32 *)priv->ucode_init.v_addr;
5162 len = priv->ucode_init.len;
bb8c093b 5163 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
5164 if (rc == 0) {
5165 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
5166 return 0;
5167 }
5168
5169 /* Try runtime/protocol */
5170 image = (__le32 *)priv->ucode_code.v_addr;
5171 len = priv->ucode_code.len;
bb8c093b 5172 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
5173 if (rc == 0) {
5174 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
5175 return 0;
5176 }
5177
5178 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5179
9fbab516
BC
5180 /* Since nothing seems to match, show first several data entries in
5181 * instruction SRAM, so maybe visual inspection will give a clue.
5182 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
5183 image = (__le32 *)priv->ucode_boot.v_addr;
5184 len = priv->ucode_boot.len;
bb8c093b 5185 rc = iwl4965_verify_inst_full(priv, image, len);
b481de9c
ZY
5186
5187 return rc;
5188}
5189
c79dd5b5 5190static void iwl4965_nic_start(struct iwl_priv *priv)
b481de9c
ZY
5191{
5192 /* Remove all resets to allow NIC to operate */
3395f6e9 5193 iwl_write32(priv, CSR_RESET, 0);
b481de9c
ZY
5194}
5195
90e759d1 5196
b481de9c 5197/**
bb8c093b 5198 * iwl4965_read_ucode - Read uCode images from disk file.
b481de9c
ZY
5199 *
5200 * Copy into buffers for card to fetch via bus-mastering
5201 */
c79dd5b5 5202static int iwl4965_read_ucode(struct iwl_priv *priv)
b481de9c 5203{
bb8c093b 5204 struct iwl4965_ucode *ucode;
90e759d1 5205 int ret;
b481de9c 5206 const struct firmware *ucode_raw;
4bf775cd 5207 const char *name = priv->cfg->fw_name;
b481de9c
ZY
5208 u8 *src;
5209 size_t len;
5210 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
5211
5212 /* Ask kernel firmware_class module to get the boot firmware off disk.
5213 * request_firmware() is synchronous, file is in memory on return. */
90e759d1
TW
5214 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
5215 if (ret < 0) {
5216 IWL_ERROR("%s firmware file req failed: Reason %d\n",
5217 name, ret);
b481de9c
ZY
5218 goto error;
5219 }
5220
5221 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
5222 name, ucode_raw->size);
5223
5224 /* Make sure that we got at least our header! */
5225 if (ucode_raw->size < sizeof(*ucode)) {
5226 IWL_ERROR("File size way too small!\n");
90e759d1 5227 ret = -EINVAL;
b481de9c
ZY
5228 goto err_release;
5229 }
5230
5231 /* Data from ucode file: header followed by uCode images */
5232 ucode = (void *)ucode_raw->data;
5233
5234 ver = le32_to_cpu(ucode->ver);
5235 inst_size = le32_to_cpu(ucode->inst_size);
5236 data_size = le32_to_cpu(ucode->data_size);
5237 init_size = le32_to_cpu(ucode->init_size);
5238 init_data_size = le32_to_cpu(ucode->init_data_size);
5239 boot_size = le32_to_cpu(ucode->boot_size);
5240
5241 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
5242 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
5243 inst_size);
5244 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
5245 data_size);
5246 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
5247 init_size);
5248 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
5249 init_data_size);
5250 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
5251 boot_size);
5252
5253 /* Verify size of file vs. image size info in file's header */
5254 if (ucode_raw->size < sizeof(*ucode) +
5255 inst_size + data_size + init_size +
5256 init_data_size + boot_size) {
5257
5258 IWL_DEBUG_INFO("uCode file size %d too small\n",
5259 (int)ucode_raw->size);
90e759d1 5260 ret = -EINVAL;
b481de9c
ZY
5261 goto err_release;
5262 }
5263
5264 /* Verify that uCode images will fit in card's SRAM */
5265 if (inst_size > IWL_MAX_INST_SIZE) {
90e759d1
TW
5266 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
5267 inst_size);
5268 ret = -EINVAL;
b481de9c
ZY
5269 goto err_release;
5270 }
5271
5272 if (data_size > IWL_MAX_DATA_SIZE) {
90e759d1
TW
5273 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
5274 data_size);
5275 ret = -EINVAL;
b481de9c
ZY
5276 goto err_release;
5277 }
5278 if (init_size > IWL_MAX_INST_SIZE) {
5279 IWL_DEBUG_INFO
90e759d1
TW
5280 ("uCode init instr len %d too large to fit in\n",
5281 init_size);
5282 ret = -EINVAL;
b481de9c
ZY
5283 goto err_release;
5284 }
5285 if (init_data_size > IWL_MAX_DATA_SIZE) {
5286 IWL_DEBUG_INFO
90e759d1
TW
5287 ("uCode init data len %d too large to fit in\n",
5288 init_data_size);
5289 ret = -EINVAL;
b481de9c
ZY
5290 goto err_release;
5291 }
5292 if (boot_size > IWL_MAX_BSM_SIZE) {
5293 IWL_DEBUG_INFO
90e759d1
TW
5294 ("uCode boot instr len %d too large to fit in\n",
5295 boot_size);
5296 ret = -EINVAL;
b481de9c
ZY
5297 goto err_release;
5298 }
5299
5300 /* Allocate ucode buffers for card's bus-master loading ... */
5301
5302 /* Runtime instructions and 2 copies of data:
5303 * 1) unmodified from disk
5304 * 2) backup cache for save/restore during power-downs */
5305 priv->ucode_code.len = inst_size;
98c92211 5306 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
5307
5308 priv->ucode_data.len = data_size;
98c92211 5309 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
5310
5311 priv->ucode_data_backup.len = data_size;
98c92211 5312 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c
ZY
5313
5314 /* Initialization instructions and data */
90e759d1
TW
5315 if (init_size && init_data_size) {
5316 priv->ucode_init.len = init_size;
98c92211 5317 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
90e759d1
TW
5318
5319 priv->ucode_init_data.len = init_data_size;
98c92211 5320 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
90e759d1
TW
5321
5322 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
5323 goto err_pci_alloc;
5324 }
b481de9c
ZY
5325
5326 /* Bootstrap (instructions only, no data) */
90e759d1
TW
5327 if (boot_size) {
5328 priv->ucode_boot.len = boot_size;
98c92211 5329 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 5330
90e759d1
TW
5331 if (!priv->ucode_boot.v_addr)
5332 goto err_pci_alloc;
5333 }
b481de9c
ZY
5334
5335 /* Copy images into buffers for card's bus-master reads ... */
5336
5337 /* Runtime instructions (first block of data in file) */
5338 src = &ucode->data[0];
5339 len = priv->ucode_code.len;
90e759d1 5340 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
5341 memcpy(priv->ucode_code.v_addr, src, len);
5342 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
5343 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
5344
5345 /* Runtime data (2nd block)
bb8c093b 5346 * NOTE: Copy into backup buffer will be done in iwl4965_up() */
b481de9c
ZY
5347 src = &ucode->data[inst_size];
5348 len = priv->ucode_data.len;
90e759d1 5349 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
5350 memcpy(priv->ucode_data.v_addr, src, len);
5351 memcpy(priv->ucode_data_backup.v_addr, src, len);
5352
5353 /* Initialization instructions (3rd block) */
5354 if (init_size) {
5355 src = &ucode->data[inst_size + data_size];
5356 len = priv->ucode_init.len;
90e759d1
TW
5357 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
5358 len);
b481de9c
ZY
5359 memcpy(priv->ucode_init.v_addr, src, len);
5360 }
5361
5362 /* Initialization data (4th block) */
5363 if (init_data_size) {
5364 src = &ucode->data[inst_size + data_size + init_size];
5365 len = priv->ucode_init_data.len;
90e759d1
TW
5366 IWL_DEBUG_INFO("Copying (but not loading) init data len %Zd\n",
5367 len);
b481de9c
ZY
5368 memcpy(priv->ucode_init_data.v_addr, src, len);
5369 }
5370
5371 /* Bootstrap instructions (5th block) */
5372 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
5373 len = priv->ucode_boot.len;
90e759d1 5374 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %Zd\n", len);
b481de9c
ZY
5375 memcpy(priv->ucode_boot.v_addr, src, len);
5376
5377 /* We have our copies now, allow OS release its copies */
5378 release_firmware(ucode_raw);
5379 return 0;
5380
5381 err_pci_alloc:
5382 IWL_ERROR("failed to allocate pci memory\n");
90e759d1 5383 ret = -ENOMEM;
bb8c093b 5384 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
5385
5386 err_release:
5387 release_firmware(ucode_raw);
5388
5389 error:
90e759d1 5390 return ret;
b481de9c
ZY
5391}
5392
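/*
 * Illustrative sketch (not driver code) of the firmware file layout parsed
 * above: a small header (version plus five image sizes, all little-endian)
 * followed by the image blocks concatenated in a fixed order.  The offsets
 * mirror the &ucode->data[...] arithmetic in iwl4965_read_ucode(); the
 * example_ names are assumptions, not driver symbols.
 */
struct example_fw_offsets {
	size_t inst;		/* runtime instructions (first block)   */
	size_t data;		/* runtime data (second block)          */
	size_t init;		/* init instructions (third block)      */
	size_t init_data;	/* init data (fourth block)             */
	size_t boot;		/* bootstrap instructions (fifth block) */
};

static inline struct example_fw_offsets
example_fw_layout(u32 inst_size, u32 data_size, u32 init_size,
		  u32 init_data_size)
{
	struct example_fw_offsets off;

	off.inst = 0;
	off.data = off.inst + inst_size;
	off.init = off.data + data_size;
	off.init_data = off.init + init_size;
	off.boot = off.init_data + init_data_size;

	return off;
}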
5393
5394/**
bb8c093b 5395 * iwl4965_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
5396 *
5397 * Tell initialization uCode where to find runtime uCode.
5398 *
5399 * BSM registers initially contain pointers to initialization uCode.
5400 * We need to replace them to load runtime uCode inst and data,
5401 * and to save runtime data when powering down.
5402 */
c79dd5b5 5403static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
b481de9c
ZY
5404{
5405 dma_addr_t pinst;
5406 dma_addr_t pdata;
5407 int rc = 0;
5408 unsigned long flags;
5409
5410 /* bits 35:4 for 4965 */
5411 pinst = priv->ucode_code.p_addr >> 4;
5412 pdata = priv->ucode_data_backup.p_addr >> 4;
5413
5414 spin_lock_irqsave(&priv->lock, flags);
3395f6e9 5415 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
5416 if (rc) {
5417 spin_unlock_irqrestore(&priv->lock, flags);
5418 return rc;
5419 }
5420
5421 /* Tell bootstrap uCode where to find image to load */
3395f6e9
TW
5422 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5423 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5424 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
5425 priv->ucode_data.len);
5426
5427 /* Inst bytecount must be last to set up, bit 31 signals uCode
5428 * that all new ptr/size info is in place */
3395f6e9 5429 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
5430 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
5431
3395f6e9 5432 iwl_release_nic_access(priv);
b481de9c
ZY
5433
5434 spin_unlock_irqrestore(&priv->lock, flags);
5435
5436 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
5437
5438 return rc;
5439}
5440
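/*
 * Illustrative sketch (not driver code) of the register encoding used in
 * iwl4965_set_ucode_ptrs() above: the BSM DRAM pointer registers take the
 * bus address with the low four bits dropped (bits 35:4), and the
 * instruction byte-count register carries the "load" trigger in its top bit
 * (BSM_DRAM_INST_LOAD).  The example_ helpers are assumptions.
 */
static inline u32 example_bsm_dram_ptr(dma_addr_t phys)
{
	return (u32)(phys >> 4);	/* bits 35:4 of the bus address */
}

static inline u32 example_bsm_inst_bytecount(u32 len, int trigger_load)
{
	/* bit 31 tells uCode that all new pointer/size info is in place */
	return trigger_load ? (len | BSM_DRAM_INST_LOAD) : len;
}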
5441/**
bb8c093b 5442 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
5443 *
5444 * Called after REPLY_ALIVE notification received from "initialize" uCode.
5445 *
5446 * The 4965 "initialize" ALIVE reply contains calibration data for:
5447 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
5448 * (3945 does not contain this data).
5449 *
5450 * Tell "initialize" uCode to go ahead and load the runtime uCode.
5451*/
c79dd5b5 5452static void iwl4965_init_alive_start(struct iwl_priv *priv)
b481de9c
ZY
5453{
5454 /* Check alive response for "valid" sign from uCode */
5455 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
5456 /* We had an error bringing up the hardware, so take it
5457 * all the way back down so we can try again */
5458 IWL_DEBUG_INFO("Initialize Alive failed.\n");
5459 goto restart;
5460 }
5461
5462 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
5463 * This is a paranoid check, because we would not have gotten the
5464 * "initialize" alive if code weren't properly loaded. */
bb8c093b 5465 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
5466 /* Runtime instruction load was bad;
5467 * take it all the way back down so we can try again */
5468 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
5469 goto restart;
5470 }
5471
5472 /* Calculate temperature */
5473 priv->temperature = iwl4965_get_temperature(priv);
5474
5475 /* Send pointers to protocol/runtime uCode image ... init code will
5476 * load and launch runtime uCode, which will send us another "Alive"
5477 * notification. */
5478 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 5479 if (iwl4965_set_ucode_ptrs(priv)) {
b481de9c
ZY
5480 /* Runtime instruction load won't happen;
5481 * take it all the way back down so we can try again */
5482 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
5483 goto restart;
5484 }
5485 return;
5486
5487 restart:
5488 queue_work(priv->workqueue, &priv->restart);
5489}
5490
5491
5492/**
bb8c093b 5493 * iwl4965_alive_start - called after REPLY_ALIVE notification received
b481de9c 5494 * from protocol/runtime uCode (initialization uCode's
bb8c093b 5495 * Alive gets handled by iwl4965_init_alive_start()).
b481de9c 5496 */
c79dd5b5 5497static void iwl4965_alive_start(struct iwl_priv *priv)
b481de9c 5498{
57aab75a 5499 int ret = 0;
b481de9c
ZY
5500
5501 IWL_DEBUG_INFO("Runtime Alive received.\n");
5502
5503 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
5504 /* We had an error bringing up the hardware, so take it
5505 * all the way back down so we can try again */
5506 IWL_DEBUG_INFO("Alive failed.\n");
5507 goto restart;
5508 }
5509
5510 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5511 * This is a paranoid check, because we would not have gotten the
5512 * "runtime" alive if code weren't properly loaded. */
bb8c093b 5513 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
5514 /* Runtime instruction load was bad;
5515 * take it all the way back down so we can try again */
5516 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
5517 goto restart;
5518 }
5519
bf85ea4f 5520 iwlcore_clear_stations_table(priv);
b481de9c 5521
57aab75a
TW
5522 ret = priv->cfg->ops->lib->alive_notify(priv);
5523 if (ret) {
b481de9c 5524 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
57aab75a 5525 ret);
b481de9c
ZY
5526 goto restart;
5527 }
5528
9fbab516 5529 /* After the ALIVE response, we can send host commands to 4965 uCode */
b481de9c
ZY
5530 set_bit(STATUS_ALIVE, &priv->status);
5531
5532 /* Clear out the uCode error bit if it is set */
5533 clear_bit(STATUS_FW_ERROR, &priv->status);
5534
fee1247a 5535 if (iwl_is_rfkill(priv))
b481de9c
ZY
5536 return;
5537
5a66926a 5538 ieee80211_start_queues(priv->hw);
b481de9c
ZY
5539
5540 priv->active_rate = priv->rates_mask;
5541 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
5542
bb8c093b 5543 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 5544
3109ece1 5545 if (iwl_is_associated(priv)) {
bb8c093b
CH
5546 struct iwl4965_rxon_cmd *active_rxon =
5547 (struct iwl4965_rxon_cmd *)(&priv->active_rxon);
b481de9c
ZY
5548
5549 memcpy(&priv->staging_rxon, &priv->active_rxon,
5550 sizeof(priv->staging_rxon));
5551 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5552 } else {
5553 /* Initialize our rx_config data */
bb8c093b 5554 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
5555 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
5556 }
5557
9fbab516 5558 /* Configure Bluetooth device coexistence support */
bb8c093b 5559 iwl4965_send_bt_config(priv);
b481de9c
ZY
5560
5561 /* Configure the adapter for unassociated operation */
bb8c093b 5562 iwl4965_commit_rxon(priv);
b481de9c
ZY
5563
5564 /* At this point, the NIC is initialized and operational */
5565 priv->notif_missed_beacons = 0;
b481de9c
ZY
5566
5567 iwl4965_rf_kill_ct_config(priv);
5a66926a 5568
fe00b5a5
RC
5569 iwl_leds_register(priv);
5570
b481de9c 5571 IWL_DEBUG_INFO("ALIVE processing complete.\n");
a9f46786 5572 set_bit(STATUS_READY, &priv->status);
5a66926a 5573 wake_up_interruptible(&priv->wait_command_queue);
b481de9c
ZY
5574
5575 if (priv->error_recovering)
bb8c093b 5576 iwl4965_error_recovery(priv);
b481de9c 5577
c8381fdc 5578 iwlcore_low_level_notify(priv, IWLCORE_START_EVT);
84363e6e 5579 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
b481de9c
ZY
5580 return;
5581
5582 restart:
5583 queue_work(priv->workqueue, &priv->restart);
5584}
5585
c79dd5b5 5586static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
b481de9c 5587
c79dd5b5 5588static void __iwl4965_down(struct iwl_priv *priv)
b481de9c
ZY
5589{
5590 unsigned long flags;
5591 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
5592 struct ieee80211_conf *conf = NULL;
5593
5594 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
5595
5596 conf = ieee80211_get_hw_conf(priv->hw);
5597
5598 if (!exit_pending)
5599 set_bit(STATUS_EXIT_PENDING, &priv->status);
5600
ab53d8af
MA
5601 iwl_leds_unregister(priv);
5602
c8381fdc
MA
5603 iwlcore_low_level_notify(priv, IWLCORE_STOP_EVT);
5604
bf85ea4f 5605 iwlcore_clear_stations_table(priv);
b481de9c
ZY
5606
5607 /* Unblock any waiting calls */
5608 wake_up_interruptible_all(&priv->wait_command_queue);
5609
b481de9c
ZY
5610 /* Wipe out the EXIT_PENDING status bit if we are not actually
5611 * exiting the module */
5612 if (!exit_pending)
5613 clear_bit(STATUS_EXIT_PENDING, &priv->status);
5614
5615 /* stop and reset the on-board processor */
3395f6e9 5616 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
5617
5618 /* tell the device to stop sending interrupts */
0359facc 5619 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 5620 iwl4965_disable_interrupts(priv);
0359facc
MA
5621 spin_unlock_irqrestore(&priv->lock, flags);
5622 iwl_synchronize_irq(priv);
b481de9c
ZY
5623
5624 if (priv->mac80211_registered)
5625 ieee80211_stop_queues(priv->hw);
5626
bb8c093b 5627 /* If we have not previously called iwl4965_init() then
b481de9c 5628 * clear all bits but the RF Kill and SUSPEND bits and return */
fee1247a 5629 if (!iwl_is_init(priv)) {
b481de9c
ZY
5630 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5631 STATUS_RF_KILL_HW |
5632 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
5633 STATUS_RF_KILL_SW |
9788864e
RC
5634 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5635 STATUS_GEO_CONFIGURED |
b481de9c
ZY
5636 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5637 STATUS_IN_SUSPEND;
5638 goto exit;
5639 }
5640
5641 /* ...otherwise clear out all the status bits but the RF Kill and
5642 * SUSPEND bits and continue taking the NIC down. */
5643 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5644 STATUS_RF_KILL_HW |
5645 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
5646 STATUS_RF_KILL_SW |
9788864e
RC
5647 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5648 STATUS_GEO_CONFIGURED |
b481de9c
ZY
5649 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5650 STATUS_IN_SUSPEND |
5651 test_bit(STATUS_FW_ERROR, &priv->status) <<
5652 STATUS_FW_ERROR;
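	/* The idiom above: test_bit() evaluates to 0 or 1, so shifting it
	 * back by the same bit number re-creates just that flag; OR-ing the
	 * terms rebuilds priv->status with only the listed bits kept and
	 * everything else cleared. */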
5653
5654 spin_lock_irqsave(&priv->lock, flags);
3395f6e9 5655 iwl_clear_bit(priv, CSR_GP_CNTRL,
9fbab516 5656 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
5657 spin_unlock_irqrestore(&priv->lock, flags);
5658
bb8c093b
CH
5659 iwl4965_hw_txq_ctx_stop(priv);
5660 iwl4965_hw_rxq_stop(priv);
b481de9c
ZY
5661
5662 spin_lock_irqsave(&priv->lock, flags);
3395f6e9
TW
5663 if (!iwl_grab_nic_access(priv)) {
5664 iwl_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 5665 APMG_CLK_VAL_DMA_CLK_RQT);
3395f6e9 5666 iwl_release_nic_access(priv);
b481de9c
ZY
5667 }
5668 spin_unlock_irqrestore(&priv->lock, flags);
5669
5670 udelay(5);
5671
bb8c093b 5672 iwl4965_hw_nic_stop_master(priv);
3395f6e9 5673 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
bb8c093b 5674 iwl4965_hw_nic_reset(priv);
b481de9c
ZY
5675
5676 exit:
bb8c093b 5677 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
5678
5679 if (priv->ibss_beacon)
5680 dev_kfree_skb(priv->ibss_beacon);
5681 priv->ibss_beacon = NULL;
5682
5683 /* clear out any free frames */
bb8c093b 5684 iwl4965_clear_free_frames(priv);
b481de9c
ZY
5685}
5686
c79dd5b5 5687static void iwl4965_down(struct iwl_priv *priv)
b481de9c
ZY
5688{
5689 mutex_lock(&priv->mutex);
bb8c093b 5690 __iwl4965_down(priv);
b481de9c 5691 mutex_unlock(&priv->mutex);
b24d22b1 5692
bb8c093b 5693 iwl4965_cancel_deferred_work(priv);
b481de9c
ZY
5694}
5695
5696#define MAX_HW_RESTARTS 5
5697
c79dd5b5 5698static int __iwl4965_up(struct iwl_priv *priv)
b481de9c 5699{
57aab75a
TW
5700 int i;
5701 int ret;
b481de9c
ZY
5702
5703 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5704 IWL_WARNING("Exit pending; will not bring the NIC up\n");
5705 return -EIO;
5706 }
5707
5708 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
5709 IWL_WARNING("Radio disabled by SW RF kill (module "
5710 "parameter)\n");
ad97edd2 5711 iwl_rfkill_set_hw_state(priv);
e655b9f0
ZY
5712 return -ENODEV;
5713 }
5714
e903fbd4
RC
5715 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
5716 IWL_ERROR("ucode not available for device bringup\n");
5717 return -EIO;
5718 }
5719
e655b9f0 5720 /* If platform's RF_KILL switch is NOT set to KILL */
3395f6e9 5721 if (iwl_read32(priv, CSR_GP_CNTRL) &
e655b9f0
ZY
5722 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5723 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5724 else {
5725 set_bit(STATUS_RF_KILL_HW, &priv->status);
5726 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
ad97edd2 5727 iwl_rfkill_set_hw_state(priv);
e655b9f0
ZY
5728 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
5729 return -ENODEV;
5730 }
b481de9c
ZY
5731 }
5732
ad97edd2 5733 iwl_rfkill_set_hw_state(priv);
3395f6e9 5734 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 5735
57aab75a
TW
5736 ret = priv->cfg->ops->lib->hw_nic_init(priv);
5737 if (ret) {
5738 IWL_ERROR("Unable to init nic\n");
5739 return ret;
b481de9c
ZY
5740 }
5741
5742 /* make sure rfkill handshake bits are cleared */
3395f6e9
TW
5743 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5744 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
5745 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5746
5747 /* clear (again), then enable host interrupts */
3395f6e9 5748 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
bb8c093b 5749 iwl4965_enable_interrupts(priv);
b481de9c
ZY
5750
5751 /* really make sure rfkill handshake bits are cleared */
3395f6e9
TW
5752 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5753 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
5754
5755 /* Copy original ucode data image from disk into backup cache.
5756 * This will be used to initialize the on-board processor's
5757 * data SRAM for a clean start when the runtime program first loads. */
5758 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
5a66926a 5759 priv->ucode_data.len);
b481de9c 5760
e655b9f0
ZY
5761 /* We return success when we resume from suspend and rf_kill is on. */
5762 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
b481de9c 5763 return 0;
b481de9c
ZY
5764
5765 for (i = 0; i < MAX_HW_RESTARTS; i++) {
5766
bf85ea4f 5767 iwlcore_clear_stations_table(priv);
b481de9c
ZY
5768
5769 /* load bootstrap state machine,
5770 * load bootstrap program into processor's memory,
5771 * prepare to load the "initialize" uCode */
57aab75a 5772 ret = priv->cfg->ops->lib->load_ucode(priv);
b481de9c 5773
57aab75a
TW
5774 if (ret) {
5775 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", ret);
b481de9c
ZY
5776 continue;
5777 }
5778
5779 /* start card; "initialize" will load runtime ucode */
bb8c093b 5780 iwl4965_nic_start(priv);
b481de9c 5781
b481de9c
ZY
5782 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
5783
5784 return 0;
5785 }
5786
5787 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 5788 __iwl4965_down(priv);
b481de9c
ZY
5789
5790 /* tried to restart and config the device for as long as our
5791 * patience could withstand */
5792 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
5793 return -EIO;
5794}
5795
5796
5797/*****************************************************************************
5798 *
5799 * Workqueue callbacks
5800 *
5801 *****************************************************************************/
5802
bb8c093b 5803static void iwl4965_bg_init_alive_start(struct work_struct *data)
b481de9c 5804{
c79dd5b5
TW
5805 struct iwl_priv *priv =
5806 container_of(data, struct iwl_priv, init_alive_start.work);
b481de9c
ZY
5807
5808 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5809 return;
5810
5811 mutex_lock(&priv->mutex);
bb8c093b 5812 iwl4965_init_alive_start(priv);
b481de9c
ZY
5813 mutex_unlock(&priv->mutex);
5814}
5815
bb8c093b 5816static void iwl4965_bg_alive_start(struct work_struct *data)
b481de9c 5817{
c79dd5b5
TW
5818 struct iwl_priv *priv =
5819 container_of(data, struct iwl_priv, alive_start.work);
b481de9c
ZY
5820
5821 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5822 return;
5823
5824 mutex_lock(&priv->mutex);
bb8c093b 5825 iwl4965_alive_start(priv);
b481de9c
ZY
5826 mutex_unlock(&priv->mutex);
5827}
5828
bb8c093b 5829static void iwl4965_bg_rf_kill(struct work_struct *work)
b481de9c 5830{
c79dd5b5 5831 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
b481de9c
ZY
5832
5833 wake_up_interruptible(&priv->wait_command_queue);
5834
5835 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5836 return;
5837
5838 mutex_lock(&priv->mutex);
5839
fee1247a 5840 if (!iwl_is_rfkill(priv)) {
b481de9c
ZY
5841 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
5842 "HW and/or SW RF Kill no longer active, restarting "
5843 "device\n");
5844 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
5845 queue_work(priv->workqueue, &priv->restart);
5846 } else {
ad97edd2
MA
 5847 /* make sure mac80211 stops sending Tx frames */
5848 if (priv->mac80211_registered)
5849 ieee80211_stop_queues(priv->hw);
b481de9c
ZY
5850
5851 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
5852 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
5853 "disabled by SW switch\n");
5854 else
5855 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
5856 "Kill switch must be turned off for "
5857 "wireless networking to work.\n");
5858 }
ad97edd2
MA
5859 iwl_rfkill_set_hw_state(priv);
5860
b481de9c
ZY
5861 mutex_unlock(&priv->mutex);
5862}
5863
5864#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
5865
bb8c093b 5866static void iwl4965_bg_scan_check(struct work_struct *data)
b481de9c 5867{
c79dd5b5
TW
5868 struct iwl_priv *priv =
5869 container_of(data, struct iwl_priv, scan_check.work);
b481de9c
ZY
5870
5871 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5872 return;
5873
5874 mutex_lock(&priv->mutex);
5875 if (test_bit(STATUS_SCANNING, &priv->status) ||
5876 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5877 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
5878 "Scan completion watchdog resetting adapter (%dms)\n",
5879 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
052c4b9f 5880
b481de9c 5881 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 5882 iwl4965_send_scan_abort(priv);
b481de9c
ZY
5883 }
5884 mutex_unlock(&priv->mutex);
5885}
5886
bb8c093b 5887static void iwl4965_bg_request_scan(struct work_struct *data)
b481de9c 5888{
c79dd5b5
TW
5889 struct iwl_priv *priv =
5890 container_of(data, struct iwl_priv, request_scan);
857485c0 5891 struct iwl_host_cmd cmd = {
b481de9c 5892 .id = REPLY_SCAN_CMD,
bb8c093b 5893 .len = sizeof(struct iwl4965_scan_cmd),
b481de9c
ZY
5894 .meta.flags = CMD_SIZE_HUGE,
5895 };
bb8c093b 5896 struct iwl4965_scan_cmd *scan;
b481de9c 5897 struct ieee80211_conf *conf = NULL;
78330fdd 5898 u16 cmd_len;
8318d78a 5899 enum ieee80211_band band;
78330fdd 5900 u8 direct_mask;
857485c0 5901 int ret = 0;
b481de9c
ZY
5902
5903 conf = ieee80211_get_hw_conf(priv->hw);
5904
5905 mutex_lock(&priv->mutex);
5906
fee1247a 5907 if (!iwl_is_ready(priv)) {
b481de9c
ZY
5908 IWL_WARNING("request scan called when driver not ready.\n");
5909 goto done;
5910 }
5911
5912 /* Make sure the scan wasn't cancelled before this queued work
5913 * was given the chance to run... */
5914 if (!test_bit(STATUS_SCANNING, &priv->status))
5915 goto done;
5916
5917 /* This should never be called or scheduled if there is currently
5918 * a scan active in the hardware. */
5919 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
 5920 IWL_DEBUG_INFO("Multiple concurrent scan requests. "
5921 "Ignoring second request.\n");
857485c0 5922 ret = -EIO;
b481de9c
ZY
5923 goto done;
5924 }
5925
5926 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5927 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5928 goto done;
5929 }
5930
5931 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5932 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5933 goto done;
5934 }
5935
fee1247a 5936 if (iwl_is_rfkill(priv)) {
b481de9c
ZY
5937 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5938 goto done;
5939 }
5940
5941 if (!test_bit(STATUS_READY, &priv->status)) {
5942 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
5943 goto done;
5944 }
5945
5946 if (!priv->scan_bands) {
5947 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
5948 goto done;
5949 }
5950
5951 if (!priv->scan) {
bb8c093b 5952 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
b481de9c
ZY
5953 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
5954 if (!priv->scan) {
857485c0 5955 ret = -ENOMEM;
b481de9c
ZY
5956 goto done;
5957 }
5958 }
5959 scan = priv->scan;
bb8c093b 5960 memset(scan, 0, sizeof(struct iwl4965_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
5961
5962 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
5963 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
5964
3109ece1 5965 if (iwl_is_associated(priv)) {
b481de9c
ZY
5966 u16 interval = 0;
5967 u32 extra;
5968 u32 suspend_time = 100;
5969 u32 scan_suspend_time = 100;
5970 unsigned long flags;
5971
5972 IWL_DEBUG_INFO("Scanning while associated...\n");
5973
5974 spin_lock_irqsave(&priv->lock, flags);
5975 interval = priv->beacon_int;
5976 spin_unlock_irqrestore(&priv->lock, flags);
5977
5978 scan->suspend_time = 0;
052c4b9f 5979 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
5980 if (!interval)
5981 interval = suspend_time;
5982
5983 extra = (suspend_time / interval) << 22;
5984 scan_suspend_time = (extra |
5985 ((suspend_time % interval) * 1024));
5986 scan->suspend_time = cpu_to_le32(scan_suspend_time);
5987 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
5988 scan_suspend_time, interval);
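		/*
		 * Worked example of the encoding above (illustrative
		 * values): with suspend_time = 100 and beacon interval
		 * interval = 100,
		 *   extra     = (100 / 100) << 22  = 0x00400000
		 *   remainder = (100 % 100) * 1024 = 0
		 *   suspend_time word              = 0x00400000
		 * i.e. the quotient lands in the bits above 21 and the
		 * scaled remainder in the bits below.
		 */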
5989 }
5990
5991 /* We should add the ability for user to lock to PASSIVE ONLY */
5992 if (priv->one_direct_scan) {
5993 IWL_DEBUG_SCAN
5994 ("Kicking off one direct scan for '%s'\n",
bb8c093b 5995 iwl4965_escape_essid(priv->direct_ssid,
b481de9c
ZY
5996 priv->direct_ssid_len));
5997 scan->direct_scan[0].id = WLAN_EID_SSID;
5998 scan->direct_scan[0].len = priv->direct_ssid_len;
5999 memcpy(scan->direct_scan[0].ssid,
6000 priv->direct_ssid, priv->direct_ssid_len);
6001 direct_mask = 1;
3109ece1 6002 } else if (!iwl_is_associated(priv) && priv->essid_len) {
b481de9c
ZY
6003 scan->direct_scan[0].id = WLAN_EID_SSID;
6004 scan->direct_scan[0].len = priv->essid_len;
6005 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
6006 direct_mask = 1;
857485c0 6007 } else {
b481de9c 6008 direct_mask = 0;
857485c0 6009 }
b481de9c 6010
b481de9c
ZY
6011 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
6012 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
6013 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
6014
b481de9c
ZY
6015
6016 switch (priv->scan_bands) {
6017 case 2:
6018 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
6019 scan->tx_cmd.rate_n_flags =
bb8c093b 6020 iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
b481de9c
ZY
6021 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
6022
6023 scan->good_CRC_th = 0;
8318d78a 6024 band = IEEE80211_BAND_2GHZ;
b481de9c
ZY
6025 break;
6026
6027 case 1:
6028 scan->tx_cmd.rate_n_flags =
bb8c093b 6029 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
b481de9c
ZY
6030 RATE_MCS_ANT_B_MSK);
6031 scan->good_CRC_th = IWL_GOOD_CRC_TH;
8318d78a 6032 band = IEEE80211_BAND_5GHZ;
b481de9c
ZY
6033 break;
6034
6035 default:
6036 IWL_WARNING("Invalid scan band count\n");
6037 goto done;
6038 }
6039
78330fdd
TW
6040 /* We don't build a direct scan probe request; the uCode will do
6041 * that based on the direct_mask added to each channel entry */
6042 cmd_len = iwl4965_fill_probe_req(priv, band,
6043 (struct ieee80211_mgmt *)scan->data,
6044 IWL_MAX_SCAN_SIZE - sizeof(*scan), 0);
6045
6046 scan->tx_cmd.len = cpu_to_le16(cmd_len);
b481de9c
ZY
6047 /* select Rx chains */
6048
6049 /* Force use of chains B and C (0x6) for scan Rx.
6050 * Avoid A (0x1) because of its off-channel reception on A-band.
6051 * MIMO is not used here, but value is required to make uCode happy. */
6052 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
6053 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
6054 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
6055 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
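	/*
	 * Sketch of the value built above (illustrative): with the chain
	 * bitmap A = 0x1, B = 0x2, C = 0x4, "valid" = 0x7 (A|B|C) and the
	 * forced scan-Rx selection = 0x6 (B|C), each shifted into its
	 * field, plus the driver-force bit.
	 */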
6056
6057 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
6058 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
6059
26c0f03f 6060 if (direct_mask) {
b481de9c
ZY
6061 IWL_DEBUG_SCAN
6062 ("Initiating direct scan for %s.\n",
bb8c093b 6063 iwl4965_escape_essid(priv->essid, priv->essid_len));
26c0f03f
RC
6064 scan->channel_count =
6065 iwl4965_get_channels_for_scan(
6066 priv, band, 1, /* active */
6067 direct_mask,
6068 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6069 } else {
b481de9c 6070 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
26c0f03f
RC
6071 scan->channel_count =
6072 iwl4965_get_channels_for_scan(
6073 priv, band, 0, /* passive */
6074 direct_mask,
6075 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6076 }
b481de9c
ZY
6077
6078 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 6079 scan->channel_count * sizeof(struct iwl4965_scan_channel);
b481de9c
ZY
6080 cmd.data = scan;
6081 scan->len = cpu_to_le16(cmd.len);
6082
6083 set_bit(STATUS_SCAN_HW, &priv->status);
857485c0
TW
6084 ret = iwl_send_cmd_sync(priv, &cmd);
6085 if (ret)
b481de9c
ZY
6086 goto done;
6087
6088 queue_delayed_work(priv->workqueue, &priv->scan_check,
6089 IWL_SCAN_CHECK_WATCHDOG);
6090
6091 mutex_unlock(&priv->mutex);
6092 return;
6093
6094 done:
01ebd063 6095 /* inform mac80211 scan aborted */
b481de9c
ZY
6096 queue_work(priv->workqueue, &priv->scan_completed);
6097 mutex_unlock(&priv->mutex);
6098}
6099
bb8c093b 6100static void iwl4965_bg_up(struct work_struct *data)
b481de9c 6101{
c79dd5b5 6102 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
b481de9c
ZY
6103
6104 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6105 return;
6106
6107 mutex_lock(&priv->mutex);
bb8c093b 6108 __iwl4965_up(priv);
b481de9c
ZY
6109 mutex_unlock(&priv->mutex);
6110}
6111
bb8c093b 6112static void iwl4965_bg_restart(struct work_struct *data)
b481de9c 6113{
c79dd5b5 6114 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
b481de9c
ZY
6115
6116 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6117 return;
6118
bb8c093b 6119 iwl4965_down(priv);
b481de9c
ZY
6120 queue_work(priv->workqueue, &priv->up);
6121}
6122
bb8c093b 6123static void iwl4965_bg_rx_replenish(struct work_struct *data)
b481de9c 6124{
c79dd5b5
TW
6125 struct iwl_priv *priv =
6126 container_of(data, struct iwl_priv, rx_replenish);
b481de9c
ZY
6127
6128 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6129 return;
6130
6131 mutex_lock(&priv->mutex);
bb8c093b 6132 iwl4965_rx_replenish(priv);
b481de9c
ZY
6133 mutex_unlock(&priv->mutex);
6134}
6135
7878a5a4
MA
6136#define IWL_DELAY_NEXT_SCAN (HZ*2)
6137
bb8c093b 6138static void iwl4965_bg_post_associate(struct work_struct *data)
b481de9c 6139{
c79dd5b5 6140 struct iwl_priv *priv = container_of(data, struct iwl_priv,
b481de9c 6141 post_associate.work);
b481de9c 6142 struct ieee80211_conf *conf = NULL;
857485c0 6143 int ret = 0;
0795af57 6144 DECLARE_MAC_BUF(mac);
b481de9c
ZY
6145
6146 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
6147 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
6148 return;
6149 }
6150
0795af57
JP
6151 IWL_DEBUG_ASSOC("Associated as %d to: %s\n",
6152 priv->assoc_id,
6153 print_mac(mac, priv->active_rxon.bssid_addr));
b481de9c
ZY
6154
6155
6156 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6157 return;
6158
6159 mutex_lock(&priv->mutex);
6160
32bfd35d 6161 if (!priv->vif || !priv->is_open) {
948c171c
MA
6162 mutex_unlock(&priv->mutex);
6163 return;
6164 }
bb8c093b 6165 iwl4965_scan_cancel_timeout(priv, 200);
052c4b9f 6166
b481de9c
ZY
6167 conf = ieee80211_get_hw_conf(priv->hw);
6168
6169 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 6170 iwl4965_commit_rxon(priv);
b481de9c 6171
bb8c093b
CH
6172 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
6173 iwl4965_setup_rxon_timing(priv);
857485c0 6174 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c 6175 sizeof(priv->rxon_timing), &priv->rxon_timing);
857485c0 6176 if (ret)
b481de9c
ZY
6177 IWL_WARNING("REPLY_RXON_TIMING failed - "
6178 "Attempting to continue.\n");
6179
6180 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6181
c8b0e6e1 6182#ifdef CONFIG_IWL4965_HT
fd105e79
RR
6183 if (priv->current_ht_config.is_ht)
6184 iwl4965_set_rxon_ht(priv, &priv->current_ht_config);
c8b0e6e1 6185#endif /* CONFIG_IWL4965_HT*/
b481de9c
ZY
6186 iwl4965_set_rxon_chain(priv);
6187 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
6188
6189 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
6190 priv->assoc_id, priv->beacon_int);
6191
6192 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
6193 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6194 else
6195 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
6196
6197 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
6198 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
6199 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
6200 else
6201 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6202
6203 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
6204 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6205
6206 }
6207
bb8c093b 6208 iwl4965_commit_rxon(priv);
b481de9c
ZY
6209
6210 switch (priv->iw_mode) {
6211 case IEEE80211_IF_TYPE_STA:
bb8c093b 6212 iwl4965_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
6213 break;
6214
6215 case IEEE80211_IF_TYPE_IBSS:
6216
6217 /* clear out the station table */
bf85ea4f 6218 iwlcore_clear_stations_table(priv);
b481de9c 6219
bb8c093b
CH
6220 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
6221 iwl4965_rxon_add_station(priv, priv->bssid, 0);
6222 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
6223 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
6224
6225 break;
6226
6227 default:
6228 IWL_ERROR("%s Should not be called in %d mode\n",
6229 __FUNCTION__, priv->iw_mode);
6230 break;
6231 }
6232
bb8c093b 6233 iwl4965_sequence_reset(priv);
b481de9c 6234
c8b0e6e1 6235#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c
ZY
6236 /* Enable Rx differential gain and sensitivity calibrations */
6237 iwl4965_chain_noise_reset(priv);
6238 priv->start_calib = 1;
c8b0e6e1 6239#endif /* CONFIG_IWL4965_SENSITIVITY */
b481de9c
ZY
6240
6241 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
6242 priv->assoc_station_added = 1;
6243
bb8c093b 6244 iwl4965_activate_qos(priv, 0);
292ae174 6245
7878a5a4
MA
6246 /* we have just associated, don't start scan too early */
6247 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
b481de9c
ZY
6248 mutex_unlock(&priv->mutex);
6249}
6250
bb8c093b 6251static void iwl4965_bg_abort_scan(struct work_struct *work)
b481de9c 6252{
c79dd5b5 6253 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
b481de9c 6254
fee1247a 6255 if (!iwl_is_ready(priv))
b481de9c
ZY
6256 return;
6257
6258 mutex_lock(&priv->mutex);
6259
6260 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 6261 iwl4965_send_scan_abort(priv);
b481de9c
ZY
6262
6263 mutex_unlock(&priv->mutex);
6264}
6265
76bb77e0
ZY
6266static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
6267
bb8c093b 6268static void iwl4965_bg_scan_completed(struct work_struct *work)
b481de9c 6269{
c79dd5b5
TW
6270 struct iwl_priv *priv =
6271 container_of(work, struct iwl_priv, scan_completed);
b481de9c
ZY
6272
6273 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
6274
6275 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6276 return;
6277
a0646470
ZY
6278 if (test_bit(STATUS_CONF_PENDING, &priv->status))
6279 iwl4965_mac_config(priv->hw, ieee80211_get_hw_conf(priv->hw));
76bb77e0 6280
b481de9c
ZY
6281 ieee80211_scan_completed(priv->hw);
6282
6283 /* Since setting the TXPOWER may have been deferred while
6284 * performing the scan, fire one off */
6285 mutex_lock(&priv->mutex);
bb8c093b 6286 iwl4965_hw_reg_send_txpower(priv);
b481de9c
ZY
6287 mutex_unlock(&priv->mutex);
6288}
6289
6290/*****************************************************************************
6291 *
6292 * mac80211 entry point functions
6293 *
6294 *****************************************************************************/
6295
5a66926a
ZY
6296#define UCODE_READY_TIMEOUT (2 * HZ)
6297
bb8c093b 6298static int iwl4965_mac_start(struct ieee80211_hw *hw)
b481de9c 6299{
c79dd5b5 6300 struct iwl_priv *priv = hw->priv;
5a66926a 6301 int ret;
b481de9c
ZY
6302
6303 IWL_DEBUG_MAC80211("enter\n");
6304
5a66926a
ZY
6305 if (pci_enable_device(priv->pci_dev)) {
6306 IWL_ERROR("Fail to pci_enable_device\n");
6307 return -ENODEV;
6308 }
6309 pci_restore_state(priv->pci_dev);
6310 pci_enable_msi(priv->pci_dev);
6311
6312 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED,
6313 DRV_NAME, priv);
6314 if (ret) {
6315 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
6316 goto out_disable_msi;
6317 }
6318
b481de9c
ZY
6319 /* we should be verifying the device is ready to be opened */
6320 mutex_lock(&priv->mutex);
6321
5a66926a
ZY
6322 memset(&priv->staging_rxon, 0, sizeof(struct iwl4965_rxon_cmd));
6323 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
6324 * ucode filename and max sizes are card-specific. */
b481de9c 6325
5a66926a
ZY
6326 if (!priv->ucode_code.len) {
6327 ret = iwl4965_read_ucode(priv);
6328 if (ret) {
6329 IWL_ERROR("Could not read microcode: %d\n", ret);
6330 mutex_unlock(&priv->mutex);
6331 goto out_release_irq;
6332 }
6333 }
b481de9c 6334
e655b9f0 6335 ret = __iwl4965_up(priv);
5a66926a 6336
b481de9c 6337 mutex_unlock(&priv->mutex);
5a66926a 6338
e655b9f0
ZY
6339 if (ret)
6340 goto out_release_irq;
6341
6342 IWL_DEBUG_INFO("Start UP work done.\n");
6343
6344 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
6345 return 0;
6346
5a66926a
ZY
6347 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
6348 * mac80211 will not be run successfully. */
6349 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
6350 test_bit(STATUS_READY, &priv->status),
6351 UCODE_READY_TIMEOUT);
6352 if (!ret) {
6353 if (!test_bit(STATUS_READY, &priv->status)) {
6354 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n",
6355 jiffies_to_msecs(UCODE_READY_TIMEOUT));
6356 ret = -ETIMEDOUT;
6357 goto out_release_irq;
6358 }
6359 }
6360
e655b9f0 6361 priv->is_open = 1;
b481de9c
ZY
6362 IWL_DEBUG_MAC80211("leave\n");
6363 return 0;
5a66926a
ZY
6364
6365out_release_irq:
6366 free_irq(priv->pci_dev->irq, priv);
6367out_disable_msi:
6368 pci_disable_msi(priv->pci_dev);
e655b9f0
ZY
6369 pci_disable_device(priv->pci_dev);
6370 priv->is_open = 0;
6371 IWL_DEBUG_MAC80211("leave - failed\n");
5a66926a 6372 return ret;
b481de9c
ZY
6373}
6374
bb8c093b 6375static void iwl4965_mac_stop(struct ieee80211_hw *hw)
b481de9c 6376{
c79dd5b5 6377 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6378
6379 IWL_DEBUG_MAC80211("enter\n");
948c171c 6380
e655b9f0
ZY
6381 if (!priv->is_open) {
6382 IWL_DEBUG_MAC80211("leave - skip\n");
6383 return;
6384 }
6385
b481de9c 6386 priv->is_open = 0;
5a66926a 6387
fee1247a 6388 if (iwl_is_ready_rf(priv)) {
e655b9f0
ZY
6389 /* stop mac, cancel any scan request and clear
6390 * RXON_FILTER_ASSOC_MSK BIT
6391 */
5a66926a
ZY
6392 mutex_lock(&priv->mutex);
6393 iwl4965_scan_cancel_timeout(priv, 100);
6394 cancel_delayed_work(&priv->post_associate);
fde3571f 6395 mutex_unlock(&priv->mutex);
fde3571f
MA
6396 }
6397
5a66926a
ZY
6398 iwl4965_down(priv);
6399
6400 flush_workqueue(priv->workqueue);
6401 free_irq(priv->pci_dev->irq, priv);
6402 pci_disable_msi(priv->pci_dev);
6403 pci_save_state(priv->pci_dev);
6404 pci_disable_device(priv->pci_dev);
948c171c 6405
b481de9c 6406 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
6407}
6408
bb8c093b 6409static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
6410 struct ieee80211_tx_control *ctl)
6411{
c79dd5b5 6412 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6413
6414 IWL_DEBUG_MAC80211("enter\n");
6415
6416 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
6417 IWL_DEBUG_MAC80211("leave - monitor\n");
6418 return -1;
6419 }
6420
6421 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
8318d78a 6422 ctl->tx_rate->bitrate);
b481de9c 6423
bb8c093b 6424 if (iwl4965_tx_skb(priv, skb, ctl))
b481de9c
ZY
6425 dev_kfree_skb_any(skb);
6426
6427 IWL_DEBUG_MAC80211("leave\n");
6428 return 0;
6429}
6430
bb8c093b 6431static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
6432 struct ieee80211_if_init_conf *conf)
6433{
c79dd5b5 6434 struct iwl_priv *priv = hw->priv;
b481de9c 6435 unsigned long flags;
0795af57 6436 DECLARE_MAC_BUF(mac);
b481de9c 6437
32bfd35d 6438 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
b481de9c 6439
32bfd35d
JB
6440 if (priv->vif) {
6441 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
75849d28 6442 return -EOPNOTSUPP;
b481de9c
ZY
6443 }
6444
6445 spin_lock_irqsave(&priv->lock, flags);
32bfd35d 6446 priv->vif = conf->vif;
b481de9c
ZY
6447
6448 spin_unlock_irqrestore(&priv->lock, flags);
6449
6450 mutex_lock(&priv->mutex);
864792e3
TW
6451
6452 if (conf->mac_addr) {
6453 IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr));
6454 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
6455 }
b481de9c 6456
fee1247a 6457 if (iwl_is_ready(priv))
5a66926a
ZY
6458 iwl4965_set_mode(priv, conf->type);
6459
b481de9c
ZY
6460 mutex_unlock(&priv->mutex);
6461
5a66926a 6462 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
6463 return 0;
6464}
6465
6466/**
bb8c093b 6467 * iwl4965_mac_config - mac80211 config callback
b481de9c
ZY
6468 *
6469 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
6470 * be set inappropriately and the driver currently sets the hardware up to
6471 * use it whenever needed.
6472 */
bb8c093b 6473static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
b481de9c 6474{
c79dd5b5 6475 struct iwl_priv *priv = hw->priv;
bf85ea4f 6476 const struct iwl_channel_info *ch_info;
b481de9c 6477 unsigned long flags;
76bb77e0 6478 int ret = 0;
b481de9c
ZY
6479
6480 mutex_lock(&priv->mutex);
8318d78a 6481 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
b481de9c 6482
12342c47
ZY
6483 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
6484
fee1247a 6485 if (!iwl_is_ready(priv)) {
b481de9c 6486 IWL_DEBUG_MAC80211("leave - not ready\n");
76bb77e0
ZY
6487 ret = -EIO;
6488 goto out;
b481de9c
ZY
6489 }
6490
1ea87396 6491 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
b481de9c 6492 test_bit(STATUS_SCANNING, &priv->status))) {
a0646470
ZY
6493 IWL_DEBUG_MAC80211("leave - scanning\n");
6494 set_bit(STATUS_CONF_PENDING, &priv->status);
b481de9c 6495 mutex_unlock(&priv->mutex);
a0646470 6496 return 0;
b481de9c
ZY
6497 }
6498
6499 spin_lock_irqsave(&priv->lock, flags);
6500
8622e705 6501 ch_info = iwl_get_channel_info(priv, conf->channel->band,
8318d78a 6502 ieee80211_frequency_to_channel(conf->channel->center_freq));
b481de9c 6503 if (!is_channel_valid(ch_info)) {
b481de9c
ZY
6504 IWL_DEBUG_MAC80211("leave - invalid channel\n");
6505 spin_unlock_irqrestore(&priv->lock, flags);
76bb77e0
ZY
6506 ret = -EINVAL;
6507 goto out;
b481de9c
ZY
6508 }
6509
c8b0e6e1 6510#ifdef CONFIG_IWL4965_HT
78330fdd 6511 /* If we are switching to a different channel, clear the staging
b481de9c
ZY
6512 * flags so that stale HT-related settings are not carried over,
6513 * since the new channel/band may not support HT. */
78330fdd 6514 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel->hw_value)
b481de9c
ZY
6515#ifdef IEEE80211_CONF_CHANNEL_SWITCH
6516 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
6517#endif
6518 )
6519 priv->staging_rxon.flags = 0;
c8b0e6e1 6520#endif /* CONFIG_IWL4965_HT */
b481de9c 6521
bf85ea4f 6522 iwlcore_set_rxon_channel(priv, conf->channel->band,
8318d78a 6523 ieee80211_frequency_to_channel(conf->channel->center_freq));
b481de9c 6524
8318d78a 6525 iwl4965_set_flags_for_phymode(priv, conf->channel->band);
b481de9c
ZY
6526
6527 /* The list of supported rates and rate mask can be different
8318d78a 6528 * for each band; since the band may have changed, reset
b481de9c 6529 * the rate mask to what mac80211 lists */
bb8c093b 6530 iwl4965_set_rate(priv);
b481de9c
ZY
6531
6532 spin_unlock_irqrestore(&priv->lock, flags);
6533
6534#ifdef IEEE80211_CONF_CHANNEL_SWITCH
6535 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 6536 iwl4965_hw_channel_switch(priv, conf->channel);
76bb77e0 6537 goto out;
b481de9c
ZY
6538 }
6539#endif
6540
ad97edd2
MA
6541 if (priv->cfg->ops->lib->radio_kill_sw)
6542 priv->cfg->ops->lib->radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
6543
6544 if (!conf->radio_enabled) {
6545 IWL_DEBUG_MAC80211("leave - radio disabled\n");
76bb77e0 6546 goto out;
b481de9c
ZY
6547 }
6548
fee1247a 6549 if (iwl_is_rfkill(priv)) {
b481de9c 6550 IWL_DEBUG_MAC80211("leave - RF kill\n");
76bb77e0
ZY
6551 ret = -EIO;
6552 goto out;
b481de9c
ZY
6553 }
6554
bb8c093b 6555 iwl4965_set_rate(priv);
b481de9c
ZY
6556
6557 if (memcmp(&priv->active_rxon,
6558 &priv->staging_rxon, sizeof(priv->staging_rxon)))
bb8c093b 6559 iwl4965_commit_rxon(priv);
b481de9c
ZY
6560 else
6561 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
6562
6563 IWL_DEBUG_MAC80211("leave\n");
6564
a0646470
ZY
6565out:
6566 clear_bit(STATUS_CONF_PENDING, &priv->status);
5a66926a 6567 mutex_unlock(&priv->mutex);
76bb77e0 6568 return ret;
b481de9c
ZY
6569}
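/*
 * The config callback above works on a shadow copy of the RXON command:
 * changes are staged in priv->staging_rxon and only pushed to the uCode
 * when they differ from priv->active_rxon, i.e.
 *
 *	if (memcmp(&priv->active_rxon, &priv->staging_rxon,
 *		   sizeof(priv->staging_rxon)))
 *		iwl4965_commit_rxon(priv);
 *
 * Two other details are worth noting: if a hardware scan is in flight,
 * the new configuration is not applied immediately; STATUS_CONF_PENDING
 * is set and the callback returns 0, presumably so the deferred
 * configuration can be applied once the scan completes.  And because
 * the supported rate set depends on the band, iwl4965_set_rate() is
 * re-run after every channel/band change.
 */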
6570
c79dd5b5 6571static void iwl4965_config_ap(struct iwl_priv *priv)
b481de9c 6572{
857485c0 6573 int ret = 0;
b481de9c 6574
d986bcd1 6575 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
b481de9c
ZY
6576 return;
6577
6578 /* The following should be done only at AP bring up */
6579 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
6580
6581 /* RXON - unassoc (to set timing command) */
6582 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 6583 iwl4965_commit_rxon(priv);
b481de9c
ZY
6584
6585 /* RXON Timing */
bb8c093b
CH
6586 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
6587 iwl4965_setup_rxon_timing(priv);
857485c0 6588 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c 6589 sizeof(priv->rxon_timing), &priv->rxon_timing);
857485c0 6590 if (ret)
b481de9c
ZY
6591 IWL_WARNING("REPLY_RXON_TIMING failed - "
6592 "Attempting to continue.\n");
6593
6594 iwl4965_set_rxon_chain(priv);
6595
6596 /* FIXME: what should be the assoc_id for AP? */
6597 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
6598 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
6599 priv->staging_rxon.flags |=
6600 RXON_FLG_SHORT_PREAMBLE_MSK;
6601 else
6602 priv->staging_rxon.flags &=
6603 ~RXON_FLG_SHORT_PREAMBLE_MSK;
6604
6605 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
6606 if (priv->assoc_capability &
6607 WLAN_CAPABILITY_SHORT_SLOT_TIME)
6608 priv->staging_rxon.flags |=
6609 RXON_FLG_SHORT_SLOT_MSK;
6610 else
6611 priv->staging_rxon.flags &=
6612 ~RXON_FLG_SHORT_SLOT_MSK;
6613
6614 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
6615 priv->staging_rxon.flags &=
6616 ~RXON_FLG_SHORT_SLOT_MSK;
6617 }
6618 /* restore RXON assoc */
6619 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 6620 iwl4965_commit_rxon(priv);
bb8c093b 6621 iwl4965_activate_qos(priv, 1);
bb8c093b 6622 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
e1493deb 6623 }
bb8c093b 6624 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
6625
6626 /* FIXME - we need to add code here to detect a totally new
6627 * configuration, reset the AP, unassoc, rxon timing, assoc,
6628 * clear sta table, add BCAST sta... */
6629}
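/*
 * AP bring-up above follows the usual RXON choreography for this
 * hardware: commit an unassociated RXON first (per the comment above,
 * the timing command is set while unassociated), send REPLY_RXON_TIMING,
 * pick the RX chain, derive the short-preamble/short-slot flags from
 * the association capabilities, then re-commit with
 * RXON_FILTER_ASSOC_MSK set, enable QoS, add the broadcast station and
 * finally upload the beacon template with iwl4965_send_beacon_cmd().
 */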
6630
32bfd35d
JB
6631static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
6632 struct ieee80211_vif *vif,
b481de9c
ZY
6633 struct ieee80211_if_conf *conf)
6634{
c79dd5b5 6635 struct iwl_priv *priv = hw->priv;
0795af57 6636 DECLARE_MAC_BUF(mac);
b481de9c
ZY
6637 unsigned long flags;
6638 int rc;
6639
6640 if (conf == NULL)
6641 return -EIO;
6642
b716bb91
EG
6643 if (priv->vif != vif) {
6644 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
6646 return 0;
6647 }
6648
b481de9c
ZY
6649 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
6650 (!conf->beacon || !conf->ssid_len)) {
6651 IWL_DEBUG_MAC80211
6652 ("Leaving in AP mode because HostAPD is not ready.\n");
6653 return 0;
6654 }
6655
fee1247a 6656 if (!iwl_is_alive(priv))
5a66926a
ZY
6657 return -EAGAIN;
6658
b481de9c
ZY
6659 mutex_lock(&priv->mutex);
6660
b481de9c 6661 if (conf->bssid)
0795af57
JP
6662 IWL_DEBUG_MAC80211("bssid: %s\n",
6663 print_mac(mac, conf->bssid));
b481de9c 6664
4150c572
JB
6665/*
6666 * very dubious code was here; the probe filtering flag is never set:
6667 *
b481de9c
ZY
6668 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
6669 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572 6670 */
b481de9c
ZY
6671
6672 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
6673 if (!conf->bssid) {
6674 conf->bssid = priv->mac_addr;
6675 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
0795af57
JP
6676 IWL_DEBUG_MAC80211("bssid was set to: %s\n",
6677 print_mac(mac, conf->bssid));
b481de9c
ZY
6678 }
6679 if (priv->ibss_beacon)
6680 dev_kfree_skb(priv->ibss_beacon);
6681
6682 priv->ibss_beacon = conf->beacon;
6683 }
6684
fee1247a 6685 if (iwl_is_rfkill(priv))
fde3571f
MA
6686 goto done;
6687
b481de9c
ZY
6688 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
6689 !is_multicast_ether_addr(conf->bssid)) {
6690 /* If there is currently a HW scan going on in the background
6691 * then we need to cancel it else the RXON below will fail. */
bb8c093b 6692 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
6693 IWL_WARNING("Aborted scan still in progress "
6694 "after 100ms\n");
6695 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
6696 mutex_unlock(&priv->mutex);
6697 return -EAGAIN;
6698 }
6699 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
6700
6701 /* TODO: Audit driver for usage of these members and see
6702 * if mac80211 deprecates them (priv->bssid looks like it
6703 * shouldn't be there, but I haven't scanned the IBSS code
6704 * to verify) - jpk */
6705 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
6706
6707 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b 6708 iwl4965_config_ap(priv);
b481de9c 6709 else {
bb8c093b 6710 rc = iwl4965_commit_rxon(priv);
b481de9c 6711 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
bb8c093b 6712 iwl4965_rxon_add_station(
b481de9c
ZY
6713 priv, priv->active_rxon.bssid_addr, 1);
6714 }
6715
6716 } else {
bb8c093b 6717 iwl4965_scan_cancel_timeout(priv, 100);
b481de9c 6718 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 6719 iwl4965_commit_rxon(priv);
b481de9c
ZY
6720 }
6721
fde3571f 6722 done:
b481de9c
ZY
6723 spin_lock_irqsave(&priv->lock, flags);
6724 if (!conf->ssid_len)
6725 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
6726 else
6727 memcpy(priv->essid, conf->ssid, conf->ssid_len);
6728
6729 priv->essid_len = conf->ssid_len;
6730 spin_unlock_irqrestore(&priv->lock, flags);
6731
6732 IWL_DEBUG_MAC80211("leave\n");
6733 mutex_unlock(&priv->mutex);
6734
6735 return 0;
6736}
6737
bb8c093b 6738static void iwl4965_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
6739 unsigned int changed_flags,
6740 unsigned int *total_flags,
6741 int mc_count, struct dev_addr_list *mc_list)
6742{
6743 /*
6744 * XXX: dummy
bb8c093b 6745 * see also iwl4965_connection_init_rx_config
4150c572
JB
6746 */
6747 *total_flags = 0;
6748}
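/*
 * configure_filter() is deliberately a stub here.  mac80211 expects the
 * driver to report back, in *total_flags, which FIF_* filter flags it
 * actually honours; clearing them all tells the stack that none of the
 * requested monitor-style filters are applied in hardware.  The real RX
 * filtering for this device is programmed through the RXON filter_flags
 * instead (see iwl4965_connection_init_rx_config, referenced in the
 * comment above).
 */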
6749
bb8c093b 6750static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
6751 struct ieee80211_if_init_conf *conf)
6752{
c79dd5b5 6753 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6754
6755 IWL_DEBUG_MAC80211("enter\n");
6756
6757 mutex_lock(&priv->mutex);
948c171c 6758
fee1247a 6759 if (iwl_is_ready_rf(priv)) {
fde3571f
MA
6760 iwl4965_scan_cancel_timeout(priv, 100);
6761 cancel_delayed_work(&priv->post_associate);
6762 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6763 iwl4965_commit_rxon(priv);
6764 }
32bfd35d
JB
6765 if (priv->vif == conf->vif) {
6766 priv->vif = NULL;
b481de9c
ZY
6767 memset(priv->bssid, 0, ETH_ALEN);
6768 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
6769 priv->essid_len = 0;
6770 }
6771 mutex_unlock(&priv->mutex);
6772
6773 IWL_DEBUG_MAC80211("leave\n");
6774
6775}
471b3efd 6776
98952d5d
TW
6777
6778#ifdef CONFIG_IWL4965_HT
6779static void iwl4965_ht_conf(struct iwl_priv *priv,
6780 struct ieee80211_bss_conf *bss_conf)
6781{
6782 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf;
6783 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
6784 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
6785
6786 IWL_DEBUG_MAC80211("enter: \n");
6787
6788 iwl_conf->is_ht = bss_conf->assoc_ht;
6789
6790 if (!iwl_conf->is_ht)
6791 return;
6792
6793 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6794
6795 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
6796 iwl_conf->sgf |= 0x1;
6797 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
6798 iwl_conf->sgf |= 0x2;
6799
6800 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
6801 iwl_conf->max_amsdu_size =
6802 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
6803
6804 iwl_conf->supported_chan_width =
6805 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
6806 iwl_conf->extension_chan_offset =
6807 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
6808 /* If neither an above nor a below extension channel is supplied, disable FAT (40 MHz) channels */
6809 if (iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_ABOVE &&
6810 iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_BELOW)
6811 iwl_conf->supported_chan_width = 0;
6812
6813 iwl_conf->tx_mimo_ps_mode =
6814 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6815 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
6816
6817 iwl_conf->control_channel = ht_bss_conf->primary_channel;
6818 iwl_conf->tx_chan_width =
6819 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
6820 iwl_conf->ht_protection =
6821 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
6822 iwl_conf->non_GF_STA_present =
6823 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
6824
6825 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
6826 IWL_DEBUG_MAC80211("leave\n");
6827}
6828#else
6829static inline void iwl4965_ht_conf(struct iwl_priv *priv,
6830 struct ieee80211_bss_conf *bss_conf)
6831{
6832}
6833#endif
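/*
 * iwl4965_ht_conf() above repacks mac80211's HT capability/BSS info
 * into the driver's iwl_ht_info:
 *   - sgf collects the short-GI bits (bit 0 = short GI in 20 MHz,
 *     bit 1 = short GI in 40 MHz),
 *   - the MIMO power-save mode is the two-bit SM-PS field of the HT
 *     capabilities, hence the ">> 2" shift of IEEE80211_HT_CAP_MIMO_PS,
 *   - FAT (40 MHz) operation is only kept when the BSS advertises an
 *     extension channel strictly above or below the control channel;
 *     otherwise supported_chan_width is forced back to 20 MHz.
 */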
6834
3109ece1 6835#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
471b3efd
JB
6836static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
6837 struct ieee80211_vif *vif,
6838 struct ieee80211_bss_conf *bss_conf,
6839 u32 changes)
220173b0 6840{
c79dd5b5 6841 struct iwl_priv *priv = hw->priv;
220173b0 6842
3109ece1
TW
6843 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
6844
471b3efd 6845 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
3109ece1
TW
6846 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
6847 bss_conf->use_short_preamble);
471b3efd 6848 if (bss_conf->use_short_preamble)
220173b0
TW
6849 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6850 else
6851 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
6852 }
6853
471b3efd 6854 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
3109ece1 6855 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
8318d78a 6856 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
220173b0
TW
6857 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
6858 else
6859 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
6860 }
6861
98952d5d 6862 if (changes & BSS_CHANGED_HT) {
3109ece1 6863 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht);
98952d5d
TW
6864 iwl4965_ht_conf(priv, bss_conf);
6865 iwl4965_set_rxon_chain(priv);
6866 }
6867
471b3efd 6868 if (changes & BSS_CHANGED_ASSOC) {
3109ece1
TW
6869 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
6870 if (bss_conf->assoc) {
6871 priv->assoc_id = bss_conf->aid;
6872 priv->beacon_int = bss_conf->beacon_int;
6873 priv->timestamp = bss_conf->timestamp;
6874 priv->assoc_capability = bss_conf->assoc_capability;
6875 priv->next_scan_jiffies = jiffies +
6876 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
6877 queue_work(priv->workqueue, &priv->post_associate.work);
6878 } else {
6879 priv->assoc_id = 0;
6880 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
6881 }
6882 } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
6883 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
6884 iwl4965_send_rxon_assoc(priv);
471b3efd
JB
6885 }
6886
220173b0 6887}
b481de9c 6888
bb8c093b 6889static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
6890{
6891 int rc = 0;
6892 unsigned long flags;
c79dd5b5 6893 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6894
6895 IWL_DEBUG_MAC80211("enter\n");
6896
052c4b9f 6897 mutex_lock(&priv->mutex);
b481de9c
ZY
6898 spin_lock_irqsave(&priv->lock, flags);
6899
fee1247a 6900 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
6901 rc = -EIO;
6902 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
6903 goto out_unlock;
6904 }
6905
6906 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
6907 rc = -EIO;
6908 IWL_ERROR("ERROR: APs don't scan\n");
6909 goto out_unlock;
6910 }
6911
7878a5a4
MA
6912 /* we don't schedule a scan within the next_scan_jiffies period */
6913 if (priv->next_scan_jiffies &&
6914 time_after(priv->next_scan_jiffies, jiffies)) {
6915 rc = -EAGAIN;
6916 goto out_unlock;
6917 }
b481de9c 6918 /* if we just finished a scan, ask for a delay */
7878a5a4
MA
6919 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
6920 IWL_DELAY_NEXT_SCAN, jiffies)) {
b481de9c
ZY
6921 rc = -EAGAIN;
6922 goto out_unlock;
6923 }
6924 if (len) {
7878a5a4 6925 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
bb8c093b 6926 iwl4965_escape_essid(ssid, len), (int)len);
b481de9c
ZY
6927
6928 priv->one_direct_scan = 1;
6929 priv->direct_ssid_len = (u8)
6930 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
6931 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
948c171c
MA
6932 } else
6933 priv->one_direct_scan = 0;
b481de9c 6934
bb8c093b 6935 rc = iwl4965_scan_initiate(priv);
b481de9c
ZY
6936
6937 IWL_DEBUG_MAC80211("leave\n");
6938
6939out_unlock:
6940 spin_unlock_irqrestore(&priv->lock, flags);
052c4b9f 6941 mutex_unlock(&priv->mutex);
b481de9c
ZY
6942
6943 return rc;
6944}
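/*
 * The scan handler above throttles requests with two jiffies-based
 * checks: an absolute "do not scan before" deadline in
 * priv->next_scan_jiffies (set to jiffies + IWL_DELAY_NEXT_SCAN_AFTER_ASSOC,
 * i.e. 6 seconds, when an association completes in
 * iwl4965_bss_info_changed() above), and a relative back-off of
 * IWL_DELAY_NEXT_SCAN after the previous scan finished.  Both use
 * time_after(), which is wraparound-safe; a minimal sketch of the first
 * check:
 *
 *	if (priv->next_scan_jiffies &&
 *	    time_after(priv->next_scan_jiffies, jiffies))
 *		return -EAGAIN;	 -- still inside the post-assoc quiet period
 */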
6945
ab885f8c
EG
6946static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
6947 struct ieee80211_key_conf *keyconf, const u8 *addr,
6948 u32 iv32, u16 *phase1key)
6949{
6950 struct iwl_priv *priv = hw->priv;
6951 u8 sta_id = IWL_INVALID_STATION;
6952 unsigned long flags;
6953 __le16 key_flags = 0;
6954 int i;
6955 DECLARE_MAC_BUF(mac);
6956
6957 IWL_DEBUG_MAC80211("enter\n");
6958
6959 sta_id = iwl4965_hw_find_station(priv, addr);
6960 if (sta_id == IWL_INVALID_STATION) {
6961 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
6962 print_mac(mac, addr));
6963 return;
6964 }
6965
6966 iwl4965_scan_cancel_timeout(priv, 100);
6967
6968 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
6969 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
6970 key_flags &= ~STA_KEY_FLG_INVALID;
6971
6972 if (sta_id == priv->hw_setting.bcast_sta_id)
6973 key_flags |= STA_KEY_MULTICAST_MSK;
6974
6975 spin_lock_irqsave(&priv->sta_lock, flags);
6976
6977 priv->stations[sta_id].sta.key.key_offset =
80fb47a1 6978 iwl_get_free_ucode_key_index(priv);
ab885f8c
EG
6979 priv->stations[sta_id].sta.key.key_flags = key_flags;
6980 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
6981
6982 for (i = 0; i < 5; i++)
6983 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
6984 cpu_to_le16(phase1key[i]);
6985
6986 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
6987 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
6988
6989 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
6990
6991 spin_unlock_irqrestore(&priv->sta_lock, flags);
6992
6993 IWL_DEBUG_MAC80211("leave\n");
6994}
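/*
 * update_tkip_key() is invoked by mac80211 when a new TKIP phase-1 key
 * is needed (the phase-1 key depends on the high 32 bits of the IV).
 * The code above stores iv32 and the five 16-bit phase-1 (TTAK) words
 * in the station table entry and pushes them to the uCode with an
 * asynchronous ADD_STA marked STA_MODIFY_KEY_MASK, so per-packet
 * phase-2 mixing can continue in hardware without a full rekey.
 */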
6995
bb8c093b 6996static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
b481de9c
ZY
6997 const u8 *local_addr, const u8 *addr,
6998 struct ieee80211_key_conf *key)
6999{
c79dd5b5 7000 struct iwl_priv *priv = hw->priv;
0795af57 7001 DECLARE_MAC_BUF(mac);
deb09c43
EG
7002 int ret = 0;
7003 u8 sta_id = IWL_INVALID_STATION;
6974e363 7004 u8 is_default_wep_key = 0;
b481de9c
ZY
7005
7006 IWL_DEBUG_MAC80211("enter\n");
7007
1ea87396 7008 if (!priv->cfg->mod_params->hw_crypto) {
b481de9c
ZY
7009 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7010 return -EOPNOTSUPP;
7011 }
7012
7013 if (is_zero_ether_addr(addr))
7014 /* only support pairwise keys */
7015 return -EOPNOTSUPP;
7016
6974e363
EG
7017 sta_id = iwl4965_hw_find_station(priv, addr);
7018 if (sta_id == IWL_INVALID_STATION) {
7019 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7020 print_mac(mac, addr));
7021 return -EINVAL;
b481de9c 7022
deb09c43 7023 }
b481de9c 7024
6974e363 7025 mutex_lock(&priv->mutex);
bb8c093b 7026 iwl4965_scan_cancel_timeout(priv, 100);
6974e363
EG
7027 mutex_unlock(&priv->mutex);
7028
7029 /* If we are getting a WEP group key and we have not received any key mapping
7030 * so far, we are in legacy WEP mode (group key only); otherwise we are
7031 * in 802.1X mode.
7032 * In legacy WEP mode we use a different host command to the uCode. */
7033 if (key->alg == ALG_WEP && sta_id == priv->hw_setting.bcast_sta_id &&
7034 priv->iw_mode != IEEE80211_IF_TYPE_AP) {
7035 if (cmd == SET_KEY)
7036 is_default_wep_key = !priv->key_mapping_key;
7037 else
7038 is_default_wep_key = priv->default_wep_key;
7039 }
052c4b9f 7040
b481de9c 7041 switch (cmd) {
deb09c43 7042 case SET_KEY:
6974e363
EG
7043 if (is_default_wep_key)
7044 ret = iwl_set_default_wep_key(priv, key);
deb09c43
EG
7045 else
7046 ret = iwl4965_set_dynamic_key(priv, key, sta_id);
7047
7048 IWL_DEBUG_MAC80211("enable hwcrypto key\n");
b481de9c
ZY
7049 break;
7050 case DISABLE_KEY:
6974e363
EG
7051 if (is_default_wep_key)
7052 ret = iwl_remove_default_wep_key(priv, key);
deb09c43
EG
7053 else
7054 ret = iwl4965_clear_sta_key_info(priv, sta_id);
7055
7056 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
b481de9c
ZY
7057 break;
7058 default:
deb09c43 7059 ret = -EINVAL;
b481de9c
ZY
7060 }
7061
7062 IWL_DEBUG_MAC80211("leave\n");
b481de9c 7063
deb09c43 7064 return ret;
b481de9c
ZY
7065}
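/*
 * Key handling above splits into two uCode paths:
 *
 *   default/global WEP key  -> iwl_set_default_wep_key() /
 *                              iwl_remove_default_wep_key()
 *   pairwise / 802.1X key   -> iwl4965_set_dynamic_key() /
 *                              iwl4965_clear_sta_key_info()
 *
 * The "default WEP" path is chosen only when the key is a WEP key for
 * the broadcast station, we are not acting as an AP, and (on SET_KEY)
 * no key-mapping key has been installed yet -- i.e. plain shared-key
 * WEP rather than 802.1X with per-station keys.
 */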
7066
bb8c093b 7067static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
b481de9c
ZY
7068 const struct ieee80211_tx_queue_params *params)
7069{
c79dd5b5 7070 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
7071 unsigned long flags;
7072 int q;
b481de9c
ZY
7073
7074 IWL_DEBUG_MAC80211("enter\n");
7075
fee1247a 7076 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
7077 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7078 return -EIO;
7079 }
7080
7081 if (queue >= AC_NUM) {
7082 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7083 return 0;
7084 }
7085
b481de9c
ZY
7086 if (!priv->qos_data.qos_enable) {
7087 priv->qos_data.qos_active = 0;
7088 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7089 return 0;
7090 }
7091 q = AC_NUM - 1 - queue;
7092
7093 spin_lock_irqsave(&priv->lock, flags);
7094
7095 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
7096 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
7097 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
7098 priv->qos_data.def_qos_parm.ac[q].edca_txop =
3330d7be 7099 cpu_to_le16((params->txop * 32));
b481de9c
ZY
7100
7101 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
7102 priv->qos_data.qos_active = 1;
7103
7104 spin_unlock_irqrestore(&priv->lock, flags);
7105
7106 mutex_lock(&priv->mutex);
7107 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b 7108 iwl4965_activate_qos(priv, 1);
3109ece1 7109 else if (priv->assoc_id && iwl_is_associated(priv))
bb8c093b 7110 iwl4965_activate_qos(priv, 0);
b481de9c
ZY
7111
7112 mutex_unlock(&priv->mutex);
7113
b481de9c
ZY
7114 IWL_DEBUG_MAC80211("leave\n");
7115 return 0;
7116}
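/*
 * Two conversions above are easy to miss.  mac80211 hands over the EDCA
 * TXOP limit in units of 32 microseconds (as in the 802.11 TXOP field),
 * while the uCode QoS command appears to want plain microseconds, hence
 * "params->txop * 32"; e.g. a TXOP value of 94 becomes
 * 94 * 32 = 3008 us (roughly 3 ms).  The queue index is also reversed
 * with "q = AC_NUM - 1 - queue", apparently because mac80211 numbers
 * its queues from the highest-priority AC down while the uCode's
 * def_qos_parm.ac[] array is indexed the other way around.
 */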
7117
bb8c093b 7118static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
7119 struct ieee80211_tx_queue_stats *stats)
7120{
c79dd5b5 7121 struct iwl_priv *priv = hw->priv;
b481de9c 7122 int i, avail;
bb8c093b
CH
7123 struct iwl4965_tx_queue *txq;
7124 struct iwl4965_queue *q;
b481de9c
ZY
7125 unsigned long flags;
7126
7127 IWL_DEBUG_MAC80211("enter\n");
7128
fee1247a 7129 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
7130 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7131 return -EIO;
7132 }
7133
7134 spin_lock_irqsave(&priv->lock, flags);
7135
7136 for (i = 0; i < AC_NUM; i++) {
7137 txq = &priv->txq[i];
7138 q = &txq->q;
bb8c093b 7139 avail = iwl4965_queue_space(q);
b481de9c
ZY
7140
7141 stats->data[i].len = q->n_window - avail;
7142 stats->data[i].limit = q->n_window - q->high_mark;
7143 stats->data[i].count = q->n_window;
7144
7145 }
7146 spin_unlock_irqrestore(&priv->lock, flags);
7147
7148 IWL_DEBUG_MAC80211("leave\n");
7149
7150 return 0;
7151}
7152
bb8c093b 7153static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
b481de9c
ZY
7154 struct ieee80211_low_level_stats *stats)
7155{
7156 IWL_DEBUG_MAC80211("enter\n");
7157 IWL_DEBUG_MAC80211("leave\n");
7158
7159 return 0;
7160}
7161
bb8c093b 7162static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
b481de9c
ZY
7163{
7164 IWL_DEBUG_MAC80211("enter\n");
7165 IWL_DEBUG_MAC80211("leave\n");
7166
7167 return 0;
7168}
7169
bb8c093b 7170static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 7171{
c79dd5b5 7172 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
7173 unsigned long flags;
7174
7175 mutex_lock(&priv->mutex);
7176 IWL_DEBUG_MAC80211("enter\n");
7177
7178 priv->lq_mngr.lq_ready = 0;
c8b0e6e1 7179#ifdef CONFIG_IWL4965_HT
b481de9c 7180 spin_lock_irqsave(&priv->lock, flags);
fd105e79 7181 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
b481de9c 7182 spin_unlock_irqrestore(&priv->lock, flags);
c8b0e6e1 7183#endif /* CONFIG_IWL4965_HT */
b481de9c 7184
bf85ea4f 7185 iwlcore_reset_qos(priv);
b481de9c
ZY
7186
7187 cancel_delayed_work(&priv->post_associate);
7188
7189 spin_lock_irqsave(&priv->lock, flags);
7190 priv->assoc_id = 0;
7191 priv->assoc_capability = 0;
b481de9c
ZY
7192 priv->assoc_station_added = 0;
7193
7194 /* new association get rid of ibss beacon skb */
7195 if (priv->ibss_beacon)
7196 dev_kfree_skb(priv->ibss_beacon);
7197
7198 priv->ibss_beacon = NULL;
7199
7200 priv->beacon_int = priv->hw->conf.beacon_int;
3109ece1 7201 priv->timestamp = 0;
b481de9c
ZY
7202 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
7203 priv->beacon_int = 0;
7204
7205 spin_unlock_irqrestore(&priv->lock, flags);
7206
fee1247a 7207 if (!iwl_is_ready_rf(priv)) {
fde3571f
MA
7208 IWL_DEBUG_MAC80211("leave - not ready\n");
7209 mutex_unlock(&priv->mutex);
7210 return;
7211 }
7212
052c4b9f 7213 /* we are restarting association process
7214 * clear RXON_FILTER_ASSOC_MSK bit
7215 */
7216 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
bb8c093b 7217 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 7218 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7219 iwl4965_commit_rxon(priv);
052c4b9f 7220 }
7221
b481de9c
ZY
7222 /* Per mac80211.h: This is only used in IBSS mode... */
7223 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
052c4b9f 7224
b481de9c
ZY
7225 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
7226 mutex_unlock(&priv->mutex);
7227 return;
7228 }
7229
b481de9c
ZY
7230 priv->only_active_channel = 0;
7231
bb8c093b 7232 iwl4965_set_rate(priv);
b481de9c
ZY
7233
7234 mutex_unlock(&priv->mutex);
7235
7236 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7237}
7238
bb8c093b 7239static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
7240 struct ieee80211_tx_control *control)
7241{
c79dd5b5 7242 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
7243 unsigned long flags;
7244
7245 mutex_lock(&priv->mutex);
7246 IWL_DEBUG_MAC80211("enter\n");
7247
fee1247a 7248 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
7249 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7250 mutex_unlock(&priv->mutex);
7251 return -EIO;
7252 }
7253
7254 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
7255 IWL_DEBUG_MAC80211("leave - not IBSS\n");
7256 mutex_unlock(&priv->mutex);
7257 return -EIO;
7258 }
7259
7260 spin_lock_irqsave(&priv->lock, flags);
7261
7262 if (priv->ibss_beacon)
7263 dev_kfree_skb(priv->ibss_beacon);
7264
7265 priv->ibss_beacon = skb;
7266
7267 priv->assoc_id = 0;
7268
7269 IWL_DEBUG_MAC80211("leave\n");
7270 spin_unlock_irqrestore(&priv->lock, flags);
7271
bf85ea4f 7272 iwlcore_reset_qos(priv);
b481de9c
ZY
7273
7274 queue_work(priv->workqueue, &priv->post_associate.work);
7275
7276 mutex_unlock(&priv->mutex);
7277
7278 return 0;
7279}
7280
b481de9c
ZY
7281/*****************************************************************************
7282 *
7283 * sysfs attributes
7284 *
7285 *****************************************************************************/
7286
0a6857e7 7287#ifdef CONFIG_IWLWIFI_DEBUG
b481de9c
ZY
7288
7289/*
7290 * The following adds a new attribute to the sysfs representation
7291 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
7292 * used for controlling the debug level.
7293 *
7294 * See the level definitions in iwl for details.
7295 */
7296
7297static ssize_t show_debug_level(struct device_driver *d, char *buf)
7298{
0a6857e7 7299 return sprintf(buf, "0x%08X\n", iwl_debug_level);
b481de9c
ZY
7300}
7301static ssize_t store_debug_level(struct device_driver *d,
7302 const char *buf, size_t count)
7303{
7304 char *p = (char *)buf;
7305 u32 val;
7306
7307 val = simple_strtoul(p, &p, 0);
7308 if (p == buf)
7309 printk(KERN_INFO DRV_NAME
7310 ": %s is not in hex or decimal form.\n", buf);
7311 else
0a6857e7 7312 iwl_debug_level = val;
b481de9c
ZY
7313
7314 return strnlen(buf, count);
7315}
7316
7317static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
7318 show_debug_level, store_debug_level);
7319
0a6857e7 7320#endif /* CONFIG_IWLWIFI_DEBUG */
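/*
 * debug_level is a driver-level attribute (DRIVER_ATTR): it is created
 * in iwl4965_init() with driver_create_file(), so it appears once per
 * driver under /sys/bus/pci/drivers/<DRV_NAME>/debug_level rather than
 * per device.  Because store_debug_level() parses with
 * simple_strtoul(p, &p, 0), both decimal and 0x-prefixed hex masks are
 * accepted, e.g. (illustrative value only):
 *
 *	echo 0xffffffff > /sys/bus/pci/drivers/<DRV_NAME>/debug_level
 */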
b481de9c 7321
b481de9c
ZY
7322
7323static ssize_t show_temperature(struct device *d,
7324 struct device_attribute *attr, char *buf)
7325{
c79dd5b5 7326 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c 7327
fee1247a 7328 if (!iwl_is_alive(priv))
b481de9c
ZY
7329 return -EAGAIN;
7330
bb8c093b 7331 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv));
b481de9c
ZY
7332}
7333
7334static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
7335
7336static ssize_t show_rs_window(struct device *d,
7337 struct device_attribute *attr,
7338 char *buf)
7339{
c79dd5b5 7340 struct iwl_priv *priv = d->driver_data;
bb8c093b 7341 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
b481de9c
ZY
7342}
7343static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
7344
7345static ssize_t show_tx_power(struct device *d,
7346 struct device_attribute *attr, char *buf)
7347{
c79dd5b5 7348 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
7349 return sprintf(buf, "%d\n", priv->user_txpower_limit);
7350}
7351
7352static ssize_t store_tx_power(struct device *d,
7353 struct device_attribute *attr,
7354 const char *buf, size_t count)
7355{
c79dd5b5 7356 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
7357 char *p = (char *)buf;
7358 u32 val;
7359
7360 val = simple_strtoul(p, &p, 10);
7361 if (p == buf)
7362 printk(KERN_INFO DRV_NAME
7363 ": %s is not in decimal form.\n", buf);
7364 else
bb8c093b 7365 iwl4965_hw_reg_set_txpower(priv, val);
b481de9c
ZY
7366
7367 return count;
7368}
7369
7370static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
7371
7372static ssize_t show_flags(struct device *d,
7373 struct device_attribute *attr, char *buf)
7374{
c79dd5b5 7375 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
7376
7377 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
7378}
7379
7380static ssize_t store_flags(struct device *d,
7381 struct device_attribute *attr,
7382 const char *buf, size_t count)
7383{
c79dd5b5 7384 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
7385 u32 flags = simple_strtoul(buf, NULL, 0);
7386
7387 mutex_lock(&priv->mutex);
7388 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
7389 /* Cancel any currently running scans... */
bb8c093b 7390 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
7391 IWL_WARNING("Could not cancel scan.\n");
7392 else {
7393 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
7394 flags);
7395 priv->staging_rxon.flags = cpu_to_le32(flags);
bb8c093b 7396 iwl4965_commit_rxon(priv);
b481de9c
ZY
7397 }
7398 }
7399 mutex_unlock(&priv->mutex);
7400
7401 return count;
7402}
7403
7404static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
7405
7406static ssize_t show_filter_flags(struct device *d,
7407 struct device_attribute *attr, char *buf)
7408{
c79dd5b5 7409 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
7410
7411 return sprintf(buf, "0x%04X\n",
7412 le32_to_cpu(priv->active_rxon.filter_flags));
7413}
7414
7415static ssize_t store_filter_flags(struct device *d,
7416 struct device_attribute *attr,
7417 const char *buf, size_t count)
7418{
c79dd5b5 7419 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
7420 u32 filter_flags = simple_strtoul(buf, NULL, 0);
7421
7422 mutex_lock(&priv->mutex);
7423 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
7424 /* Cancel any currently running scans... */
bb8c093b 7425 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
7426 IWL_WARNING("Could not cancel scan.\n");
7427 else {
7428 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
7429 "0x%04X\n", filter_flags);
7430 priv->staging_rxon.filter_flags =
7431 cpu_to_le32(filter_flags);
bb8c093b 7432 iwl4965_commit_rxon(priv);
b481de9c
ZY
7433 }
7434 }
7435 mutex_unlock(&priv->mutex);
7436
7437 return count;
7438}
7439
7440static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
7441 store_filter_flags);
7442
c8b0e6e1 7443#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
7444
7445static ssize_t show_measurement(struct device *d,
7446 struct device_attribute *attr, char *buf)
7447{
c79dd5b5 7448 struct iwl_priv *priv = dev_get_drvdata(d);
bb8c093b 7449 struct iwl4965_spectrum_notification measure_report;
b481de9c
ZY
7450 u32 size = sizeof(measure_report), len = 0, ofs = 0;
7451 u8 *data = (u8 *) & measure_report;
7452 unsigned long flags;
7453
7454 spin_lock_irqsave(&priv->lock, flags);
7455 if (!(priv->measurement_status & MEASUREMENT_READY)) {
7456 spin_unlock_irqrestore(&priv->lock, flags);
7457 return 0;
7458 }
7459 memcpy(&measure_report, &priv->measure_report, size);
7460 priv->measurement_status = 0;
7461 spin_unlock_irqrestore(&priv->lock, flags);
7462
7463 while (size && (PAGE_SIZE - len)) {
7464 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
7465 PAGE_SIZE - len, 1);
7466 len = strlen(buf);
7467 if (PAGE_SIZE - len)
7468 buf[len++] = '\n';
7469
7470 ofs += 16;
7471 size -= min(size, 16U);
7472 }
7473
7474 return len;
7475}
7476
7477static ssize_t store_measurement(struct device *d,
7478 struct device_attribute *attr,
7479 const char *buf, size_t count)
7480{
c79dd5b5 7481 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
7482 struct ieee80211_measurement_params params = {
7483 .channel = le16_to_cpu(priv->active_rxon.channel),
7484 .start_time = cpu_to_le64(priv->last_tsf),
7485 .duration = cpu_to_le16(1),
7486 };
7487 u8 type = IWL_MEASURE_BASIC;
7488 u8 buffer[32];
7489 u8 channel;
7490
7491 if (count) {
7492 char *p = buffer;
7493 strlcpy(buffer, buf, min(sizeof(buffer), count + 1));
7494 channel = simple_strtoul(p, NULL, 0);
7495 if (channel)
7496 params.channel = channel;
7497
7498 p = buffer;
7499 while (*p && *p != ' ')
7500 p++;
7501 if (*p)
7502 type = simple_strtoul(p + 1, NULL, 0);
7503 }
7504
7505 IWL_DEBUG_INFO("Invoking measurement of type %d on "
7506 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 7507 iwl4965_get_measurement(priv, &params, type);
b481de9c
ZY
7508
7509 return count;
7510}
7511
7512static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
7513 show_measurement, store_measurement);
c8b0e6e1 7514#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */
b481de9c
ZY
7515
7516static ssize_t store_retry_rate(struct device *d,
7517 struct device_attribute *attr,
7518 const char *buf, size_t count)
7519{
c79dd5b5 7520 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
7521
7522 priv->retry_rate = simple_strtoul(buf, NULL, 0);
7523 if (priv->retry_rate <= 0)
7524 priv->retry_rate = 1;
7525
7526 return count;
7527}
7528
7529static ssize_t show_retry_rate(struct device *d,
7530 struct device_attribute *attr, char *buf)
7531{
c79dd5b5 7532 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
7533 return sprintf(buf, "%d", priv->retry_rate);
7534}
7535
7536static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
7537 store_retry_rate);
7538
7539static ssize_t store_power_level(struct device *d,
7540 struct device_attribute *attr,
7541 const char *buf, size_t count)
7542{
c79dd5b5 7543 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
7544 int rc;
7545 int mode;
7546
7547 mode = simple_strtoul(buf, NULL, 0);
7548 mutex_lock(&priv->mutex);
7549
fee1247a 7550 if (!iwl_is_ready(priv)) {
b481de9c
ZY
7551 rc = -EAGAIN;
7552 goto out;
7553 }
7554
7555 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
7556 mode = IWL_POWER_AC;
7557 else
7558 mode |= IWL_POWER_ENABLED;
7559
7560 if (mode != priv->power_mode) {
bb8c093b 7561 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
b481de9c
ZY
7562 if (rc) {
7563 IWL_DEBUG_MAC80211("failed setting power mode.\n");
7564 goto out;
7565 }
7566 priv->power_mode = mode;
7567 }
7568
7569 rc = count;
7570
7571 out:
7572 mutex_unlock(&priv->mutex);
7573 return rc;
7574}
7575
7576#define MAX_WX_STRING 80
7577
7578/* Values are in microseconds */
7579static const s32 timeout_duration[] = {
7580 350000,
7581 250000,
7582 75000,
7583 37000,
7584 25000,
7585};
7586static const s32 period_duration[] = {
7587 400000,
7588 700000,
7589 1000000,
7590 1000000,
7591 1000000
7592};
7593
7594static ssize_t show_power_level(struct device *d,
7595 struct device_attribute *attr, char *buf)
7596{
c79dd5b5 7597 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
7598 int level = IWL_POWER_LEVEL(priv->power_mode);
7599 char *p = buf;
7600
7601 p += sprintf(p, "%d ", level);
7602 switch (level) {
7603 case IWL_POWER_MODE_CAM:
7604 case IWL_POWER_AC:
7605 p += sprintf(p, "(AC)");
7606 break;
7607 case IWL_POWER_BATTERY:
7608 p += sprintf(p, "(BATTERY)");
7609 break;
7610 default:
7611 p += sprintf(p,
7612 "(Timeout %dms, Period %dms)",
7613 timeout_duration[level - 1] / 1000,
7614 period_duration[level - 1] / 1000);
7615 }
7616
7617 if (!(priv->power_mode & IWL_POWER_ENABLED))
7618 p += sprintf(p, " OFF\n");
7619 else
7620 p += sprintf(p, " \n");
7621
7622 return (p - buf + 1);
7623
7624}
7625
7626static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
7627 store_power_level);
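/*
 * Power levels accepted by store_power_level() map onto the two tables
 * above: IWL_POWER_AC (reported as "(AC)" by show_power_level()) keeps
 * the radio in always-on mode, while levels that reach the default
 * branch of show_power_level() index timeout_duration[level - 1] and
 * period_duration[level - 1], which are stored in microseconds and
 * printed in milliseconds.  For example, level 3 would read back as a
 * 75 ms timeout with a 1000 ms period (75000 / 1000 and 1000000 / 1000).
 * Values outside 1..IWL_POWER_LIMIT, or IWL_POWER_AC itself, fall back
 * to AC mode; anything else is OR-ed with IWL_POWER_ENABLED.
 */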
7628
7629static ssize_t show_channels(struct device *d,
7630 struct device_attribute *attr, char *buf)
7631{
8318d78a
JB
7632 /* none of this belongs in sysfs anyway */
7633 return 0;
b481de9c
ZY
7634}
7635
7636static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
7637
7638static ssize_t show_statistics(struct device *d,
7639 struct device_attribute *attr, char *buf)
7640{
c79dd5b5 7641 struct iwl_priv *priv = dev_get_drvdata(d);
bb8c093b 7642 u32 size = sizeof(struct iwl4965_notif_statistics);
b481de9c
ZY
7643 u32 len = 0, ofs = 0;
7644 u8 *data = (u8 *) & priv->statistics;
7645 int rc = 0;
7646
fee1247a 7647 if (!iwl_is_alive(priv))
b481de9c
ZY
7648 return -EAGAIN;
7649
7650 mutex_lock(&priv->mutex);
bb8c093b 7651 rc = iwl4965_send_statistics_request(priv);
b481de9c
ZY
7652 mutex_unlock(&priv->mutex);
7653
7654 if (rc) {
7655 len = sprintf(buf,
7656 "Error sending statistics request: 0x%08X\n", rc);
7657 return len;
7658 }
7659
7660 while (size && (PAGE_SIZE - len)) {
7661 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
7662 PAGE_SIZE - len, 1);
7663 len = strlen(buf);
7664 if (PAGE_SIZE - len)
7665 buf[len++] = '\n';
7666
7667 ofs += 16;
7668 size -= min(size, 16U);
7669 }
7670
7671 return len;
7672}
7673
7674static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
7675
7676static ssize_t show_antenna(struct device *d,
7677 struct device_attribute *attr, char *buf)
7678{
c79dd5b5 7679 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c 7680
fee1247a 7681 if (!iwl_is_alive(priv))
b481de9c
ZY
7682 return -EAGAIN;
7683
7684 return sprintf(buf, "%d\n", priv->antenna);
7685}
7686
7687static ssize_t store_antenna(struct device *d,
7688 struct device_attribute *attr,
7689 const char *buf, size_t count)
7690{
7691 int ant;
c79dd5b5 7692 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
7693
7694 if (count == 0)
7695 return 0;
7696
7697 if (sscanf(buf, "%1i", &ant) != 1) {
7698 IWL_DEBUG_INFO("not in hex or decimal form.\n");
7699 return count;
7700 }
7701
7702 if ((ant >= 0) && (ant <= 2)) {
7703 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 7704 priv->antenna = (enum iwl4965_antenna)ant;
b481de9c
ZY
7705 } else
7706 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
7707
7708
7709 return count;
7710}
7711
7712static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
7713
7714static ssize_t show_status(struct device *d,
7715 struct device_attribute *attr, char *buf)
7716{
c79dd5b5 7717 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
fee1247a 7718 if (!iwl_is_alive(priv))
b481de9c
ZY
7719 return -EAGAIN;
7720 return sprintf(buf, "0x%08x\n", (int)priv->status);
7721}
7722
7723static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
7724
7725static ssize_t dump_error_log(struct device *d,
7726 struct device_attribute *attr,
7727 const char *buf, size_t count)
7728{
7729 char *p = (char *)buf;
7730
7731 if (p[0] == '1')
c79dd5b5 7732 iwl4965_dump_nic_error_log((struct iwl_priv *)d->driver_data);
b481de9c
ZY
7733
7734 return strnlen(buf, count);
7735}
7736
7737static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
7738
7739static ssize_t dump_event_log(struct device *d,
7740 struct device_attribute *attr,
7741 const char *buf, size_t count)
7742{
7743 char *p = (char *)buf;
7744
7745 if (p[0] == '1')
c79dd5b5 7746 iwl4965_dump_nic_event_log((struct iwl_priv *)d->driver_data);
b481de9c
ZY
7747
7748 return strnlen(buf, count);
7749}
7750
7751static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
7752
7753/*****************************************************************************
7754 *
7755 * driver setup and teardown
7756 *
7757 *****************************************************************************/
7758
c79dd5b5 7759static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
b481de9c
ZY
7760{
7761 priv->workqueue = create_workqueue(DRV_NAME);
7762
7763 init_waitqueue_head(&priv->wait_command_queue);
7764
bb8c093b
CH
7765 INIT_WORK(&priv->up, iwl4965_bg_up);
7766 INIT_WORK(&priv->restart, iwl4965_bg_restart);
7767 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
7768 INIT_WORK(&priv->scan_completed, iwl4965_bg_scan_completed);
7769 INIT_WORK(&priv->request_scan, iwl4965_bg_request_scan);
7770 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
7771 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
7772 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
7773 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
7774 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
7775 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
7776 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
7777
7778 iwl4965_hw_setup_deferred_work(priv);
b481de9c
ZY
7779
7780 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 7781 iwl4965_irq_tasklet, (unsigned long)priv);
b481de9c
ZY
7782}
7783
c79dd5b5 7784static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
b481de9c 7785{
bb8c093b 7786 iwl4965_hw_cancel_deferred_work(priv);
b481de9c 7787
3ae6a054 7788 cancel_delayed_work_sync(&priv->init_alive_start);
b481de9c
ZY
7789 cancel_delayed_work(&priv->scan_check);
7790 cancel_delayed_work(&priv->alive_start);
7791 cancel_delayed_work(&priv->post_associate);
7792 cancel_work_sync(&priv->beacon_update);
7793}
7794
bb8c093b 7795static struct attribute *iwl4965_sysfs_entries[] = {
b481de9c
ZY
7796 &dev_attr_antenna.attr,
7797 &dev_attr_channels.attr,
7798 &dev_attr_dump_errors.attr,
7799 &dev_attr_dump_events.attr,
7800 &dev_attr_flags.attr,
7801 &dev_attr_filter_flags.attr,
c8b0e6e1 7802#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
7803 &dev_attr_measurement.attr,
7804#endif
7805 &dev_attr_power_level.attr,
7806 &dev_attr_retry_rate.attr,
b481de9c
ZY
7807 &dev_attr_rs_window.attr,
7808 &dev_attr_statistics.attr,
7809 &dev_attr_status.attr,
7810 &dev_attr_temperature.attr,
b481de9c
ZY
7811 &dev_attr_tx_power.attr,
7812
7813 NULL
7814};
7815
bb8c093b 7816static struct attribute_group iwl4965_attribute_group = {
b481de9c 7817 .name = NULL, /* put in device directory */
bb8c093b 7818 .attrs = iwl4965_sysfs_entries,
b481de9c
ZY
7819};
7820
bb8c093b
CH
7821static struct ieee80211_ops iwl4965_hw_ops = {
7822 .tx = iwl4965_mac_tx,
7823 .start = iwl4965_mac_start,
7824 .stop = iwl4965_mac_stop,
7825 .add_interface = iwl4965_mac_add_interface,
7826 .remove_interface = iwl4965_mac_remove_interface,
7827 .config = iwl4965_mac_config,
7828 .config_interface = iwl4965_mac_config_interface,
7829 .configure_filter = iwl4965_configure_filter,
7830 .set_key = iwl4965_mac_set_key,
ab885f8c 7831 .update_tkip_key = iwl4965_mac_update_tkip_key,
bb8c093b
CH
7832 .get_stats = iwl4965_mac_get_stats,
7833 .get_tx_stats = iwl4965_mac_get_tx_stats,
7834 .conf_tx = iwl4965_mac_conf_tx,
7835 .get_tsf = iwl4965_mac_get_tsf,
7836 .reset_tsf = iwl4965_mac_reset_tsf,
7837 .beacon_update = iwl4965_mac_beacon_update,
471b3efd 7838 .bss_info_changed = iwl4965_bss_info_changed,
c8b0e6e1 7839#ifdef CONFIG_IWL4965_HT
9ab46173 7840 .ampdu_action = iwl4965_mac_ampdu_action,
c8b0e6e1 7841#endif /* CONFIG_IWL4965_HT */
bb8c093b 7842 .hw_scan = iwl4965_mac_hw_scan
b481de9c
ZY
7843};
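/*
 * This ops table is what ties the driver to mac80211: iwl_alloc_all()
 * in iwl4965_pci_probe() below passes &iwl4965_hw_ops when allocating
 * the ieee80211_hw, so every mac80211 callback lands in one of the
 * iwl4965_* handlers wired up here.  Note that .hw_scan is cleared at
 * probe time (before the hw is allocated) when the disable_hw_scan
 * module parameter is set, which is why mac80211 then falls back to
 * software scanning.
 */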
7844
bb8c093b 7845static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
b481de9c
ZY
7846{
7847 int err = 0;
c79dd5b5 7848 struct iwl_priv *priv;
b481de9c 7849 struct ieee80211_hw *hw;
82b9a121 7850 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
0359facc 7851 unsigned long flags;
5a66926a 7852 DECLARE_MAC_BUF(mac);
b481de9c 7853
316c30d9
AK
7854 /************************
7855 * 1. Allocating HW data
7856 ************************/
7857
6440adb5
BC
7858 /* Disabling hardware scan means that mac80211 will perform scans
7859 * "the hard way", rather than using device's scan. */
1ea87396 7860 if (cfg->mod_params->disable_hw_scan) {
b481de9c 7861 IWL_DEBUG_INFO("Disabling hw_scan\n");
bb8c093b 7862 iwl4965_hw_ops.hw_scan = NULL;
b481de9c
ZY
7863 }
7864
1d0a082d
AK
7865 hw = iwl_alloc_all(cfg, &iwl4965_hw_ops);
7866 if (!hw) {
b481de9c
ZY
7867 err = -ENOMEM;
7868 goto out;
7869 }
1d0a082d
AK
7870 priv = hw->priv;
7871 /* At this point both hw and priv are allocated. */
7872
b481de9c
ZY
7873 SET_IEEE80211_DEV(hw, &pdev->dev);
7874
7875 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
82b9a121 7876 priv->cfg = cfg;
b481de9c 7877 priv->pci_dev = pdev;
316c30d9 7878
0a6857e7 7879#ifdef CONFIG_IWLWIFI_DEBUG
1ea87396 7880 iwl_debug_level = priv->cfg->mod_params->debug;
b481de9c
ZY
7881 atomic_set(&priv->restrict_refcnt, 0);
7882#endif
b481de9c 7883
316c30d9
AK
7884 /**************************
7885 * 2. Initializing PCI bus
7886 **************************/
7887 if (pci_enable_device(pdev)) {
7888 err = -ENODEV;
7889 goto out_ieee80211_free_hw;
7890 }
7891
7892 pci_set_master(pdev);
7893
7894 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7895 if (!err)
7896 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
7897 if (err) {
7898 printk(KERN_WARNING DRV_NAME
7899 ": No suitable DMA available.\n");
7900 goto out_pci_disable_device;
7901 }
7902
7903 err = pci_request_regions(pdev, DRV_NAME);
7904 if (err)
7905 goto out_pci_disable_device;
7906
7907 pci_set_drvdata(pdev, priv);
7908
7909 /* We disable the RETRY_TIMEOUT register (0x41) to keep
7910 * PCI Tx retries from interfering with C3 CPU state */
7911 pci_write_config_byte(pdev, 0x41, 0x00);
7912
7913 /***********************
7914 * 3. Read REV register
7915 ***********************/
7916 priv->hw_base = pci_iomap(pdev, 0, 0);
7917 if (!priv->hw_base) {
7918 err = -ENODEV;
7919 goto out_pci_release_regions;
7920 }
7921
7922 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
7923 (unsigned long long) pci_resource_len(pdev, 0));
7924 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
7925
7926 printk(KERN_INFO DRV_NAME
7927 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name);
7928
7929 /*****************
7930 * 4. Read EEPROM
7931 *****************/
7932 /* nic init */
3395f6e9 7933 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
316c30d9
AK
7934 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
7935
3395f6e9
TW
7936 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7937 err = iwl_poll_bit(priv, CSR_GP_CNTRL,
316c30d9
AK
7938 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7939 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
7940 if (err < 0) {
7941 IWL_DEBUG_INFO("Failed to init the card\n");
7942 goto out_iounmap;
7943 }
7944 /* Read the EEPROM */
7945 err = iwl_eeprom_init(priv);
7946 if (err) {
7947 IWL_ERROR("Unable to init EEPROM\n");
7948 goto out_iounmap;
7949 }
7950 /* MAC Address location in EEPROM same for 3945/4965 */
7951 iwl_eeprom_get_mac(priv, priv->mac_addr);
7952 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
7953 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
7954
7955 /************************
7956 * 5. Setup HW constants
7957 ************************/
7958 /* Device-specific setup */
7959 if (iwl4965_hw_set_hw_setting(priv)) {
7960 IWL_ERROR("failed to set hw settings\n");
7961 goto out_iounmap;
7962 }
7963
7964 /*******************
7965 * 6. Setup hw/priv
7966 *******************/
b481de9c 7967
bf85ea4f
AK
7968 err = iwl_setup(priv);
7969 if (err)
316c30d9 7970 goto out_unset_hw_settings;
bf85ea4f 7971 /* At this point both hw and priv are initialized. */
316c30d9
AK
7972
7973 /**********************************
7974 * 7. Initialize module parameters
7975 **********************************/
7976
7977 /* Disable radio (SW RF KILL) via parameter when loading driver */
1ea87396 7978 if (priv->cfg->mod_params->disable) {
316c30d9
AK
7979 set_bit(STATUS_RF_KILL_SW, &priv->status);
7980 IWL_DEBUG_INFO("Radio disabled.\n");
7981 }
7982
1ea87396 7983 if (priv->cfg->mod_params->enable_qos)
316c30d9
AK
7984 priv->qos_data.qos_enable = 1;
7985
7986 /********************
7987 * 8. Setup services
7988 ********************/
0359facc 7989 spin_lock_irqsave(&priv->lock, flags);
316c30d9 7990 iwl4965_disable_interrupts(priv);
0359facc 7991 spin_unlock_irqrestore(&priv->lock, flags);
316c30d9
AK
7992
7993 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7994 if (err) {
7995 IWL_ERROR("failed to create sysfs device attributes\n");
bf85ea4f 7996 goto out_unset_hw_settings;
316c30d9
AK
7997 }
7998
7999 err = iwl_dbgfs_register(priv, DRV_NAME);
8000 if (err) {
8001 IWL_ERROR("failed to create debugfs files\n");
8002 goto out_remove_sysfs;
8003 }
8004
8005 iwl4965_setup_deferred_work(priv);
8006 iwl4965_setup_rx_handlers(priv);
8007
8008 /********************
8009 * 9. Conclude
8010 ********************/
5a66926a
ZY
8011 pci_save_state(pdev);
8012 pci_disable_device(pdev);
b481de9c 8013
c8381fdc
MA
8014 /* notify iwlcore to init */
8015 iwlcore_low_level_notify(priv, IWLCORE_INIT_EVT);
b481de9c
ZY
8016 return 0;
8017
316c30d9
AK
8018 out_remove_sysfs:
8019 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
316c30d9 8020 out_unset_hw_settings:
bb8c093b 8021 iwl4965_unset_hw_setting(priv);
b481de9c
ZY
8022 out_iounmap:
8023 pci_iounmap(pdev, priv->hw_base);
8024 out_pci_release_regions:
8025 pci_release_regions(pdev);
316c30d9 8026 pci_set_drvdata(pdev, NULL);
b481de9c
ZY
8027 out_pci_disable_device:
8028 pci_disable_device(pdev);
b481de9c
ZY
8029 out_ieee80211_free_hw:
8030 ieee80211_free_hw(priv->hw);
8031 out:
8032 return err;
8033}
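/*
 * Error handling in the probe routine above uses the usual goto-ladder
 * unwind: each out_* label releases exactly what was set up before the
 * failing step, in reverse order (sysfs group, hw settings, iomap, PCI
 * regions, PCI device, ieee80211_hw).  Also note the deliberate twist
 * at step 9: the PCI state is saved and the device disabled again, so
 * after a successful probe the hardware stays powered down until
 * mac80211 invokes iwl4965_mac_start().
 */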
8034
c83dbf68 8035static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
b481de9c 8036{
c79dd5b5 8037 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c
ZY
8038 struct list_head *p, *q;
8039 int i;
0359facc 8040 unsigned long flags;
b481de9c
ZY
8041
8042 if (!priv)
8043 return;
8044
8045 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
8046
c4f55232
RR
8047 if (priv->mac80211_registered) {
8048 ieee80211_unregister_hw(priv->hw);
8049 priv->mac80211_registered = 0;
8050 }
8051
b481de9c 8052 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 8053
bb8c093b 8054 iwl4965_down(priv);
b481de9c 8055
0359facc
MA
8056 /* make sure we flush any pending irq or
8057 * tasklet for the driver
8058 */
8059 spin_lock_irqsave(&priv->lock, flags);
8060 iwl4965_disable_interrupts(priv);
8061 spin_unlock_irqrestore(&priv->lock, flags);
8062
8063 iwl_synchronize_irq(priv);
8064
b481de9c
ZY
8065 /* Free MAC hash list for ADHOC */
8066 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
8067 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
8068 list_del(p);
bb8c093b 8069 kfree(list_entry(p, struct iwl4965_ibss_seq, list));
b481de9c
ZY
8070 }
8071 }
8072
c8381fdc 8073 iwlcore_low_level_notify(priv, IWLCORE_REMOVE_EVT);
712b6cf5 8074 iwl_dbgfs_unregister(priv);
bb8c093b 8075 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c 8076
bb8c093b 8077 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
8078
8079 if (priv->rxq.bd)
bb8c093b
CH
8080 iwl4965_rx_queue_free(priv, &priv->rxq);
8081 iwl4965_hw_txq_ctx_free(priv);
b481de9c 8082
bb8c093b 8083 iwl4965_unset_hw_setting(priv);
bf85ea4f 8084 iwlcore_clear_stations_table(priv);
b481de9c 8085
b481de9c 8086
948c171c
MA
8087 /*netif_stop_queue(dev); */
8088 flush_workqueue(priv->workqueue);
8089
bb8c093b 8090 /* ieee80211_unregister_hw calls iwl4965_mac_stop, which flushes
b481de9c
ZY
8091 * priv->workqueue... so we can't take down the workqueue
8092 * until now... */
8093 destroy_workqueue(priv->workqueue);
8094 priv->workqueue = NULL;
8095
b481de9c
ZY
8096 pci_iounmap(pdev, priv->hw_base);
8097 pci_release_regions(pdev);
8098 pci_disable_device(pdev);
8099 pci_set_drvdata(pdev, NULL);
8100
bf85ea4f 8101 iwl_free_channel_map(priv);
849e0dce 8102 iwl4965_free_geos(priv);
b481de9c
ZY
8103
8104 if (priv->ibss_beacon)
8105 dev_kfree_skb(priv->ibss_beacon);
8106
8107 ieee80211_free_hw(priv->hw);
8108}
8109
8110#ifdef CONFIG_PM
8111
bb8c093b 8112static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 8113{
c79dd5b5 8114 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c 8115
e655b9f0
ZY
8116 if (priv->is_open) {
8117 set_bit(STATUS_IN_SUSPEND, &priv->status);
8118 iwl4965_mac_stop(priv->hw);
8119 priv->is_open = 1;
8120 }
b481de9c 8121
b481de9c
ZY
8122 pci_set_power_state(pdev, PCI_D3hot);
8123
b481de9c
ZY
8124 return 0;
8125}
8126
bb8c093b 8127static int iwl4965_pci_resume(struct pci_dev *pdev)
b481de9c 8128{
c79dd5b5 8129 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c 8130
b481de9c 8131 pci_set_power_state(pdev, PCI_D0);
b481de9c 8132
e655b9f0
ZY
8133 if (priv->is_open)
8134 iwl4965_mac_start(priv->hw);
b481de9c 8135
e655b9f0 8136 clear_bit(STATUS_IN_SUSPEND, &priv->status);
b481de9c
ZY
8137 return 0;
8138}
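/*
 * Suspend/resume piggy-back on the mac80211 start/stop paths: if the
 * interface was up, suspend stops it via iwl4965_mac_stop() (which
 * clears priv->is_open) and then sets is_open back to 1 purely as a
 * "was open" marker for resume.  STATUS_IN_SUSPEND, set here and
 * cleared in resume, is what makes the subsequent iwl4965_mac_start()
 * return early instead of waiting for the uCode ALIVE notification.
 */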
8139
8140#endif /* CONFIG_PM */
8141
8142/*****************************************************************************
8143 *
8144 * driver and module entry point
8145 *
8146 *****************************************************************************/
8147
bb8c093b 8148static struct pci_driver iwl4965_driver = {
b481de9c 8149 .name = DRV_NAME,
bb8c093b
CH
8150 .id_table = iwl4965_hw_card_ids,
8151 .probe = iwl4965_pci_probe,
8152 .remove = __devexit_p(iwl4965_pci_remove),
b481de9c 8153#ifdef CONFIG_PM
bb8c093b
CH
8154 .suspend = iwl4965_pci_suspend,
8155 .resume = iwl4965_pci_resume,
b481de9c
ZY
8156#endif
8157};
8158
bb8c093b 8159static int __init iwl4965_init(void)
b481de9c
ZY
8160{
8161
8162 int ret;
8163 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
8164 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
897e1cf2
RC
8165
8166 ret = iwl4965_rate_control_register();
8167 if (ret) {
8168 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret);
8169 return ret;
8170 }
8171
bb8c093b 8172 ret = pci_register_driver(&iwl4965_driver);
b481de9c
ZY
8173 if (ret) {
8174 IWL_ERROR("Unable to initialize PCI module\n");
897e1cf2 8175 goto error_register;
b481de9c 8176 }
0a6857e7 8177#ifdef CONFIG_IWLWIFI_DEBUG
bb8c093b 8178 ret = driver_create_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c
ZY
8179 if (ret) {
8180 IWL_ERROR("Unable to create driver sysfs file\n");
897e1cf2 8181 goto error_debug;
b481de9c
ZY
8182 }
8183#endif
8184
8185 return ret;
897e1cf2
RC
8186
8187#ifdef CONFIG_IWLWIFI_DEBUG
8188error_debug:
8189 pci_unregister_driver(&iwl4965_driver);
8190#endif
8191error_register:
8192 iwl4965_rate_control_unregister();
8193 return ret;
b481de9c
ZY
8194}
8195
bb8c093b 8196static void __exit iwl4965_exit(void)
b481de9c 8197{
0a6857e7 8198#ifdef CONFIG_IWLWIFI_DEBUG
bb8c093b 8199 driver_remove_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c 8200#endif
bb8c093b 8201 pci_unregister_driver(&iwl4965_driver);
897e1cf2 8202 iwl4965_rate_control_unregister();
b481de9c
ZY
8203}
8204
bb8c093b
CH
8205module_exit(iwl4965_exit);
8206module_init(iwl4965_init);