1/******************************************************************************
2 *
01f8162a 3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
759ef89f 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/pci.h>
34#include <linux/dma-mapping.h>
35#include <linux/delay.h>
36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/wireless.h>
39#include <linux/firmware.h>
40#include <linux/etherdevice.h>
41#include <linux/if_arp.h>
42
43#include <net/ieee80211_radiotap.h>
7e272fcf 44#include <net/lib80211.h>
45#include <net/mac80211.h>
46
47#include <asm/div64.h>
48
49#define DRV_NAME "iwl3945"
50
51#include "iwl-fh.h"
52#include "iwl-3945-fh.h"
600c0e11 53#include "iwl-commands.h"
54#include "iwl-3945.h"
55#include "iwl-helpers.h"
5747d47f 56#include "iwl-core.h"
d20b3c65 57#include "iwl-dev.h"
b481de9c 58
59/*
60 * module name, copyright, version, etc.
61 */
62
63#define DRV_DESCRIPTION \
64"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
65
c8b0e6e1 66#ifdef CONFIG_IWL3945_DEBUG
67#define VD "d"
68#else
69#define VD
70#endif
71
c8b0e6e1 72#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
73#define VS "s"
74#else
75#define VS
76#endif
77
eaa686c3 78#define IWL39_VERSION "1.2.26k" VD VS
01f8162a 79#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
a7b75207 80#define DRV_AUTHOR "<ilw@linux.intel.com>"
eaa686c3 81#define DRV_VERSION IWL39_VERSION
b481de9c 82
83
84MODULE_DESCRIPTION(DRV_DESCRIPTION);
85MODULE_VERSION(DRV_VERSION);
a7b75207 86MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
87MODULE_LICENSE("GPL");
88
89 /* module parameters */
90struct iwl_mod_params iwl3945_mod_params = {
91 .num_of_queues = IWL39_MAX_NUM_QUEUES,
9c74d9fb 92 .sw_crypto = 1,
93 /* the rest are 0 by default */
94};
95
96/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
97 * DMA services
98 *
99 * Theory of operation
100 *
101 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
102 * of buffer descriptors, each of which points to one or more data buffers for
103 * the device to read from or fill. Driver and device exchange status of each
104 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
105 * entries in each circular buffer, to protect against confusing empty and full
106 * queue states.
107 *
108 * The device reads or writes the data in the queues via the device's several
109 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
110 *
 111 * For Tx queues, there are low mark and high mark limits. If, after queuing
 112 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
 113 * reclaiming packets (on 'tx done' IRQ), if free space becomes > high mark, the
 114 * Tx queue is resumed.
115 *
116 * The 3945 operates with six queues: One receive queue, one transmit queue
117 * (#4) for sending commands to the device firmware, and four transmit queues
118 * (#0-3) for data tx via EDCA. An additional 2 HCCA queues are unused.
119 ***************************************************/
120
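/*
 * Illustrative sketch (not from the original driver; kept out of the build
 * with #if 0): a minimal model of the read/write-pointer bookkeeping
 * described above. Names are invented for the example. A queue is empty when
 * read == write, and keeping a couple of entries unused ensures a full queue
 * never looks identical to an empty one.
 */
#if 0
struct example_queue {
	unsigned int n_bd;	/* number of buffer descriptors (power of two) */
	unsigned int write_ptr;	/* next entry the driver will fill */
	unsigned int read_ptr;	/* next entry already handled/reclaimed */
};

/* Number of descriptors currently in use. */
static unsigned int example_queue_used(const struct example_queue *q)
{
	return (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
}

/* Keep at least two entries free so "full" is never confused with "empty". */
static int example_queue_has_room(const struct example_queue *q)
{
	return example_queue_used(q) < q->n_bd - 2;
}
#endif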
121/**
122 * iwl3945_queue_init - Initialize queue's high/low-water and read/write indexes
123 */
4a8a4322 124static int iwl3945_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
125 int count, int slots_num, u32 id)
126{
127 q->n_bd = count;
128 q->n_window = slots_num;
129 q->id = id;
130
131 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
132 * and iwl_queue_dec_wrap are broken. */
133 BUG_ON(!is_power_of_2(count));
134
135 /* slots_num must be power-of-two size, otherwise
136 * get_cmd_index is broken. */
137 BUG_ON(!is_power_of_2(slots_num));
138
139 q->low_mark = q->n_window / 4;
140 if (q->low_mark < 4)
141 q->low_mark = 4;
142
143 q->high_mark = q->n_window / 8;
144 if (q->high_mark < 2)
145 q->high_mark = 2;
146
fc4b6853 147 q->write_ptr = q->read_ptr = 0;
148
149 return 0;
150}
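/*
 * Illustrative sketch (not the driver's iwl_queue_inc_wrap/iwl_queue_dec_wrap
 * helpers; kept out of the build with #if 0): one common way to advance and
 * wrap an index when, as the BUG_ON above guarantees, the ring size is a
 * power of two -- a single mask replaces a modulo.
 */
#if 0
static inline int example_inc_wrap(int index, int n_bd)
{
	return (index + 1) & (n_bd - 1);	/* requires n_bd to be a power of two */
}

static inline int example_dec_wrap(int index, int n_bd)
{
	return (index - 1) & (n_bd - 1);
}
#endif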
151
152/**
153 * iwl3945_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
154 */
4a8a4322 155static int iwl3945_tx_queue_alloc(struct iwl_priv *priv,
188cf6c7 156 struct iwl_tx_queue *txq, u32 id)
157{
158 struct pci_dev *dev = priv->pci_dev;
159
160 /* Driver private data, only for Tx (not command) queues,
161 * not shared with device. */
162 if (id != IWL_CMD_QUEUE_NUM) {
163 txq->txb = kmalloc(sizeof(txq->txb[0]) *
164 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
165 if (!txq->txb) {
15b1687c 166 IWL_ERR(priv, "kmalloc for auxiliary BD "
167 "structures failed\n");
168 goto error;
169 }
170 } else
171 txq->txb = NULL;
172
173 /* Circular buffer of transmit frame descriptors (TFDs),
174 * shared with device */
175 txq->tfds39 = pci_alloc_consistent(dev,
176 sizeof(txq->tfds39[0]) * TFD_QUEUE_SIZE_MAX,
177 &txq->q.dma_addr);
178
188cf6c7 179 if (!txq->tfds39) {
15b1687c 180 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n",
188cf6c7 181 sizeof(txq->tfds39[0]) * TFD_QUEUE_SIZE_MAX);
182 goto error;
183 }
184 txq->q.id = id;
185
186 return 0;
187
188 error:
189 kfree(txq->txb);
190 txq->txb = NULL;
191
192 return -ENOMEM;
193}
194
195/**
196 * iwl3945_tx_queue_init - Allocate and initialize one tx/cmd queue
197 */
4a8a4322 198int iwl3945_tx_queue_init(struct iwl_priv *priv,
188cf6c7 199 struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
b481de9c 200{
188cf6c7 201 int len, i;
202 int rc = 0;
203
204 /*
205 * Alloc buffer array for commands (Tx or other types of commands).
206 * For the command queue (#4), allocate command space + one big
 207 * command for scan, since the scan command is very large; the system will
208 * not have two scans at the same time, so only one is needed.
209 * For data Tx queues (all other queues), no super-size command
210 * space is needed.
211 */
212 len = sizeof(struct iwl_cmd);
213 for (i = 0; i <= slots_num; i++) {
214 if (i == slots_num) {
215 if (txq_id == IWL_CMD_QUEUE_NUM)
216 len += IWL_MAX_SCAN_SIZE;
217 else
218 continue;
219 }
220
221 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
222 if (!txq->cmd[i])
223 goto err;
224 }
b481de9c 225
6440adb5 226 /* Alloc driver data array and TFD circular buffer */
bb8c093b 227 rc = iwl3945_tx_queue_alloc(priv, txq, txq_id);
228 if (rc)
229 goto err;
b481de9c 230
231 txq->need_update = 0;
232
233 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
c54b679d 234 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
b481de9c 235 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
236
237 /* Initialize queue high/low-water, head/tail indexes */
bb8c093b 238 iwl3945_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
b481de9c 239
6440adb5 240 /* Tell device where to find queue, enable DMA channel. */
bb8c093b 241 iwl3945_hw_tx_queue_init(priv, txq);
242
243 return 0;
244err:
245 for (i = 0; i < slots_num; i++) {
246 kfree(txq->cmd[i]);
247 txq->cmd[i] = NULL;
248 }
249
250 if (txq_id == IWL_CMD_QUEUE_NUM) {
251 kfree(txq->cmd[slots_num]);
252 txq->cmd[slots_num] = NULL;
253 }
254 return -ENOMEM;
255}
256
257/**
bb8c093b 258 * iwl3945_tx_queue_free - Deallocate DMA queue.
259 * @txq: Transmit queue to deallocate.
260 *
261 * Empty queue by removing and destroying all BD's.
262 * Free all buffers.
263 * 0-fill, but do not free "txq" descriptor structure.
b481de9c 264 */
188cf6c7 265void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
b481de9c 266{
d20b3c65 267 struct iwl_queue *q = &txq->q;
b481de9c 268 struct pci_dev *dev = priv->pci_dev;
188cf6c7 269 int len, i;
270
271 if (q->n_bd == 0)
272 return;
273
274 /* first, empty all BD's */
fc4b6853 275 for (; q->write_ptr != q->read_ptr;
c54b679d 276 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
7aaa1d79 277 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
b481de9c 278
c2d79b48 279 len = sizeof(struct iwl_cmd) * q->n_window;
280 if (q->id == IWL_CMD_QUEUE_NUM)
281 len += IWL_MAX_SCAN_SIZE;
282
6440adb5 283 /* De-alloc array of command/tx buffers */
284 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
285 kfree(txq->cmd[i]);
b481de9c 286
6440adb5 287 /* De-alloc circular buffer of TFDs */
b481de9c 288 if (txq->q.n_bd)
dbb6654c 289 pci_free_consistent(dev, sizeof(struct iwl3945_tfd) *
188cf6c7 290 txq->q.n_bd, txq->tfds39, txq->q.dma_addr);
b481de9c 291
6440adb5 292 /* De-alloc array of per-TFD driver data */
293 kfree(txq->txb);
294 txq->txb = NULL;
b481de9c 295
6440adb5 296 /* 0-fill queue descriptor structure */
297 memset(txq, 0, sizeof(*txq));
298}
299
b481de9c 300/*************** STATION TABLE MANAGEMENT ****
9fbab516 301 * mac80211 should be examined to determine if sta_info is duplicating
302 * the functionality provided here
303 */
304
305/**************************************************************/
01ebd063 306#if 0 /* temporary disable till we add real remove station */
307/**
308 * iwl3945_remove_station - Remove driver's knowledge of station.
309 *
310 * NOTE: This does not remove station from device's station table.
311 */
4a8a4322 312static u8 iwl3945_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
313{
314 int index = IWL_INVALID_STATION;
315 int i;
316 unsigned long flags;
317
318 spin_lock_irqsave(&priv->sta_lock, flags);
319
320 if (is_ap)
321 index = IWL_AP_ID;
322 else if (is_broadcast_ether_addr(addr))
3832ec9d 323 index = priv->hw_params.bcast_sta_id;
b481de9c 324 else
3832ec9d 325 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
326 if (priv->stations_39[i].used &&
327 !compare_ether_addr(priv->stations_39[i].sta.sta.addr,
328 addr)) {
329 index = i;
330 break;
331 }
332
333 if (unlikely(index == IWL_INVALID_STATION))
334 goto out;
335
336 if (priv->stations_39[index].used) {
337 priv->stations_39[index].used = 0;
338 priv->num_stations--;
339 }
340
341 BUG_ON(priv->num_stations < 0);
342
343out:
344 spin_unlock_irqrestore(&priv->sta_lock, flags);
345 return 0;
346}
556f8db7 347#endif
348
349/**
350 * iwl3945_clear_stations_table - Clear the driver's station table
351 *
352 * NOTE: This does not clear or otherwise alter the device's station table.
353 */
4a8a4322 354static void iwl3945_clear_stations_table(struct iwl_priv *priv)
355{
356 unsigned long flags;
357
358 spin_lock_irqsave(&priv->sta_lock, flags);
359
360 priv->num_stations = 0;
f2c7e521 361 memset(priv->stations_39, 0, sizeof(priv->stations_39));
362
363 spin_unlock_irqrestore(&priv->sta_lock, flags);
364}
365
366/**
367 * iwl3945_add_station - Add station to station tables in driver and device
368 */
4a8a4322 369u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
370{
371 int i;
372 int index = IWL_INVALID_STATION;
bb8c093b 373 struct iwl3945_station_entry *station;
b481de9c 374 unsigned long flags_spin;
c14c521e 375 u8 rate;
376
377 spin_lock_irqsave(&priv->sta_lock, flags_spin);
378 if (is_ap)
379 index = IWL_AP_ID;
380 else if (is_broadcast_ether_addr(addr))
3832ec9d 381 index = priv->hw_params.bcast_sta_id;
b481de9c 382 else
3832ec9d 383 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
f2c7e521 384 if (!compare_ether_addr(priv->stations_39[i].sta.sta.addr,
385 addr)) {
386 index = i;
387 break;
388 }
389
f2c7e521 390 if (!priv->stations_39[i].used &&
391 index == IWL_INVALID_STATION)
392 index = i;
393 }
394
01ebd063 395 /* These two conditions have the same outcome, but keep them separate
 396 since they have different meanings */
397 if (unlikely(index == IWL_INVALID_STATION)) {
398 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
399 return index;
400 }
401
402 if (priv->stations_39[index].used &&
403 !compare_ether_addr(priv->stations_39[index].sta.sta.addr, addr)) {
404 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
405 return index;
406 }
407
e174961c 408 IWL_DEBUG_ASSOC("Add STA ID %d: %pM\n", index, addr);
f2c7e521 409 station = &priv->stations_39[index];
410 station->used = 1;
411 priv->num_stations++;
412
6440adb5 413 /* Set up the REPLY_ADD_STA command to send to device */
bb8c093b 414 memset(&station->sta, 0, sizeof(struct iwl3945_addsta_cmd));
415 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
416 station->sta.mode = 0;
417 station->sta.sta.sta_id = index;
418 station->sta.station_flags = 0;
419
8318d78a 420 if (priv->band == IEEE80211_BAND_5GHZ)
421 rate = IWL_RATE_6M_PLCP;
422 else
423 rate = IWL_RATE_1M_PLCP;
424
425 /* Turn on both antennas for the station... */
426 station->sta.rate_n_flags =
bb8c093b 427 iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);
c14c521e 428
b481de9c 429 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
430
431 /* Add station to device's station table */
bb8c093b 432 iwl3945_send_add_station(priv, &station->sta, flags);
433 return index;
434
435}
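/*
 * Illustrative sketch (simplified; not the driver code, kept out of the build
 * with #if 0): the slot-selection idea used by iwl3945_add_station() above --
 * reuse an entry whose address already matches, otherwise fall back to the
 * first unused slot. Types and names are invented for the example.
 */
#if 0
struct example_sta {
	int used;
	unsigned char addr[6];
};

static int example_find_slot(const struct example_sta *tbl, int n,
			     const unsigned char *addr)
{
	int i, free_slot = -1;

	for (i = 0; i < n; i++) {
		if (tbl[i].used && !memcmp(tbl[i].addr, addr, 6))
			return i;		/* station already known: reuse it */
		if (!tbl[i].used && free_slot < 0)
			free_slot = i;		/* remember first unused entry */
	}
	return free_slot;			/* -1: table is full */
}
#endif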
436
518099a8 437int iwl3945_send_statistics_request(struct iwl_priv *priv)
b481de9c 438{
518099a8 439 u32 val = 0;
b481de9c 440
c2d79b48 441 struct iwl_host_cmd cmd = {
518099a8 442 .id = REPLY_STATISTICS_CMD,
443 .len = sizeof(val),
444 .data = &val,
445 };
446
518099a8 447 return iwl_send_cmd_sync(priv, &cmd);
448}
449
b481de9c 450/**
bb8c093b 451 * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON
452 * @band: 2.4 or 5 GHz band
453 * @channel: Any channel valid for the requested band
b481de9c 454 *
8318d78a 455 * In addition to setting the staging RXON, priv->band is also set.
456 *
457 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
8318d78a 458 * in the staging RXON flag structure based on the band
b481de9c 459 */
4a8a4322 460static int iwl3945_set_rxon_channel(struct iwl_priv *priv,
461 enum ieee80211_band band,
462 u16 channel)
b481de9c 463{
8318d78a 464 if (!iwl3945_get_channel_info(priv, band, channel)) {
b481de9c 465 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
8318d78a 466 channel, band);
467 return -EINVAL;
468 }
469
f2c7e521 470 if ((le16_to_cpu(priv->staging39_rxon.channel) == channel) &&
8318d78a 471 (priv->band == band))
472 return 0;
473
f2c7e521 474 priv->staging39_rxon.channel = cpu_to_le16(channel);
8318d78a 475 if (band == IEEE80211_BAND_5GHZ)
f2c7e521 476 priv->staging39_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
b481de9c 477 else
f2c7e521 478 priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;
b481de9c 479
8318d78a 480 priv->band = band;
b481de9c 481
8318d78a 482 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);
483
484 return 0;
485}
486
487/**
bb8c093b 488 * iwl3945_check_rxon_cmd - validate RXON structure is valid
489 *
490 * NOTE: This is really only useful during development and can eventually
491 * be #ifdef'd out once the driver is stable and folks aren't actively
492 * making changes
493 */
4a8a4322 494static int iwl3945_check_rxon_cmd(struct iwl_priv *priv)
495{
496 int error = 0;
497 int counter = 1;
f2c7e521 498 struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;
499
500 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
501 error |= le32_to_cpu(rxon->flags &
502 (RXON_FLG_TGJ_NARROW_BAND_MSK |
503 RXON_FLG_RADAR_DETECT_MSK));
504 if (error)
39aadf8c 505 IWL_WARN(priv, "check 24G fields %d | %d\n",
506 counter++, error);
507 } else {
508 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
509 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
510 if (error)
39aadf8c 511 IWL_WARN(priv, "check 52 fields %d | %d\n",
512 counter++, error);
513 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
514 if (error)
39aadf8c 515 IWL_WARN(priv, "check 52 CCK %d | %d\n",
516 counter++, error);
517 }
518 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
519 if (error)
39aadf8c 520 IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);
521
522 /* make sure basic rates 6Mbps and 1Mbps are supported */
523 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
524 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
525 if (error)
39aadf8c 526 IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);
527
528 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
529 if (error)
39aadf8c 530 IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);
531
532 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
533 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
534 if (error)
39aadf8c 535 IWL_WARN(priv, "check CCK and short slot %d | %d\n",
536 counter++, error);
537
538 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
539 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
540 if (error)
39aadf8c 541 IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
542 counter++, error);
543
544 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
545 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
546 if (error)
39aadf8c 547 IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
548 counter++, error);
549
550 if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
551 error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
552 RXON_FLG_ANT_A_MSK)) == 0);
553 if (error)
39aadf8c 554 IWL_WARN(priv, "check antenna %d %d\n", counter++, error);
555
556 if (error)
39aadf8c 557 IWL_WARN(priv, "Tuning to channel %d\n",
558 le16_to_cpu(rxon->channel));
559
560 if (error) {
15b1687c 561 IWL_ERR(priv, "Invalid rxon_assoc_cmd field values\n");
562 return -1;
563 }
564 return 0;
565}
566
567/**
9fbab516 568 * iwl3945_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
01ebd063 569 * @priv: staging_rxon is compared to active_rxon
b481de9c 570 *
571 * If the RXON structure is changing enough to require a new tune,
572 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
573 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
b481de9c 574 */
4a8a4322 575static int iwl3945_full_rxon_required(struct iwl_priv *priv)
576{
577
578 /* These items are only settable from the full RXON command */
5d1e2325 579 if (!(iwl3945_is_associated(priv)) ||
580 compare_ether_addr(priv->staging39_rxon.bssid_addr,
581 priv->active39_rxon.bssid_addr) ||
582 compare_ether_addr(priv->staging39_rxon.node_addr,
583 priv->active39_rxon.node_addr) ||
584 compare_ether_addr(priv->staging39_rxon.wlap_bssid_addr,
585 priv->active39_rxon.wlap_bssid_addr) ||
586 (priv->staging39_rxon.dev_type != priv->active39_rxon.dev_type) ||
587 (priv->staging39_rxon.channel != priv->active39_rxon.channel) ||
588 (priv->staging39_rxon.air_propagation !=
589 priv->active39_rxon.air_propagation) ||
590 (priv->staging39_rxon.assoc_id != priv->active39_rxon.assoc_id))
591 return 1;
592
593 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
594 * be updated with the RXON_ASSOC command -- however only some
595 * flag transitions are allowed using RXON_ASSOC */
596
597 /* Check if we are not switching bands */
598 if ((priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
599 (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK))
b481de9c
ZY
600 return 1;
601
602 /* Check if we are switching association toggle */
603 if ((priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
604 (priv->active39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
605 return 1;
606
607 return 0;
608}
609
4a8a4322 610static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
611{
612 int rc = 0;
3d24a9f7 613 struct iwl_rx_packet *res = NULL;
bb8c093b 614 struct iwl3945_rxon_assoc_cmd rxon_assoc;
c2d79b48 615 struct iwl_host_cmd cmd = {
616 .id = REPLY_RXON_ASSOC,
617 .len = sizeof(rxon_assoc),
618 .meta.flags = CMD_WANT_SKB,
619 .data = &rxon_assoc,
620 };
621 const struct iwl3945_rxon_cmd *rxon1 = &priv->staging39_rxon;
622 const struct iwl3945_rxon_cmd *rxon2 = &priv->active39_rxon;
623
624 if ((rxon1->flags == rxon2->flags) &&
625 (rxon1->filter_flags == rxon2->filter_flags) &&
626 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
627 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
628 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
629 return 0;
630 }
631
632 rxon_assoc.flags = priv->staging39_rxon.flags;
633 rxon_assoc.filter_flags = priv->staging39_rxon.filter_flags;
634 rxon_assoc.ofdm_basic_rates = priv->staging39_rxon.ofdm_basic_rates;
635 rxon_assoc.cck_basic_rates = priv->staging39_rxon.cck_basic_rates;
636 rxon_assoc.reserved = 0;
637
518099a8 638 rc = iwl_send_cmd_sync(priv, &cmd);
639 if (rc)
640 return rc;
641
3d24a9f7 642 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
b481de9c 643 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
15b1687c 644 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
645 rc = -EIO;
646 }
647
648 priv->alloc_rxb_skb--;
649 dev_kfree_skb_any(cmd.meta.u.skb);
650
651 return rc;
652}
653
654/**
bb8c093b 655 * iwl3945_commit_rxon - commit staging_rxon to hardware
b481de9c 656 *
01ebd063 657 * The RXON command in staging_rxon is committed to the hardware and
658 * the active_rxon structure is updated with the new data. This
659 * function correctly transitions out of the RXON_ASSOC_MSK state if
660 * a HW tune is required based on the RXON structure changes.
661 */
4a8a4322 662static int iwl3945_commit_rxon(struct iwl_priv *priv)
663{
664 /* cast away the const for active_rxon in this function */
f2c7e521 665 struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active39_rxon;
666 int rc = 0;
667
775a6e27 668 if (!iwl_is_alive(priv))
669 return -1;
670
671 /* always get timestamp with Rx frame */
f2c7e521 672 priv->staging39_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
673
674 /* select antenna */
f2c7e521 675 priv->staging39_rxon.flags &=
b481de9c 676 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
f2c7e521 677 priv->staging39_rxon.flags |= iwl3945_get_antenna_flags(priv);
b481de9c 678
a3139c59 679 rc = iwl3945_check_rxon_cmd(priv);
b481de9c 680 if (rc) {
15b1687c 681 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
682 return -EINVAL;
683 }
684
685 /* If we don't need to send a full RXON, we can use
bb8c093b 686 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
b481de9c 687 * and other flags for the current radio configuration. */
688 if (!iwl3945_full_rxon_required(priv)) {
689 rc = iwl3945_send_rxon_assoc(priv);
b481de9c 690 if (rc) {
15b1687c 691 IWL_ERR(priv, "Error setting RXON_ASSOC "
692 "configuration (%d).\n", rc);
693 return rc;
694 }
695
f2c7e521 696 memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));
697
698 return 0;
699 }
700
701 /* If we are currently associated and the new config requires
702 * an RXON_ASSOC and the new config wants the associated mask enabled,
 703 * we must clear the associated bit from the active configuration
704 * before we apply the new config */
bb8c093b 705 if (iwl3945_is_associated(priv) &&
f2c7e521 706 (priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
707 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
708 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
709
518099a8 710 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
bb8c093b 711 sizeof(struct iwl3945_rxon_cmd),
f2c7e521 712 &priv->active39_rxon);
713
714 /* If the mask clearing failed then we set
715 * active_rxon back to what it was previously */
716 if (rc) {
717 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
15b1687c 718 IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
719 "configuration (%d).\n", rc);
720 return rc;
721 }
722 }
723
724 IWL_DEBUG_INFO("Sending RXON\n"
725 "* with%s RXON_FILTER_ASSOC_MSK\n"
726 "* channel = %d\n"
e174961c 727 "* bssid = %pM\n",
f2c7e521 728 ((priv->staging39_rxon.filter_flags &
b481de9c 729 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
f2c7e521 730 le16_to_cpu(priv->staging39_rxon.channel),
e174961c 731 priv->staging_rxon.bssid_addr);
732
733 /* Apply the new configuration */
518099a8 734 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
f2c7e521 735 sizeof(struct iwl3945_rxon_cmd), &priv->staging39_rxon);
b481de9c 736 if (rc) {
15b1687c 737 IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
738 return rc;
739 }
740
f2c7e521 741 memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));
b481de9c 742
bb8c093b 743 iwl3945_clear_stations_table(priv);
556f8db7 744
745 /* If we issue a new RXON command which required a tune then we must
746 * send a new TXPOWER command or we won't be able to Tx any frames */
bb8c093b 747 rc = iwl3945_hw_reg_send_txpower(priv);
b481de9c 748 if (rc) {
15b1687c 749 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
750 return rc;
751 }
752
753 /* Add the broadcast address so we can send broadcast frames */
b5323d36 754 if (iwl3945_add_station(priv, iwl_bcast_addr, 0, 0) ==
b481de9c 755 IWL_INVALID_STATION) {
15b1687c 756 IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
757 return -EIO;
758 }
759
760 /* If we have set the ASSOC_MSK and we are in BSS mode then
761 * add the IWL_AP_ID to the station rate table */
bb8c093b 762 if (iwl3945_is_associated(priv) &&
05c914fe 763 (priv->iw_mode == NL80211_IFTYPE_STATION))
f2c7e521 764 if (iwl3945_add_station(priv, priv->active39_rxon.bssid_addr, 1, 0)
b481de9c 765 == IWL_INVALID_STATION) {
15b1687c 766 IWL_ERR(priv, "Error adding AP address for transmit\n");
767 return -EIO;
768 }
769
8318d78a 770 /* Init the hardware's rate fallback order based on the band */
771 rc = iwl3945_init_hw_rate_table(priv);
772 if (rc) {
15b1687c 773 IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
774 return -EIO;
775 }
776
777 return 0;
778}
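/*
 * Illustrative usage sketch (kept out of the build with #if 0): how the RXON
 * helpers above are meant to fit together -- stage the new channel/band, then
 * commit, which validates the staging RXON and sends either the lightweight
 * RXON_ASSOC or a full RXON followed by TX power and station setup. The
 * wrapper name is invented for the example.
 */
#if 0
static int example_retune(struct iwl_priv *priv,
			  enum ieee80211_band band, u16 channel)
{
	int rc;

	rc = iwl3945_set_rxon_channel(priv, band, channel);
	if (rc)
		return rc;

	return iwl3945_commit_rxon(priv);
}
#endif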
779
4a8a4322 780static int iwl3945_send_bt_config(struct iwl_priv *priv)
b481de9c 781{
4c897253 782 struct iwl_bt_cmd bt_cmd = {
783 .flags = 3,
784 .lead_time = 0xAA,
785 .max_kill = 1,
786 .kill_ack_mask = 0,
787 .kill_cts_mask = 0,
788 };
789
518099a8 790 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
4c897253 791 sizeof(bt_cmd), &bt_cmd);
792}
793
4a8a4322 794static int iwl3945_send_scan_abort(struct iwl_priv *priv)
795{
796 int rc = 0;
3d24a9f7 797 struct iwl_rx_packet *res;
c2d79b48 798 struct iwl_host_cmd cmd = {
799 .id = REPLY_SCAN_ABORT_CMD,
800 .meta.flags = CMD_WANT_SKB,
801 };
802
803 /* If there isn't a scan actively going on in the hardware
804 * then we are in between scan bands and not actually
805 * actively scanning, so don't send the abort command */
806 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
807 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
808 return 0;
809 }
810
518099a8 811 rc = iwl_send_cmd_sync(priv, &cmd);
812 if (rc) {
813 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
814 return rc;
815 }
816
3d24a9f7 817 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
818 if (res->u.status != CAN_ABORT_STATUS) {
819 /* The scan abort will return 1 for success or
820 * 2 for "failure". A failure condition can be
821 * due to simply not being in an active scan which
822 * can occur if we send the scan abort before we
823 * the microcode has notified us that a scan is
824 * completed. */
825 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
826 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
827 clear_bit(STATUS_SCAN_HW, &priv->status);
828 }
829
830 dev_kfree_skb_any(cmd.meta.u.skb);
831
832 return rc;
833}
834
4a8a4322 835static int iwl3945_add_sta_sync_callback(struct iwl_priv *priv,
c2d79b48 836 struct iwl_cmd *cmd, struct sk_buff *skb)
b481de9c 837{
3d24a9f7 838 struct iwl_rx_packet *res = NULL;
839
840 if (!skb) {
15b1687c 841 IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
b481de9c
ZY
842 return 1;
843 }
844
3d24a9f7 845 res = (struct iwl_rx_packet *)skb->data;
b481de9c 846 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
15b1687c 847 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
848 res->hdr.flags);
849 return 1;
850 }
851
852 switch (res->u.add_sta.status) {
853 case ADD_STA_SUCCESS_MSK:
854 break;
855 default:
856 break;
857 }
858
859 /* We didn't cache the SKB; let the caller free it */
860 return 1;
861}
862
4a8a4322 863int iwl3945_send_add_station(struct iwl_priv *priv,
bb8c093b 864 struct iwl3945_addsta_cmd *sta, u8 flags)
b481de9c 865{
3d24a9f7 866 struct iwl_rx_packet *res = NULL;
b481de9c 867 int rc = 0;
c2d79b48 868 struct iwl_host_cmd cmd = {
b481de9c 869 .id = REPLY_ADD_STA,
bb8c093b 870 .len = sizeof(struct iwl3945_addsta_cmd),
871 .meta.flags = flags,
872 .data = sta,
873 };
874
875 if (flags & CMD_ASYNC)
bb8c093b 876 cmd.meta.u.callback = iwl3945_add_sta_sync_callback;
877 else
878 cmd.meta.flags |= CMD_WANT_SKB;
879
518099a8 880 rc = iwl_send_cmd(priv, &cmd);
881
882 if (rc || (flags & CMD_ASYNC))
883 return rc;
884
3d24a9f7 885 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
b481de9c 886 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
15b1687c 887 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
888 res->hdr.flags);
889 rc = -EIO;
890 }
891
892 if (rc == 0) {
893 switch (res->u.add_sta.status) {
894 case ADD_STA_SUCCESS_MSK:
895 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
896 break;
897 default:
898 rc = -EIO;
39aadf8c 899 IWL_WARN(priv, "REPLY_ADD_STA failed\n");
900 break;
901 }
902 }
903
904 priv->alloc_rxb_skb--;
905 dev_kfree_skb_any(cmd.meta.u.skb);
906
907 return rc;
908}
909
4a8a4322 910static int iwl3945_update_sta_key_info(struct iwl_priv *priv,
b481de9c
ZY
911 struct ieee80211_key_conf *keyconf,
912 u8 sta_id)
913{
914 unsigned long flags;
915 __le16 key_flags = 0;
916
917 switch (keyconf->alg) {
918 case ALG_CCMP:
919 key_flags |= STA_KEY_FLG_CCMP;
920 key_flags |= cpu_to_le16(
921 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
922 key_flags &= ~STA_KEY_FLG_INVALID;
923 break;
924 case ALG_TKIP:
925 case ALG_WEP:
926 default:
927 return -EINVAL;
928 }
929 spin_lock_irqsave(&priv->sta_lock, flags);
930 priv->stations_39[sta_id].keyinfo.alg = keyconf->alg;
931 priv->stations_39[sta_id].keyinfo.keylen = keyconf->keylen;
932 memcpy(priv->stations_39[sta_id].keyinfo.key, keyconf->key,
933 keyconf->keylen);
934
f2c7e521 935 memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
b481de9c 936 keyconf->keylen);
937 priv->stations_39[sta_id].sta.key.key_flags = key_flags;
938 priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
939 priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
940
941 spin_unlock_irqrestore(&priv->sta_lock, flags);
942
943 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
f2c7e521 944 iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
945 return 0;
946}
947
4a8a4322 948static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
949{
950 unsigned long flags;
951
952 spin_lock_irqsave(&priv->sta_lock, flags);
953 memset(&priv->stations_39[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key));
954 memset(&priv->stations_39[sta_id].sta.key, 0,
4c897253 955 sizeof(struct iwl4965_keyinfo));
956 priv->stations_39[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
957 priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
958 priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
959 spin_unlock_irqrestore(&priv->sta_lock, flags);
960
961 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
f2c7e521 962 iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
963 return 0;
964}
965
4a8a4322 966static void iwl3945_clear_free_frames(struct iwl_priv *priv)
967{
968 struct list_head *element;
969
970 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
971 priv->frames_count);
972
973 while (!list_empty(&priv->free_frames)) {
974 element = priv->free_frames.next;
975 list_del(element);
bb8c093b 976 kfree(list_entry(element, struct iwl3945_frame, list));
977 priv->frames_count--;
978 }
979
980 if (priv->frames_count) {
39aadf8c 981 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
982 priv->frames_count);
983 priv->frames_count = 0;
984 }
985}
986
4a8a4322 987static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
b481de9c 988{
bb8c093b 989 struct iwl3945_frame *frame;
990 struct list_head *element;
991 if (list_empty(&priv->free_frames)) {
992 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
993 if (!frame) {
15b1687c 994 IWL_ERR(priv, "Could not allocate frame!\n");
995 return NULL;
996 }
997
998 priv->frames_count++;
999 return frame;
1000 }
1001
1002 element = priv->free_frames.next;
1003 list_del(element);
bb8c093b 1004 return list_entry(element, struct iwl3945_frame, list);
1005}
1006
4a8a4322 1007static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
1008{
1009 memset(frame, 0, sizeof(*frame));
1010 list_add(&frame->list, &priv->free_frames);
1011}
1012
4a8a4322 1013unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
b481de9c 1014 struct ieee80211_hdr *hdr,
73ec1cc2 1015 int left)
1016{
1017
bb8c093b 1018 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
1019 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
1020 (priv->iw_mode != NL80211_IFTYPE_AP)))
1021 return 0;
1022
1023 if (priv->ibss_beacon->len > left)
1024 return 0;
1025
1026 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1027
1028 return priv->ibss_beacon->len;
1029}
1030
4a8a4322 1031static u8 iwl3945_rate_get_lowest_plcp(struct iwl_priv *priv)
1032{
1033 u8 i;
1034 int rate_mask;
1035
1036 /* Set rate mask*/
f2c7e521 1037 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
dbce56a4 1038 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
c24f0817 1039 else
dbce56a4 1040 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
1041
1042 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
bb8c093b 1043 i = iwl3945_rates[i].next_ieee) {
b481de9c 1044 if (rate_mask & (1 << i))
bb8c093b 1045 return iwl3945_rates[i].plcp;
1046 }
1047
c24f0817 1048 /* No valid rate was found. Assign the lowest one */
f2c7e521 1049 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
1050 return IWL_RATE_1M_PLCP;
1051 else
1052 return IWL_RATE_6M_PLCP;
1053}
1054
4a8a4322 1055static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
b481de9c 1056{
bb8c093b 1057 struct iwl3945_frame *frame;
1058 unsigned int frame_size;
1059 int rc;
1060 u8 rate;
1061
bb8c093b 1062 frame = iwl3945_get_free_frame(priv);
1063
1064 if (!frame) {
15b1687c 1065 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
1066 "command.\n");
1067 return -ENOMEM;
1068 }
1069
c24f0817 1070 rate = iwl3945_rate_get_lowest_plcp(priv);
b481de9c 1071
bb8c093b 1072 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
b481de9c 1073
518099a8 1074 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1075 &frame->u.cmd[0]);
1076
bb8c093b 1077 iwl3945_free_frame(priv, frame);
1078
1079 return rc;
1080}
1081
1082/******************************************************************************
1083 *
1084 * EEPROM related functions
1085 *
1086 ******************************************************************************/
1087
4a8a4322 1088static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
b481de9c 1089{
f2c7e521 1090 memcpy(mac, priv->eeprom39.mac_address, 6);
1091}
1092
1093/*
1094 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
1095 * embedded controller) as EEPROM reader; each read is a series of pulses
1096 * to/from the EEPROM chip, not a single event, so even reads could conflict
1097 * if they weren't arbitrated by some ownership mechanism. Here, the driver
1098 * simply claims ownership, which should be safe when this function is called
1099 * (i.e. before loading uCode!).
1100 */
4a8a4322 1101static inline int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
74a3a250 1102{
5d49f498 1103 _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
1104 return 0;
1105}
1106
b481de9c 1107/**
bb8c093b 1108 * iwl3945_eeprom_init - read EEPROM contents
b481de9c 1109 *
f2c7e521 1110 * Load the EEPROM contents from adapter into priv->eeprom39
1111 *
1112 * NOTE: This routine uses the non-debug IO access functions.
1113 */
4a8a4322 1114int iwl3945_eeprom_init(struct iwl_priv *priv)
b481de9c 1115{
f2c7e521 1116 u16 *e = (u16 *)&priv->eeprom39;
5d49f498 1117 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
f2c7e521 1118 int sz = sizeof(priv->eeprom39);
3d5717ad 1119 int ret;
1120 u16 addr;
1121
 1122 /* The EEPROM structure has several padding buffers within it,
 1123 * so adding new EEPROM maps is subject to programming errors
 1124 * that may be very difficult to identify without explicitly
 1125 * checking the resulting size of the EEPROM map. */
f2c7e521 1126 BUILD_BUG_ON(sizeof(priv->eeprom39) != IWL_EEPROM_IMAGE_SIZE);
1127
1128 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
15b1687c 1129 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
1130 return -ENOENT;
1131 }
1132
6440adb5 1133 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1134 ret = iwl3945_eeprom_acquire_semaphore(priv);
1135 if (ret < 0) {
15b1687c 1136 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
1137 return -ENOENT;
1138 }
1139
1140 /* eeprom is an array of 16bit values */
1141 for (addr = 0; addr < sz; addr += sizeof(u16)) {
3d5717ad 1142 u32 r;
b481de9c 1143
5d49f498 1144 _iwl_write32(priv, CSR_EEPROM_REG,
3d5717ad 1145 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
1146 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1147 ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
1148 CSR_EEPROM_REG_READ_VALID_MSK,
1149 IWL_EEPROM_ACCESS_TIMEOUT);
1150 if (ret < 0) {
15b1687c 1151 IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
3d5717ad 1152 return ret;
b481de9c 1153 }
3d5717ad 1154
5d49f498 1155 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
58ff6d4d 1156 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
1157 }
1158
1159 return 0;
1160}
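/*
 * Illustrative sketch (kept out of the build with #if 0) of the arithmetic in
 * the read loop above: each 16-bit EEPROM word arrives in the upper half of
 * the 32-bit register value, and byte offset addr maps to index addr/2 in the
 * u16 image. The function name is invented; the driver additionally converts
 * from the EEPROM's little-endian byte order with le16_to_cpu().
 */
#if 0
static void example_store_eeprom_word(unsigned short *image,
				      unsigned int byte_addr,
				      unsigned int reg_val)
{
	unsigned short word = (unsigned short)(reg_val >> 16);	/* data in bits 31..16 */

	image[byte_addr / 2] = word;	/* byte offset -> u16 index */
}
#endif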
1161
4a8a4322 1162static void iwl3945_unset_hw_params(struct iwl_priv *priv)
b481de9c 1163{
3832ec9d 1164 if (priv->shared_virt)
b481de9c 1165 pci_free_consistent(priv->pci_dev,
bb8c093b 1166 sizeof(struct iwl3945_shared),
1167 priv->shared_virt,
1168 priv->shared_phys);
1169}
1170
1171/**
bb8c093b 1172 * iwl3945_supported_rate_to_ie - fill in the supported rates in an IE field
 1173 *
 1174 * return: a bitmap with a bit set for each supported rate inserted in the IE
 1175 */
bb8c093b 1176static u16 iwl3945_supported_rate_to_ie(u8 *ie, u16 supported_rate,
c7c46676 1177 u16 basic_rate, int *left)
1178{
1179 u16 ret_rates = 0, bit;
1180 int i;
1181 u8 *cnt = ie;
1182 u8 *rates = ie + 1;
1183
1184 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1185 if (bit & supported_rate) {
1186 ret_rates |= bit;
bb8c093b 1187 rates[*cnt] = iwl3945_rates[i].ieee |
1188 ((bit & basic_rate) ? 0x80 : 0x00);
1189 (*cnt)++;
1190 (*left)--;
1191 if ((*left <= 0) ||
1192 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
1193 break;
1194 }
1195 }
1196
1197 return ret_rates;
1198}
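/*
 * Illustrative sketch (kept out of the build with #if 0): the IE layout the
 * helper above fills in. A Supported Rates element is a count byte followed
 * by one byte per rate, and the top bit (0x80) marks a basic rate -- exactly
 * the ((bit & basic_rate) ? 0x80 : 0x00) above. Names are invented and the
 * rate table is assumed to be supplied by the caller.
 */
#if 0
static unsigned short example_rates_to_ie(unsigned char *ie,
					  unsigned short supported,
					  unsigned short basic,
					  const unsigned char *ieee_rates,
					  int max_rates)
{
	unsigned char *cnt = ie;		/* running count byte */
	unsigned char *rates = ie + 1;		/* rate bytes follow the count */
	unsigned short done = 0;
	int i;

	for (i = 0; i < max_rates; i++) {
		unsigned short bit = 1 << i;

		if (!(supported & bit))
			continue;
		done |= bit;
		rates[(*cnt)++] = ieee_rates[i] | ((basic & bit) ? 0x80 : 0x00);
	}
	return done;	/* bits actually written into the IE */
}
#endif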
1199
1200/**
bb8c093b 1201 * iwl3945_fill_probe_req - fill in all required fields and IE for probe request
b481de9c 1202 */
4a8a4322 1203static u16 iwl3945_fill_probe_req(struct iwl_priv *priv,
b481de9c 1204 struct ieee80211_mgmt *frame,
430cfe95 1205 int left)
1206{
1207 int len = 0;
1208 u8 *pos = NULL;
c7c46676 1209 u16 active_rates, ret_rates, cck_rates;
1210
1211 /* Make sure there is enough space for the probe request,
1212 * two mandatory IEs and the data */
1213 left -= 24;
1214 if (left < 0)
1215 return 0;
1216 len += 24;
1217
1218 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
b5323d36 1219 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
b481de9c 1220 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
b5323d36 1221 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
1222 frame->seq_ctrl = 0;
1223
1224 /* fill in our indirect SSID IE */
1225 /* ...next IE... */
1226
1227 left -= 2;
1228 if (left < 0)
1229 return 0;
1230 len += 2;
1231 pos = &(frame->u.probe_req.variable[0]);
1232 *pos++ = WLAN_EID_SSID;
1233 *pos++ = 0;
1234
1235 /* fill in supported rate */
1236 /* ...next IE... */
1237 left -= 2;
1238 if (left < 0)
1239 return 0;
c7c46676 1240
1241 /* ... fill it in... */
1242 *pos++ = WLAN_EID_SUPP_RATES;
1243 *pos = 0;
1244
1245 priv->active_rate = priv->rates_mask;
1246 active_rates = priv->active_rate;
1247 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1248
c7c46676 1249 cck_rates = IWL_CCK_RATES_MASK & active_rates;
bb8c093b 1250 ret_rates = iwl3945_supported_rate_to_ie(pos, cck_rates,
1251 priv->active_rate_basic, &left);
1252 active_rates &= ~ret_rates;
1253
bb8c093b 1254 ret_rates = iwl3945_supported_rate_to_ie(pos, active_rates,
1255 priv->active_rate_basic, &left);
1256 active_rates &= ~ret_rates;
1257
1258 len += 2 + *pos;
1259 pos += (*pos) + 1;
c7c46676 1260 if (active_rates == 0)
1261 goto fill_end;
1262
1263 /* fill in supported extended rate */
1264 /* ...next IE... */
1265 left -= 2;
1266 if (left < 0)
1267 return 0;
1268 /* ... fill it in... */
1269 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1270 *pos = 0;
bb8c093b 1271 iwl3945_supported_rate_to_ie(pos, active_rates,
c7c46676 1272 priv->active_rate_basic, &left);
1273 if (*pos > 0)
1274 len += 2 + *pos;
1275
1276 fill_end:
1277 return (u16)len;
1278}
1279
1280/*
1281 * QoS support
1282*/
4a8a4322 1283static int iwl3945_send_qos_params_command(struct iwl_priv *priv,
4c897253 1284 struct iwl_qosparam_cmd *qos)
1285{
1286
518099a8 1287 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
4c897253 1288 sizeof(struct iwl_qosparam_cmd), qos);
1289}
1290
4a8a4322 1291static void iwl3945_activate_qos(struct iwl_priv *priv, u8 force)
1292{
1293 unsigned long flags;
1294
1295 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1296 return;
1297
1298 spin_lock_irqsave(&priv->lock, flags);
1299 priv->qos_data.def_qos_parm.qos_flags = 0;
1300
1301 if (priv->qos_data.qos_cap.q_AP.queue_request &&
1302 !priv->qos_data.qos_cap.q_AP.txop_request)
1303 priv->qos_data.def_qos_parm.qos_flags |=
1304 QOS_PARAM_FLG_TXOP_TYPE_MSK;
1305
1306 if (priv->qos_data.qos_active)
1307 priv->qos_data.def_qos_parm.qos_flags |=
1308 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
1309
1310 spin_unlock_irqrestore(&priv->lock, flags);
1311
bb8c093b 1312 if (force || iwl3945_is_associated(priv)) {
a96a27f9 1313 IWL_DEBUG_QOS("send QoS cmd with QoS active %d \n",
1314 priv->qos_data.qos_active);
1315
bb8c093b 1316 iwl3945_send_qos_params_command(priv,
1317 &(priv->qos_data.def_qos_parm));
1318 }
1319}
1320
1321/*
1322 * Power management (not Tx power!) functions
1323 */
1324#define MSEC_TO_USEC 1024
1325
600c0e11 1326
b481de9c 1327/* default power management (not Tx power) table values */
a96a27f9 1328/* for TIM 0-10 */
1329static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
1330 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1331 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
1332 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
1333 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
1334 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
1335 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1336};
1337
a96a27f9 1338/* for TIM > 10 */
1339static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
1340 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1341 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
1342 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
1343 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
1344 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
1345 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
1346};
1347
4a8a4322 1348int iwl3945_power_init_handle(struct iwl_priv *priv)
1349{
1350 int rc = 0, i;
1351 struct iwl_power_mgr *pow_data;
1352 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
1353 u16 pci_pm;
1354
1355 IWL_DEBUG_POWER("Initialize power \n");
1356
3dae0c42 1357 pow_data = &priv->power_data;
1358
1359 memset(pow_data, 0, sizeof(*pow_data));
1360
3dae0c42 1361 pow_data->dtim_period = 1;
1362
1363 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1364 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1365
1366 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1367 if (rc != 0)
1368 return 0;
1369 else {
600c0e11 1370 struct iwl_powertable_cmd *cmd;
1371
1372 IWL_DEBUG_POWER("adjust power command flags\n");
1373
3dae0c42 1374 for (i = 0; i < IWL_POWER_MAX; i++) {
1375 cmd = &pow_data->pwr_range_0[i].cmd;
1376
1377 if (pci_pm & 0x1)
1378 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1379 else
1380 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1381 }
1382 }
1383 return rc;
1384}
1385
4a8a4322 1386static int iwl3945_update_power_cmd(struct iwl_priv *priv,
600c0e11 1387 struct iwl_powertable_cmd *cmd, u32 mode)
b481de9c 1388{
3dae0c42 1389 struct iwl_power_mgr *pow_data;
1125eff3 1390 struct iwl_power_vec_entry *range;
1391 u32 max_sleep = 0;
1392 int i;
b481de9c 1393 u8 period = 0;
3dae0c42 1394 bool skip;
1395
1396 if (mode > IWL_POWER_INDEX_5) {
1397 IWL_DEBUG_POWER("Error invalid power mode \n");
3dae0c42 1398 return -EINVAL;
b481de9c 1399 }
3dae0c42 1400 pow_data = &priv->power_data;
b481de9c 1401
3dae0c42 1402 if (pow_data->dtim_period < 10)
1403 range = &pow_data->pwr_range_0[0];
1404 else
1405 range = &pow_data->pwr_range_1[1];
1406
bb8c093b 1407 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl3945_powertable_cmd));
b481de9c 1408
1409
1410 if (period == 0) {
1411 period = 1;
1412 skip = false;
1413 } else {
1414 skip = !!range[mode].no_dtim;
1415 }
1416
3dae0c42 1417 if (skip) {
1418 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1419 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1420 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1421 } else {
1422 max_sleep = period;
1423 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1424 }
1425
3dae0c42 1426 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
1427 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1428 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1429
1430 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1431 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1432 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1433 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1434 le32_to_cpu(cmd->sleep_interval[0]),
1435 le32_to_cpu(cmd->sleep_interval[1]),
1436 le32_to_cpu(cmd->sleep_interval[2]),
1437 le32_to_cpu(cmd->sleep_interval[3]),
1438 le32_to_cpu(cmd->sleep_interval[4]));
1439
3dae0c42 1440 return 0;
1441}
1442
4a8a4322 1443static int iwl3945_send_power_mode(struct iwl_priv *priv, u32 mode)
b481de9c 1444{
9a62f73b 1445 u32 uninitialized_var(final_mode);
b481de9c 1446 int rc;
600c0e11 1447 struct iwl_powertable_cmd cmd;
1448
1449 /* If on battery, set to 3,
01ebd063 1450 * if plugged into AC power, set to CAM ("continuously aware mode"),
b481de9c
ZY
1451 * else user level */
1452 switch (mode) {
1125eff3 1453 case IWL39_POWER_BATTERY:
b481de9c
ZY
1454 final_mode = IWL_POWER_INDEX_3;
1455 break;
1125eff3 1456 case IWL39_POWER_AC:
1457 final_mode = IWL_POWER_MODE_CAM;
1458 break;
1459 default:
1460 final_mode = mode;
1461 break;
1462 }
1463
bb8c093b 1464 iwl3945_update_power_cmd(priv, &cmd, final_mode);
b481de9c 1465
600c0e11 1466 /* FIXME use get_hcmd_size 3945 command is 4 bytes shorter */
1467 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
1468 sizeof(struct iwl3945_powertable_cmd), &cmd);
1469
1470 if (final_mode == IWL_POWER_MODE_CAM)
1471 clear_bit(STATUS_POWER_PMI, &priv->status);
1472 else
1473 set_bit(STATUS_POWER_PMI, &priv->status);
1474
1475 return rc;
1476}
1477
1478#define MAX_UCODE_BEACON_INTERVAL 1024
1479#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
1480
bb8c093b 1481static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val)
1482{
1483 u16 new_val = 0;
1484 u16 beacon_factor = 0;
1485
1486 beacon_factor =
1487 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
1488 / MAX_UCODE_BEACON_INTERVAL;
1489 new_val = beacon_val / beacon_factor;
1490
1491 return cpu_to_le16(new_val);
1492}
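/*
 * Worked example (kept out of the build with #if 0) of the scaling above: the
 * uCode cannot take a beacon interval larger than MAX_UCODE_BEACON_INTERVAL,
 * so the value is divided by the smallest factor that brings it into range,
 * e.g. 100 -> 100, 1500 -> 750, 3000 -> 1000. Host-order variant with an
 * invented name.
 */
#if 0
static unsigned short example_adjust_beacon_interval(unsigned short beacon_val)
{
	unsigned short factor = (beacon_val + MAX_UCODE_BEACON_INTERVAL)
				 / MAX_UCODE_BEACON_INTERVAL;

	return beacon_val / factor;
}
#endif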
1493
4a8a4322 1494static void iwl3945_setup_rxon_timing(struct iwl_priv *priv)
1495{
1496 u64 interval_tm_unit;
1497 u64 tsf, result;
1498 unsigned long flags;
1499 struct ieee80211_conf *conf = NULL;
1500 u16 beacon_int = 0;
1501
1502 conf = ieee80211_get_hw_conf(priv->hw);
1503
1504 spin_lock_irqsave(&priv->lock, flags);
28afaf91 1505 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
1506 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
1507
28afaf91 1508 tsf = priv->timestamp;
1509
1510 beacon_int = priv->beacon_int;
1511 spin_unlock_irqrestore(&priv->lock, flags);
1512
05c914fe 1513 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
1514 if (beacon_int == 0) {
1515 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
1516 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
1517 } else {
1518 priv->rxon_timing.beacon_interval =
1519 cpu_to_le16(beacon_int);
1520 priv->rxon_timing.beacon_interval =
bb8c093b 1521 iwl3945_adjust_beacon_interval(
1522 le16_to_cpu(priv->rxon_timing.beacon_interval));
1523 }
1524
1525 priv->rxon_timing.atim_window = 0;
1526 } else {
1527 priv->rxon_timing.beacon_interval =
bb8c093b 1528 iwl3945_adjust_beacon_interval(conf->beacon_int);
1529 /* TODO: we need to get atim_window from upper stack
 1530 * for now we set it to 0 */
1531 priv->rxon_timing.atim_window = 0;
1532 }
1533
1534 interval_tm_unit =
1535 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
1536 result = do_div(tsf, interval_tm_unit);
1537 priv->rxon_timing.beacon_init_val =
1538 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
1539
1540 IWL_DEBUG_ASSOC
1541 ("beacon interval %d beacon timer %d beacon tim %d\n",
1542 le16_to_cpu(priv->rxon_timing.beacon_interval),
1543 le32_to_cpu(priv->rxon_timing.beacon_init_val),
1544 le16_to_cpu(priv->rxon_timing.atim_window));
1545}
1546
4a8a4322 1547static int iwl3945_scan_initiate(struct iwl_priv *priv)
b481de9c 1548{
775a6e27 1549 if (!iwl_is_ready_rf(priv)) {
1550 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
1551 return -EIO;
1552 }
1553
1554 if (test_bit(STATUS_SCANNING, &priv->status)) {
1555 IWL_DEBUG_SCAN("Scan already in progress.\n");
1556 return -EAGAIN;
1557 }
1558
1559 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1560 IWL_DEBUG_SCAN("Scan request while abort pending. "
1561 "Queuing.\n");
1562 return -EAGAIN;
1563 }
1564
1565 IWL_DEBUG_INFO("Starting scan...\n");
1566 if (priv->cfg->sku & IWL_SKU_G)
1567 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
1568 if (priv->cfg->sku & IWL_SKU_A)
1569 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
1570 set_bit(STATUS_SCANNING, &priv->status);
1571 priv->scan_start = jiffies;
1572 priv->scan_pass_start = priv->scan_start;
1573
1574 queue_work(priv->workqueue, &priv->request_scan);
1575
1576 return 0;
1577}
1578
4a8a4322 1579static int iwl3945_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
b481de9c 1580{
f2c7e521 1581 struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;
1582
1583 if (hw_decrypt)
1584 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
1585 else
1586 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
1587
1588 return 0;
1589}
1590
4a8a4322 1591static void iwl3945_set_flags_for_phymode(struct iwl_priv *priv,
8318d78a 1592 enum ieee80211_band band)
b481de9c 1593{
8318d78a 1594 if (band == IEEE80211_BAND_5GHZ) {
f2c7e521 1595 priv->staging39_rxon.flags &=
1596 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
1597 | RXON_FLG_CCK_MSK);
f2c7e521 1598 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
b481de9c 1599 } else {
bb8c093b 1600 /* Copied from iwl3945_bg_post_associate() */
b481de9c 1601 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
f2c7e521 1602 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
b481de9c 1603 else
f2c7e521 1604 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
b481de9c 1605
05c914fe 1606 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
f2c7e521 1607 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
b481de9c 1608
1609 priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1610 priv->staging39_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
1611 priv->staging39_rxon.flags &= ~RXON_FLG_CCK_MSK;
1612 }
1613}
1614
1615/*
01ebd063 1616 * initialize rxon structure with default values from eeprom
b481de9c 1617 */
4a8a4322 1618static void iwl3945_connection_init_rx_config(struct iwl_priv *priv,
60294de3 1619 int mode)
b481de9c 1620{
d20b3c65 1621 const struct iwl_channel_info *ch_info;
b481de9c 1622
f2c7e521 1623 memset(&priv->staging39_rxon, 0, sizeof(priv->staging39_rxon));
b481de9c 1624
60294de3 1625 switch (mode) {
05c914fe 1626 case NL80211_IFTYPE_AP:
f2c7e521 1627 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_AP;
1628 break;
1629
05c914fe 1630 case NL80211_IFTYPE_STATION:
f2c7e521
AK
1631 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_ESS;
1632 priv->staging39_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
b481de9c
ZY
1633 break;
1634
05c914fe 1635 case NL80211_IFTYPE_ADHOC:
f2c7e521
AK
1636 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_IBSS;
1637 priv->staging39_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
1638 priv->staging39_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
b481de9c
ZY
1639 RXON_FILTER_ACCEPT_GRP_MSK;
1640 break;
1641
05c914fe 1642 case NL80211_IFTYPE_MONITOR:
f2c7e521
AK
1643 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
1644 priv->staging39_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
b481de9c
ZY
1645 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
1646 break;
69dc5d9d 1647 default:
15b1687c 1648 IWL_ERR(priv, "Unsupported interface type %d\n", mode);
69dc5d9d 1649 break;
b481de9c
ZY
1650 }
1651
1652#if 0
1653 /* TODO: Figure out when short_preamble would be set and cache from
1654 * that */
1655 if (!hw_to_local(priv->hw)->short_preamble)
f2c7e521 1656 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
b481de9c 1657 else
f2c7e521 1658 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
b481de9c
ZY
1659#endif
1660
8318d78a 1661 ch_info = iwl3945_get_channel_info(priv, priv->band,
f2c7e521 1662 le16_to_cpu(priv->active39_rxon.channel));
b481de9c
ZY
1663
1664 if (!ch_info)
1665 ch_info = &priv->channel_info[0];
1666
1667 /*
 1668 * In some cases the A-band channels are all non-IBSS;
 1669 * in that case fall back to a B/G channel.
1670 */
60294de3 1671 if ((mode == NL80211_IFTYPE_ADHOC) && !(is_channel_ibss(ch_info)))
b481de9c
ZY
1672 ch_info = &priv->channel_info[0];
1673
f2c7e521 1674 priv->staging39_rxon.channel = cpu_to_le16(ch_info->channel);
b481de9c 1675 if (is_channel_a_band(ch_info))
8318d78a 1676 priv->band = IEEE80211_BAND_5GHZ;
b481de9c 1677 else
8318d78a 1678 priv->band = IEEE80211_BAND_2GHZ;
b481de9c 1679
8318d78a 1680 iwl3945_set_flags_for_phymode(priv, priv->band);
b481de9c 1681
f2c7e521 1682 priv->staging39_rxon.ofdm_basic_rates =
b481de9c 1683 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
f2c7e521 1684 priv->staging39_rxon.cck_basic_rates =
b481de9c
ZY
1685 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1686}
1687
4a8a4322 1688static int iwl3945_set_mode(struct iwl_priv *priv, int mode)
b481de9c 1689{
05c914fe 1690 if (mode == NL80211_IFTYPE_ADHOC) {
d20b3c65 1691 const struct iwl_channel_info *ch_info;
b481de9c 1692
bb8c093b 1693 ch_info = iwl3945_get_channel_info(priv,
8318d78a 1694 priv->band,
f2c7e521 1695 le16_to_cpu(priv->staging39_rxon.channel));
b481de9c
ZY
1696
1697 if (!ch_info || !is_channel_ibss(ch_info)) {
15b1687c 1698 IWL_ERR(priv, "channel %d not IBSS channel\n",
f2c7e521 1699 le16_to_cpu(priv->staging39_rxon.channel));
b481de9c
ZY
1700 return -EINVAL;
1701 }
1702 }
1703
60294de3 1704 iwl3945_connection_init_rx_config(priv, mode);
f2c7e521 1705 memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
b481de9c 1706
bb8c093b 1707 iwl3945_clear_stations_table(priv);
b481de9c 1708
a96a27f9 1709 /* don't commit rxon if rf-kill is on*/
775a6e27 1710 if (!iwl_is_ready_rf(priv))
fde3571f
MA
1711 return -EAGAIN;
1712
1713 cancel_delayed_work(&priv->scan_check);
af0053d6 1714 if (iwl_scan_cancel_timeout(priv, 100)) {
39aadf8c 1715 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
fde3571f
MA
1716 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
1717 return -EAGAIN;
1718 }
1719
bb8c093b 1720 iwl3945_commit_rxon(priv);
b481de9c
ZY
1721
1722 return 0;
1723}
1724
4a8a4322 1725static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
e039fa4a 1726 struct ieee80211_tx_info *info,
c2d79b48 1727 struct iwl_cmd *cmd,
b481de9c
ZY
1728 struct sk_buff *skb_frag,
1729 int last_frag)
1730{
e52119c5 1731 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
1c014420 1732 struct iwl3945_hw_key *keyinfo =
f2c7e521 1733 &priv->stations_39[info->control.hw_key->hw_key_idx].keyinfo;
b481de9c
ZY
1734
1735 switch (keyinfo->alg) {
1736 case ALG_CCMP:
e52119c5
WT
1737 tx->sec_ctl = TX_CMD_SEC_CCM;
1738 memcpy(tx->key, keyinfo->key, keyinfo->keylen);
a96a27f9 1739 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
b481de9c
ZY
1740 break;
1741
1742 case ALG_TKIP:
1743#if 0
e52119c5 1744 tx->sec_ctl = TX_CMD_SEC_TKIP;
b481de9c
ZY
1745
1746 if (last_frag)
e52119c5 1747 memcpy(tx->tkip_mic.byte, skb_frag->tail - 8,
b481de9c
ZY
1748 8);
1749 else
e52119c5 1750 memset(tx->tkip_mic.byte, 0, 8);
b481de9c
ZY
1751#endif
1752 break;
1753
1754 case ALG_WEP:
e52119c5 1755 tx->sec_ctl = TX_CMD_SEC_WEP |
e039fa4a 1756 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
b481de9c
ZY
1757
1758 if (keyinfo->keylen == 13)
e52119c5 1759 tx->sec_ctl |= TX_CMD_SEC_KEY128;
b481de9c 1760
e52119c5 1761 memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen);
b481de9c
ZY
1762
1763 IWL_DEBUG_TX("Configuring packet for WEP encryption "
e039fa4a 1764 "with key %d\n", info->control.hw_key->hw_key_idx);
b481de9c
ZY
1765 break;
1766
b481de9c 1767 default:
978785a3 1768 IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg);
b481de9c
ZY
1769 break;
1770 }
1771}
1772
1773/*
 1774 * Build the basic (rate- and security-independent) fields of the REPLY_TX command.
1775 */
4a8a4322 1776static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
c2d79b48 1777 struct iwl_cmd *cmd,
e039fa4a 1778 struct ieee80211_tx_info *info,
e52119c5 1779 struct ieee80211_hdr *hdr, u8 std_id)
b481de9c 1780{
e52119c5
WT
1781 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
1782 __le32 tx_flags = tx->tx_flags;
fd7c8a40 1783 __le16 fc = hdr->frame_control;
e6a9854b 1784 u8 rc_flags = info->control.rates[0].flags;
b481de9c 1785
e52119c5 1786 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
e039fa4a 1787 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
b481de9c 1788 tx_flags |= TX_CMD_FLG_ACK_MSK;
fd7c8a40 1789 if (ieee80211_is_mgmt(fc))
b481de9c 1790 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
fd7c8a40 1791 if (ieee80211_is_probe_resp(fc) &&
b481de9c
ZY
1792 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
1793 tx_flags |= TX_CMD_FLG_TSF_MSK;
1794 } else {
1795 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
1796 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1797 }
1798
e52119c5 1799 tx->sta_id = std_id;
8b7b1e05 1800 if (ieee80211_has_morefrags(fc))
b481de9c
ZY
1801 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
1802
fd7c8a40
HH
1803 if (ieee80211_is_data_qos(fc)) {
1804 u8 *qc = ieee80211_get_qos_ctl(hdr);
e52119c5 1805 tx->tid_tspec = qc[0] & 0xf;
b481de9c 1806 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
54dbb525 1807 } else {
b481de9c 1808 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
54dbb525 1809 }
b481de9c 1810
e6a9854b 1811 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
b481de9c
ZY
1812 tx_flags |= TX_CMD_FLG_RTS_MSK;
1813 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
e6a9854b 1814 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
b481de9c
ZY
1815 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
1816 tx_flags |= TX_CMD_FLG_CTS_MSK;
1817 }
1818
1819 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
1820 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
1821
1822 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
fd7c8a40
HH
1823 if (ieee80211_is_mgmt(fc)) {
1824 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
e52119c5 1825 tx->timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 1826 else
e52119c5 1827 tx->timeout.pm_frame_timeout = cpu_to_le16(2);
ab53d8af 1828 } else {
e52119c5 1829 tx->timeout.pm_frame_timeout = 0;
ab53d8af
MA
1830#ifdef CONFIG_IWL3945_LEDS
1831 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
1832#endif
1833 }
b481de9c 1834
e52119c5
WT
1835 tx->driver_txop = 0;
1836 tx->tx_flags = tx_flags;
1837 tx->next_frame_len = 0;
b481de9c
ZY
1838}
1839
6440adb5
BC
1840/**
1841 * iwl3945_get_sta_id - Find station's index within station table
1842 */
4a8a4322 1843static int iwl3945_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
b481de9c
ZY
1844{
1845 int sta_id;
1846 u16 fc = le16_to_cpu(hdr->frame_control);
1847
6440adb5 1848 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
1849 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
1850 is_multicast_ether_addr(hdr->addr1))
3832ec9d 1851 return priv->hw_params.bcast_sta_id;
b481de9c
ZY
1852
1853 switch (priv->iw_mode) {
1854
6440adb5
BC
1855 /* If we are a client station in a BSS network, use the special
1856 * AP station entry (that's the only station we communicate with) */
05c914fe 1857 case NL80211_IFTYPE_STATION:
b481de9c
ZY
1858 return IWL_AP_ID;
1859
1860 /* If we are an AP, then find the station, or use BCAST */
05c914fe 1861 case NL80211_IFTYPE_AP:
bb8c093b 1862 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
1863 if (sta_id != IWL_INVALID_STATION)
1864 return sta_id;
3832ec9d 1865 return priv->hw_params.bcast_sta_id;
b481de9c 1866
6440adb5
BC
1867 /* If this frame is going out to an IBSS network, find the station,
1868 * or create a new station table entry */
05c914fe 1869 case NL80211_IFTYPE_ADHOC: {
6440adb5 1870 /* Create new station table entry */
bb8c093b 1871 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
1872 if (sta_id != IWL_INVALID_STATION)
1873 return sta_id;
1874
bb8c093b 1875 sta_id = iwl3945_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
b481de9c
ZY
1876
1877 if (sta_id != IWL_INVALID_STATION)
1878 return sta_id;
1879
e174961c 1880 IWL_DEBUG_DROP("Station %pM not in station map. "
b481de9c 1881 "Defaulting to broadcast...\n",
e174961c 1882 hdr->addr1);
40b8ec0b 1883 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
3832ec9d 1884 return priv->hw_params.bcast_sta_id;
0795af57 1885 }
914233d6
SG
1886 /* If we are in monitor mode, use BCAST. This is required for
1887 * packet injection. */
05c914fe 1888 case NL80211_IFTYPE_MONITOR:
3832ec9d 1889 return priv->hw_params.bcast_sta_id;
914233d6 1890
b481de9c 1891 default:
39aadf8c
WT
1892 IWL_WARN(priv, "Unknown mode of operation: %d\n",
1893 priv->iw_mode);
3832ec9d 1894 return priv->hw_params.bcast_sta_id;
b481de9c
ZY
1895 }
1896}
1897
1898/*
1899 * start REPLY_TX command process
1900 */
4a8a4322 1901static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
b481de9c
ZY
1902{
1903 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e039fa4a 1904 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
e52119c5 1905 struct iwl3945_tx_cmd *tx;
188cf6c7 1906 struct iwl_tx_queue *txq = NULL;
d20b3c65 1907 struct iwl_queue *q = NULL;
e52119c5 1908 struct iwl_cmd *out_cmd = NULL;
b481de9c
ZY
1909 dma_addr_t phys_addr;
1910 dma_addr_t txcmd_phys;
e52119c5 1911 int txq_id = skb_get_queue_mapping(skb);
54dbb525
TW
1912 u16 len, idx, len_org, hdr_len;
1913 u8 id;
1914 u8 unicast;
b481de9c 1915 u8 sta_id;
54dbb525 1916 u8 tid = 0;
b481de9c 1917 u16 seq_number = 0;
fd7c8a40 1918 __le16 fc;
b481de9c 1919 u8 wait_write_ptr = 0;
54dbb525 1920 u8 *qc = NULL;
b481de9c
ZY
1921 unsigned long flags;
1922 int rc;
1923
1924 spin_lock_irqsave(&priv->lock, flags);
775a6e27 1925 if (iwl_is_rfkill(priv)) {
b481de9c
ZY
1926 IWL_DEBUG_DROP("Dropping - RF KILL\n");
1927 goto drop_unlock;
1928 }
1929
e039fa4a 1930 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
15b1687c 1931 IWL_ERR(priv, "ERROR: No TX rate available.\n");
b481de9c
ZY
1932 goto drop_unlock;
1933 }
1934
1935 unicast = !is_multicast_ether_addr(hdr->addr1);
1936 id = 0;
1937
fd7c8a40 1938 fc = hdr->frame_control;
b481de9c 1939
c8b0e6e1 1940#ifdef CONFIG_IWL3945_DEBUG
b481de9c
ZY
1941 if (ieee80211_is_auth(fc))
1942 IWL_DEBUG_TX("Sending AUTH frame\n");
fd7c8a40 1943 else if (ieee80211_is_assoc_req(fc))
b481de9c 1944 IWL_DEBUG_TX("Sending ASSOC frame\n");
fd7c8a40 1945 else if (ieee80211_is_reassoc_req(fc))
b481de9c
ZY
1946 IWL_DEBUG_TX("Sending REASSOC frame\n");
1947#endif
1948
7878a5a4 1949 /* drop all data frames if we are not associated */
914233d6 1950 if (ieee80211_is_data(fc) &&
05c914fe 1951 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
914233d6 1952 (!iwl3945_is_associated(priv) ||
05c914fe 1953 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
bb8c093b 1954 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
b481de9c
ZY
1955 goto drop_unlock;
1956 }
1957
1958 spin_unlock_irqrestore(&priv->lock, flags);
1959
7294ec95 1960 hdr_len = ieee80211_hdrlen(fc);
6440adb5
BC
1961
1962 /* Find (or create) index into station table for destination station */
bb8c093b 1963 sta_id = iwl3945_get_sta_id(priv, hdr);
b481de9c 1964 if (sta_id == IWL_INVALID_STATION) {
e174961c
JB
1965 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
1966 hdr->addr1);
b481de9c
ZY
1967 goto drop;
1968 }
1969
1970 IWL_DEBUG_RATE("station Id %d\n", sta_id);
1971
fd7c8a40
HH
1972 if (ieee80211_is_data_qos(fc)) {
1973 qc = ieee80211_get_qos_ctl(hdr);
7294ec95 1974 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
f2c7e521 1975 seq_number = priv->stations_39[sta_id].tid[tid].seq_number &
b481de9c
ZY
1976 IEEE80211_SCTL_SEQ;
1977 hdr->seq_ctrl = cpu_to_le16(seq_number) |
1978 (hdr->seq_ctrl &
1979 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
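			/* Note (added for clarity): the low 4 bits of seq_ctrl
			 * carry the fragment number, so the sequence counter
			 * below advances in steps of 0x10. */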
1980 seq_number += 0x10;
1981 }
6440adb5
BC
1982
1983 /* Descriptor for chosen Tx queue */
188cf6c7 1984 txq = &priv->txq[txq_id];
b481de9c
ZY
1985 q = &txq->q;
1986
1987 spin_lock_irqsave(&priv->lock, flags);
1988
fc4b6853 1989 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 1990
6440adb5 1991 /* Set up driver data for this TFD */
dbb6654c 1992 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
fc4b6853 1993 txq->txb[q->write_ptr].skb[0] = skb;
6440adb5
BC
1994
1995 /* Init first empty entry in queue's array of Tx/cmd buffers */
188cf6c7 1996 out_cmd = txq->cmd[idx];
e52119c5 1997 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
b481de9c 1998 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
e52119c5 1999 memset(tx, 0, sizeof(*tx));
6440adb5
BC
2000
2001 /*
2002 * Set up the Tx-command (not MAC!) header.
2003 * Store the chosen Tx queue and TFD index within the sequence field;
2004 * after Tx, uCode's Tx response will return this value so driver can
2005 * locate the frame within the tx queue and do post-tx processing.
2006 */
b481de9c
ZY
2007 out_cmd->hdr.cmd = REPLY_TX;
2008 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 2009 INDEX_TO_SEQ(q->write_ptr)));
6440adb5
BC
2010
2011 /* Copy MAC header from skb into command buffer */
e52119c5 2012 memcpy(tx->hdr, hdr, hdr_len);
b481de9c 2013
6440adb5
BC
2014 /*
2015 * Use the first empty entry in this queue's command buffer array
2016 * to contain the Tx command and MAC header concatenated together
2017 * (payload data will be in another buffer).
2018 * Size of this varies, due to varying MAC header length.
2019 * If end is not dword aligned, we'll have 2 extra bytes at the end
2020 * of the MAC header (device reads on dword boundaries).
2021 * We'll tell device about this padding later.
2022 */
3832ec9d 2023 len = sizeof(struct iwl3945_tx_cmd) +
4c897253 2024 sizeof(struct iwl_cmd_header) + hdr_len;
b481de9c
ZY
2025
2026 len_org = len;
2027 len = (len + 3) & ~3;
2028
2029 if (len_org != len)
2030 len_org = 1;
2031 else
2032 len_org = 0;
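	/* Illustrative note (added): if the unpadded length were e.g. 38 bytes,
	 * len is rounded up to 40 and len_org is reused as a flag (1) meaning
	 * that 2 pad bytes follow the MAC header; the device is told about
	 * this padding later, as the comment above notes. */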
2033
6440adb5
BC
2034 /* Physical address of this Tx command's header (not MAC header!),
2035 * within command buffer array. */
188cf6c7
SO
2036 txcmd_phys = pci_map_single(priv->pci_dev,
2037 out_cmd, sizeof(struct iwl_cmd),
2038 PCI_DMA_TODEVICE);
2039 pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
2040 pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
2041 /* Add buffer containing Tx command and MAC(!) header to TFD's
2042 * first entry */
2043 txcmd_phys += offsetof(struct iwl_cmd, hdr);
b481de9c 2044
6440adb5
BC
2045 /* Add buffer containing Tx command and MAC(!) header to TFD's
2046 * first entry */
7aaa1d79
SO
2047 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
2048 txcmd_phys, len, 1, 0);
b481de9c 2049
d0f09804 2050 if (info->control.hw_key)
e039fa4a 2051 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
b481de9c 2052
6440adb5
BC
2053 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2054 * if any (802.11 null frames have no payload). */
b481de9c
ZY
2055 len = skb->len - hdr_len;
2056 if (len) {
2057 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2058 len, PCI_DMA_TODEVICE);
7aaa1d79
SO
2059 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
2060 phys_addr, len,
2061 0, U32_PAD(len));
b481de9c
ZY
2062 }
2063
6440adb5 2064 /* Total # bytes to be transmitted */
b481de9c 2065 len = (u16)skb->len;
e52119c5 2066 tx->len = cpu_to_le16(len);
b481de9c
ZY
2067
2068 /* TODO need this for burst mode later on */
e52119c5 2069 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
b481de9c
ZY
2070
2071 /* set is_hcca to 0; it probably will never be implemented */
e039fa4a 2072 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
b481de9c 2073
e52119c5
WT
2074 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2075 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
b481de9c 2076
8b7b1e05 2077 if (!ieee80211_has_morefrags(hdr->frame_control)) {
b481de9c 2078 txq->need_update = 1;
3ac7f146 2079 if (qc)
f2c7e521 2080 priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
b481de9c
ZY
2081 } else {
2082 wait_write_ptr = 1;
2083 txq->need_update = 0;
2084 }
2085
e52119c5 2086 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
b481de9c 2087
e52119c5 2088 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
7294ec95 2089 ieee80211_hdrlen(fc));
b481de9c 2090
6440adb5 2091 /* Tell device the write index *just past* this latest filled TFD */
c54b679d 2092 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
4f3602c8 2093 rc = iwl_txq_update_write_ptr(priv, txq);
b481de9c
ZY
2094 spin_unlock_irqrestore(&priv->lock, flags);
2095
2096 if (rc)
2097 return rc;
2098
d20b3c65 2099 if ((iwl_queue_space(q) < q->high_mark)
b481de9c
ZY
2100 && priv->mac80211_registered) {
2101 if (wait_write_ptr) {
2102 spin_lock_irqsave(&priv->lock, flags);
2103 txq->need_update = 1;
4f3602c8 2104 iwl_txq_update_write_ptr(priv, txq);
b481de9c
ZY
2105 spin_unlock_irqrestore(&priv->lock, flags);
2106 }
2107
e2530083 2108 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
b481de9c
ZY
2109 }
2110
2111 return 0;
2112
2113drop_unlock:
2114 spin_unlock_irqrestore(&priv->lock, flags);
2115drop:
2116 return -1;
2117}
2118
4a8a4322 2119static void iwl3945_set_rate(struct iwl_priv *priv)
b481de9c 2120{
8318d78a 2121 const struct ieee80211_supported_band *sband = NULL;
b481de9c
ZY
2122 struct ieee80211_rate *rate;
2123 int i;
2124
cbba18c6 2125 sband = iwl_get_hw_mode(priv, priv->band);
8318d78a 2126 if (!sband) {
15b1687c 2127 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
c4ba9621
SA
2128 return;
2129 }
b481de9c
ZY
2130
2131 priv->active_rate = 0;
2132 priv->active_rate_basic = 0;
2133
8318d78a
JB
2134 IWL_DEBUG_RATE("Setting rates for %s GHz\n",
2135 sband->band == IEEE80211_BAND_2GHZ ? "2.4" : "5");
2136
2137 for (i = 0; i < sband->n_bitrates; i++) {
2138 rate = &sband->bitrates[i];
2139 if ((rate->hw_value < IWL_RATE_COUNT) &&
2140 !(rate->flags & IEEE80211_CHAN_DISABLED)) {
2141 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)\n",
2142 rate->hw_value, iwl3945_rates[rate->hw_value].plcp);
2143 priv->active_rate |= (1 << rate->hw_value);
2144 }
b481de9c
ZY
2145 }
2146
2147 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
2148 priv->active_rate, priv->active_rate_basic);
2149
2150 /*
 2151 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK);
 2152 * otherwise set it to the default of all CCK rates plus 6, 12 and 24 Mbps
 2153 * for OFDM
2154 */
2155 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
f2c7e521 2156 priv->staging39_rxon.cck_basic_rates =
b481de9c
ZY
2157 ((priv->active_rate_basic &
2158 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2159 else
f2c7e521 2160 priv->staging39_rxon.cck_basic_rates =
b481de9c
ZY
2161 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2162
2163 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
f2c7e521 2164 priv->staging39_rxon.ofdm_basic_rates =
b481de9c
ZY
2165 ((priv->active_rate_basic &
2166 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2167 IWL_FIRST_OFDM_RATE) & 0xFF;
2168 else
f2c7e521 2169 priv->staging39_rxon.ofdm_basic_rates =
b481de9c
ZY
2170 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2171}
2172
4a8a4322 2173static void iwl3945_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
b481de9c
ZY
2174{
2175 unsigned long flags;
2176
2177 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2178 return;
2179
2180 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2181 disable_radio ? "OFF" : "ON");
2182
2183 if (disable_radio) {
af0053d6 2184 iwl_scan_cancel(priv);
b481de9c 2185 /* FIXME: This is a workaround for AP */
05c914fe 2186 if (priv->iw_mode != NL80211_IFTYPE_AP) {
b481de9c 2187 spin_lock_irqsave(&priv->lock, flags);
5d49f498 2188 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
2189 CSR_UCODE_SW_BIT_RFKILL);
2190 spin_unlock_irqrestore(&priv->lock, flags);
c496294e 2191 iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
b481de9c
ZY
2192 set_bit(STATUS_RF_KILL_SW, &priv->status);
2193 }
2194 return;
2195 }
2196
2197 spin_lock_irqsave(&priv->lock, flags);
5d49f498 2198 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
2199
2200 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2201 spin_unlock_irqrestore(&priv->lock, flags);
2202
2203 /* wake up ucode */
2204 msleep(10);
2205
2206 spin_lock_irqsave(&priv->lock, flags);
5d49f498
AK
2207 iwl_read32(priv, CSR_UCODE_DRV_GP1);
2208 if (!iwl_grab_nic_access(priv))
2209 iwl_release_nic_access(priv);
b481de9c
ZY
2210 spin_unlock_irqrestore(&priv->lock, flags);
2211
2212 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2213 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2214 "disabled by HW switch\n");
2215 return;
2216 }
2217
808e72a0
ZY
2218 if (priv->is_open)
2219 queue_work(priv->workqueue, &priv->restart);
b481de9c
ZY
2220 return;
2221}
2222
4a8a4322 2223void iwl3945_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
b481de9c
ZY
2224 u32 decrypt_res, struct ieee80211_rx_status *stats)
2225{
2226 u16 fc =
2227 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2228
f2c7e521 2229 if (priv->active39_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
b481de9c
ZY
2230 return;
2231
2232 if (!(fc & IEEE80211_FCTL_PROTECTED))
2233 return;
2234
2235 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2236 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2237 case RX_RES_STATUS_SEC_TYPE_TKIP:
2238 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2239 RX_RES_STATUS_BAD_ICV_MIC)
2240 stats->flag |= RX_FLAG_MMIC_ERROR;
2241 case RX_RES_STATUS_SEC_TYPE_WEP:
2242 case RX_RES_STATUS_SEC_TYPE_CCMP:
2243 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2244 RX_RES_STATUS_DECRYPT_OK) {
2245 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2246 stats->flag |= RX_FLAG_DECRYPTED;
2247 }
2248 break;
2249
2250 default:
2251 break;
2252 }
2253}
2254
c8b0e6e1 2255#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
b481de9c
ZY
2256
2257#include "iwl-spectrum.h"
2258
2259#define BEACON_TIME_MASK_LOW 0x00FFFFFF
2260#define BEACON_TIME_MASK_HIGH 0xFF000000
2261#define TIME_UNIT 1024
2262
2263/*
2264 * extended beacon time format
 2265 * a time in usec is converted into a 32-bit value in 8:24 format:
 2266 * the high byte is the number of elapsed beacon intervals,
 2267 * the low 3 bytes are the time in usec within the current beacon interval
2268 */
2269
bb8c093b 2270static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
2271{
2272 u32 quot;
2273 u32 rem;
2274 u32 interval = beacon_interval * 1024;
2275
2276 if (!interval || !usec)
2277 return 0;
2278
2279 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
2280 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
2281
2282 return (quot << 24) + rem;
2283}
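
/* Worked example (added for illustration): with beacon_interval = 100 TU the
 * interval is 102400 usec; usec = 250000 gives quot = 2 elapsed intervals and
 * rem = 45200 usec, i.e. an 8:24 value of 0x0200B090. */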
2284
2285/* base is usually the timestamp the uCode reports with each received frame,
 2286 * which mirrors the HW timer counter counting down
2287 */
2288
bb8c093b 2289static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
2290{
2291 u32 base_low = base & BEACON_TIME_MASK_LOW;
2292 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
2293 u32 interval = beacon_interval * TIME_UNIT;
2294 u32 res = (base & BEACON_TIME_MASK_HIGH) +
2295 (addon & BEACON_TIME_MASK_HIGH);
2296
2297 if (base_low > addon_low)
2298 res += base_low - addon_low;
2299 else if (base_low < addon_low) {
2300 res += interval + base_low - addon_low;
2301 res += (1 << 24);
2302 } else
2303 res += (1 << 24);
2304
2305 return cpu_to_le32(res);
2306}
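
/* Worked example (added for illustration): with a 100 TU beacon interval,
 * base = 0x01005000 and addon = 0x00003000 yield 0x01002000 -- the high
 * (beacon count) bytes add, and since base_low (0x5000) > addon_low (0x3000)
 * the low 24 bits become base_low - addon_low = 0x2000. */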
2307
4a8a4322 2308static int iwl3945_get_measurement(struct iwl_priv *priv,
b481de9c
ZY
2309 struct ieee80211_measurement_params *params,
2310 u8 type)
2311{
600c0e11 2312 struct iwl_spectrum_cmd spectrum;
3d24a9f7 2313 struct iwl_rx_packet *res;
c2d79b48 2314 struct iwl_host_cmd cmd = {
b481de9c
ZY
2315 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2316 .data = (void *)&spectrum,
2317 .meta.flags = CMD_WANT_SKB,
2318 };
2319 u32 add_time = le64_to_cpu(params->start_time);
2320 int rc;
2321 int spectrum_resp_status;
2322 int duration = le16_to_cpu(params->duration);
2323
bb8c093b 2324 if (iwl3945_is_associated(priv))
b481de9c 2325 add_time =
bb8c093b 2326 iwl3945_usecs_to_beacons(
b481de9c
ZY
2327 le64_to_cpu(params->start_time) - priv->last_tsf,
2328 le16_to_cpu(priv->rxon_timing.beacon_interval));
2329
2330 memset(&spectrum, 0, sizeof(spectrum));
2331
2332 spectrum.channel_count = cpu_to_le16(1);
2333 spectrum.flags =
2334 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
2335 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
2336 cmd.len = sizeof(spectrum);
2337 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
2338
bb8c093b 2339 if (iwl3945_is_associated(priv))
b481de9c 2340 spectrum.start_time =
bb8c093b 2341 iwl3945_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
2342 add_time,
2343 le16_to_cpu(priv->rxon_timing.beacon_interval));
2344 else
2345 spectrum.start_time = 0;
2346
2347 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
2348 spectrum.channels[0].channel = params->channel;
2349 spectrum.channels[0].type = type;
f2c7e521 2350 if (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK)
b481de9c
ZY
2351 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
2352 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
2353
518099a8 2354 rc = iwl_send_cmd_sync(priv, &cmd);
b481de9c
ZY
2355 if (rc)
2356 return rc;
2357
3d24a9f7 2358 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
b481de9c 2359 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
15b1687c 2360 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
b481de9c
ZY
2361 rc = -EIO;
2362 }
2363
2364 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
2365 switch (spectrum_resp_status) {
2366 case 0: /* Command will be handled */
2367 if (res->u.spectrum.id != 0xff) {
bc434dd2
IS
2368 IWL_DEBUG_INFO("Replaced existing measurement: %d\n",
2369 res->u.spectrum.id);
b481de9c
ZY
2370 priv->measurement_status &= ~MEASUREMENT_READY;
2371 }
2372 priv->measurement_status |= MEASUREMENT_ACTIVE;
2373 rc = 0;
2374 break;
2375
2376 case 1: /* Command will not be handled */
2377 rc = -EAGAIN;
2378 break;
2379 }
2380
2381 dev_kfree_skb_any(cmd.meta.u.skb);
2382
2383 return rc;
2384}
2385#endif
2386
4a8a4322 2387static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
6100b588 2388 struct iwl_rx_mem_buffer *rxb)
b481de9c 2389{
3d24a9f7
TW
2390 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2391 struct iwl_alive_resp *palive;
b481de9c
ZY
2392 struct delayed_work *pwork;
2393
2394 palive = &pkt->u.alive_frame;
2395
2396 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
2397 "0x%01X 0x%01X\n",
2398 palive->is_valid, palive->ver_type,
2399 palive->ver_subtype);
2400
2401 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
2402 IWL_DEBUG_INFO("Initialization Alive received.\n");
3d24a9f7
TW
2403 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
2404 sizeof(struct iwl_alive_resp));
b481de9c
ZY
2405 pwork = &priv->init_alive_start;
2406 } else {
2407 IWL_DEBUG_INFO("Runtime Alive received.\n");
2408 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3d24a9f7 2409 sizeof(struct iwl_alive_resp));
b481de9c 2410 pwork = &priv->alive_start;
bb8c093b 2411 iwl3945_disable_events(priv);
b481de9c
ZY
2412 }
2413
2414 /* We delay the ALIVE response by 5ms to
2415 * give the HW RF Kill time to activate... */
2416 if (palive->is_valid == UCODE_VALID_OK)
2417 queue_delayed_work(priv->workqueue, pwork,
2418 msecs_to_jiffies(5));
2419 else
39aadf8c 2420 IWL_WARN(priv, "uCode did not respond OK.\n");
b481de9c
ZY
2421}
2422
4a8a4322 2423static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
6100b588 2424 struct iwl_rx_mem_buffer *rxb)
b481de9c 2425{
c7e035a9 2426#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2427 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
c7e035a9 2428#endif
b481de9c
ZY
2429
2430 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
2431 return;
2432}
2433
4a8a4322 2434static void iwl3945_rx_reply_error(struct iwl_priv *priv,
6100b588 2435 struct iwl_rx_mem_buffer *rxb)
b481de9c 2436{
3d24a9f7 2437 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c 2438
15b1687c 2439 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
b481de9c
ZY
2440 "seq 0x%04X ser 0x%08X\n",
2441 le32_to_cpu(pkt->u.err_resp.error_type),
2442 get_cmd_string(pkt->u.err_resp.cmd_id),
2443 pkt->u.err_resp.cmd_id,
2444 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
2445 le32_to_cpu(pkt->u.err_resp.error_info));
2446}
2447
2448#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2449
4a8a4322 2450static void iwl3945_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
b481de9c 2451{
3d24a9f7 2452 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
f2c7e521 2453 struct iwl3945_rxon_cmd *rxon = (void *)&priv->active39_rxon;
600c0e11 2454 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
2455 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
2456 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
2457 rxon->channel = csa->channel;
f2c7e521 2458 priv->staging39_rxon.channel = csa->channel;
b481de9c
ZY
2459}
2460
4a8a4322 2461static void iwl3945_rx_spectrum_measure_notif(struct iwl_priv *priv,
6100b588 2462 struct iwl_rx_mem_buffer *rxb)
b481de9c 2463{
c8b0e6e1 2464#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
3d24a9f7 2465 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
600c0e11 2466 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
2467
2468 if (!report->state) {
2469 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
2470 "Spectrum Measure Notification: Start\n");
2471 return;
2472 }
2473
2474 memcpy(&priv->measure_report, report, sizeof(*report));
2475 priv->measurement_status |= MEASUREMENT_READY;
2476#endif
2477}
2478
4a8a4322 2479static void iwl3945_rx_pm_sleep_notif(struct iwl_priv *priv,
6100b588 2480 struct iwl_rx_mem_buffer *rxb)
b481de9c 2481{
c8b0e6e1 2482#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2483 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
600c0e11 2484 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
2485 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
2486 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
2487#endif
2488}
2489
4a8a4322 2490static void iwl3945_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
6100b588 2491 struct iwl_rx_mem_buffer *rxb)
b481de9c 2492{
3d24a9f7 2493 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
2494 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
2495 "notification for %s:\n",
2496 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
40b8ec0b
SO
2497 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw,
2498 le32_to_cpu(pkt->len));
b481de9c
ZY
2499}
2500
bb8c093b 2501static void iwl3945_bg_beacon_update(struct work_struct *work)
b481de9c 2502{
4a8a4322
AK
2503 struct iwl_priv *priv =
2504 container_of(work, struct iwl_priv, beacon_update);
b481de9c
ZY
2505 struct sk_buff *beacon;
2506
2507 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
e039fa4a 2508 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
b481de9c
ZY
2509
2510 if (!beacon) {
15b1687c 2511 IWL_ERR(priv, "update beacon failed\n");
b481de9c
ZY
2512 return;
2513 }
2514
2515 mutex_lock(&priv->mutex);
 2516 /* new beacon skb is allocated every time; dispose previous. */
2517 if (priv->ibss_beacon)
2518 dev_kfree_skb(priv->ibss_beacon);
2519
2520 priv->ibss_beacon = beacon;
2521 mutex_unlock(&priv->mutex);
2522
bb8c093b 2523 iwl3945_send_beacon_cmd(priv);
b481de9c
ZY
2524}
2525
4a8a4322 2526static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
6100b588 2527 struct iwl_rx_mem_buffer *rxb)
b481de9c 2528{
c8b0e6e1 2529#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2530 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
bb8c093b 2531 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
b481de9c
ZY
2532 u8 rate = beacon->beacon_notify_hdr.rate;
2533
2534 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
2535 "tsf %d %d rate %d\n",
2536 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
2537 beacon->beacon_notify_hdr.failure_frame,
2538 le32_to_cpu(beacon->ibss_mgr_status),
2539 le32_to_cpu(beacon->high_tsf),
2540 le32_to_cpu(beacon->low_tsf), rate);
2541#endif
2542
05c914fe 2543 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
b481de9c
ZY
2544 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
2545 queue_work(priv->workqueue, &priv->beacon_update);
2546}
2547
2548/* Service response to REPLY_SCAN_CMD (0x80) */
4a8a4322 2549static void iwl3945_rx_reply_scan(struct iwl_priv *priv,
6100b588 2550 struct iwl_rx_mem_buffer *rxb)
b481de9c 2551{
c8b0e6e1 2552#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2553 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2554 struct iwl_scanreq_notification *notif =
2555 (struct iwl_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
2556
2557 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
2558#endif
2559}
2560
2561/* Service SCAN_START_NOTIFICATION (0x82) */
4a8a4322 2562static void iwl3945_rx_scan_start_notif(struct iwl_priv *priv,
6100b588 2563 struct iwl_rx_mem_buffer *rxb)
b481de9c 2564{
3d24a9f7 2565 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2566 struct iwl_scanstart_notification *notif =
2567 (struct iwl_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
2568 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
2569 IWL_DEBUG_SCAN("Scan start: "
2570 "%d [802.11%s] "
2571 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
2572 notif->channel,
2573 notif->band ? "bg" : "a",
2574 notif->tsf_high,
2575 notif->tsf_low, notif->status, notif->beacon_timer);
2576}
2577
2578/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
4a8a4322 2579static void iwl3945_rx_scan_results_notif(struct iwl_priv *priv,
6100b588 2580 struct iwl_rx_mem_buffer *rxb)
b481de9c 2581{
c7e035a9 2582#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2583 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2584 struct iwl_scanresults_notification *notif =
2585 (struct iwl_scanresults_notification *)pkt->u.raw;
c7e035a9 2586#endif
b481de9c
ZY
2587
2588 IWL_DEBUG_SCAN("Scan ch.res: "
2589 "%d [802.11%s] "
2590 "(TSF: 0x%08X:%08X) - %d "
2591 "elapsed=%lu usec (%dms since last)\n",
2592 notif->channel,
2593 notif->band ? "bg" : "a",
2594 le32_to_cpu(notif->tsf_high),
2595 le32_to_cpu(notif->tsf_low),
2596 le32_to_cpu(notif->statistics[0]),
2597 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
2598 jiffies_to_msecs(elapsed_jiffies
2599 (priv->last_scan_jiffies, jiffies)));
2600
2601 priv->last_scan_jiffies = jiffies;
7878a5a4 2602 priv->next_scan_jiffies = 0;
b481de9c
ZY
2603}
2604
2605/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
4a8a4322 2606static void iwl3945_rx_scan_complete_notif(struct iwl_priv *priv,
6100b588 2607 struct iwl_rx_mem_buffer *rxb)
b481de9c 2608{
c7e035a9 2609#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2610 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253 2611 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
c7e035a9 2612#endif
b481de9c
ZY
2613
2614 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
2615 scan_notif->scanned_channels,
2616 scan_notif->tsf_low,
2617 scan_notif->tsf_high, scan_notif->status);
2618
2619 /* The HW is no longer scanning */
2620 clear_bit(STATUS_SCAN_HW, &priv->status);
2621
2622 /* The scan completion notification came in, so kill that timer... */
2623 cancel_delayed_work(&priv->scan_check);
2624
2625 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
66b5004d
RR
2626 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
2627 "2.4" : "5.2",
b481de9c
ZY
2628 jiffies_to_msecs(elapsed_jiffies
2629 (priv->scan_pass_start, jiffies)));
2630
66b5004d
RR
2631 /* Remove this scanned band from the list of pending
 2632 * bands to scan; band G precedes A in the order of scanning,
2633 * as seen in iwl3945_bg_request_scan */
2634 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
2635 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
2636 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
2637 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
b481de9c
ZY
2638
2639 /* If a request to abort was given, or the scan did not succeed
2640 * then we reset the scan state machine and terminate,
2641 * re-queuing another scan if one has been requested */
2642 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2643 IWL_DEBUG_INFO("Aborted scan completed.\n");
2644 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
2645 } else {
2646 /* If there are more bands on this scan pass reschedule */
2647 if (priv->scan_bands > 0)
2648 goto reschedule;
2649 }
2650
2651 priv->last_scan_jiffies = jiffies;
7878a5a4 2652 priv->next_scan_jiffies = 0;
b481de9c
ZY
2653 IWL_DEBUG_INFO("Setting scan to off\n");
2654
2655 clear_bit(STATUS_SCANNING, &priv->status);
2656
2657 IWL_DEBUG_INFO("Scan took %dms\n",
2658 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
2659
2660 queue_work(priv->workqueue, &priv->scan_completed);
2661
2662 return;
2663
2664reschedule:
2665 priv->scan_pass_start = jiffies;
2666 queue_work(priv->workqueue, &priv->request_scan);
2667}
2668
2669/* Handle notification from uCode that card's power state is changing
2670 * due to software, hardware, or critical temperature RFKILL */
4a8a4322 2671static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
6100b588 2672 struct iwl_rx_mem_buffer *rxb)
b481de9c 2673{
3d24a9f7 2674 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
2675 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
2676 unsigned long status = priv->status;
2677
2678 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
2679 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
2680 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
2681
5d49f498 2682 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
2683 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2684
2685 if (flags & HW_CARD_DISABLED)
2686 set_bit(STATUS_RF_KILL_HW, &priv->status);
2687 else
2688 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2689
2690
2691 if (flags & SW_CARD_DISABLED)
2692 set_bit(STATUS_RF_KILL_SW, &priv->status);
2693 else
2694 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2695
af0053d6 2696 iwl_scan_cancel(priv);
b481de9c
ZY
2697
2698 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
2699 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
2700 (test_bit(STATUS_RF_KILL_SW, &status) !=
2701 test_bit(STATUS_RF_KILL_SW, &priv->status)))
2702 queue_work(priv->workqueue, &priv->rf_kill);
2703 else
2704 wake_up_interruptible(&priv->wait_command_queue);
2705}
2706
2707/**
bb8c093b 2708 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
2709 *
2710 * Setup the RX handlers for each of the reply types sent from the uCode
2711 * to the host.
2712 *
2713 * This function chains into the hardware specific files for them to setup
2714 * any hardware specific handlers as well.
2715 */
4a8a4322 2716static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
b481de9c 2717{
bb8c093b
CH
2718 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
2719 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
2720 priv->rx_handlers[REPLY_ERROR] = iwl3945_rx_reply_error;
2721 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl3945_rx_csa;
b481de9c 2722 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
2723 iwl3945_rx_spectrum_measure_notif;
2724 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl3945_rx_pm_sleep_notif;
b481de9c 2725 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
2726 iwl3945_rx_pm_debug_statistics_notif;
2727 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
b481de9c 2728
9fbab516
BC
2729 /*
2730 * The same handler is used for both the REPLY to a discrete
2731 * statistics request from the host as well as for the periodic
2732 * statistics notifications (after received beacons) from the uCode.
b481de9c 2733 */
bb8c093b
CH
2734 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
2735 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
b481de9c 2736
bb8c093b
CH
2737 priv->rx_handlers[REPLY_SCAN_CMD] = iwl3945_rx_reply_scan;
2738 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl3945_rx_scan_start_notif;
b481de9c 2739 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 2740 iwl3945_rx_scan_results_notif;
b481de9c 2741 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
2742 iwl3945_rx_scan_complete_notif;
2743 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
b481de9c 2744
9fbab516 2745 /* Set up hardware specific Rx handlers */
bb8c093b 2746 iwl3945_hw_rx_handler_setup(priv);
b481de9c
ZY
2747}
2748
91c066f2
TW
2749/**
2750 * iwl3945_cmd_queue_reclaim - Reclaim CMD queue entries
2751 * When FW advances 'R' index, all entries between old and new 'R' index
2752 * need to be reclaimed.
2753 */
4a8a4322 2754static void iwl3945_cmd_queue_reclaim(struct iwl_priv *priv,
91c066f2
TW
2755 int txq_id, int index)
2756{
188cf6c7 2757 struct iwl_tx_queue *txq = &priv->txq[txq_id];
d20b3c65 2758 struct iwl_queue *q = &txq->q;
91c066f2
TW
2759 int nfreed = 0;
2760
625a381a 2761 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
15b1687c 2762 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
91c066f2
TW
2763 "is out of range [0-%d] %d %d.\n", txq_id,
2764 index, q->n_bd, q->write_ptr, q->read_ptr);
2765 return;
2766 }
2767
2768 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
2769 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2770 if (nfreed > 1) {
15b1687c 2771 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", index,
91c066f2
TW
2772 q->write_ptr, q->read_ptr);
2773 queue_work(priv->workqueue, &priv->restart);
2774 break;
2775 }
2776 nfreed++;
2777 }
2778}
2779
2780
b481de9c 2781/**
bb8c093b 2782 * iwl3945_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
2783 * @rxb: Rx buffer to reclaim
2784 *
 2785 * If an Rx buffer has an async callback associated with it, the callback
 2786 * will be executed. The attached skb (if present) will only be freed
 2787 * if the callback returns 1.
2788 */
4a8a4322 2789static void iwl3945_tx_cmd_complete(struct iwl_priv *priv,
6100b588 2790 struct iwl_rx_mem_buffer *rxb)
b481de9c 2791{
3d24a9f7 2792 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
b481de9c
ZY
2793 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2794 int txq_id = SEQ_TO_QUEUE(sequence);
2795 int index = SEQ_TO_INDEX(sequence);
600c0e11 2796 int huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
b481de9c 2797 int cmd_index;
c2d79b48 2798 struct iwl_cmd *cmd;
b481de9c 2799
638d0eb9
CR
2800 if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
2801 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
2802 txq_id, sequence,
2803 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
2804 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
2805 iwl_print_hex_dump(priv, IWL_DL_INFO , rxb, 32);
2806 return;
2807 }
b481de9c 2808
188cf6c7
SO
2809 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
2810 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
b481de9c
ZY
2811
2812 /* Input error checking is done when commands are added to queue. */
2813 if (cmd->meta.flags & CMD_WANT_SKB) {
2814 cmd->meta.source->u.skb = rxb->skb;
2815 rxb->skb = NULL;
2816 } else if (cmd->meta.u.callback &&
2817 !cmd->meta.u.callback(priv, cmd, rxb->skb))
2818 rxb->skb = NULL;
2819
91c066f2 2820 iwl3945_cmd_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
2821
2822 if (!(cmd->meta.flags & CMD_ASYNC)) {
2823 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
2824 wake_up_interruptible(&priv->wait_command_queue);
2825 }
2826}
2827
2828/************************** RX-FUNCTIONS ****************************/
2829/*
2830 * Rx theory of operation
2831 *
2832 * The host allocates 32 DMA target addresses and passes the host address
2833 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
2834 * 0 to 31
2835 *
2836 * Rx Queue Indexes
2837 * The host/firmware share two index registers for managing the Rx buffers.
2838 *
2839 * The READ index maps to the first position that the firmware may be writing
2840 * to -- the driver can read up to (but not including) this position and get
2841 * good data.
2842 * The READ index is managed by the firmware once the card is enabled.
2843 *
2844 * The WRITE index maps to the last position the driver has read from -- the
2845 * position preceding WRITE is the last slot the firmware can place a packet.
2846 *
2847 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2848 * WRITE = READ.
2849 *
9fbab516 2850 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
2851 * INDEX position, and WRITE to the last (READ - 1 wrapped)
2852 *
9fbab516 2853 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
2854 * and fire the RX interrupt. The driver can then query the READ index and
2855 * process as many packets as possible, moving the WRITE index forward as it
2856 * resets the Rx queue buffers with new memory.
2857 *
2858 * The management in the driver is as follows:
2859 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2860 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 2861 * to replenish the iwl->rxq->rx_free.
bb8c093b 2862 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
2863 * iwl->rxq is replenished and the READ INDEX is updated (updating the
2864 * 'processed' and 'read' driver indexes as well)
2865 * + A received packet is processed and handed to the kernel network stack,
2866 * detached from the iwl->rxq. The driver 'processed' index is updated.
2867 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2868 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2869 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
2870 * were enough free buffers and RX_STALLED is set it is cleared.
2871 *
2872 *
2873 * Driver sequence:
2874 *
9fbab516 2875 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 2876 * iwl3945_rx_queue_restock
9fbab516 2877 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
2878 * queue, updates firmware pointers, and updates
2879 * the WRITE index. If insufficient rx_free buffers
bb8c093b 2880 * are available, schedules iwl3945_rx_replenish
b481de9c
ZY
2881 *
2882 * -- enable interrupts --
6100b588 2883 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
b481de9c
ZY
2884 * READ INDEX, detaching the SKB from the pool.
2885 * Moves the packet buffer from queue to rx_used.
bb8c093b 2886 * Calls iwl3945_rx_queue_restock to refill any empty
b481de9c
ZY
2887 * slots.
2888 * ...
2889 *
2890 */
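
/* Illustration (added, values arbitrary): with the 32-entry queue described
 * above, READ == 5 and WRITE == 4 is the empty case (WRITE == READ - 1,
 * nothing for the driver to process), while WRITE == READ == 5 is the full
 * case noted above. */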
2891
b481de9c 2892/**
9fbab516 2893 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 2894 */
4a8a4322 2895static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
b481de9c
ZY
2896 dma_addr_t dma_addr)
2897{
2898 return cpu_to_le32((u32)dma_addr);
2899}
2900
2901/**
bb8c093b 2902 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 2903 *
9fbab516 2904 * If there are slots in the RX queue that need to be restocked,
b481de9c 2905 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 2906 * as we can, pulling from rx_free.
b481de9c
ZY
2907 *
2908 * This moves the 'write' index forward to catch up with 'processed', and
2909 * also updates the memory address in the firmware to reference the new
2910 * target buffer.
2911 */
4a8a4322 2912static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
b481de9c 2913{
cc2f362c 2914 struct iwl_rx_queue *rxq = &priv->rxq;
b481de9c 2915 struct list_head *element;
6100b588 2916 struct iwl_rx_mem_buffer *rxb;
b481de9c
ZY
2917 unsigned long flags;
2918 int write, rc;
2919
2920 spin_lock_irqsave(&rxq->lock, flags);
2921 write = rxq->write & ~0x7;
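	/* Note (added for clarity): 'write' holds the write index rounded down
	 * to a multiple of 8 (e.g. 13 -> 8); the firmware write pointer is
	 * only updated further below when this 8-aligned value changes or
	 * enough buffers have accumulated. */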
37d68317 2922 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 2923 /* Get next free Rx buffer, remove from free list */
b481de9c 2924 element = rxq->rx_free.next;
6100b588 2925 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
b481de9c 2926 list_del(element);
6440adb5
BC
2927
2928 /* Point to Rx buffer via next RBD in circular buffer */
6100b588 2929 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
b481de9c
ZY
2930 rxq->queue[rxq->write] = rxb;
2931 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
2932 rxq->free_count--;
2933 }
2934 spin_unlock_irqrestore(&rxq->lock, flags);
2935 /* If the pre-allocated buffer pool is dropping low, schedule to
2936 * refill it */
2937 if (rxq->free_count <= RX_LOW_WATERMARK)
2938 queue_work(priv->workqueue, &priv->rx_replenish);
2939
2940
6440adb5
BC
2941 /* If we've added more space for the firmware to place data, tell it.
2942 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
2943 if ((write != (rxq->write & ~0x7))
2944 || (abs(rxq->write - rxq->read) > 7)) {
2945 spin_lock_irqsave(&rxq->lock, flags);
2946 rxq->need_update = 1;
2947 spin_unlock_irqrestore(&rxq->lock, flags);
141c43a3 2948 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
2949 if (rc)
2950 return rc;
2951 }
2952
2953 return 0;
2954}
2955
2956/**
bb8c093b 2957 * iwl3945_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
2958 *
2959 * When moving to rx_free an SKB is allocated for the slot.
2960 *
bb8c093b 2961 * Also restock the Rx queue via iwl3945_rx_queue_restock.
01ebd063 2962 * This is called as a scheduled work item (except for during initialization)
b481de9c 2963 */
4a8a4322 2964static void iwl3945_rx_allocate(struct iwl_priv *priv)
b481de9c 2965{
cc2f362c 2966 struct iwl_rx_queue *rxq = &priv->rxq;
b481de9c 2967 struct list_head *element;
6100b588 2968 struct iwl_rx_mem_buffer *rxb;
b481de9c
ZY
2969 unsigned long flags;
2970 spin_lock_irqsave(&rxq->lock, flags);
2971 while (!list_empty(&rxq->rx_used)) {
2972 element = rxq->rx_used.next;
6100b588 2973 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
6440adb5
BC
2974
2975 /* Alloc a new receive buffer */
b481de9c 2976 rxb->skb =
1e33dc64
WT
2977 alloc_skb(priv->hw_params.rx_buf_size,
2978 __GFP_NOWARN | GFP_ATOMIC);
b481de9c
ZY
2979 if (!rxb->skb) {
2980 if (net_ratelimit())
978785a3 2981 IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
b481de9c
ZY
2982 /* We don't reschedule replenish work here -- we will
2983 * call the restock method and if it still needs
2984 * more buffers it will schedule replenish */
2985 break;
2986 }
12342c47
ZY
2987
2988 /* If radiotap head is required, reserve some headroom here.
 2989 * The physical header size varies (rx_stats->phy_count).
 2990 * We reserve 4 bytes here; together with the headroom already in
 2991 * front of the physical header, this should be enough for the
 2992 * radiotap header that iwl3945 supports. See iwl3945_rt.
2993 */
2994 skb_reserve(rxb->skb, 4);
2995
b481de9c
ZY
2996 priv->alloc_rxb_skb++;
2997 list_del(element);
6440adb5
BC
2998
2999 /* Get physical address of RB/SKB */
1e33dc64
WT
3000 rxb->real_dma_addr = pci_map_single(priv->pci_dev,
3001 rxb->skb->data,
3002 priv->hw_params.rx_buf_size,
3003 PCI_DMA_FROMDEVICE);
b481de9c
ZY
3004 list_add_tail(&rxb->list, &rxq->rx_free);
3005 rxq->free_count++;
3006 }
3007 spin_unlock_irqrestore(&rxq->lock, flags);
5c0eef96
MA
3008}
3009
3010/*
3011 * this should be called while priv->lock is locked
3012 */
4fd1f841 3013static void __iwl3945_rx_replenish(void *data)
5c0eef96 3014{
4a8a4322 3015 struct iwl_priv *priv = data;
5c0eef96
MA
3016
3017 iwl3945_rx_allocate(priv);
3018 iwl3945_rx_queue_restock(priv);
3019}
3020
3021
3022void iwl3945_rx_replenish(void *data)
3023{
4a8a4322 3024 struct iwl_priv *priv = data;
5c0eef96
MA
3025 unsigned long flags;
3026
3027 iwl3945_rx_allocate(priv);
b481de9c
ZY
3028
3029 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3030 iwl3945_rx_queue_restock(priv);
b481de9c
ZY
3031 spin_unlock_irqrestore(&priv->lock, flags);
3032}
3033
b481de9c
ZY
3034/* Convert linear signal-to-noise ratio into dB */
3035static u8 ratio2dB[100] = {
3036/* 0 1 2 3 4 5 6 7 8 9 */
3037 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
3038 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
3039 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
3040 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
3041 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
3042 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
3043 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
3044 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
3045 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
3046 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
3047};
3048
3049/* Calculates a relative dB value from a ratio of linear
3050 * (i.e. not dB) signal levels.
3051 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 3052int iwl3945_calc_db_from_ratio(int sig_ratio)
b481de9c 3053{
221c80cf
AB
3054 /* 1000:1 or higher just report as 60 dB */
3055 if (sig_ratio >= 1000)
b481de9c
ZY
3056 return 60;
3057
221c80cf 3058 /* 100:1 or higher, divide by 10 and use table,
b481de9c 3059 * add 20 dB to make up for divide by 10 */
221c80cf 3060 if (sig_ratio >= 100)
3ac7f146 3061 return 20 + (int)ratio2dB[sig_ratio/10];
b481de9c
ZY
3062
3063 /* We shouldn't see this */
3064 if (sig_ratio < 1)
3065 return 0;
3066
3067 /* Use table for ratios 1:1 - 99:1 */
3068 return (int)ratio2dB[sig_ratio];
3069}
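
/* Worked example (added for illustration): sig_ratio = 250 falls in the
 * 100:1 - 999:1 range, so the result is 20 + ratio2dB[25] = 20 + 28 = 48 dB. */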
3070
3071#define PERFECT_RSSI (-20) /* dBm */
3072#define WORST_RSSI (-95) /* dBm */
3073#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
3074
3075/* Calculate an indication of rx signal quality (a percentage, not dBm!).
3076 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
3077 * about formulas used below. */
bb8c093b 3078int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
3079{
3080 int sig_qual;
3081 int degradation = PERFECT_RSSI - rssi_dbm;
3082
3083 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
3084 * as indicator; formula is (signal dbm - noise dbm).
3085 * SNR at or above 40 is a great signal (100%).
3086 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
3087 * Weakest usable signal is usually 10 - 15 dB SNR. */
3088 if (noise_dbm) {
3089 if (rssi_dbm - noise_dbm >= 40)
3090 return 100;
3091 else if (rssi_dbm < noise_dbm)
3092 return 0;
3093 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
3094
3095 /* Else use just the signal level.
3096 * This formula is a least squares fit of data points collected and
3097 * compared with a reference system that had a percentage (%) display
3098 * for signal quality. */
3099 } else
3100 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
3101 (15 * RSSI_RANGE + 62 * degradation)) /
3102 (RSSI_RANGE * RSSI_RANGE);
3103
3104 if (sig_qual > 100)
3105 sig_qual = 100;
3106 else if (sig_qual < 1)
3107 sig_qual = 0;
3108
3109 return sig_qual;
3110}
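
/* Worked examples (added for illustration): rssi_dbm = -60 with
 * noise_dbm = -90 gives an SNR of 30 dB and a quality of 30 * 5 / 2 = 75%.
 * Without a noise measurement, rssi_dbm = -60 gives degradation = 40 and
 * (100 * 75 * 75 - 40 * (15 * 75 + 62 * 40)) / (75 * 75) = 74%. */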
3111
3112/**
9fbab516 3113 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
3114 *
3115 * Uses the priv->rx_handlers callback function array to invoke
3116 * the appropriate handlers, including command responses,
3117 * frame-received notifications, and other notifications.
3118 */
4a8a4322 3119static void iwl3945_rx_handle(struct iwl_priv *priv)
b481de9c 3120{
6100b588 3121 struct iwl_rx_mem_buffer *rxb;
3d24a9f7 3122 struct iwl_rx_packet *pkt;
cc2f362c 3123 struct iwl_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
3124 u32 r, i;
3125 int reclaim;
3126 unsigned long flags;
5c0eef96 3127 u8 fill_rx = 0;
d68ab680 3128 u32 count = 8;
b481de9c 3129
6440adb5
BC
3130 /* uCode's read index (stored in shared DRAM) indicates the last Rx
3131 * buffer that the driver may process (last buffer filled by ucode). */
8cd812bc 3132 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
b481de9c
ZY
3133 i = rxq->read;
3134
37d68317 3135 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
5c0eef96 3136 fill_rx = 1;
b481de9c
ZY
3137 /* Rx interrupt, but nothing sent from uCode */
3138 if (i == r)
3139 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
3140
3141 while (i != r) {
3142 rxb = rxq->queue[i];
3143
9fbab516 3144 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
3145 * then a bug has been introduced in the queue refilling
3146 * routines -- catch it here */
3147 BUG_ON(rxb == NULL);
3148
3149 rxq->queue[i] = NULL;
3150
6100b588 3151 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
1e33dc64 3152 priv->hw_params.rx_buf_size,
b481de9c 3153 PCI_DMA_FROMDEVICE);
3d24a9f7 3154 pkt = (struct iwl_rx_packet *)rxb->skb->data;
b481de9c
ZY
3155
3156 /* Reclaim a command buffer only if this packet is a response
3157 * to a (driver-originated) command.
3158 * If the packet (e.g. Rx frame) originated from uCode,
3159 * there is no command buffer to reclaim.
3160 * uCode should set SEQ_RX_FRAME bit if ucode-originated,
3161 * but apparently a few don't get set; catch them here. */
3162 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
3163 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
3164 (pkt->hdr.cmd != REPLY_TX);
3165
3166 /* Based on type of command response or notification,
3167 * handle those that need handling via function in
bb8c093b 3168 * rx_handlers table. See iwl3945_setup_rx_handlers() */
b481de9c 3169 if (priv->rx_handlers[pkt->hdr.cmd]) {
40b8ec0b 3170 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
b481de9c
ZY
3171 "r = %d, i = %d, %s, 0x%02x\n", r, i,
3172 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3173 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3174 } else {
3175 /* No handling needed */
40b8ec0b 3176 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
b481de9c
ZY
3177 "r %d i %d No handler needed for %s, 0x%02x\n",
3178 r, i, get_cmd_string(pkt->hdr.cmd),
3179 pkt->hdr.cmd);
3180 }
3181
3182 if (reclaim) {
9fbab516 3183 /* Invoke any callbacks, transfer the skb to caller, and
518099a8 3184 * fire off the (possibly) blocking iwl_send_cmd()
b481de9c
ZY
3185 * as we reclaim the driver command queue */
3186 if (rxb && rxb->skb)
bb8c093b 3187 iwl3945_tx_cmd_complete(priv, rxb);
b481de9c 3188 else
39aadf8c 3189 IWL_WARN(priv, "Claim null rxb?\n");
b481de9c
ZY
3190 }
3191
3192 /* For now we just don't re-use anything. We can tweak this
3193 * later to try and re-use notification packets and SKBs that
3194 * fail to Rx correctly */
3195 if (rxb->skb != NULL) {
3196 priv->alloc_rxb_skb--;
3197 dev_kfree_skb_any(rxb->skb);
3198 rxb->skb = NULL;
3199 }
3200
6100b588 3201 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
1e33dc64
WT
3202 priv->hw_params.rx_buf_size,
3203 PCI_DMA_FROMDEVICE);
b481de9c
ZY
3204 spin_lock_irqsave(&rxq->lock, flags);
3205 list_add_tail(&rxb->list, &priv->rxq.rx_used);
3206 spin_unlock_irqrestore(&rxq->lock, flags);
3207 i = (i + 1) & RX_QUEUE_MASK;
5c0eef96
MA
3208 /* If there are a lot of unused frames,
3209 * restock the Rx queue so ucode won't assert. */
3210 if (fill_rx) {
3211 count++;
3212 if (count >= 8) {
3213 priv->rxq.read = i;
3214 __iwl3945_rx_replenish(priv);
3215 count = 0;
3216 }
3217 }
b481de9c
ZY
3218 }
3219
3220 /* Backtrack one entry */
3221 priv->rxq.read = i;
bb8c093b 3222 iwl3945_rx_queue_restock(priv);
b481de9c
ZY
3223}
3224
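The loop above is a standard single-producer ring-buffer consume: walk the driver's read index i toward the device's write index r, wrapping with a power-of-two mask, then publish the new read index. Below is a stripped-down userspace sketch of just that index arithmetic (illustrative only; the queue size is an assumed value for the sketch, and the "device" index is faked rather than read from shared DRAM).

#include <stdio.h>

/* Assumed queue geometry for the sketch only; the driver reads the real
 * write index from rb_stts->closed_rb_num in shared DRAM. */
#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

static void consume(unsigned int *read, unsigned int closed_rb_num)
{
        unsigned int r = closed_rb_num & 0x0FFF;  /* producer (device) index */
        unsigned int i = *read;                   /* consumer (driver) index */

        while (i != r) {
                printf("process rx buffer %u\n", i);
                i = (i + 1) & RX_QUEUE_MASK;      /* wrap around the ring */
        }
        *read = i;                                /* publish new read index */
}

int main(void)
{
        unsigned int read = 250;

        consume(&read, 4);      /* walks 250..255, then 0..3 */
        return 0;
}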
c8b0e6e1 3225#ifdef CONFIG_IWL3945_DEBUG
4a8a4322 3226static void iwl3945_print_rx_config_cmd(struct iwl_priv *priv,
40b8ec0b 3227 struct iwl3945_rxon_cmd *rxon)
b481de9c
ZY
3228{
3229 IWL_DEBUG_RADIO("RX CONFIG:\n");
40b8ec0b 3230 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
3231 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
3232 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
3233 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
3234 le32_to_cpu(rxon->filter_flags));
3235 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
3236 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
3237 rxon->ofdm_basic_rates);
3238 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
e174961c
JB
3239 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
3240 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
b481de9c
ZY
3241 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
3242}
3243#endif
3244
4a8a4322 3245static void iwl3945_enable_interrupts(struct iwl_priv *priv)
b481de9c
ZY
3246{
3247 IWL_DEBUG_ISR("Enabling interrupts\n");
3248 set_bit(STATUS_INT_ENABLED, &priv->status);
5d49f498 3249 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
3250}
3251
0359facc
MA
3252
3253/* call this function to flush any scheduled tasklet */
4a8a4322 3254static inline void iwl_synchronize_irq(struct iwl_priv *priv)
0359facc 3255{
a96a27f9 3256 /* wait to make sure we flush any pending tasklet */
0359facc
MA
3257 synchronize_irq(priv->pci_dev->irq);
3258 tasklet_kill(&priv->irq_tasklet);
3259}
3260
3261
4a8a4322 3262static inline void iwl3945_disable_interrupts(struct iwl_priv *priv)
b481de9c
ZY
3263{
3264 clear_bit(STATUS_INT_ENABLED, &priv->status);
3265
3266 /* disable interrupts from uCode/NIC to host */
5d49f498 3267 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
3268
3269 /* acknowledge/clear/reset any interrupts still pending
3270 * from uCode or flow handler (Rx/Tx DMA) */
5d49f498
AK
3271 iwl_write32(priv, CSR_INT, 0xffffffff);
3272 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
3273 IWL_DEBUG_ISR("Disabled interrupts\n");
3274}
3275
3276static const char *desc_lookup(int i)
3277{
3278 switch (i) {
3279 case 1:
3280 return "FAIL";
3281 case 2:
3282 return "BAD_PARAM";
3283 case 3:
3284 return "BAD_CHECKSUM";
3285 case 4:
3286 return "NMI_INTERRUPT";
3287 case 5:
3288 return "SYSASSERT";
3289 case 6:
3290 return "FATAL_ERROR";
3291 }
3292
3293 return "UNKNOWN";
3294}
3295
3296#define ERROR_START_OFFSET (1 * sizeof(u32))
3297#define ERROR_ELEM_SIZE (7 * sizeof(u32))
3298
4a8a4322 3299static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
b481de9c
ZY
3300{
3301 u32 i;
3302 u32 desc, time, count, base, data1;
3303 u32 blink1, blink2, ilink1, ilink2;
3304 int rc;
3305
3306 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
3307
bb8c093b 3308 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
15b1687c 3309 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
b481de9c
ZY
3310 return;
3311 }
3312
5d49f498 3313 rc = iwl_grab_nic_access(priv);
b481de9c 3314 if (rc) {
39aadf8c 3315 IWL_WARN(priv, "Can not read from adapter at this time.\n");
b481de9c
ZY
3316 return;
3317 }
3318
5d49f498 3319 count = iwl_read_targ_mem(priv, base);
b481de9c
ZY
3320
3321 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
15b1687c
WT
3322 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
3323 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
3324 priv->status, count);
b481de9c
ZY
3325 }
3326
15b1687c 3327 IWL_ERR(priv, "Desc Time asrtPC blink2 "
b481de9c
ZY
3328 "ilink1 nmiPC Line\n");
3329 for (i = ERROR_START_OFFSET;
3330 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
3331 i += ERROR_ELEM_SIZE) {
5d49f498 3332 desc = iwl_read_targ_mem(priv, base + i);
b481de9c 3333 time =
5d49f498 3334 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
b481de9c 3335 blink1 =
5d49f498 3336 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
b481de9c 3337 blink2 =
5d49f498 3338 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
b481de9c 3339 ilink1 =
5d49f498 3340 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
b481de9c 3341 ilink2 =
5d49f498 3342 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
b481de9c 3343 data1 =
5d49f498 3344 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
b481de9c 3345
15b1687c
WT
3346 IWL_ERR(priv,
3347 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
3348 desc_lookup(desc), desc, time, blink1, blink2,
3349 ilink1, ilink2, data1);
b481de9c
ZY
3350 }
3351
5d49f498 3352 iwl_release_nic_access(priv);
b481de9c
ZY
3353
3354}
3355
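Sketch of the SRAM error-log layout the function above walks (illustrative only, with made-up sample data): an entry count at the base address, then 7-u32 records of desc, time, blink1, blink2, ilink1, ilink2, data1 starting at ERROR_START_OFFSET.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t log[] = {
                1,                      /* record count at the base address */
                /* one 7-u32 record: desc, time, blink1, blink2,
                 * ilink1, ilink2, data1 (sample values only) */
                5, 123456, 0x111, 0x222, 0x333, 0x444, 9,
        };
        uint32_t count = log[0];
        const uint32_t *rec = log + 1;  /* ERROR_START_OFFSET / sizeof(u32) */
        uint32_t i;

        for (i = 0; i < count; i++, rec += 7)
                printf("desc %u time %u blink 0x%05X 0x%05X ilink 0x%05X 0x%05X data %u\n",
                       rec[0], rec[1], rec[2], rec[3], rec[4], rec[5], rec[6]);
        return 0;
}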
f58177b9 3356#define EVENT_START_OFFSET (6 * sizeof(u32))
b481de9c
ZY
3357
3358/**
bb8c093b 3359 * iwl3945_print_event_log - Dump error event log to syslog
b481de9c 3360 *
5d49f498 3361 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
b481de9c 3362 */
4a8a4322 3363static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
b481de9c
ZY
3364 u32 num_events, u32 mode)
3365{
3366 u32 i;
3367 u32 base; /* SRAM byte address of event log header */
3368 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
3369 u32 ptr; /* SRAM byte address of log data */
3370 u32 ev, time, data; /* event log data */
3371
3372 if (num_events == 0)
3373 return;
3374
3375 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
3376
3377 if (mode == 0)
3378 event_size = 2 * sizeof(u32);
3379 else
3380 event_size = 3 * sizeof(u32);
3381
3382 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
3383
3384 /* "time" is actually "data" for mode 0 (no timestamp).
3385 * place event id # at far right for easier visual parsing. */
3386 for (i = 0; i < num_events; i++) {
5d49f498 3387 ev = iwl_read_targ_mem(priv, ptr);
b481de9c 3388 ptr += sizeof(u32);
5d49f498 3389 time = iwl_read_targ_mem(priv, ptr);
b481de9c 3390 ptr += sizeof(u32);
15b1687c
WT
3391 if (mode == 0) {
3392 /* data, ev */
3393 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
3394 } else {
5d49f498 3395 data = iwl_read_targ_mem(priv, ptr);
b481de9c 3396 ptr += sizeof(u32);
15b1687c 3397 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
b481de9c
ZY
3398 }
3399 }
3400}
3401
4a8a4322 3402static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
b481de9c
ZY
3403{
3404 int rc;
3405 u32 base; /* SRAM byte address of event log header */
3406 u32 capacity; /* event log capacity in # entries */
3407 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
3408 u32 num_wraps; /* # times uCode wrapped to top of log */
3409 u32 next_entry; /* index of next entry to be written by uCode */
3410 u32 size; /* # entries that we'll print */
3411
3412 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 3413 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
15b1687c 3414 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
b481de9c
ZY
3415 return;
3416 }
3417
5d49f498 3418 rc = iwl_grab_nic_access(priv);
b481de9c 3419 if (rc) {
39aadf8c 3420 IWL_WARN(priv, "Can not read from adapter at this time.\n");
b481de9c
ZY
3421 return;
3422 }
3423
3424 /* event log header */
5d49f498
AK
3425 capacity = iwl_read_targ_mem(priv, base);
3426 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
3427 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
3428 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
3429
3430 size = num_wraps ? capacity : next_entry;
3431
3432 /* bail out if nothing in log */
3433 if (size == 0) {
15b1687c 3434 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
5d49f498 3435 iwl_release_nic_access(priv);
b481de9c
ZY
3436 return;
3437 }
3438
15b1687c 3439 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
3440 size, num_wraps);
3441
3442 /* if uCode has wrapped back to top of log, start at the oldest entry,
3443 * i.e. the next one that uCode would fill. */
3444 if (num_wraps)
bb8c093b 3445 iwl3945_print_event_log(priv, next_entry,
b481de9c
ZY
3446 capacity - next_entry, mode);
3447
3448 /* (then/else) start at top of log */
bb8c093b 3449 iwl3945_print_event_log(priv, 0, next_entry, mode);
b481de9c 3450
5d49f498 3451 iwl_release_nic_access(priv);
b481de9c
ZY
3452}
3453
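Likewise for the event log (illustrative only, made-up sample data): a four-u32 header (capacity, mode, num_wraps, next_entry), two more words up to EVENT_START_OFFSET, then entries of two u32s for mode 0 (event id, then a data word) or three u32s for mode 1 (event id, timestamp, data), printed in the same order as iwl3945_print_event_log() above.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t log[] = {
                /* header: capacity, mode, num_wraps, next_entry */
                4, 1, 0, 2,
                /* reserved words up to EVENT_START_OFFSET (6 u32s) */
                0, 0,
                /* entry 0: event id, timestamp, data (sample values) */
                7, 100, 0xdeadbeef,
                /* entry 1 */
                9, 230, 0x00000042,
        };
        uint32_t mode = log[1];
        uint32_t next_entry = log[3];
        uint32_t event_size = (mode == 0) ? 2 : 3;
        const uint32_t *p = log + 6;
        uint32_t i;

        for (i = 0; i < next_entry; i++, p += event_size) {
                if (mode == 0)
                        printf("0x%08x\t%04u\n", p[1], p[0]);
                else
                        printf("%010u\t0x%08x\t%04u\n", p[1], p[2], p[0]);
        }
        return 0;
}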
3454/**
bb8c093b 3455 * iwl3945_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 3456 */
4a8a4322 3457static void iwl3945_irq_handle_error(struct iwl_priv *priv)
b481de9c 3458{
bb8c093b 3459 /* Set the FW error flag -- cleared on iwl3945_down */
b481de9c
ZY
3460 set_bit(STATUS_FW_ERROR, &priv->status);
3461
3462 /* Cancel currently queued command. */
3463 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3464
c8b0e6e1 3465#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3466 if (priv->debug_level & IWL_DL_FW_ERRORS) {
bb8c093b
CH
3467 iwl3945_dump_nic_error_log(priv);
3468 iwl3945_dump_nic_event_log(priv);
f2c7e521 3469 iwl3945_print_rx_config_cmd(priv, &priv->staging39_rxon);
b481de9c
ZY
3470 }
3471#endif
3472
3473 wake_up_interruptible(&priv->wait_command_queue);
3474
3475 /* Keep the restart process from trying to send host
3476 * commands by clearing the INIT status bit */
3477 clear_bit(STATUS_READY, &priv->status);
3478
3479 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3480 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
3481 "Restarting adapter due to uCode error.\n");
3482
bb8c093b 3483 if (iwl3945_is_associated(priv)) {
f2c7e521
AK
3484 memcpy(&priv->recovery39_rxon, &priv->active39_rxon,
3485 sizeof(priv->recovery39_rxon));
b481de9c
ZY
3486 priv->error_recovering = 1;
3487 }
3488 queue_work(priv->workqueue, &priv->restart);
3489 }
3490}
3491
4a8a4322 3492static void iwl3945_error_recovery(struct iwl_priv *priv)
b481de9c
ZY
3493{
3494 unsigned long flags;
3495
f2c7e521
AK
3496 memcpy(&priv->staging39_rxon, &priv->recovery39_rxon,
3497 sizeof(priv->staging39_rxon));
3498 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 3499 iwl3945_commit_rxon(priv);
b481de9c 3500
bb8c093b 3501 iwl3945_add_station(priv, priv->bssid, 1, 0);
b481de9c
ZY
3502
3503 spin_lock_irqsave(&priv->lock, flags);
f2c7e521 3504 priv->assoc_id = le16_to_cpu(priv->staging39_rxon.assoc_id);
b481de9c
ZY
3505 priv->error_recovering = 0;
3506 spin_unlock_irqrestore(&priv->lock, flags);
3507}
3508
4a8a4322 3509static void iwl3945_irq_tasklet(struct iwl_priv *priv)
b481de9c
ZY
3510{
3511 u32 inta, handled = 0;
3512 u32 inta_fh;
3513 unsigned long flags;
c8b0e6e1 3514#ifdef CONFIG_IWL3945_DEBUG
b481de9c
ZY
3515 u32 inta_mask;
3516#endif
3517
3518 spin_lock_irqsave(&priv->lock, flags);
3519
3520 /* Ack/clear/reset pending uCode interrupts.
3521 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
3522 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
5d49f498
AK
3523 inta = iwl_read32(priv, CSR_INT);
3524 iwl_write32(priv, CSR_INT, inta);
b481de9c
ZY
3525
3526 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
3527 * Any new interrupts that happen after this, either while we're
3528 * in this tasklet, or later, will show up in next ISR/tasklet. */
5d49f498
AK
3529 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
3530 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 3531
c8b0e6e1 3532#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3533 if (priv->debug_level & IWL_DL_ISR) {
9fbab516 3534 /* just for debug */
5d49f498 3535 inta_mask = iwl_read32(priv, CSR_INT_MASK);
b481de9c
ZY
3536 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
3537 inta, inta_mask, inta_fh);
3538 }
3539#endif
3540
3541 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
3542 * atomic, make sure that inta covers all the interrupts that
3543 * we've discovered, even if FH interrupt came in just after
3544 * reading CSR_INT. */
6f83eaa1 3545 if (inta_fh & CSR39_FH_INT_RX_MASK)
b481de9c 3546 inta |= CSR_INT_BIT_FH_RX;
6f83eaa1 3547 if (inta_fh & CSR39_FH_INT_TX_MASK)
b481de9c
ZY
3548 inta |= CSR_INT_BIT_FH_TX;
3549
3550 /* Now service all interrupt bits discovered above. */
3551 if (inta & CSR_INT_BIT_HW_ERR) {
15b1687c 3552 IWL_ERR(priv, "Microcode HW error detected. Restarting.\n");
b481de9c
ZY
3553
3554 /* Tell the device to stop sending interrupts */
bb8c093b 3555 iwl3945_disable_interrupts(priv);
b481de9c 3556
bb8c093b 3557 iwl3945_irq_handle_error(priv);
b481de9c
ZY
3558
3559 handled |= CSR_INT_BIT_HW_ERR;
3560
3561 spin_unlock_irqrestore(&priv->lock, flags);
3562
3563 return;
3564 }
3565
c8b0e6e1 3566#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3567 if (priv->debug_level & (IWL_DL_ISR)) {
b481de9c 3568 /* NIC fires this, but we don't use it, redundant with WAKEUP */
25c03d8e
JP
3569 if (inta & CSR_INT_BIT_SCD)
3570 IWL_DEBUG_ISR("Scheduler finished to transmit "
3571 "the frame/frames.\n");
b481de9c
ZY
3572
3573 /* Alive notification via Rx interrupt will do the real work */
3574 if (inta & CSR_INT_BIT_ALIVE)
3575 IWL_DEBUG_ISR("Alive interrupt\n");
3576 }
3577#endif
3578 /* Safely ignore these bits for debug checks below */
25c03d8e 3579 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
b481de9c 3580
b481de9c
ZY
3581 /* Error detected by uCode */
3582 if (inta & CSR_INT_BIT_SW_ERR) {
15b1687c
WT
3583 IWL_ERR(priv, "Microcode SW error detected. "
3584 "Restarting 0x%X.\n", inta);
bb8c093b 3585 iwl3945_irq_handle_error(priv);
b481de9c
ZY
3586 handled |= CSR_INT_BIT_SW_ERR;
3587 }
3588
3589 /* uCode wakes up after power-down sleep */
3590 if (inta & CSR_INT_BIT_WAKEUP) {
3591 IWL_DEBUG_ISR("Wakeup interrupt\n");
141c43a3 3592 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
4f3602c8
SO
3593 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
3594 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
3595 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
3596 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
3597 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
3598 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
3599
3600 handled |= CSR_INT_BIT_WAKEUP;
3601 }
3602
3603 /* All uCode command responses, including Tx command responses,
3604 * Rx "responses" (frame-received notification), and other
3605 * notifications from uCode come through here */
3606 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 3607 iwl3945_rx_handle(priv);
b481de9c
ZY
3608 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
3609 }
3610
3611 if (inta & CSR_INT_BIT_FH_TX) {
3612 IWL_DEBUG_ISR("Tx interrupt\n");
3613
5d49f498
AK
3614 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
3615 if (!iwl_grab_nic_access(priv)) {
3616 iwl_write_direct32(priv, FH39_TCSR_CREDIT
bddadf86 3617 (FH39_SRVC_CHNL), 0x0);
5d49f498 3618 iwl_release_nic_access(priv);
b481de9c
ZY
3619 }
3620 handled |= CSR_INT_BIT_FH_TX;
3621 }
3622
3623 if (inta & ~handled)
15b1687c 3624 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
b481de9c
ZY
3625
3626 if (inta & ~CSR_INI_SET_MASK) {
39aadf8c 3627 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
b481de9c 3628 inta & ~CSR_INI_SET_MASK);
39aadf8c 3629 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
b481de9c
ZY
3630 }
3631
3632 /* Re-enable all interrupts */
0359facc
MA
3633 /* only Re-enable if disabled by irq */
3634 if (test_bit(STATUS_INT_ENABLED, &priv->status))
3635 iwl3945_enable_interrupts(priv);
b481de9c 3636
c8b0e6e1 3637#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3638 if (priv->debug_level & (IWL_DL_ISR)) {
5d49f498
AK
3639 inta = iwl_read32(priv, CSR_INT);
3640 inta_mask = iwl_read32(priv, CSR_INT_MASK);
3641 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
3642 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
3643 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
3644 }
3645#endif
3646 spin_unlock_irqrestore(&priv->lock, flags);
3647}
3648
bb8c093b 3649static irqreturn_t iwl3945_isr(int irq, void *data)
b481de9c 3650{
4a8a4322 3651 struct iwl_priv *priv = data;
b481de9c
ZY
3652 u32 inta, inta_mask;
3653 u32 inta_fh;
3654 if (!priv)
3655 return IRQ_NONE;
3656
3657 spin_lock(&priv->lock);
3658
3659 /* Disable (but don't clear!) interrupts here to avoid
3660 * back-to-back ISRs and sporadic interrupts from our NIC.
3661 * If we have something to service, the tasklet will re-enable ints.
3662 * If we *don't* have something, we'll re-enable before leaving here. */
5d49f498
AK
3663 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
3664 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
3665
3666 /* Discover which interrupts are active/pending */
5d49f498
AK
3667 inta = iwl_read32(priv, CSR_INT);
3668 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
3669
3670 /* Ignore interrupt if there's nothing in NIC to service.
3671 * This may be due to IRQ shared with another device,
3672 * or due to sporadic interrupts thrown from our NIC. */
3673 if (!inta && !inta_fh) {
3674 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
3675 goto none;
3676 }
3677
3678 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
3679 /* Hardware disappeared */
39aadf8c 3680 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
cb4da1a3 3681 goto unplugged;
b481de9c
ZY
3682 }
3683
3684 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
3685 inta, inta_mask, inta_fh);
3686
25c03d8e
JP
3687 inta &= ~CSR_INT_BIT_SCD;
3688
bb8c093b 3689 /* iwl3945_irq_tasklet() will service interrupts and re-enable them */
25c03d8e
JP
3690 if (likely(inta || inta_fh))
3691 tasklet_schedule(&priv->irq_tasklet);
cb4da1a3 3692unplugged:
b481de9c
ZY
3693 spin_unlock(&priv->lock);
3694
3695 return IRQ_HANDLED;
3696
3697 none:
3698 /* re-enable interrupts here since we don't have anything to service. */
0359facc
MA
3699 /* only Re-enable if disabled by irq */
3700 if (test_bit(STATUS_INT_ENABLED, &priv->status))
3701 iwl3945_enable_interrupts(priv);
b481de9c
ZY
3702 spin_unlock(&priv->lock);
3703 return IRQ_NONE;
3704}
3705
3706/************************** EEPROM BANDS ****************************
3707 *
bb8c093b 3708 * The iwl3945_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
3709 * EEPROM contents to the specific channel number supported for each
3710 * band.
3711 *
f2c7e521 3712 * For example, iwl3945_priv->eeprom39.band_3_channels[4] from the band_3
b481de9c
ZY
3713 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
3714 * The specific geography and calibration information for that channel
3715 * is contained in the eeprom map itself.
3716 *
3717 * During init, we copy the eeprom information and channel map
3718 * information into priv->channel_info_24/52 and priv->channel_map_24/52
3719 *
3720 * channel_map_24/52 provides the index in the channel_info array for a
3721 * given channel. We have to have two separate maps as there is channel
3722 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
3723 * band_2
3724 *
3725 * A value of 0xff stored in the channel_map indicates that the channel
3726 * is not supported by the hardware at all.
3727 *
3728 * A value of 0xfe in the channel_map indicates that the channel is not
3729 * valid for Tx with the current hardware. This means that
3730 * while the system can tune and receive on a given channel, it may not
3731 * be able to associate or transmit any frames on that
3732 * channel. There is no corresponding channel information for that
3733 * entry.
3734 *
3735 *********************************************************************/
3736
3737/* 2.4 GHz */
bb8c093b 3738static const u8 iwl3945_eeprom_band_1[14] = {
b481de9c
ZY
3739 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
3740};
3741
3742/* 5.2 GHz bands */
9fbab516 3743static const u8 iwl3945_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
3744 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
3745};
3746
9fbab516 3747static const u8 iwl3945_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
3748 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
3749};
3750
bb8c093b 3751static const u8 iwl3945_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
3752 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
3753};
3754
bb8c093b 3755static const u8 iwl3945_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
3756 145, 149, 153, 157, 161, 165
3757};
3758
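As the EEPROM BANDS comment above notes, these tables are plain index-to-channel maps; band_3_channels[4], for instance, is physical channel 42. A trivial standalone check (illustrative only, with a local copy of the table):

#include <stdio.h>

/* Copy of iwl3945_eeprom_band_3[] above (5170-5320MHz). */
static const unsigned char band_3[] = {
        34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

int main(void)
{
        printf("band_3_channels[4] -> channel %u\n", band_3[4]);  /* 42 */
        return 0;
}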
4a8a4322 3759static void iwl3945_init_band_reference(const struct iwl_priv *priv, int band,
b481de9c 3760 int *eeprom_ch_count,
0f741d99 3761 const struct iwl_eeprom_channel
b481de9c
ZY
3762 **eeprom_ch_info,
3763 const u8 **eeprom_ch_index)
3764{
3765 switch (band) {
3766 case 1: /* 2.4GHz band */
bb8c093b 3767 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_1);
f2c7e521 3768 *eeprom_ch_info = priv->eeprom39.band_1_channels;
bb8c093b 3769 *eeprom_ch_index = iwl3945_eeprom_band_1;
b481de9c 3770 break;
9fbab516 3771 case 2: /* 4.9GHz band */
bb8c093b 3772 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_2);
f2c7e521 3773 *eeprom_ch_info = priv->eeprom39.band_2_channels;
bb8c093b 3774 *eeprom_ch_index = iwl3945_eeprom_band_2;
b481de9c
ZY
3775 break;
3776 case 3: /* 5.2GHz band */
bb8c093b 3777 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_3);
f2c7e521 3778 *eeprom_ch_info = priv->eeprom39.band_3_channels;
bb8c093b 3779 *eeprom_ch_index = iwl3945_eeprom_band_3;
b481de9c 3780 break;
9fbab516 3781 case 4: /* 5.5GHz band */
bb8c093b 3782 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_4);
f2c7e521 3783 *eeprom_ch_info = priv->eeprom39.band_4_channels;
bb8c093b 3784 *eeprom_ch_index = iwl3945_eeprom_band_4;
b481de9c 3785 break;
9fbab516 3786 case 5: /* 5.7GHz band */
bb8c093b 3787 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_5);
f2c7e521 3788 *eeprom_ch_info = priv->eeprom39.band_5_channels;
bb8c093b 3789 *eeprom_ch_index = iwl3945_eeprom_band_5;
b481de9c
ZY
3790 break;
3791 default:
3792 BUG();
3793 return;
3794 }
3795}
3796
6440adb5
BC
3797/**
3798 * iwl3945_get_channel_info - Find driver's private channel info
3799 *
3800 * Based on band and channel number.
3801 */
d20b3c65 3802const struct iwl_channel_info *
4a8a4322 3803iwl3945_get_channel_info(const struct iwl_priv *priv,
d20b3c65 3804 enum ieee80211_band band, u16 channel)
b481de9c
ZY
3805{
3806 int i;
3807
8318d78a
JB
3808 switch (band) {
3809 case IEEE80211_BAND_5GHZ:
b481de9c
ZY
3810 for (i = 14; i < priv->channel_count; i++) {
3811 if (priv->channel_info[i].channel == channel)
3812 return &priv->channel_info[i];
3813 }
3814 break;
3815
8318d78a 3816 case IEEE80211_BAND_2GHZ:
b481de9c
ZY
3817 if (channel >= 1 && channel <= 14)
3818 return &priv->channel_info[channel - 1];
3819 break;
8318d78a
JB
3820 case IEEE80211_NUM_BANDS:
3821 WARN_ON(1);
b481de9c
ZY
3822 }
3823
3824 return NULL;
3825}
3826
3827#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
3828 ? # x " " : "")
3829
6440adb5
BC
3830/**
3831 * iwl3945_init_channel_map - Set up driver's info for all possible channels
3832 */
4a8a4322 3833static int iwl3945_init_channel_map(struct iwl_priv *priv)
b481de9c
ZY
3834{
3835 int eeprom_ch_count = 0;
3836 const u8 *eeprom_ch_index = NULL;
0f741d99 3837 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 3838 int band, ch;
d20b3c65 3839 struct iwl_channel_info *ch_info;
b481de9c
ZY
3840
3841 if (priv->channel_count) {
3842 IWL_DEBUG_INFO("Channel map already initialized.\n");
3843 return 0;
3844 }
3845
f2c7e521 3846 if (priv->eeprom39.version < 0x2f) {
39aadf8c 3847 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
f2c7e521 3848 priv->eeprom39.version);
b481de9c
ZY
3849 return -EINVAL;
3850 }
3851
3852 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
3853
3854 priv->channel_count =
bb8c093b
CH
3855 ARRAY_SIZE(iwl3945_eeprom_band_1) +
3856 ARRAY_SIZE(iwl3945_eeprom_band_2) +
3857 ARRAY_SIZE(iwl3945_eeprom_band_3) +
3858 ARRAY_SIZE(iwl3945_eeprom_band_4) +
3859 ARRAY_SIZE(iwl3945_eeprom_band_5);
b481de9c
ZY
3860
3861 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
3862
d20b3c65 3863 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
b481de9c
ZY
3864 priv->channel_count, GFP_KERNEL);
3865 if (!priv->channel_info) {
15b1687c 3866 IWL_ERR(priv, "Could not allocate channel_info\n");
b481de9c
ZY
3867 priv->channel_count = 0;
3868 return -ENOMEM;
3869 }
3870
3871 ch_info = priv->channel_info;
3872
3873 /* Loop through the 5 EEPROM bands adding them in order to the
3874 * channel map we maintain (which contains additional information
3875 * beyond what is just in the EEPROM) */
3876 for (band = 1; band <= 5; band++) {
3877
bb8c093b 3878 iwl3945_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
3879 &eeprom_ch_info, &eeprom_ch_index);
3880
3881 /* Loop through each band adding each of the channels */
3882 for (ch = 0; ch < eeprom_ch_count; ch++) {
3883 ch_info->channel = eeprom_ch_index[ch];
8318d78a
JB
3884 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
3885 IEEE80211_BAND_5GHZ;
b481de9c
ZY
3886
3887 /* permanently store EEPROM's channel regulatory flags
3888 * and max power in channel info database. */
3889 ch_info->eeprom = eeprom_ch_info[ch];
3890
3891 /* Copy the run-time flags so they are there even on
3892 * invalid channels */
3893 ch_info->flags = eeprom_ch_info[ch].flags;
3894
3895 if (!(is_channel_valid(ch_info))) {
3896 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
3897 "No traffic\n",
3898 ch_info->channel,
3899 ch_info->flags,
3900 is_channel_a_band(ch_info) ?
3901 "5.2" : "2.4");
3902 ch_info++;
3903 continue;
3904 }
3905
3906 /* Initialize regulatory-based run-time data */
3907 ch_info->max_power_avg = ch_info->curr_txpow =
3908 eeprom_ch_info[ch].max_power_avg;
3909 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
3910 ch_info->min_power = 0;
3911
fe7c4040 3912 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
b481de9c
ZY
3913 " %ddBm): Ad-Hoc %ssupported\n",
3914 ch_info->channel,
3915 is_channel_a_band(ch_info) ?
3916 "5.2" : "2.4",
8211ef78 3917 CHECK_AND_PRINT(VALID),
b481de9c
ZY
3918 CHECK_AND_PRINT(IBSS),
3919 CHECK_AND_PRINT(ACTIVE),
3920 CHECK_AND_PRINT(RADAR),
3921 CHECK_AND_PRINT(WIDE),
b481de9c
ZY
3922 CHECK_AND_PRINT(DFS),
3923 eeprom_ch_info[ch].flags,
3924 eeprom_ch_info[ch].max_power_avg,
3925 ((eeprom_ch_info[ch].
3926 flags & EEPROM_CHANNEL_IBSS)
3927 && !(eeprom_ch_info[ch].
3928 flags & EEPROM_CHANNEL_RADAR))
3929 ? "" : "not ");
3930
62ea9c5b 3931 /* Set the tx_power_user_lmt to the highest power
b481de9c
ZY
3932 * supported by any channel */
3933 if (eeprom_ch_info[ch].max_power_avg >
62ea9c5b
WT
3934 priv->tx_power_user_lmt)
3935 priv->tx_power_user_lmt =
b481de9c
ZY
3936 eeprom_ch_info[ch].max_power_avg;
3937
3938 ch_info++;
3939 }
3940 }
3941
6440adb5 3942 /* Set up txpower settings in driver for all channels */
b481de9c
ZY
3943 if (iwl3945_txpower_set_from_eeprom(priv))
3944 return -EIO;
3945
3946 return 0;
3947}
3948
849e0dce
RC
3949/*
3950 * iwl3945_free_channel_map - undo allocations in iwl3945_init_channel_map
3951 */
4a8a4322 3952static void iwl3945_free_channel_map(struct iwl_priv *priv)
849e0dce
RC
3953{
3954 kfree(priv->channel_info);
3955 priv->channel_count = 0;
3956}
3957
b481de9c
ZY
3958/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
3959 * sending probe req. This should be set long enough to hear probe responses
3960 * from more than one AP. */
f9340520
AK
3961#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
3962#define IWL_ACTIVE_DWELL_TIME_52 (20)
3963
3964#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
3965#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
b481de9c
ZY
3966
3967/* For faster active scanning, scan will move to the next channel if fewer than
3968 * PLCP_QUIET_THRESH packets are heard on this channel within
3969 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
3970 * time if it's a quiet channel (nothing responded to our probe, and there's
3971 * no other traffic).
3972 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
3973#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
f9340520 3974#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(10) /* msec */
b481de9c
ZY
3975
3976/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
3977 * Must be set longer than active dwell time.
3978 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
3979#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
3980#define IWL_PASSIVE_DWELL_TIME_52 (10)
3981#define IWL_PASSIVE_DWELL_BASE (100)
3982#define IWL_CHANNEL_TUNE_TIME 5
3983
e720ce9d 3984#define IWL_SCAN_PROBE_MASK(n) (BIT(n) | (BIT(n) - BIT(1)))
f9340520 3985
4a8a4322 3986static inline u16 iwl3945_get_active_dwell_time(struct iwl_priv *priv,
f9340520
AK
3987 enum ieee80211_band band,
3988 u8 n_probes)
b481de9c 3989{
8318d78a 3990 if (band == IEEE80211_BAND_5GHZ)
f9340520
AK
3991 return IWL_ACTIVE_DWELL_TIME_52 +
3992 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
b481de9c 3993 else
f9340520
AK
3994 return IWL_ACTIVE_DWELL_TIME_24 +
3995 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
b481de9c
ZY
3996}
3997
4a8a4322 3998static u16 iwl3945_get_passive_dwell_time(struct iwl_priv *priv,
8318d78a 3999 enum ieee80211_band band)
b481de9c 4000{
8318d78a 4001 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
b481de9c
ZY
4002 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4003 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
4004
bb8c093b 4005 if (iwl3945_is_associated(priv)) {
b481de9c
ZY
4006 /* If we're associated, we clamp the maximum passive
4007 * dwell time to be 98% of the beacon interval (minus
4008 * 2 * channel tune time) */
4009 passive = priv->beacon_int;
4010 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
4011 passive = IWL_PASSIVE_DWELL_BASE;
4012 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4013 }
4014
b481de9c
ZY
4015 return passive;
4016}
4017
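Plugging numbers in (illustrative only; the associated-beacon clamp branch is left out and n_probes is a made-up input): two direct probes on 2.4 GHz give an active dwell of 30 + 3*(2+1) = 39 msec, the default passive dwell is 100 + 20 = 120 msec, and IWL_SCAN_PROBE_MASK(2) sets bits 1..2.

#include <stdio.h>

/* Constants copied from the defines above; BIT() is spelled out because
 * this is a userspace sketch. */
#define BIT(n)                          (1U << (n))
#define IWL_SCAN_PROBE_MASK(n)          (BIT(n) | (BIT(n) - BIT(1)))
#define IWL_ACTIVE_DWELL_TIME_24        30
#define IWL_ACTIVE_DWELL_FACTOR_24GHZ   3
#define IWL_PASSIVE_DWELL_TIME_24       20
#define IWL_PASSIVE_DWELL_BASE          100

int main(void)
{
        unsigned int n_probes = 2;      /* made-up input */
        unsigned int active = IWL_ACTIVE_DWELL_TIME_24 +
                              IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
        unsigned int passive = IWL_PASSIVE_DWELL_BASE +
                               IWL_PASSIVE_DWELL_TIME_24;

        printf("2.4 GHz active dwell  = %u msec\n", active);    /* 39 */
        printf("2.4 GHz passive dwell = %u msec\n", passive);   /* 120 */
        printf("probe mask for %u probes = 0x%x\n",
               n_probes, IWL_SCAN_PROBE_MASK(n_probes));        /* 0x6 */
        return 0;
}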
4a8a4322 4018static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
8318d78a 4019 enum ieee80211_band band,
f9340520 4020 u8 is_active, u8 n_probes,
bb8c093b 4021 struct iwl3945_scan_channel *scan_ch)
b481de9c
ZY
4022{
4023 const struct ieee80211_channel *channels = NULL;
8318d78a 4024 const struct ieee80211_supported_band *sband;
d20b3c65 4025 const struct iwl_channel_info *ch_info;
b481de9c
ZY
4026 u16 passive_dwell = 0;
4027 u16 active_dwell = 0;
4028 int added, i;
4029
cbba18c6 4030 sband = iwl_get_hw_mode(priv, band);
8318d78a 4031 if (!sband)
b481de9c
ZY
4032 return 0;
4033
8318d78a 4034 channels = sband->channels;
b481de9c 4035
f9340520 4036 active_dwell = iwl3945_get_active_dwell_time(priv, band, n_probes);
8318d78a 4037 passive_dwell = iwl3945_get_passive_dwell_time(priv, band);
b481de9c 4038
8f4807a1
AK
4039 if (passive_dwell <= active_dwell)
4040 passive_dwell = active_dwell + 1;
4041
8318d78a 4042 for (i = 0, added = 0; i < sband->n_channels; i++) {
182e2e66
JB
4043 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4044 continue;
4045
8318d78a 4046 scan_ch->channel = channels[i].hw_value;
b481de9c 4047
8318d78a 4048 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
b481de9c 4049 if (!is_channel_valid(ch_info)) {
66b5004d 4050 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
b481de9c
ZY
4051 scan_ch->channel);
4052 continue;
4053 }
4054
011a0330
AK
4055 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4056 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4057 /* If passive, set up for auto-switch
4058 * and use long active_dwell time.
4059 */
b481de9c 4060 if (!is_active || is_channel_passive(ch_info) ||
011a0330 4061 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
b481de9c 4062 scan_ch->type = 0; /* passive */
011a0330
AK
4063 if (IWL_UCODE_API(priv->ucode_ver) == 1)
4064 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
4065 } else {
b481de9c 4066 scan_ch->type = 1; /* active */
011a0330 4067 }
b481de9c 4068
011a0330
AK
4069 /* Set direct probe bits. These may be used both for active
4070 * scan channels (probes get sent right away),
4071 * or for passive channels (probes get sent only after
4072 * hearing a clear Rx packet). */
4073 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
4074 if (n_probes)
4075 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4076 } else {
4077 /* uCode v1 does not allow setting direct probe bits on
4078 * passive channel. */
4079 if ((scan_ch->type & 1) && n_probes)
4080 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4081 }
b481de9c 4082
9fbab516 4083 /* Set txpower levels to defaults */
b481de9c
ZY
4084 scan_ch->tpc.dsp_atten = 110;
4085 /* scan_pwr_info->tpc.dsp_atten; */
4086
4087 /*scan_pwr_info->tpc.tx_gain; */
8318d78a 4088 if (band == IEEE80211_BAND_5GHZ)
b481de9c
ZY
4089 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
4090 else {
4091 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
4092 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516 4093 * power level:
8a1b0245 4094 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
b481de9c
ZY
4095 */
4096 }
4097
4098 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
4099 scan_ch->channel,
4100 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
4101 (scan_ch->type & 1) ?
4102 active_dwell : passive_dwell);
4103
4104 scan_ch++;
4105 added++;
4106 }
4107
4108 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
4109 return added;
4110}
4111
4a8a4322 4112static void iwl3945_init_hw_rates(struct iwl_priv *priv,
b481de9c
ZY
4113 struct ieee80211_rate *rates)
4114{
4115 int i;
4116
4117 for (i = 0; i < IWL_RATE_COUNT; i++) {
8318d78a
JB
4118 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
4119 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4120 rates[i].hw_value_short = i;
4121 rates[i].flags = 0;
d9829a67 4122 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
b481de9c 4123 /*
8318d78a 4124 * If CCK != 1M then set short preamble rate flag.
b481de9c 4125 */
bb8c093b 4126 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
8318d78a 4127 0 : IEEE80211_RATE_SHORT_PREAMBLE;
b481de9c 4128 }
b481de9c
ZY
4129 }
4130}
4131
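The "* 5" above converts what appears to be the standard IEEE supported-rate code held in iwl3945_rates[i].ieee (units of 500 kbps) into mac80211's bitrate field (units of 100 kbps). A standalone sketch with a hand-written subset of rate codes (illustrative only, not the driver's table):

#include <stdio.h>

int main(void)
{
        /* hand-written subset of IEEE supported-rate codes, 500 kbps units */
        const unsigned int ieee[] = { 2, 4, 11, 22, 12, 18, 24, 36 };
        unsigned int i;

        for (i = 0; i < sizeof(ieee) / sizeof(ieee[0]); i++)
                printf("ieee code %2u -> bitrate field %3u (%u.%u Mbps)\n",
                       ieee[i], ieee[i] * 5,
                       (ieee[i] * 5) / 10, (ieee[i] * 5) % 10);
        return 0;
}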
4132/**
bb8c093b 4133 * iwl3945_init_geos - Initialize mac80211's geo/channel info based from eeprom
b481de9c 4134 */
4a8a4322 4135static int iwl3945_init_geos(struct iwl_priv *priv)
b481de9c 4136{
d20b3c65 4137 struct iwl_channel_info *ch;
8211ef78 4138 struct ieee80211_supported_band *sband;
b481de9c
ZY
4139 struct ieee80211_channel *channels;
4140 struct ieee80211_channel *geo_ch;
4141 struct ieee80211_rate *rates;
4142 int i = 0;
b481de9c 4143
8318d78a
JB
4144 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4145 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
b481de9c
ZY
4146 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4147 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4148 return 0;
4149 }
4150
b481de9c
ZY
4151 channels = kzalloc(sizeof(struct ieee80211_channel) *
4152 priv->channel_count, GFP_KERNEL);
8318d78a 4153 if (!channels)
b481de9c 4154 return -ENOMEM;
b481de9c 4155
8211ef78 4156 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
b481de9c
ZY
4157 GFP_KERNEL);
4158 if (!rates) {
b481de9c
ZY
4159 kfree(channels);
4160 return -ENOMEM;
4161 }
4162
b481de9c 4163 /* 5.2GHz channels start after the 2.4GHz channels */
8211ef78
TW
4164 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4165 sband->channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)];
4166 /* just OFDM */
4167 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4168 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
4169
4170 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4171 sband->channels = channels;
4172 /* OFDM & CCK */
4173 sband->bitrates = rates;
4174 sband->n_bitrates = IWL_RATE_COUNT;
b481de9c
ZY
4175
4176 priv->ieee_channels = channels;
4177 priv->ieee_rates = rates;
4178
bb8c093b 4179 iwl3945_init_hw_rates(priv, rates);
b481de9c 4180
8211ef78 4181 for (i = 0; i < priv->channel_count; i++) {
b481de9c
ZY
4182 ch = &priv->channel_info[i];
4183
8211ef78
TW
4184 /* FIXME: might be removed if scan is OK */
4185 if (!is_channel_valid(ch))
b481de9c 4186 continue;
b481de9c
ZY
4187
4188 if (is_channel_a_band(ch))
8211ef78 4189 sband = &priv->bands[IEEE80211_BAND_5GHZ];
8318d78a 4190 else
8211ef78 4191 sband = &priv->bands[IEEE80211_BAND_2GHZ];
b481de9c 4192
8211ef78
TW
4193 geo_ch = &sband->channels[sband->n_channels++];
4194
4195 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
8318d78a
JB
4196 geo_ch->max_power = ch->max_power_avg;
4197 geo_ch->max_antenna_gain = 0xff;
7b72304d 4198 geo_ch->hw_value = ch->channel;
b481de9c
ZY
4199
4200 if (is_channel_valid(ch)) {
8318d78a
JB
4201 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4202 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
b481de9c 4203
8318d78a
JB
4204 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4205 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
b481de9c
ZY
4206
4207 if (ch->flags & EEPROM_CHANNEL_RADAR)
8318d78a 4208 geo_ch->flags |= IEEE80211_CHAN_RADAR;
b481de9c 4209
62ea9c5b
WT
4210 if (ch->max_power_avg > priv->tx_power_channel_lmt)
4211 priv->tx_power_channel_lmt =
b481de9c 4212 ch->max_power_avg;
8211ef78 4213 } else {
8318d78a 4214 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
8211ef78
TW
4215 }
4216
4217 /* Save flags for reg domain usage */
4218 geo_ch->orig_flags = geo_ch->flags;
4219
4220 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4221 ch->channel, geo_ch->center_freq,
4222 is_channel_a_band(ch) ? "5.2" : "2.4",
4223 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4224 "restricted" : "valid",
4225 geo_ch->flags);
b481de9c
ZY
4226 }
4227
82b9a121
TW
4228 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4229 priv->cfg->sku & IWL_SKU_A) {
978785a3
TW
4230 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
4231 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
4232 priv->pci_dev->device, priv->pci_dev->subsystem_device);
82b9a121 4233 priv->cfg->sku &= ~IWL_SKU_A;
b481de9c
ZY
4234 }
4235
978785a3 4236 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
8318d78a
JB
4237 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4238 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
b481de9c 4239
e0e0a67e
JL
4240 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4241 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4242 &priv->bands[IEEE80211_BAND_2GHZ];
4243 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4244 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4245 &priv->bands[IEEE80211_BAND_5GHZ];
b481de9c 4246
b481de9c
ZY
4247 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4248
4249 return 0;
4250}
4251
849e0dce
RC
4252/*
4253 * iwl3945_free_geos - undo allocations in iwl3945_init_geos
4254 */
4a8a4322 4255static void iwl3945_free_geos(struct iwl_priv *priv)
849e0dce 4256{
849e0dce
RC
4257 kfree(priv->ieee_channels);
4258 kfree(priv->ieee_rates);
4259 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4260}
4261
b481de9c
ZY
4262/******************************************************************************
4263 *
4264 * uCode download functions
4265 *
4266 ******************************************************************************/
4267
4a8a4322 4268static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
b481de9c 4269{
98c92211
TW
4270 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
4271 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
4272 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
4273 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
4274 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
4275 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c
ZY
4276}
4277
4278/**
bb8c093b 4279 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
4280 * looking at all data.
4281 */
4a8a4322 4282static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
4283{
4284 u32 val;
4285 u32 save_len = len;
4286 int rc = 0;
4287 u32 errcnt;
4288
4289 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4290
5d49f498 4291 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4292 if (rc)
4293 return rc;
4294
5d49f498 4295 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
250bdd21 4296 IWL39_RTC_INST_LOWER_BOUND);
b481de9c
ZY
4297
4298 errcnt = 0;
4299 for (; len > 0; len -= sizeof(u32), image++) {
4300 /* read data comes through single port, auto-incr addr */
4301 /* NOTE: Use the debugless read so we don't flood kernel log
4302 * if IWL_DL_IO is set */
5d49f498 4303 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c 4304 if (val != le32_to_cpu(*image)) {
15b1687c 4305 IWL_ERR(priv, "uCode INST section is invalid at "
b481de9c
ZY
4306 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4307 save_len - len, val, le32_to_cpu(*image));
4308 rc = -EIO;
4309 errcnt++;
4310 if (errcnt >= 20)
4311 break;
4312 }
4313 }
4314
5d49f498 4315 iwl_release_nic_access(priv);
b481de9c
ZY
4316
4317 if (!errcnt)
bc434dd2 4318 IWL_DEBUG_INFO("ucode image in INSTRUCTION memory is good\n");
b481de9c
ZY
4319
4320 return rc;
4321}
4322
4323
4324/**
bb8c093b 4325 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
4326 * using sample data 100 bytes apart. If these sample points are good,
4327 * it's a pretty good bet that everything between them is good, too.
4328 */
4a8a4322 4329static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
4330{
4331 u32 val;
4332 int rc = 0;
4333 u32 errcnt = 0;
4334 u32 i;
4335
4336 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4337
5d49f498 4338 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4339 if (rc)
4340 return rc;
4341
4342 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
4343 /* read data comes through single port, auto-incr addr */
4344 /* NOTE: Use the debugless read so we don't flood kernel log
4345 * if IWL_DL_IO is set */
5d49f498 4346 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
250bdd21 4347 i + IWL39_RTC_INST_LOWER_BOUND);
5d49f498 4348 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
4349 if (val != le32_to_cpu(*image)) {
4350#if 0 /* Enable this if you want to see details */
15b1687c 4351 IWL_ERR(priv, "uCode INST section is invalid at "
b481de9c
ZY
4352 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4353 i, val, *image);
4354#endif
4355 rc = -EIO;
4356 errcnt++;
4357 if (errcnt >= 3)
4358 break;
4359 }
4360 }
4361
5d49f498 4362 iwl_release_nic_access(priv);
b481de9c
ZY
4363
4364 return rc;
4365}
4366
4367
4368/**
bb8c093b 4369 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
4370 * and verify its contents
4371 */
4a8a4322 4372static int iwl3945_verify_ucode(struct iwl_priv *priv)
b481de9c
ZY
4373{
4374 __le32 *image;
4375 u32 len;
4376 int rc = 0;
4377
4378 /* Try bootstrap */
4379 image = (__le32 *)priv->ucode_boot.v_addr;
4380 len = priv->ucode_boot.len;
bb8c093b 4381 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4382 if (rc == 0) {
4383 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
4384 return 0;
4385 }
4386
4387 /* Try initialize */
4388 image = (__le32 *)priv->ucode_init.v_addr;
4389 len = priv->ucode_init.len;
bb8c093b 4390 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4391 if (rc == 0) {
4392 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
4393 return 0;
4394 }
4395
4396 /* Try runtime/protocol */
4397 image = (__le32 *)priv->ucode_code.v_addr;
4398 len = priv->ucode_code.len;
bb8c093b 4399 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4400 if (rc == 0) {
4401 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
4402 return 0;
4403 }
4404
15b1687c 4405 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
b481de9c 4406
9fbab516
BC
4407 /* Since nothing seems to match, show first several data entries in
4408 * instruction SRAM, so maybe visual inspection will give a clue.
4409 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
4410 image = (__le32 *)priv->ucode_boot.v_addr;
4411 len = priv->ucode_boot.len;
bb8c093b 4412 rc = iwl3945_verify_inst_full(priv, image, len);
b481de9c
ZY
4413
4414 return rc;
4415}
4416
4a8a4322 4417static void iwl3945_nic_start(struct iwl_priv *priv)
b481de9c
ZY
4418{
4419 /* Remove all resets to allow NIC to operate */
5d49f498 4420 iwl_write32(priv, CSR_RESET, 0);
b481de9c
ZY
4421}
4422
4423/**
bb8c093b 4424 * iwl3945_read_ucode - Read uCode images from disk file.
b481de9c
ZY
4425 *
4426 * Copy into buffers for card to fetch via bus-mastering
4427 */
4a8a4322 4428static int iwl3945_read_ucode(struct iwl_priv *priv)
b481de9c 4429{
a78fe754 4430 struct iwl_ucode *ucode;
a0987a8d 4431 int ret = -EINVAL, index;
b481de9c
ZY
4432 const struct firmware *ucode_raw;
4433 /* firmware file name contains uCode/driver compatibility version */
a0987a8d
RC
4434 const char *name_pre = priv->cfg->fw_name_pre;
4435 const unsigned int api_max = priv->cfg->ucode_api_max;
4436 const unsigned int api_min = priv->cfg->ucode_api_min;
4437 char buf[25];
b481de9c
ZY
4438 u8 *src;
4439 size_t len;
a0987a8d 4440 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
b481de9c
ZY
4441
4442 /* Ask kernel firmware_class module to get the boot firmware off disk.
4443 * request_firmware() is synchronous, file is in memory on return. */
a0987a8d
RC
4444 for (index = api_max; index >= api_min; index--) {
4445 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
4446 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
4447 if (ret < 0) {
15b1687c 4448 IWL_ERR(priv, "%s firmware file req failed: %d\n",
a0987a8d
RC
4449 buf, ret);
4450 if (ret == -ENOENT)
4451 continue;
4452 else
4453 goto error;
4454 } else {
4455 if (index < api_max)
15b1687c
WT
4456 IWL_ERR(priv, "Loaded firmware %s, "
4457 "which is deprecated. "
4458 " Please use API v%u instead.\n",
a0987a8d
RC
4459 buf, api_max);
4460 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
4461 buf, ucode_raw->size);
4462 break;
4463 }
b481de9c
ZY
4464 }
4465
a0987a8d
RC
4466 if (ret < 0)
4467 goto error;
b481de9c
ZY
4468
4469 /* Make sure that we got at least our header! */
4470 if (ucode_raw->size < sizeof(*ucode)) {
15b1687c 4471 IWL_ERR(priv, "File size way too small!\n");
90e759d1 4472 ret = -EINVAL;
b481de9c
ZY
4473 goto err_release;
4474 }
4475
4476 /* Data from ucode file: header followed by uCode images */
4477 ucode = (void *)ucode_raw->data;
4478
c02b3acd 4479 priv->ucode_ver = le32_to_cpu(ucode->ver);
a0987a8d 4480 api_ver = IWL_UCODE_API(priv->ucode_ver);
b481de9c
ZY
4481 inst_size = le32_to_cpu(ucode->inst_size);
4482 data_size = le32_to_cpu(ucode->data_size);
4483 init_size = le32_to_cpu(ucode->init_size);
4484 init_data_size = le32_to_cpu(ucode->init_data_size);
4485 boot_size = le32_to_cpu(ucode->boot_size);
4486
a0987a8d
RC
4487 /* api_ver should match the api version forming part of the
4488 * firmware filename ... but we don't check for that and only rely
4489 * on the API version read from the firmware header from here on forward */
4490
4491 if (api_ver < api_min || api_ver > api_max) {
15b1687c 4492 IWL_ERR(priv, "Driver unable to support your firmware API. "
a0987a8d
RC
4493 "Driver supports v%u, firmware is v%u.\n",
4494 api_max, api_ver);
4495 priv->ucode_ver = 0;
4496 ret = -EINVAL;
4497 goto err_release;
4498 }
4499 if (api_ver != api_max)
15b1687c 4500 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
a0987a8d
RC
4501 "got %u. New firmware can be obtained "
4502 "from http://www.intellinuxwireless.org.\n",
4503 api_max, api_ver);
4504
978785a3
TW
4505 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
4506 IWL_UCODE_MAJOR(priv->ucode_ver),
4507 IWL_UCODE_MINOR(priv->ucode_ver),
4508 IWL_UCODE_API(priv->ucode_ver),
4509 IWL_UCODE_SERIAL(priv->ucode_ver));
4510
a0987a8d
RC
4511 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n",
4512 priv->ucode_ver);
bc434dd2
IS
4513 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
4514 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size);
4515 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size);
4516 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size);
4517 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size);
b481de9c 4518
a0987a8d 4519
b481de9c
ZY
4520 /* Verify size of file vs. image size info in file's header */
4521 if (ucode_raw->size < sizeof(*ucode) +
4522 inst_size + data_size + init_size +
4523 init_data_size + boot_size) {
4524
4525 IWL_DEBUG_INFO("uCode file size %d too small\n",
4526 (int)ucode_raw->size);
90e759d1 4527 ret = -EINVAL;
b481de9c
ZY
4528 goto err_release;
4529 }
4530
4531 /* Verify that uCode images will fit in card's SRAM */
250bdd21 4532 if (inst_size > IWL39_MAX_INST_SIZE) {
90e759d1
TW
4533 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
4534 inst_size);
4535 ret = -EINVAL;
b481de9c
ZY
4536 goto err_release;
4537 }
4538
250bdd21 4539 if (data_size > IWL39_MAX_DATA_SIZE) {
90e759d1
TW
4540 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
4541 data_size);
4542 ret = -EINVAL;
b481de9c
ZY
4543 goto err_release;
4544 }
250bdd21 4545 if (init_size > IWL39_MAX_INST_SIZE) {
90e759d1
TW
4546 IWL_DEBUG_INFO("uCode init instr len %d too large to fit in\n",
4547 init_size);
4548 ret = -EINVAL;
b481de9c
ZY
4549 goto err_release;
4550 }
250bdd21 4551 if (init_data_size > IWL39_MAX_DATA_SIZE) {
90e759d1
TW
4552 IWL_DEBUG_INFO("uCode init data len %d too large to fit in\n",
4553 init_data_size);
4554 ret = -EINVAL;
b481de9c
ZY
4555 goto err_release;
4556 }
250bdd21 4557 if (boot_size > IWL39_MAX_BSM_SIZE) {
90e759d1
TW
4558 IWL_DEBUG_INFO("uCode boot instr len %d too large to fit in\n",
4559 boot_size);
4560 ret = -EINVAL;
b481de9c
ZY
4561 goto err_release;
4562 }
4563
4564 /* Allocate ucode buffers for card's bus-master loading ... */
4565
4566 /* Runtime instructions and 2 copies of data:
4567 * 1) unmodified from disk
4568 * 2) backup cache for save/restore during power-downs */
4569 priv->ucode_code.len = inst_size;
98c92211 4570 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
4571
4572 priv->ucode_data.len = data_size;
98c92211 4573 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
4574
4575 priv->ucode_data_backup.len = data_size;
98c92211 4576 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c 4577
90e759d1
TW
4578 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
4579 !priv->ucode_data_backup.v_addr)
4580 goto err_pci_alloc;
b481de9c
ZY
4581
4582 /* Initialization instructions and data */
90e759d1
TW
4583 if (init_size && init_data_size) {
4584 priv->ucode_init.len = init_size;
98c92211 4585 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
90e759d1
TW
4586
4587 priv->ucode_init_data.len = init_data_size;
98c92211 4588 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
90e759d1
TW
4589
4590 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
4591 goto err_pci_alloc;
4592 }
b481de9c
ZY
4593
4594 /* Bootstrap (instructions only, no data) */
90e759d1
TW
4595 if (boot_size) {
4596 priv->ucode_boot.len = boot_size;
98c92211 4597 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 4598
90e759d1
TW
4599 if (!priv->ucode_boot.v_addr)
4600 goto err_pci_alloc;
4601 }
b481de9c
ZY
4602
4603 /* Copy images into buffers for card's bus-master reads ... */
4604
4605 /* Runtime instructions (first block of data in file) */
4606 src = &ucode->data[0];
4607 len = priv->ucode_code.len;
90e759d1 4608 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
4609 memcpy(priv->ucode_code.v_addr, src, len);
4610 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
4611 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
4612
4613 /* Runtime data (2nd block)
bb8c093b 4614 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
b481de9c
ZY
4615 src = &ucode->data[inst_size];
4616 len = priv->ucode_data.len;
90e759d1 4617 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
4618 memcpy(priv->ucode_data.v_addr, src, len);
4619 memcpy(priv->ucode_data_backup.v_addr, src, len);
4620
4621 /* Initialization instructions (3rd block) */
4622 if (init_size) {
4623 src = &ucode->data[inst_size + data_size];
4624 len = priv->ucode_init.len;
90e759d1
TW
4625 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
4626 len);
b481de9c
ZY
4627 memcpy(priv->ucode_init.v_addr, src, len);
4628 }
4629
4630 /* Initialization data (4th block) */
4631 if (init_data_size) {
4632 src = &ucode->data[inst_size + data_size + init_size];
4633 len = priv->ucode_init_data.len;
4634 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
4635 (int)len);
4636 memcpy(priv->ucode_init_data.v_addr, src, len);
4637 }
4638
4639 /* Bootstrap instructions (5th block) */
4640 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
4641 len = priv->ucode_boot.len;
4642 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
4643 (int)len);
4644 memcpy(priv->ucode_boot.v_addr, src, len);
4645
4646 /* We have our copies now, allow OS release its copies */
4647 release_firmware(ucode_raw);
4648 return 0;
4649
4650 err_pci_alloc:
15b1687c 4651 IWL_ERR(priv, "failed to allocate pci memory\n");
90e759d1 4652 ret = -ENOMEM;
bb8c093b 4653 iwl3945_dealloc_ucode_pci(priv);
b481de9c
ZY
4654
4655 err_release:
4656 release_firmware(ucode_raw);
4657
4658 error:
90e759d1 4659 return ret;
b481de9c
ZY
4660}
4661
4662
4663/**
bb8c093b 4664 * iwl3945_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
4665 *
4666 * Tell initialization uCode where to find runtime uCode.
4667 *
4668 * BSM registers initially contain pointers to initialization uCode.
4669 * We need to replace them to load runtime uCode inst and data,
4670 * and to save runtime data when powering down.
4671 */
4a8a4322 4672static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
b481de9c
ZY
4673{
4674 dma_addr_t pinst;
4675 dma_addr_t pdata;
4676 int rc = 0;
4677 unsigned long flags;
4678
4679 /* bits 31:0 for 3945 */
4680 pinst = priv->ucode_code.p_addr;
4681 pdata = priv->ucode_data_backup.p_addr;
4682
4683 spin_lock_irqsave(&priv->lock, flags);
5d49f498 4684 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4685 if (rc) {
4686 spin_unlock_irqrestore(&priv->lock, flags);
4687 return rc;
4688 }
4689
4690 /* Tell bootstrap uCode where to find image to load */
5d49f498
AK
4691 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
4692 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
4693 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
4694 priv->ucode_data.len);
4695
a96a27f9 4696 /* Inst byte count must be last to set up, bit 31 signals uCode
b481de9c 4697 * that all new ptr/size info is in place */
5d49f498 4698 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
4699 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
4700
5d49f498 4701 iwl_release_nic_access(priv);
b481de9c
ZY
4702
4703 spin_unlock_irqrestore(&priv->lock, flags);
4704
4705 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
4706
4707 return rc;
4708}
4709
4710/**
bb8c093b 4711 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
4712 *
4713 * Called after REPLY_ALIVE notification received from "initialize" uCode.
4714 *
b481de9c 4715 * Tell "initialize" uCode to go ahead and load the runtime uCode.
9fbab516 4716 */
4a8a4322 4717static void iwl3945_init_alive_start(struct iwl_priv *priv)
b481de9c
ZY
4718{
4719 /* Check alive response for "valid" sign from uCode */
4720 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
4721 /* We had an error bringing up the hardware, so take it
4722 * all the way back down so we can try again */
4723 IWL_DEBUG_INFO("Initialize Alive failed.\n");
4724 goto restart;
4725 }
4726
4727 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
4728 * This is a paranoid check, because we would not have gotten the
4729 * "initialize" alive if code weren't properly loaded. */
bb8c093b 4730 if (iwl3945_verify_ucode(priv)) {
b481de9c
ZY
4731 /* Runtime instruction load was bad;
4732 * take it all the way back down so we can try again */
4733 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
4734 goto restart;
4735 }
4736
4737 /* Send pointers to protocol/runtime uCode image ... init code will
4738 * load and launch runtime uCode, which will send us another "Alive"
4739 * notification. */
4740 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 4741 if (iwl3945_set_ucode_ptrs(priv)) {
b481de9c
ZY
4742 /* Runtime instruction load won't happen;
4743 * take it all the way back down so we can try again */
4744 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
4745 goto restart;
4746 }
4747 return;
4748
4749 restart:
4750 queue_work(priv->workqueue, &priv->restart);
4751}
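/*
 * Editorial summary of the uCode boot handshake implemented above and
 * below (restating the existing comments, not new driver behavior):
 *   1) load_ucode() feeds the bootstrap program, which loads the
 *      "initialize" uCode.
 *   2) The "initialize" image answers with REPLY_ALIVE; this is handled
 *      by iwl3945_init_alive_start(), which verifies the load and calls
 *      iwl3945_set_ucode_ptrs() to point the BSM at the runtime image.
 *   3) The runtime uCode then sends its own REPLY_ALIVE, handled by
 *      iwl3945_alive_start(), which finishes bringing the NIC up.
 */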
4752
4753
9bdf5eca
MA
4754/* temporary */
4755static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
4756 struct sk_buff *skb);
4757
b481de9c 4758/**
bb8c093b 4759 * iwl3945_alive_start - called after REPLY_ALIVE notification received
b481de9c 4760 * from protocol/runtime uCode (initialization uCode's
bb8c093b 4761 * Alive gets handled by iwl3945_init_alive_start()).
b481de9c 4762 */
4a8a4322 4763static void iwl3945_alive_start(struct iwl_priv *priv)
b481de9c
ZY
4764{
4765 int rc = 0;
4766 int thermal_spin = 0;
4767 u32 rfkill;
4768
4769 IWL_DEBUG_INFO("Runtime Alive received.\n");
4770
4771 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
4772 /* We had an error bringing up the hardware, so take it
4773 * all the way back down so we can try again */
4774 IWL_DEBUG_INFO("Alive failed.\n");
4775 goto restart;
4776 }
4777
4778 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
4779 * This is a paranoid check, because we would not have gotten the
4780 * "runtime" alive if code weren't properly loaded. */
bb8c093b 4781 if (iwl3945_verify_ucode(priv)) {
b481de9c
ZY
4782 /* Runtime instruction load was bad;
4783 * take it all the way back down so we can try again */
4784 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
4785 goto restart;
4786 }
4787
bb8c093b 4788 iwl3945_clear_stations_table(priv);
b481de9c 4789
5d49f498 4790 rc = iwl_grab_nic_access(priv);
b481de9c 4791 if (rc) {
39aadf8c 4792 IWL_WARN(priv, "Can not read RFKILL status from adapter\n");
b481de9c
ZY
4793 return;
4794 }
4795
5d49f498 4796 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
b481de9c 4797 IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill);
5d49f498 4798 iwl_release_nic_access(priv);
b481de9c
ZY
4799
4800 if (rfkill & 0x1) {
4801 clear_bit(STATUS_RF_KILL_HW, &priv->status);
a96a27f9 4802 /* if RFKILL is not on, then wait for thermal
b481de9c 4803 * sensor in adapter to kick in */
bb8c093b 4804 while (iwl3945_hw_get_temperature(priv) == 0) {
b481de9c
ZY
4805 thermal_spin++;
4806 udelay(10);
4807 }
4808
4809 if (thermal_spin)
4810 IWL_DEBUG_INFO("Thermal calibration took %dus\n",
4811 thermal_spin * 10);
4812 } else
4813 set_bit(STATUS_RF_KILL_HW, &priv->status);
4814
9fbab516 4815 /* After the ALIVE response, we can send commands to 3945 uCode */
b481de9c
ZY
4816 set_bit(STATUS_ALIVE, &priv->status);
4817
4818 /* Clear out the uCode error bit if it is set */
4819 clear_bit(STATUS_FW_ERROR, &priv->status);
4820
775a6e27 4821 if (iwl_is_rfkill(priv))
b481de9c
ZY
4822 return;
4823
36d6825b 4824 ieee80211_wake_queues(priv->hw);
b481de9c
ZY
4825
4826 priv->active_rate = priv->rates_mask;
4827 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
4828
bb8c093b 4829 iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 4830
bb8c093b
CH
4831 if (iwl3945_is_associated(priv)) {
4832 struct iwl3945_rxon_cmd *active_rxon =
f2c7e521 4833 (struct iwl3945_rxon_cmd *)(&priv->active39_rxon);
b481de9c 4834
f2c7e521
AK
4835 memcpy(&priv->staging39_rxon, &priv->active39_rxon,
4836 sizeof(priv->staging39_rxon));
b481de9c
ZY
4837 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4838 } else {
4839 /* Initialize our rx_config data */
60294de3 4840 iwl3945_connection_init_rx_config(priv, priv->iw_mode);
f2c7e521 4841 memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
b481de9c
ZY
4842 }
4843
9fbab516 4844 /* Configure Bluetooth device coexistence support */
bb8c093b 4845 iwl3945_send_bt_config(priv);
b481de9c
ZY
4846
4847 /* Configure the adapter for unassociated operation */
bb8c093b 4848 iwl3945_commit_rxon(priv);
b481de9c 4849
b481de9c
ZY
4850 iwl3945_reg_txpower_periodic(priv);
4851
fe00b5a5
RC
4852 iwl3945_led_register(priv);
4853
b481de9c 4854 IWL_DEBUG_INFO("ALIVE processing complete.\n");
a9f46786 4855 set_bit(STATUS_READY, &priv->status);
5a66926a 4856 wake_up_interruptible(&priv->wait_command_queue);
b481de9c
ZY
4857
4858 if (priv->error_recovering)
bb8c093b 4859 iwl3945_error_recovery(priv);
b481de9c 4860
9bdf5eca
MA
4861 /* reassociate for ADHOC mode */
4862 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
4863 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
4864 priv->vif);
4865 if (beacon)
4866 iwl3945_mac_beacon_update(priv->hw, beacon);
4867 }
4868
b481de9c
ZY
4869 return;
4870
4871 restart:
4872 queue_work(priv->workqueue, &priv->restart);
4873}
4874
4a8a4322 4875static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
b481de9c 4876
4a8a4322 4877static void __iwl3945_down(struct iwl_priv *priv)
b481de9c
ZY
4878{
4879 unsigned long flags;
4880 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
4881 struct ieee80211_conf *conf = NULL;
4882
4883 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
4884
4885 conf = ieee80211_get_hw_conf(priv->hw);
4886
4887 if (!exit_pending)
4888 set_bit(STATUS_EXIT_PENDING, &priv->status);
4889
ab53d8af 4890 iwl3945_led_unregister(priv);
bb8c093b 4891 iwl3945_clear_stations_table(priv);
b481de9c
ZY
4892
4893 /* Unblock any waiting calls */
4894 wake_up_interruptible_all(&priv->wait_command_queue);
4895
b481de9c
ZY
4896 /* Wipe out the EXIT_PENDING status bit if we are not actually
4897 * exiting the module */
4898 if (!exit_pending)
4899 clear_bit(STATUS_EXIT_PENDING, &priv->status);
4900
4901 /* stop and reset the on-board processor */
5d49f498 4902 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
4903
4904 /* tell the device to stop sending interrupts */
0359facc 4905 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 4906 iwl3945_disable_interrupts(priv);
0359facc
MA
4907 spin_unlock_irqrestore(&priv->lock, flags);
4908 iwl_synchronize_irq(priv);
b481de9c
ZY
4909
4910 if (priv->mac80211_registered)
4911 ieee80211_stop_queues(priv->hw);
4912
bb8c093b 4913 /* If we have not previously called iwl3945_init() then
b481de9c 4914 * clear all bits but the RF Kill and SUSPEND bits and return */
775a6e27 4915 if (!iwl_is_init(priv)) {
b481de9c
ZY
4916 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
4917 STATUS_RF_KILL_HW |
4918 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
4919 STATUS_RF_KILL_SW |
9788864e
RC
4920 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
4921 STATUS_GEO_CONFIGURED |
b481de9c 4922 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
ebef2008
AK
4923 STATUS_IN_SUSPEND |
4924 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
4925 STATUS_EXIT_PENDING;
b481de9c
ZY
4926 goto exit;
4927 }
4928
4929 /* ...otherwise clear out all the status bits but the RF Kill and
4930 * SUSPEND bits and continue taking the NIC down. */
4931 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
4932 STATUS_RF_KILL_HW |
4933 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
4934 STATUS_RF_KILL_SW |
9788864e
RC
4935 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
4936 STATUS_GEO_CONFIGURED |
b481de9c
ZY
4937 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
4938 STATUS_IN_SUSPEND |
4939 test_bit(STATUS_FW_ERROR, &priv->status) <<
ebef2008
AK
4940 STATUS_FW_ERROR |
4941 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
4942 STATUS_EXIT_PENDING;
b481de9c
ZY
4943
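	/* Editorial note on the pattern above: test_bit(X, &priv->status)
	 * evaluates to 0 or 1, so shifting it back left by X rebuilds just
	 * that bit.  OR-ing the listed bits together produces a mask in
	 * which only the bits we want to survive can be set; any status
	 * bit not listed ends up cleared. */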
4944 spin_lock_irqsave(&priv->lock, flags);
5d49f498 4945 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
4946 spin_unlock_irqrestore(&priv->lock, flags);
4947
bb8c093b
CH
4948 iwl3945_hw_txq_ctx_stop(priv);
4949 iwl3945_hw_rxq_stop(priv);
b481de9c
ZY
4950
4951 spin_lock_irqsave(&priv->lock, flags);
5d49f498
AK
4952 if (!iwl_grab_nic_access(priv)) {
4953 iwl_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 4954 APMG_CLK_VAL_DMA_CLK_RQT);
5d49f498 4955 iwl_release_nic_access(priv);
b481de9c
ZY
4956 }
4957 spin_unlock_irqrestore(&priv->lock, flags);
4958
4959 udelay(5);
4960
01ec616d 4961 priv->cfg->ops->lib->apm_ops.reset(priv);
b481de9c 4962 exit:
3d24a9f7 4963 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
b481de9c
ZY
4964
4965 if (priv->ibss_beacon)
4966 dev_kfree_skb(priv->ibss_beacon);
4967 priv->ibss_beacon = NULL;
4968
4969 /* clear out any free frames */
bb8c093b 4970 iwl3945_clear_free_frames(priv);
b481de9c
ZY
4971}
4972
4a8a4322 4973static void iwl3945_down(struct iwl_priv *priv)
b481de9c
ZY
4974{
4975 mutex_lock(&priv->mutex);
bb8c093b 4976 __iwl3945_down(priv);
b481de9c 4977 mutex_unlock(&priv->mutex);
b24d22b1 4978
bb8c093b 4979 iwl3945_cancel_deferred_work(priv);
b481de9c
ZY
4980}
4981
4982#define MAX_HW_RESTARTS 5
4983
4a8a4322 4984static int __iwl3945_up(struct iwl_priv *priv)
b481de9c
ZY
4985{
4986 int rc, i;
4987
4988 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
39aadf8c 4989 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
b481de9c
ZY
4990 return -EIO;
4991 }
4992
4993 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
39aadf8c 4994 IWL_WARN(priv, "Radio disabled by SW RF kill (module "
b481de9c 4995 "parameter)\n");
e655b9f0
ZY
4996 return -ENODEV;
4997 }
4998
e903fbd4 4999 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
15b1687c 5000 IWL_ERR(priv, "ucode not available for device bring up\n");
e903fbd4
RC
5001 return -EIO;
5002 }
5003
e655b9f0 5004 /* If platform's RF_KILL switch is NOT set to KILL */
5d49f498 5005 if (iwl_read32(priv, CSR_GP_CNTRL) &
e655b9f0
ZY
5006 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5007 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5008 else {
5009 set_bit(STATUS_RF_KILL_HW, &priv->status);
5010 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
39aadf8c 5011 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
e655b9f0
ZY
5012 return -ENODEV;
5013 }
b481de9c 5014 }
80fcc9e2 5015
5d49f498 5016 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 5017
bb8c093b 5018 rc = iwl3945_hw_nic_init(priv);
b481de9c 5019 if (rc) {
15b1687c 5020		IWL_ERR(priv, "Unable to init nic\n");
b481de9c
ZY
5021 return rc;
5022 }
5023
5024 /* make sure rfkill handshake bits are cleared */
5d49f498
AK
5025 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5026 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
5027 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5028
5029 /* clear (again), then enable host interrupts */
5d49f498 5030 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
bb8c093b 5031 iwl3945_enable_interrupts(priv);
b481de9c
ZY
5032
5033 /* really make sure rfkill handshake bits are cleared */
5d49f498
AK
5034 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5035 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
5036
5037 /* Copy original ucode data image from disk into backup cache.
5038 * This will be used to initialize the on-board processor's
5039 * data SRAM for a clean start when the runtime program first loads. */
5040 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
5a66926a 5041 priv->ucode_data.len);
b481de9c 5042
e655b9f0
ZY
5043 /* We return success when we resume from suspend and rf_kill is on. */
5044 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
5045 return 0;
5046
b481de9c
ZY
5047 for (i = 0; i < MAX_HW_RESTARTS; i++) {
5048
bb8c093b 5049 iwl3945_clear_stations_table(priv);
b481de9c
ZY
5050
5051 /* load bootstrap state machine,
5052 * load bootstrap program into processor's memory,
5053 * prepare to load the "initialize" uCode */
0164b9b4 5054		rc = priv->cfg->ops->lib->load_ucode(priv);
b481de9c
ZY
5055
5056 if (rc) {
15b1687c
WT
5057 IWL_ERR(priv,
5058 "Unable to set up bootstrap uCode: %d\n", rc);
b481de9c
ZY
5059 continue;
5060 }
5061
5062 /* start card; "initialize" will load runtime ucode */
bb8c093b 5063 iwl3945_nic_start(priv);
b481de9c 5064
b481de9c
ZY
5065 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
5066
5067 return 0;
5068 }
5069
5070 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 5071 __iwl3945_down(priv);
ebef2008 5072 clear_bit(STATUS_EXIT_PENDING, &priv->status);
b481de9c
ZY
5073
 5074	/* tried to restart and configure the device for as long as our
 5075	 * patience could withstand */
15b1687c 5076 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
b481de9c
ZY
5077 return -EIO;
5078}
5079
5080
5081/*****************************************************************************
5082 *
5083 * Workqueue callbacks
5084 *
5085 *****************************************************************************/
5086
bb8c093b 5087static void iwl3945_bg_init_alive_start(struct work_struct *data)
b481de9c 5088{
4a8a4322
AK
5089 struct iwl_priv *priv =
5090 container_of(data, struct iwl_priv, init_alive_start.work);
b481de9c
ZY
5091
5092 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5093 return;
5094
5095 mutex_lock(&priv->mutex);
bb8c093b 5096 iwl3945_init_alive_start(priv);
b481de9c
ZY
5097 mutex_unlock(&priv->mutex);
5098}
5099
bb8c093b 5100static void iwl3945_bg_alive_start(struct work_struct *data)
b481de9c 5101{
4a8a4322
AK
5102 struct iwl_priv *priv =
5103 container_of(data, struct iwl_priv, alive_start.work);
b481de9c
ZY
5104
5105 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5106 return;
5107
5108 mutex_lock(&priv->mutex);
bb8c093b 5109 iwl3945_alive_start(priv);
b481de9c
ZY
5110 mutex_unlock(&priv->mutex);
5111}
5112
2663516d
HS
5113static void iwl3945_rfkill_poll(struct work_struct *data)
5114{
5115 struct iwl_priv *priv =
5116 container_of(data, struct iwl_priv, rfkill_poll.work);
5117 unsigned long status = priv->status;
5118
5119 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5120 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5121 else
5122 set_bit(STATUS_RF_KILL_HW, &priv->status);
5123
5124 if (test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status))
5125 queue_work(priv->workqueue, &priv->rf_kill);
5126
5127 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
5128 round_jiffies_relative(2 * HZ));
5129
5130}
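/*
 * Editorial note: CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW reads back the
 * platform rfkill switch (set when the radio is not killed), so the
 * 2*HZ delayed work above keeps STATUS_RF_KILL_HW in sync while the
 * uCode is not yet running; once it is, iwl3945_mac_start() cancels
 * this poll because the uCode sends rfkill notifications itself.
 */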
5131
b481de9c
ZY
5132#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
5133
bb8c093b 5134static void iwl3945_bg_scan_check(struct work_struct *data)
b481de9c 5135{
4a8a4322
AK
5136 struct iwl_priv *priv =
5137 container_of(data, struct iwl_priv, scan_check.work);
b481de9c
ZY
5138
5139 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5140 return;
5141
5142 mutex_lock(&priv->mutex);
5143 if (test_bit(STATUS_SCANNING, &priv->status) ||
5144 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5145 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
5146 "Scan completion watchdog resetting adapter (%dms)\n",
5147 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
15e869d8 5148
b481de9c 5149 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 5150 iwl3945_send_scan_abort(priv);
b481de9c
ZY
5151 }
5152 mutex_unlock(&priv->mutex);
5153}
5154
bb8c093b 5155static void iwl3945_bg_request_scan(struct work_struct *data)
b481de9c 5156{
4a8a4322
AK
5157 struct iwl_priv *priv =
5158 container_of(data, struct iwl_priv, request_scan);
c2d79b48 5159 struct iwl_host_cmd cmd = {
b481de9c 5160 .id = REPLY_SCAN_CMD,
bb8c093b 5161 .len = sizeof(struct iwl3945_scan_cmd),
b481de9c
ZY
5162 .meta.flags = CMD_SIZE_HUGE,
5163 };
5164 int rc = 0;
bb8c093b 5165 struct iwl3945_scan_cmd *scan;
b481de9c 5166 struct ieee80211_conf *conf = NULL;
f9340520 5167 u8 n_probes = 2;
8318d78a 5168 enum ieee80211_band band;
9387b7ca 5169 DECLARE_SSID_BUF(ssid);
b481de9c
ZY
5170
5171 conf = ieee80211_get_hw_conf(priv->hw);
5172
5173 mutex_lock(&priv->mutex);
5174
775a6e27 5175 if (!iwl_is_ready(priv)) {
39aadf8c 5176 IWL_WARN(priv, "request scan called when driver not ready.\n");
b481de9c
ZY
5177 goto done;
5178 }
5179
a96a27f9 5180 /* Make sure the scan wasn't canceled before this queued work
b481de9c
ZY
5181 * was given the chance to run... */
5182 if (!test_bit(STATUS_SCANNING, &priv->status))
5183 goto done;
5184
5185 /* This should never be called or scheduled if there is currently
5186 * a scan active in the hardware. */
5187 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
5188 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
5189 "Ignoring second request.\n");
5190 rc = -EIO;
5191 goto done;
5192 }
5193
5194 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5195 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5196 goto done;
5197 }
5198
5199 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5200 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5201 goto done;
5202 }
5203
775a6e27 5204 if (iwl_is_rfkill(priv)) {
b481de9c
ZY
5205 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5206 goto done;
5207 }
5208
5209 if (!test_bit(STATUS_READY, &priv->status)) {
5210 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
5211 goto done;
5212 }
5213
5214 if (!priv->scan_bands) {
5215 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
5216 goto done;
5217 }
5218
805cee5b
WT
5219 if (!priv->scan) {
5220 priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
b481de9c 5221 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
805cee5b 5222 if (!priv->scan) {
b481de9c
ZY
5223 rc = -ENOMEM;
5224 goto done;
5225 }
5226 }
805cee5b 5227 scan = priv->scan;
bb8c093b 5228 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
5229
5230 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
5231 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
5232
bb8c093b 5233 if (iwl3945_is_associated(priv)) {
b481de9c
ZY
5234 u16 interval = 0;
5235 u32 extra;
5236 u32 suspend_time = 100;
5237 u32 scan_suspend_time = 100;
5238 unsigned long flags;
5239
5240 IWL_DEBUG_INFO("Scanning while associated...\n");
5241
5242 spin_lock_irqsave(&priv->lock, flags);
5243 interval = priv->beacon_int;
5244 spin_unlock_irqrestore(&priv->lock, flags);
5245
5246 scan->suspend_time = 0;
15e869d8 5247 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
5248 if (!interval)
5249 interval = suspend_time;
5250 /*
5251 * suspend time format:
5252 * 0-19: beacon interval in usec (time before exec.)
5253 * 20-23: 0
5254 * 24-31: number of beacons (suspend between channels)
5255 */
5256
5257 extra = (suspend_time / interval) << 24;
5258 scan_suspend_time = 0xFF0FFFFF &
5259 (extra | ((suspend_time % interval) * 1024));
5260
5261 scan->suspend_time = cpu_to_le32(scan_suspend_time);
5262 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
5263 scan_suspend_time, interval);
5264 }
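	/*
	 * Worked example of the encoding above (editorial, with assumed
	 * numbers): suspend_time = 100 and a beacon interval of 40 give
	 * extra = (100 / 40) << 24 = 0x02000000 (two full beacons between
	 * channels) plus (100 % 40) * 1024 = 0x5000 usec in bits 0-19,
	 * so scan_suspend_time = 0x02005000 after the 0xFF0FFFFF mask.
	 */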
5265
5266 /* We should add the ability for user to lock to PASSIVE ONLY */
5267 if (priv->one_direct_scan) {
5268 IWL_DEBUG_SCAN
5269 ("Kicking off one direct scan for '%s'\n",
9387b7ca
JL
5270 print_ssid(ssid, priv->direct_ssid,
5271 priv->direct_ssid_len));
b481de9c
ZY
5272 scan->direct_scan[0].id = WLAN_EID_SSID;
5273 scan->direct_scan[0].len = priv->direct_ssid_len;
5274 memcpy(scan->direct_scan[0].ssid,
5275 priv->direct_ssid, priv->direct_ssid_len);
f9340520 5276 n_probes++;
f9340520 5277 } else
786b4557 5278 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
b481de9c
ZY
5279
5280 /* We don't build a direct scan probe request; the uCode will do
5281 * that based on the direct_mask added to each channel entry */
5282 scan->tx_cmd.len = cpu_to_le16(
bb8c093b 5283 iwl3945_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
430cfe95 5284 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
b481de9c 5285 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3832ec9d 5286 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
b481de9c
ZY
5287 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
5288
5289 /* flags + rate selection */
5290
66b5004d 5291 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
b481de9c
ZY
5292 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
5293 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
5294 scan->good_CRC_th = 0;
8318d78a 5295 band = IEEE80211_BAND_2GHZ;
66b5004d 5296 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
b481de9c
ZY
5297 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
5298 scan->good_CRC_th = IWL_GOOD_CRC_TH;
8318d78a 5299 band = IEEE80211_BAND_5GHZ;
66b5004d 5300 } else {
39aadf8c 5301 IWL_WARN(priv, "Invalid scan band count\n");
b481de9c
ZY
5302 goto done;
5303 }
5304
5305 /* select Rx antennas */
5306 scan->flags |= iwl3945_get_antenna_flags(priv);
5307
05c914fe 5308 if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
b481de9c
ZY
5309 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
5310
f9340520
AK
5311 scan->channel_count =
5312 iwl3945_get_channels_for_scan(priv, band, 1, /* active */
5313 n_probes,
5314 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
b481de9c 5315
14b54336
RC
5316 if (scan->channel_count == 0) {
5317 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
5318 goto done;
5319 }
5320
b481de9c 5321 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 5322 scan->channel_count * sizeof(struct iwl3945_scan_channel);
b481de9c
ZY
5323 cmd.data = scan;
5324 scan->len = cpu_to_le16(cmd.len);
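	/*
	 * Editorial recap of the variable-length command built above: a
	 * fixed struct iwl3945_scan_cmd header, followed in scan->data[]
	 * by the probe request template (scan->tx_cmd.len bytes) and then
	 * scan->channel_count struct iwl3945_scan_channel entries; cmd.len
	 * and scan->len both account for the whole buffer.
	 */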
5325
5326 set_bit(STATUS_SCAN_HW, &priv->status);
518099a8 5327 rc = iwl_send_cmd_sync(priv, &cmd);
b481de9c
ZY
5328 if (rc)
5329 goto done;
5330
5331 queue_delayed_work(priv->workqueue, &priv->scan_check,
5332 IWL_SCAN_CHECK_WATCHDOG);
5333
5334 mutex_unlock(&priv->mutex);
5335 return;
5336
5337 done:
2420ebc1
MA
 5338	/* cannot perform scan; make sure we clear the scanning
 5339	 * bits from status so the next scan request can be performed.
 5340	 * If we don't clear the scanning status bits here, every
 5341	 * subsequent scan will fail
 5342	 */
5343 clear_bit(STATUS_SCAN_HW, &priv->status);
5344 clear_bit(STATUS_SCANNING, &priv->status);
5345
01ebd063 5346	/* inform mac80211 that the scan was aborted */
b481de9c
ZY
5347 queue_work(priv->workqueue, &priv->scan_completed);
5348 mutex_unlock(&priv->mutex);
5349}
5350
bb8c093b 5351static void iwl3945_bg_up(struct work_struct *data)
b481de9c 5352{
4a8a4322 5353 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
b481de9c
ZY
5354
5355 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5356 return;
5357
5358 mutex_lock(&priv->mutex);
bb8c093b 5359 __iwl3945_up(priv);
b481de9c 5360 mutex_unlock(&priv->mutex);
c0af96a6 5361 iwl_rfkill_set_hw_state(priv);
b481de9c
ZY
5362}
5363
bb8c093b 5364static void iwl3945_bg_restart(struct work_struct *data)
b481de9c 5365{
4a8a4322 5366 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
b481de9c
ZY
5367
5368 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5369 return;
5370
bb8c093b 5371 iwl3945_down(priv);
b481de9c
ZY
5372 queue_work(priv->workqueue, &priv->up);
5373}
5374
bb8c093b 5375static void iwl3945_bg_rx_replenish(struct work_struct *data)
b481de9c 5376{
4a8a4322
AK
5377 struct iwl_priv *priv =
5378 container_of(data, struct iwl_priv, rx_replenish);
b481de9c
ZY
5379
5380 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5381 return;
5382
5383 mutex_lock(&priv->mutex);
bb8c093b 5384 iwl3945_rx_replenish(priv);
b481de9c
ZY
5385 mutex_unlock(&priv->mutex);
5386}
5387
7878a5a4
MA
5388#define IWL_DELAY_NEXT_SCAN (HZ*2)
5389
4a8a4322 5390static void iwl3945_post_associate(struct iwl_priv *priv)
b481de9c 5391{
b481de9c
ZY
5392 int rc = 0;
5393 struct ieee80211_conf *conf = NULL;
5394
05c914fe 5395 if (priv->iw_mode == NL80211_IFTYPE_AP) {
15b1687c 5396 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
b481de9c
ZY
5397 return;
5398 }
5399
5400
e174961c 5401 IWL_DEBUG_ASSOC("Associated as %d to: %pM\n",
f2c7e521 5402 priv->assoc_id, priv->active39_rxon.bssid_addr);
b481de9c
ZY
5403
5404 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5405 return;
5406
322a9811 5407 if (!priv->vif || !priv->is_open)
6ef89d0a 5408 return;
322a9811 5409
af0053d6 5410 iwl_scan_cancel_timeout(priv, 200);
15e869d8 5411
b481de9c
ZY
5412 conf = ieee80211_get_hw_conf(priv->hw);
5413
f2c7e521 5414 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5415 iwl3945_commit_rxon(priv);
b481de9c 5416
28afaf91 5417 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
bb8c093b 5418 iwl3945_setup_rxon_timing(priv);
518099a8 5419 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
5420 sizeof(priv->rxon_timing), &priv->rxon_timing);
5421 if (rc)
39aadf8c 5422 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
b481de9c
ZY
5423 "Attempting to continue.\n");
5424
f2c7e521 5425 priv->staging39_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
b481de9c 5426
f2c7e521 5427 priv->staging39_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
b481de9c
ZY
5428
5429 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
5430 priv->assoc_id, priv->beacon_int);
5431
5432 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
f2c7e521 5433 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
b481de9c 5434 else
f2c7e521 5435 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
b481de9c 5436
f2c7e521 5437 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) {
b481de9c 5438 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
f2c7e521 5439 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
b481de9c 5440 else
f2c7e521 5441 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
b481de9c 5442
05c914fe 5443 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
f2c7e521 5444 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
b481de9c
ZY
5445
5446 }
5447
bb8c093b 5448 iwl3945_commit_rxon(priv);
b481de9c
ZY
5449
5450 switch (priv->iw_mode) {
05c914fe 5451 case NL80211_IFTYPE_STATION:
bb8c093b 5452 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
5453 break;
5454
05c914fe 5455 case NL80211_IFTYPE_ADHOC:
b481de9c 5456
ce546fd2 5457 priv->assoc_id = 1;
bb8c093b 5458 iwl3945_add_station(priv, priv->bssid, 0, 0);
b481de9c 5459 iwl3945_sync_sta(priv, IWL_STA_ID,
8318d78a 5460 (priv->band == IEEE80211_BAND_5GHZ) ?
b481de9c
ZY
5461 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
5462 CMD_ASYNC);
bb8c093b
CH
5463 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
5464 iwl3945_send_beacon_cmd(priv);
b481de9c
ZY
5465
5466 break;
5467
5468 default:
15b1687c 5469 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3ac7f146 5470 __func__, priv->iw_mode);
b481de9c
ZY
5471 break;
5472 }
5473
bb8c093b 5474 iwl3945_activate_qos(priv, 0);
292ae174 5475
7878a5a4
MA
5476 /* we have just associated, don't start scan too early */
5477 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
cd56d331
AK
5478}
5479
bb8c093b 5480static void iwl3945_bg_abort_scan(struct work_struct *work)
b481de9c 5481{
4a8a4322 5482 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
b481de9c 5483
775a6e27 5484 if (!iwl_is_ready(priv))
b481de9c
ZY
5485 return;
5486
5487 mutex_lock(&priv->mutex);
5488
5489 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 5490 iwl3945_send_scan_abort(priv);
b481de9c
ZY
5491
5492 mutex_unlock(&priv->mutex);
5493}
5494
e8975581 5495static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed);
76bb77e0 5496
bb8c093b 5497static void iwl3945_bg_scan_completed(struct work_struct *work)
b481de9c 5498{
4a8a4322
AK
5499 struct iwl_priv *priv =
5500 container_of(work, struct iwl_priv, scan_completed);
b481de9c
ZY
5501
5502 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
5503
5504 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5505 return;
5506
a0646470 5507 if (test_bit(STATUS_CONF_PENDING, &priv->status))
e8975581 5508 iwl3945_mac_config(priv->hw, 0);
76bb77e0 5509
b481de9c
ZY
5510 ieee80211_scan_completed(priv->hw);
5511
5512 /* Since setting the TXPOWER may have been deferred while
5513 * performing the scan, fire one off */
5514 mutex_lock(&priv->mutex);
bb8c093b 5515 iwl3945_hw_reg_send_txpower(priv);
b481de9c
ZY
5516 mutex_unlock(&priv->mutex);
5517}
5518
5519/*****************************************************************************
5520 *
5521 * mac80211 entry point functions
5522 *
5523 *****************************************************************************/
5524
5a66926a
ZY
5525#define UCODE_READY_TIMEOUT (2 * HZ)
5526
bb8c093b 5527static int iwl3945_mac_start(struct ieee80211_hw *hw)
b481de9c 5528{
4a8a4322 5529 struct iwl_priv *priv = hw->priv;
5a66926a 5530 int ret;
b481de9c
ZY
5531
5532 IWL_DEBUG_MAC80211("enter\n");
5533
5534 /* we should be verifying the device is ready to be opened */
5535 mutex_lock(&priv->mutex);
5536
f2c7e521 5537 memset(&priv->staging39_rxon, 0, sizeof(struct iwl3945_rxon_cmd));
5a66926a
ZY
5538 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
5539 * ucode filename and max sizes are card-specific. */
5540
5541 if (!priv->ucode_code.len) {
5542 ret = iwl3945_read_ucode(priv);
5543 if (ret) {
15b1687c 5544 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
5a66926a
ZY
5545 mutex_unlock(&priv->mutex);
5546 goto out_release_irq;
5547 }
5548 }
b481de9c 5549
e655b9f0 5550 ret = __iwl3945_up(priv);
b481de9c
ZY
5551
5552 mutex_unlock(&priv->mutex);
5a66926a 5553
c0af96a6 5554 iwl_rfkill_set_hw_state(priv);
80fcc9e2 5555
e655b9f0
ZY
5556 if (ret)
5557 goto out_release_irq;
5558
5559 IWL_DEBUG_INFO("Start UP work.\n");
5560
5561 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
5562 return 0;
5563
5a66926a
ZY
5564 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
5565 * mac80211 will not be run successfully. */
5566 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
5567 test_bit(STATUS_READY, &priv->status),
5568 UCODE_READY_TIMEOUT);
5569 if (!ret) {
5570 if (!test_bit(STATUS_READY, &priv->status)) {
15b1687c
WT
5571 IWL_ERR(priv,
5572 "Wait for START_ALIVE timeout after %dms.\n",
5573 jiffies_to_msecs(UCODE_READY_TIMEOUT));
5a66926a
ZY
5574 ret = -ETIMEDOUT;
5575 goto out_release_irq;
5576 }
5577 }
5578
2663516d
HS
5579 /* ucode is running and will send rfkill notifications,
5580 * no need to poll the killswitch state anymore */
5581 cancel_delayed_work(&priv->rfkill_poll);
5582
e655b9f0 5583 priv->is_open = 1;
b481de9c
ZY
5584 IWL_DEBUG_MAC80211("leave\n");
5585 return 0;
5a66926a
ZY
5586
5587out_release_irq:
e655b9f0
ZY
5588 priv->is_open = 0;
5589 IWL_DEBUG_MAC80211("leave - failed\n");
5a66926a 5590 return ret;
b481de9c
ZY
5591}
5592
bb8c093b 5593static void iwl3945_mac_stop(struct ieee80211_hw *hw)
b481de9c 5594{
4a8a4322 5595 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
5596
5597 IWL_DEBUG_MAC80211("enter\n");
6ef89d0a 5598
e655b9f0
ZY
5599 if (!priv->is_open) {
5600 IWL_DEBUG_MAC80211("leave - skip\n");
5601 return;
5602 }
5603
b481de9c 5604 priv->is_open = 0;
5a66926a 5605
775a6e27 5606 if (iwl_is_ready_rf(priv)) {
e655b9f0
ZY
5607 /* stop mac, cancel any scan request and clear
5608 * RXON_FILTER_ASSOC_MSK BIT
5609 */
5a66926a 5610 mutex_lock(&priv->mutex);
af0053d6 5611 iwl_scan_cancel_timeout(priv, 100);
fde3571f 5612 mutex_unlock(&priv->mutex);
fde3571f
MA
5613 }
5614
5a66926a
ZY
5615 iwl3945_down(priv);
5616
5617 flush_workqueue(priv->workqueue);
2663516d
HS
5618
5619 /* start polling the killswitch state again */
5620 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
5621 round_jiffies_relative(2 * HZ));
6ef89d0a 5622
b481de9c 5623 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
5624}
5625
e039fa4a 5626static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
b481de9c 5627{
4a8a4322 5628 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
5629
5630 IWL_DEBUG_MAC80211("enter\n");
5631
b481de9c 5632 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
e039fa4a 5633 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
b481de9c 5634
e039fa4a 5635 if (iwl3945_tx_skb(priv, skb))
b481de9c
ZY
5636 dev_kfree_skb_any(skb);
5637
5638 IWL_DEBUG_MAC80211("leave\n");
637f8837 5639 return NETDEV_TX_OK;
b481de9c
ZY
5640}
5641
bb8c093b 5642static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
5643 struct ieee80211_if_init_conf *conf)
5644{
4a8a4322 5645 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
5646 unsigned long flags;
5647
32bfd35d 5648 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
b481de9c 5649
32bfd35d
JB
5650 if (priv->vif) {
5651 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
864792e3 5652 return -EOPNOTSUPP;
b481de9c
ZY
5653 }
5654
5655 spin_lock_irqsave(&priv->lock, flags);
32bfd35d 5656 priv->vif = conf->vif;
60294de3 5657 priv->iw_mode = conf->type;
b481de9c
ZY
5658
5659 spin_unlock_irqrestore(&priv->lock, flags);
5660
5661 mutex_lock(&priv->mutex);
864792e3
TW
5662
5663 if (conf->mac_addr) {
e174961c 5664 IWL_DEBUG_MAC80211("Set: %pM\n", conf->mac_addr);
864792e3
TW
5665 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
5666 }
5667
775a6e27 5668 if (iwl_is_ready(priv))
5a66926a 5669 iwl3945_set_mode(priv, conf->type);
b481de9c 5670
b481de9c
ZY
5671 mutex_unlock(&priv->mutex);
5672
5a66926a 5673 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
5674 return 0;
5675}
5676
5677/**
bb8c093b 5678 * iwl3945_mac_config - mac80211 config callback
b481de9c
ZY
5679 *
5680 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
5681 * be set inappropriately and the driver currently sets the hardware up to
5682 * use it whenever needed.
5683 */
e8975581 5684static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
b481de9c 5685{
4a8a4322 5686 struct iwl_priv *priv = hw->priv;
d20b3c65 5687 const struct iwl_channel_info *ch_info;
e8975581 5688 struct ieee80211_conf *conf = &hw->conf;
b481de9c 5689 unsigned long flags;
76bb77e0 5690 int ret = 0;
b481de9c
ZY
5691
5692 mutex_lock(&priv->mutex);
8318d78a 5693 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
b481de9c 5694
775a6e27 5695 if (!iwl_is_ready(priv)) {
b481de9c 5696 IWL_DEBUG_MAC80211("leave - not ready\n");
76bb77e0
ZY
5697 ret = -EIO;
5698 goto out;
b481de9c
ZY
5699 }
5700
df878d8f 5701 if (unlikely(!iwl3945_mod_params.disable_hw_scan &&
b481de9c 5702 test_bit(STATUS_SCANNING, &priv->status))) {
a0646470
ZY
5703 IWL_DEBUG_MAC80211("leave - scanning\n");
5704 set_bit(STATUS_CONF_PENDING, &priv->status);
b481de9c 5705 mutex_unlock(&priv->mutex);
a0646470 5706 return 0;
b481de9c
ZY
5707 }
5708
5709 spin_lock_irqsave(&priv->lock, flags);
5710
8318d78a
JB
5711 ch_info = iwl3945_get_channel_info(priv, conf->channel->band,
5712 conf->channel->hw_value);
b481de9c 5713 if (!is_channel_valid(ch_info)) {
66b5004d 5714 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n",
8318d78a 5715 conf->channel->hw_value, conf->channel->band);
b481de9c
ZY
5716 IWL_DEBUG_MAC80211("leave - invalid channel\n");
5717 spin_unlock_irqrestore(&priv->lock, flags);
76bb77e0
ZY
5718 ret = -EINVAL;
5719 goto out;
b481de9c
ZY
5720 }
5721
8318d78a 5722 iwl3945_set_rxon_channel(priv, conf->channel->band, conf->channel->hw_value);
b481de9c 5723
8318d78a 5724 iwl3945_set_flags_for_phymode(priv, conf->channel->band);
b481de9c
ZY
5725
5726 /* The list of supported rates and rate mask can be different
5727 * for each phymode; since the phymode may have changed, reset
5728 * the rate mask to what mac80211 lists */
bb8c093b 5729 iwl3945_set_rate(priv);
b481de9c
ZY
5730
5731 spin_unlock_irqrestore(&priv->lock, flags);
5732
5733#ifdef IEEE80211_CONF_CHANNEL_SWITCH
5734 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 5735 iwl3945_hw_channel_switch(priv, conf->channel);
76bb77e0 5736 goto out;
b481de9c
ZY
5737 }
5738#endif
5739
bb8c093b 5740 iwl3945_radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
5741
5742 if (!conf->radio_enabled) {
5743 IWL_DEBUG_MAC80211("leave - radio disabled\n");
76bb77e0 5744 goto out;
b481de9c
ZY
5745 }
5746
775a6e27 5747 if (iwl_is_rfkill(priv)) {
b481de9c 5748 IWL_DEBUG_MAC80211("leave - RF kill\n");
76bb77e0
ZY
5749 ret = -EIO;
5750 goto out;
b481de9c
ZY
5751 }
5752
bb8c093b 5753 iwl3945_set_rate(priv);
b481de9c 5754
f2c7e521
AK
5755 if (memcmp(&priv->active39_rxon,
5756 &priv->staging39_rxon, sizeof(priv->staging39_rxon)))
bb8c093b 5757 iwl3945_commit_rxon(priv);
b481de9c
ZY
5758 else
5759 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
5760
5761 IWL_DEBUG_MAC80211("leave\n");
5762
76bb77e0 5763out:
a0646470 5764 clear_bit(STATUS_CONF_PENDING, &priv->status);
b481de9c 5765 mutex_unlock(&priv->mutex);
76bb77e0 5766 return ret;
b481de9c
ZY
5767}
5768
4a8a4322 5769static void iwl3945_config_ap(struct iwl_priv *priv)
b481de9c
ZY
5770{
5771 int rc = 0;
5772
d986bcd1 5773 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
b481de9c
ZY
5774 return;
5775
5776 /* The following should be done only at AP bring up */
5d1e2325 5777 if (!(iwl3945_is_associated(priv))) {
b481de9c
ZY
5778
5779 /* RXON - unassoc (to set timing command) */
f2c7e521 5780 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5781 iwl3945_commit_rxon(priv);
b481de9c
ZY
5782
5783 /* RXON Timing */
28afaf91 5784 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
bb8c093b 5785 iwl3945_setup_rxon_timing(priv);
518099a8
SO
5786 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
5787 sizeof(priv->rxon_timing),
5788 &priv->rxon_timing);
b481de9c 5789 if (rc)
39aadf8c 5790 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
b481de9c
ZY
5791 "Attempting to continue.\n");
5792
5793 /* FIXME: what should be the assoc_id for AP? */
f2c7e521 5794 priv->staging39_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
b481de9c 5795 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
f2c7e521 5796 priv->staging39_rxon.flags |=
b481de9c
ZY
5797 RXON_FLG_SHORT_PREAMBLE_MSK;
5798 else
f2c7e521 5799 priv->staging39_rxon.flags &=
b481de9c
ZY
5800 ~RXON_FLG_SHORT_PREAMBLE_MSK;
5801
f2c7e521 5802 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) {
b481de9c
ZY
5803 if (priv->assoc_capability &
5804 WLAN_CAPABILITY_SHORT_SLOT_TIME)
f2c7e521 5805 priv->staging39_rxon.flags |=
b481de9c
ZY
5806 RXON_FLG_SHORT_SLOT_MSK;
5807 else
f2c7e521 5808 priv->staging39_rxon.flags &=
b481de9c
ZY
5809 ~RXON_FLG_SHORT_SLOT_MSK;
5810
05c914fe 5811 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
f2c7e521 5812 priv->staging39_rxon.flags &=
b481de9c
ZY
5813 ~RXON_FLG_SHORT_SLOT_MSK;
5814 }
5815 /* restore RXON assoc */
f2c7e521 5816 priv->staging39_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 5817 iwl3945_commit_rxon(priv);
b5323d36 5818 iwl3945_add_station(priv, iwl_bcast_addr, 0, 0);
556f8db7 5819 }
bb8c093b 5820 iwl3945_send_beacon_cmd(priv);
b481de9c
ZY
5821
5822 /* FIXME - we need to add code here to detect a totally new
5823 * configuration, reset the AP, unassoc, rxon timing, assoc,
5824 * clear sta table, add BCAST sta... */
5825}
5826
32bfd35d
JB
5827static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
5828 struct ieee80211_vif *vif,
4a8a4322 5829 struct ieee80211_if_conf *conf)
b481de9c 5830{
4a8a4322 5831 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
5832 int rc;
5833
5834 if (conf == NULL)
5835 return -EIO;
5836
b716bb91
EG
5837 if (priv->vif != vif) {
5838 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
b716bb91
EG
5839 return 0;
5840 }
5841
9d139c81 5842 /* handle this temporarily here */
05c914fe 5843 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
9d139c81
JB
5844 conf->changed & IEEE80211_IFCC_BEACON) {
5845 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
5846 if (!beacon)
5847 return -ENOMEM;
9bdf5eca 5848 mutex_lock(&priv->mutex);
9d139c81 5849 rc = iwl3945_mac_beacon_update(hw, beacon);
9bdf5eca 5850 mutex_unlock(&priv->mutex);
9d139c81
JB
5851 if (rc)
5852 return rc;
5853 }
5854
775a6e27 5855 if (!iwl_is_alive(priv))
5a66926a
ZY
5856 return -EAGAIN;
5857
b481de9c
ZY
5858 mutex_lock(&priv->mutex);
5859
b481de9c 5860 if (conf->bssid)
e174961c 5861 IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid);
b481de9c 5862
4150c572
JB
5863/*
5864 * very dubious code was here; the probe filtering flag is never set:
5865 *
b481de9c
ZY
5866 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
5867 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572 5868 */
b481de9c 5869
05c914fe 5870 if (priv->iw_mode == NL80211_IFTYPE_AP) {
b481de9c
ZY
5871 if (!conf->bssid) {
5872 conf->bssid = priv->mac_addr;
5873 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
e174961c
JB
5874 IWL_DEBUG_MAC80211("bssid was set to: %pM\n",
5875 conf->bssid);
b481de9c
ZY
5876 }
5877 if (priv->ibss_beacon)
5878 dev_kfree_skb(priv->ibss_beacon);
5879
9d139c81 5880 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
b481de9c
ZY
5881 }
5882
775a6e27 5883 if (iwl_is_rfkill(priv))
fde3571f
MA
5884 goto done;
5885
b481de9c
ZY
5886 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
5887 !is_multicast_ether_addr(conf->bssid)) {
5888 /* If there is currently a HW scan going on in the background
5889 * then we need to cancel it else the RXON below will fail. */
af0053d6 5890 if (iwl_scan_cancel_timeout(priv, 100)) {
39aadf8c 5891 IWL_WARN(priv, "Aborted scan still in progress "
b481de9c
ZY
5892 "after 100ms\n");
5893 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
5894 mutex_unlock(&priv->mutex);
5895 return -EAGAIN;
5896 }
f2c7e521 5897 memcpy(priv->staging39_rxon.bssid_addr, conf->bssid, ETH_ALEN);
b481de9c
ZY
5898
5899 /* TODO: Audit driver for usage of these members and see
5900 * if mac80211 deprecates them (priv->bssid looks like it
5901 * shouldn't be there, but I haven't scanned the IBSS code
5902 * to verify) - jpk */
5903 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
5904
05c914fe 5905 if (priv->iw_mode == NL80211_IFTYPE_AP)
bb8c093b 5906 iwl3945_config_ap(priv);
b481de9c 5907 else {
bb8c093b 5908 rc = iwl3945_commit_rxon(priv);
05c914fe 5909 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
bb8c093b 5910 iwl3945_add_station(priv,
f2c7e521 5911 priv->active39_rxon.bssid_addr, 1, 0);
b481de9c
ZY
5912 }
5913
5914 } else {
af0053d6 5915 iwl_scan_cancel_timeout(priv, 100);
f2c7e521 5916 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5917 iwl3945_commit_rxon(priv);
b481de9c
ZY
5918 }
5919
fde3571f 5920 done:
b481de9c
ZY
5921 IWL_DEBUG_MAC80211("leave\n");
5922 mutex_unlock(&priv->mutex);
5923
5924 return 0;
5925}
5926
bb8c093b 5927static void iwl3945_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
5928 unsigned int changed_flags,
5929 unsigned int *total_flags,
5930 int mc_count, struct dev_addr_list *mc_list)
5931{
4a8a4322 5932 struct iwl_priv *priv = hw->priv;
f2c7e521 5933 __le32 *filter_flags = &priv->staging39_rxon.filter_flags;
25b3f57c 5934
352bc8de
ZY
5935 IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
5936 changed_flags, *total_flags);
5937
5938 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
5939 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
5940 *filter_flags |= RXON_FILTER_PROMISC_MSK;
5941 else
5942 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
5943 }
5944 if (changed_flags & FIF_ALLMULTI) {
5945 if (*total_flags & FIF_ALLMULTI)
5946 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
5947 else
5948 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
5949 }
5950 if (changed_flags & FIF_CONTROL) {
5951 if (*total_flags & FIF_CONTROL)
5952 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
5953 else
5954 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
5ec03976 5955 }
352bc8de
ZY
5956 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
5957 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
5958 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
5959 else
5960 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
5961 }
5962
5963 /* We avoid iwl_commit_rxon here to commit the new filter flags
5964 * since mac80211 will call ieee80211_hw_config immediately.
5965 * (mc_list is not supported at this time). Otherwise, we need to
5966 * queue a background iwl_commit_rxon work.
5967 */
5968
5969 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
25b3f57c 5970 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
4150c572
JB
5971}
5972
bb8c093b 5973static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
5974 struct ieee80211_if_init_conf *conf)
5975{
4a8a4322 5976 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
5977
5978 IWL_DEBUG_MAC80211("enter\n");
5979
5980 mutex_lock(&priv->mutex);
6ef89d0a 5981
775a6e27 5982 if (iwl_is_ready_rf(priv)) {
af0053d6 5983 iwl_scan_cancel_timeout(priv, 100);
f2c7e521 5984 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
fde3571f
MA
5985 iwl3945_commit_rxon(priv);
5986 }
32bfd35d
JB
5987 if (priv->vif == conf->vif) {
5988 priv->vif = NULL;
b481de9c 5989 memset(priv->bssid, 0, ETH_ALEN);
b481de9c
ZY
5990 }
5991 mutex_unlock(&priv->mutex);
5992
5993 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
5994}
5995
cd56d331
AK
5996#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
5997
5998static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
5999 struct ieee80211_vif *vif,
6000 struct ieee80211_bss_conf *bss_conf,
6001 u32 changes)
6002{
4a8a4322 6003 struct iwl_priv *priv = hw->priv;
cd56d331
AK
6004
6005 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
6006
6007 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
6008 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
6009 bss_conf->use_short_preamble);
6010 if (bss_conf->use_short_preamble)
f2c7e521 6011 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
cd56d331 6012 else
f2c7e521 6013 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
cd56d331
AK
6014 }
6015
6016 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
6017 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
6018 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
f2c7e521 6019 priv->staging39_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
cd56d331 6020 else
f2c7e521 6021 priv->staging39_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
cd56d331
AK
6022 }
6023
6024 if (changes & BSS_CHANGED_ASSOC) {
6025 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
6026 /* This should never happen as this function should
6027 * never be called from interrupt context. */
6028 if (WARN_ON_ONCE(in_interrupt()))
6029 return;
6030 if (bss_conf->assoc) {
6031 priv->assoc_id = bss_conf->aid;
6032 priv->beacon_int = bss_conf->beacon_int;
28afaf91 6033 priv->timestamp = bss_conf->timestamp;
cd56d331 6034 priv->assoc_capability = bss_conf->assoc_capability;
3dae0c42 6035 priv->power_data.dtim_period = bss_conf->dtim_period;
cd56d331
AK
6036 priv->next_scan_jiffies = jiffies +
6037 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
6038 mutex_lock(&priv->mutex);
6039 iwl3945_post_associate(priv);
6040 mutex_unlock(&priv->mutex);
6041 } else {
6042 priv->assoc_id = 0;
6043 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
6044 }
6045 } else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) {
6046 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
6047 iwl3945_send_rxon_assoc(priv);
6048 }
6049
6050}
6051
bb8c093b 6052static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
6053{
6054 int rc = 0;
6055 unsigned long flags;
4a8a4322 6056 struct iwl_priv *priv = hw->priv;
9387b7ca 6057 DECLARE_SSID_BUF(ssid_buf);
b481de9c
ZY
6058
6059 IWL_DEBUG_MAC80211("enter\n");
6060
15e869d8 6061 mutex_lock(&priv->mutex);
b481de9c
ZY
6062 spin_lock_irqsave(&priv->lock, flags);
6063
775a6e27 6064 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
6065 rc = -EIO;
6066 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
6067 goto out_unlock;
6068 }
6069
7878a5a4
MA
 6070	/* we don't schedule a scan within the next_scan_jiffies period */
6071 if (priv->next_scan_jiffies &&
6072 time_after(priv->next_scan_jiffies, jiffies)) {
6073 rc = -EAGAIN;
6074 goto out_unlock;
6075 }
15dbf1b7
BM
 6076	/* if we just finished a scan, ask for a delay before a broadcast scan */
6077 if ((len == 0) && priv->last_scan_jiffies &&
6078 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
6079 jiffies)) {
b481de9c
ZY
6080 rc = -EAGAIN;
6081 goto out_unlock;
6082 }
6083 if (len) {
7878a5a4 6084 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
9387b7ca 6085 print_ssid(ssid_buf, ssid, len), (int)len);
b481de9c
ZY
6086
6087 priv->one_direct_scan = 1;
6088 priv->direct_ssid_len = (u8)
6089 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
6090 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
6ef89d0a
MA
6091 } else
6092 priv->one_direct_scan = 0;
b481de9c 6093
bb8c093b 6094 rc = iwl3945_scan_initiate(priv);
b481de9c
ZY
6095
6096 IWL_DEBUG_MAC80211("leave\n");
6097
6098out_unlock:
6099 spin_unlock_irqrestore(&priv->lock, flags);
15e869d8 6100 mutex_unlock(&priv->mutex);
b481de9c
ZY
6101
6102 return rc;
6103}
6104
bb8c093b 6105static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
dc822b5d
JB
6106 struct ieee80211_vif *vif,
6107 struct ieee80211_sta *sta,
6108 struct ieee80211_key_conf *key)
b481de9c 6109{
4a8a4322 6110 struct iwl_priv *priv = hw->priv;
dc822b5d 6111 const u8 *addr;
42986796 6112 int ret;
b481de9c
ZY
6113 u8 sta_id;
6114
6115 IWL_DEBUG_MAC80211("enter\n");
6116
df878d8f 6117 if (iwl3945_mod_params.sw_crypto) {
b481de9c
ZY
6118 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
6119 return -EOPNOTSUPP;
6120 }
6121
42986796 6122 addr = sta ? sta->addr : iwl_bcast_addr;
bb8c093b 6123 sta_id = iwl3945_hw_find_station(priv, addr);
b481de9c 6124 if (sta_id == IWL_INVALID_STATION) {
e174961c
JB
6125 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n",
6126 addr);
b481de9c
ZY
6127 return -EINVAL;
6128 }
6129
6130 mutex_lock(&priv->mutex);
6131
af0053d6 6132 iwl_scan_cancel_timeout(priv, 100);
15e869d8 6133
b481de9c
ZY
6134 switch (cmd) {
6135 case SET_KEY:
42986796
WT
6136 ret = iwl3945_update_sta_key_info(priv, key, sta_id);
6137 if (!ret) {
bb8c093b
CH
6138 iwl3945_set_rxon_hwcrypto(priv, 1);
6139 iwl3945_commit_rxon(priv);
b481de9c
ZY
6140 key->hw_key_idx = sta_id;
6141 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
6142 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
6143 }
6144 break;
6145 case DISABLE_KEY:
42986796
WT
6146 ret = iwl3945_clear_sta_key_info(priv, sta_id);
6147 if (!ret) {
bb8c093b
CH
6148 iwl3945_set_rxon_hwcrypto(priv, 0);
6149 iwl3945_commit_rxon(priv);
b481de9c
ZY
6150 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
6151 }
6152 break;
6153 default:
42986796 6154 ret = -EINVAL;
b481de9c
ZY
6155 }
6156
6157 IWL_DEBUG_MAC80211("leave\n");
6158 mutex_unlock(&priv->mutex);
6159
42986796 6160 return ret;
b481de9c
ZY
6161}
6162
e100bb64 6163static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
b481de9c
ZY
6164 const struct ieee80211_tx_queue_params *params)
6165{
4a8a4322 6166 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6167 unsigned long flags;
6168 int q;
b481de9c
ZY
6169
6170 IWL_DEBUG_MAC80211("enter\n");
6171
775a6e27 6172 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
6173 IWL_DEBUG_MAC80211("leave - RF not ready\n");
6174 return -EIO;
6175 }
6176
6177 if (queue >= AC_NUM) {
6178 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
6179 return 0;
6180 }
6181
b481de9c
ZY
6182 q = AC_NUM - 1 - queue;
6183
6184 spin_lock_irqsave(&priv->lock, flags);
6185
6186 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
6187 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
6188 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
6189 priv->qos_data.def_qos_parm.ac[q].edca_txop =
3330d7be 6190 cpu_to_le16((params->txop * 32));
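	/*
	 * Editorial note: mac80211 hands us params->txop in the 802.11
	 * unit of 32 usec, so the multiplication by 32 above presumably
	 * yields the value in microseconds expected by the uCode QoS
	 * command.
	 */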
b481de9c
ZY
6191
6192 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
6193 priv->qos_data.qos_active = 1;
6194
6195 spin_unlock_irqrestore(&priv->lock, flags);
6196
6197 mutex_lock(&priv->mutex);
05c914fe 6198 if (priv->iw_mode == NL80211_IFTYPE_AP)
bb8c093b
CH
6199 iwl3945_activate_qos(priv, 1);
6200 else if (priv->assoc_id && iwl3945_is_associated(priv))
6201 iwl3945_activate_qos(priv, 0);
b481de9c
ZY
6202
6203 mutex_unlock(&priv->mutex);
6204
b481de9c
ZY
6205 IWL_DEBUG_MAC80211("leave\n");
6206 return 0;
6207}
6208
bb8c093b 6209static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
6210 struct ieee80211_tx_queue_stats *stats)
6211{
4a8a4322 6212 struct iwl_priv *priv = hw->priv;
b481de9c 6213 int i, avail;
188cf6c7 6214 struct iwl_tx_queue *txq;
d20b3c65 6215 struct iwl_queue *q;
b481de9c
ZY
6216 unsigned long flags;
6217
6218 IWL_DEBUG_MAC80211("enter\n");
6219
775a6e27 6220 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
6221 IWL_DEBUG_MAC80211("leave - RF not ready\n");
6222 return -EIO;
6223 }
6224
6225 spin_lock_irqsave(&priv->lock, flags);
6226
6227 for (i = 0; i < AC_NUM; i++) {
188cf6c7 6228 txq = &priv->txq[i];
b481de9c 6229 q = &txq->q;
d20b3c65 6230 avail = iwl_queue_space(q);
b481de9c 6231
57ffc589
JB
6232 stats[i].len = q->n_window - avail;
6233 stats[i].limit = q->n_window - q->high_mark;
6234 stats[i].count = q->n_window;
b481de9c
ZY
6235
6236 }
6237 spin_unlock_irqrestore(&priv->lock, flags);
6238
6239 IWL_DEBUG_MAC80211("leave\n");
6240
6241 return 0;
6242}
6243
bb8c093b 6244static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 6245{
4a8a4322 6246 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6247 unsigned long flags;
6248
6249 mutex_lock(&priv->mutex);
6250 IWL_DEBUG_MAC80211("enter\n");
6251
775a6e27 6252 iwl_reset_qos(priv);
292ae174 6253
b481de9c
ZY
6254 spin_lock_irqsave(&priv->lock, flags);
6255 priv->assoc_id = 0;
6256 priv->assoc_capability = 0;
b481de9c
ZY
6257
6258 /* new association get rid of ibss beacon skb */
6259 if (priv->ibss_beacon)
6260 dev_kfree_skb(priv->ibss_beacon);
6261
6262 priv->ibss_beacon = NULL;
6263
6264 priv->beacon_int = priv->hw->conf.beacon_int;
28afaf91 6265 priv->timestamp = 0;
05c914fe 6266 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
b481de9c
ZY
6267 priv->beacon_int = 0;
6268
6269 spin_unlock_irqrestore(&priv->lock, flags);
6270
775a6e27 6271 if (!iwl_is_ready_rf(priv)) {
fde3571f
MA
6272 IWL_DEBUG_MAC80211("leave - not ready\n");
6273 mutex_unlock(&priv->mutex);
6274 return;
6275 }
6276
15e869d8
MA
 6277	/* we are restarting the association process,
 6278	 * so clear the RXON_FILTER_ASSOC_MSK bit
6279 */
05c914fe 6280 if (priv->iw_mode != NL80211_IFTYPE_AP) {
af0053d6 6281 iwl_scan_cancel_timeout(priv, 100);
f2c7e521 6282 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 6283 iwl3945_commit_rxon(priv);
15e869d8
MA
6284 }
6285
b481de9c 6286 /* Per mac80211.h: This is only used in IBSS mode... */
05c914fe 6287 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
15e869d8 6288
b481de9c
ZY
6289 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
6290 mutex_unlock(&priv->mutex);
6291 return;
b481de9c
ZY
6292 }
6293
bb8c093b 6294 iwl3945_set_rate(priv);
b481de9c
ZY
6295
6296 mutex_unlock(&priv->mutex);
6297
6298 IWL_DEBUG_MAC80211("leave\n");
6299
6300}
6301
e039fa4a 6302static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
b481de9c 6303{
4a8a4322 6304 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6305 unsigned long flags;
6306
b481de9c
ZY
6307 IWL_DEBUG_MAC80211("enter\n");
6308
775a6e27 6309 if (!iwl_is_ready_rf(priv)) {
b481de9c 6310 IWL_DEBUG_MAC80211("leave - RF not ready\n");
b481de9c
ZY
6311 return -EIO;
6312 }
6313
05c914fe 6314 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
b481de9c 6315 IWL_DEBUG_MAC80211("leave - not IBSS\n");
b481de9c
ZY
6316 return -EIO;
6317 }
6318
6319 spin_lock_irqsave(&priv->lock, flags);
6320
6321 if (priv->ibss_beacon)
6322 dev_kfree_skb(priv->ibss_beacon);
6323
6324 priv->ibss_beacon = skb;
6325
6326 priv->assoc_id = 0;
6327
6328 IWL_DEBUG_MAC80211("leave\n");
6329 spin_unlock_irqrestore(&priv->lock, flags);
6330
775a6e27 6331 iwl_reset_qos(priv);
b481de9c 6332
dc4b1e7d 6333 iwl3945_post_associate(priv);
b481de9c 6334
b481de9c
ZY
6335
6336 return 0;
6337}
6338
6339/*****************************************************************************
6340 *
6341 * sysfs attributes
6342 *
6343 *****************************************************************************/
6344
c8b0e6e1 6345#ifdef CONFIG_IWL3945_DEBUG
b481de9c
ZY
6346
6347/*
6348 * The following adds a new attribute to the sysfs representation
6349 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
6350 * used for controlling the debug level.
6351 *
6352 * See the level definitions in iwl for details.
6353 */
40b8ec0b
SO
6354static ssize_t show_debug_level(struct device *d,
6355 struct device_attribute *attr, char *buf)
b481de9c 6356{
4a8a4322 6357 struct iwl_priv *priv = d->driver_data;
40b8ec0b
SO
6358
6359 return sprintf(buf, "0x%08X\n", priv->debug_level);
b481de9c 6360}
40b8ec0b
SO
6361static ssize_t store_debug_level(struct device *d,
6362 struct device_attribute *attr,
b481de9c
ZY
6363 const char *buf, size_t count)
6364{
4a8a4322 6365 struct iwl_priv *priv = d->driver_data;
40b8ec0b
SO
6366 unsigned long val;
6367 int ret;
b481de9c 6368
40b8ec0b
SO
6369 ret = strict_strtoul(buf, 0, &val);
6370 if (ret)
978785a3 6371 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
b481de9c 6372 else
40b8ec0b 6373 priv->debug_level = val;
b481de9c
ZY
6374
6375 return strnlen(buf, count);
6376}
6377
40b8ec0b
SO
6378static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
6379 show_debug_level, store_debug_level);
b481de9c 6380
c8b0e6e1 6381#endif /* CONFIG_IWL3945_DEBUG */
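/*
 * Editor's note: the attributes in this section are ordinary sysfs files,
 * so they can be exercised from user space with plain file I/O.  The sketch
 * below (not driver code) reads and then rewrites the debug_level attribute;
 * the device path is only an example and depends on where the adapter is
 * bound, and 0x43fff is just an arbitrary example mask.  The same pattern
 * works for the temperature, tx_power, flags and filter_flags files defined
 * further down.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/drivers/iwl3945/0000:03:00.0/debug_level";
	unsigned int level;
	FILE *f;

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%x", &level) == 1)	/* show_debug_level() prints 0x%08X */
		printf("current debug_level: 0x%08x\n", level);
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "0x43fff\n");	/* store_debug_level() accepts hex or decimal */
	fclose(f);
	return 0;
}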
b481de9c 6382
b481de9c
ZY
6383static ssize_t show_temperature(struct device *d,
6384 struct device_attribute *attr, char *buf)
6385{
4a8a4322 6386 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c 6387
775a6e27 6388 if (!iwl_is_alive(priv))
b481de9c
ZY
6389 return -EAGAIN;
6390
bb8c093b 6391 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
b481de9c
ZY
6392}
6393
6394static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
6395
b481de9c
ZY
6396static ssize_t show_tx_power(struct device *d,
6397 struct device_attribute *attr, char *buf)
6398{
4a8a4322 6399 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
62ea9c5b 6400 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
b481de9c
ZY
6401}
6402
6403static ssize_t store_tx_power(struct device *d,
6404 struct device_attribute *attr,
6405 const char *buf, size_t count)
6406{
4a8a4322 6407 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6408 char *p = (char *)buf;
6409 u32 val;
6410
6411 val = simple_strtoul(p, &p, 10);
6412 if (p == buf)
978785a3 6413 	IWL_INFO(priv, "%s is not in decimal form.\n", buf);
b481de9c 6414 else
bb8c093b 6415 iwl3945_hw_reg_set_txpower(priv, val);
b481de9c
ZY
6416
6417 return count;
6418}
6419
6420static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
6421
6422static ssize_t show_flags(struct device *d,
6423 struct device_attribute *attr, char *buf)
6424{
4a8a4322 6425 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c 6426
f2c7e521 6427 return sprintf(buf, "0x%04X\n", priv->active39_rxon.flags);
b481de9c
ZY
6428}
6429
6430static ssize_t store_flags(struct device *d,
6431 struct device_attribute *attr,
6432 const char *buf, size_t count)
6433{
4a8a4322 6434 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6435 u32 flags = simple_strtoul(buf, NULL, 0);
6436
6437 mutex_lock(&priv->mutex);
f2c7e521 6438 if (le32_to_cpu(priv->staging39_rxon.flags) != flags) {
b481de9c 6439 /* Cancel any currently running scans... */
af0053d6 6440 if (iwl_scan_cancel_timeout(priv, 100))
39aadf8c 6441 IWL_WARN(priv, "Could not cancel scan.\n");
b481de9c
ZY
6442 else {
6443 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
6444 flags);
f2c7e521 6445 priv->staging39_rxon.flags = cpu_to_le32(flags);
bb8c093b 6446 iwl3945_commit_rxon(priv);
b481de9c
ZY
6447 }
6448 }
6449 mutex_unlock(&priv->mutex);
6450
6451 return count;
6452}
6453
6454static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
6455
6456static ssize_t show_filter_flags(struct device *d,
6457 struct device_attribute *attr, char *buf)
6458{
4a8a4322 6459 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6460
6461 return sprintf(buf, "0x%04X\n",
f2c7e521 6462 le32_to_cpu(priv->active39_rxon.filter_flags));
b481de9c
ZY
6463}
6464
6465static ssize_t store_filter_flags(struct device *d,
6466 struct device_attribute *attr,
6467 const char *buf, size_t count)
6468{
4a8a4322 6469 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6470 u32 filter_flags = simple_strtoul(buf, NULL, 0);
6471
6472 mutex_lock(&priv->mutex);
f2c7e521 6473 if (le32_to_cpu(priv->staging39_rxon.filter_flags) != filter_flags) {
b481de9c 6474 /* Cancel any currently running scans... */
af0053d6 6475 if (iwl_scan_cancel_timeout(priv, 100))
39aadf8c 6476 IWL_WARN(priv, "Could not cancel scan.\n");
b481de9c
ZY
6477 else {
6478 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
6479 "0x%04X\n", filter_flags);
f2c7e521 6480 priv->staging39_rxon.filter_flags =
b481de9c 6481 cpu_to_le32(filter_flags);
bb8c093b 6482 iwl3945_commit_rxon(priv);
b481de9c
ZY
6483 }
6484 }
6485 mutex_unlock(&priv->mutex);
6486
6487 return count;
6488}
6489
6490static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
6491 store_filter_flags);
6492
c8b0e6e1 6493#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
b481de9c
ZY
6494
6495static ssize_t show_measurement(struct device *d,
6496 struct device_attribute *attr, char *buf)
6497{
4a8a4322 6498 struct iwl_priv *priv = dev_get_drvdata(d);
600c0e11 6499 struct iwl_spectrum_notification measure_report;
b481de9c 6500 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3ac7f146 6501 u8 *data = (u8 *)&measure_report;
b481de9c
ZY
6502 unsigned long flags;
6503
6504 spin_lock_irqsave(&priv->lock, flags);
6505 if (!(priv->measurement_status & MEASUREMENT_READY)) {
6506 spin_unlock_irqrestore(&priv->lock, flags);
6507 return 0;
6508 }
6509 memcpy(&measure_report, &priv->measure_report, size);
6510 priv->measurement_status = 0;
6511 spin_unlock_irqrestore(&priv->lock, flags);
6512
6513 while (size && (PAGE_SIZE - len)) {
6514 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
6515 PAGE_SIZE - len, 1);
6516 len = strlen(buf);
6517 if (PAGE_SIZE - len)
6518 buf[len++] = '\n';
6519
6520 ofs += 16;
6521 size -= min(size, 16U);
6522 }
6523
6524 return len;
6525}
6526
6527static ssize_t store_measurement(struct device *d,
6528 struct device_attribute *attr,
6529 const char *buf, size_t count)
6530{
4a8a4322 6531 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c 6532 struct ieee80211_measurement_params params = {
f2c7e521 6533 .channel = le16_to_cpu(priv->active39_rxon.channel),
b481de9c
ZY
6534 .start_time = cpu_to_le64(priv->last_tsf),
6535 .duration = cpu_to_le16(1),
6536 };
6537 u8 type = IWL_MEASURE_BASIC;
6538 u8 buffer[32];
6539 u8 channel;
6540
6541 if (count) {
6542 char *p = buffer;
6543 strncpy(buffer, buf, min(sizeof(buffer), count));
6544 channel = simple_strtoul(p, NULL, 0);
6545 if (channel)
6546 params.channel = channel;
6547
6548 p = buffer;
6549 while (*p && *p != ' ')
6550 p++;
6551 if (*p)
6552 type = simple_strtoul(p + 1, NULL, 0);
6553 }
6554
6555 IWL_DEBUG_INFO("Invoking measurement of type %d on "
6556 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 6557 iwl3945_get_measurement(priv, &params, type);
b481de9c
ZY
6558
6559 return count;
6560}
6561
6562static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
6563 show_measurement, store_measurement);
c8b0e6e1 6564#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
b481de9c 6565
b481de9c
ZY
6566static ssize_t store_retry_rate(struct device *d,
6567 struct device_attribute *attr,
6568 const char *buf, size_t count)
6569{
4a8a4322 6570 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6571
6572 priv->retry_rate = simple_strtoul(buf, NULL, 0);
6573 if (priv->retry_rate <= 0)
6574 priv->retry_rate = 1;
6575
6576 return count;
6577}
6578
6579static ssize_t show_retry_rate(struct device *d,
6580 struct device_attribute *attr, char *buf)
6581{
4a8a4322 6582 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6583 return sprintf(buf, "%d", priv->retry_rate);
6584}
6585
6586static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
6587 store_retry_rate);
6588
6589static ssize_t store_power_level(struct device *d,
6590 struct device_attribute *attr,
6591 const char *buf, size_t count)
6592{
4a8a4322 6593 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6594 int rc;
6595 int mode;
6596
6597 mode = simple_strtoul(buf, NULL, 0);
6598 mutex_lock(&priv->mutex);
6599
775a6e27 6600 if (!iwl_is_ready(priv)) {
b481de9c
ZY
6601 rc = -EAGAIN;
6602 goto out;
6603 }
6604
1125eff3
SO
6605 if ((mode < 1) || (mode > IWL39_POWER_LIMIT) ||
6606 (mode == IWL39_POWER_AC))
6607 mode = IWL39_POWER_AC;
b481de9c
ZY
6608 else
6609 mode |= IWL_POWER_ENABLED;
6610
6611 if (mode != priv->power_mode) {
bb8c093b 6612 rc = iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(mode));
b481de9c
ZY
6613 if (rc) {
6614 IWL_DEBUG_MAC80211("failed setting power mode.\n");
6615 goto out;
6616 }
6617 priv->power_mode = mode;
6618 }
6619
6620 rc = count;
6621
6622 out:
6623 mutex_unlock(&priv->mutex);
6624 return rc;
6625}
6626
6627#define MAX_WX_STRING 80
6628
 6629 /* Values are in microseconds */
6630static const s32 timeout_duration[] = {
6631 350000,
6632 250000,
6633 75000,
6634 37000,
6635 25000,
6636};
6637static const s32 period_duration[] = {
6638 400000,
6639 700000,
6640 1000000,
6641 1000000,
6642 1000000
6643};
6644
6645static ssize_t show_power_level(struct device *d,
6646 struct device_attribute *attr, char *buf)
6647{
4a8a4322 6648 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6649 int level = IWL_POWER_LEVEL(priv->power_mode);
6650 char *p = buf;
6651
6652 p += sprintf(p, "%d ", level);
6653 switch (level) {
6654 case IWL_POWER_MODE_CAM:
1125eff3 6655 case IWL39_POWER_AC:
b481de9c
ZY
6656 p += sprintf(p, "(AC)");
6657 break;
1125eff3 6658 case IWL39_POWER_BATTERY:
b481de9c
ZY
6659 p += sprintf(p, "(BATTERY)");
6660 break;
6661 default:
6662 p += sprintf(p,
6663 "(Timeout %dms, Period %dms)",
6664 timeout_duration[level - 1] / 1000,
6665 period_duration[level - 1] / 1000);
6666 }
6667
6668 if (!(priv->power_mode & IWL_POWER_ENABLED))
6669 p += sprintf(p, " OFF\n");
6670 else
6671 p += sprintf(p, " \n");
6672
3ac7f146 6673 return p - buf + 1;
b481de9c
ZY
6674
6675}
6676
6677static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
6678 store_power_level);
6679
6680static ssize_t show_channels(struct device *d,
6681 struct device_attribute *attr, char *buf)
6682{
8318d78a
JB
 6683 	/* none of this belongs in sysfs anyway; intentionally left empty */
6684 return 0;
b481de9c
ZY
6685}
6686
6687static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
6688
6689static ssize_t show_statistics(struct device *d,
6690 struct device_attribute *attr, char *buf)
6691{
4a8a4322 6692 struct iwl_priv *priv = dev_get_drvdata(d);
bb8c093b 6693 u32 size = sizeof(struct iwl3945_notif_statistics);
b481de9c 6694 u32 len = 0, ofs = 0;
f2c7e521 6695 u8 *data = (u8 *)&priv->statistics_39;
b481de9c
ZY
6696 int rc = 0;
6697
775a6e27 6698 if (!iwl_is_alive(priv))
b481de9c
ZY
6699 return -EAGAIN;
6700
6701 mutex_lock(&priv->mutex);
bb8c093b 6702 rc = iwl3945_send_statistics_request(priv);
b481de9c
ZY
6703 mutex_unlock(&priv->mutex);
6704
6705 if (rc) {
6706 len = sprintf(buf,
6707 "Error sending statistics request: 0x%08X\n", rc);
6708 return len;
6709 }
6710
6711 while (size && (PAGE_SIZE - len)) {
6712 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
6713 PAGE_SIZE - len, 1);
6714 len = strlen(buf);
6715 if (PAGE_SIZE - len)
6716 buf[len++] = '\n';
6717
6718 ofs += 16;
6719 size -= min(size, 16U);
6720 }
6721
6722 return len;
6723}
6724
6725static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
6726
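/*
 * Editor's note: show_measurement() and show_statistics() above emit their
 * binary payload with the same loop: sixteen source bytes per output line,
 * stopping when either the payload or the PAGE_SIZE sysfs buffer runs out.
 * Below is a rough userspace analogue of that loop (not driver code); it
 * prints to stdout instead of filling a page buffer, and the example_ names
 * are invented for illustration.
 */
#include <stdio.h>
#include <string.h>

static void example_hex_dump(const unsigned char *data, size_t size)
{
	size_t ofs = 0;

	while (size) {
		size_t chunk = size < 16 ? size : 16;	/* at most 16 bytes per line */
		size_t i;

		for (i = 0; i < chunk; i++)
			printf("%02x ", data[ofs + i]);
		printf("\n");

		ofs += chunk;
		size -= chunk;
	}
}

int main(void)
{
	unsigned char payload[40];

	memset(payload, 0xab, sizeof(payload));
	example_hex_dump(payload, sizeof(payload));	/* three lines: 16 + 16 + 8 bytes */
	return 0;
}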
6727static ssize_t show_antenna(struct device *d,
6728 struct device_attribute *attr, char *buf)
6729{
4a8a4322 6730 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c 6731
775a6e27 6732 if (!iwl_is_alive(priv))
b481de9c
ZY
6733 return -EAGAIN;
6734
6735 return sprintf(buf, "%d\n", priv->antenna);
6736}
6737
6738static ssize_t store_antenna(struct device *d,
6739 struct device_attribute *attr,
6740 const char *buf, size_t count)
6741{
6742 int ant;
4a8a4322 6743 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6744
6745 if (count == 0)
6746 return 0;
6747
6748 if (sscanf(buf, "%1i", &ant) != 1) {
6749 IWL_DEBUG_INFO("not in hex or decimal form.\n");
6750 return count;
6751 }
6752
6753 if ((ant >= 0) && (ant <= 2)) {
6754 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 6755 priv->antenna = (enum iwl3945_antenna)ant;
b481de9c
ZY
6756 } else
6757 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
6758
6759
6760 return count;
6761}
6762
6763static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
6764
6765static ssize_t show_status(struct device *d,
6766 struct device_attribute *attr, char *buf)
6767{
4a8a4322 6768 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
775a6e27 6769 if (!iwl_is_alive(priv))
b481de9c
ZY
6770 return -EAGAIN;
6771 return sprintf(buf, "0x%08x\n", (int)priv->status);
6772}
6773
6774static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
6775
6776static ssize_t dump_error_log(struct device *d,
6777 struct device_attribute *attr,
6778 const char *buf, size_t count)
6779{
6780 char *p = (char *)buf;
6781
6782 if (p[0] == '1')
4a8a4322 6783 iwl3945_dump_nic_error_log((struct iwl_priv *)d->driver_data);
b481de9c
ZY
6784
6785 return strnlen(buf, count);
6786}
6787
6788static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
6789
6790static ssize_t dump_event_log(struct device *d,
6791 struct device_attribute *attr,
6792 const char *buf, size_t count)
6793{
6794 char *p = (char *)buf;
6795
6796 if (p[0] == '1')
4a8a4322 6797 iwl3945_dump_nic_event_log((struct iwl_priv *)d->driver_data);
b481de9c
ZY
6798
6799 return strnlen(buf, count);
6800}
6801
6802static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
6803
6804/*****************************************************************************
6805 *
a96a27f9 6806 * driver setup and tear down
b481de9c
ZY
6807 *
6808 *****************************************************************************/
6809
4a8a4322 6810static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
b481de9c
ZY
6811{
6812 priv->workqueue = create_workqueue(DRV_NAME);
6813
6814 init_waitqueue_head(&priv->wait_command_queue);
6815
bb8c093b
CH
6816 INIT_WORK(&priv->up, iwl3945_bg_up);
6817 INIT_WORK(&priv->restart, iwl3945_bg_restart);
6818 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
6819 INIT_WORK(&priv->scan_completed, iwl3945_bg_scan_completed);
6820 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
6821 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan);
c0af96a6 6822 INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
bb8c093b 6823 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
bb8c093b
CH
6824 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
6825 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
6826 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check);
2663516d 6827 INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
bb8c093b
CH
6828
6829 iwl3945_hw_setup_deferred_work(priv);
b481de9c
ZY
6830
6831 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 6832 iwl3945_irq_tasklet, (unsigned long)priv);
b481de9c
ZY
6833}
6834
4a8a4322 6835static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
b481de9c 6836{
bb8c093b 6837 iwl3945_hw_cancel_deferred_work(priv);
b481de9c 6838
e47eb6ad 6839 cancel_delayed_work_sync(&priv->init_alive_start);
b481de9c
ZY
6840 cancel_delayed_work(&priv->scan_check);
6841 cancel_delayed_work(&priv->alive_start);
b481de9c
ZY
6842 cancel_work_sync(&priv->beacon_update);
6843}
6844
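/*
 * Editor's note: a minimal, self-contained module sketch (not driver code)
 * of the deferred-work pattern used by iwl3945_setup_deferred_work() and
 * iwl3945_cancel_deferred_work() above: a private workqueue, one immediate
 * work item and one delayed work item, torn down with the matching cancel
 * and destroy calls.  All example_ names are invented for illustration.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;
static struct delayed_work example_poll;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example: immediate work ran\n");
}

static void example_poll_fn(struct work_struct *work)
{
	pr_info("example: delayed work ran\n");
}

static int __init example_wq_init(void)
{
	example_wq = create_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	INIT_DELAYED_WORK(&example_poll, example_poll_fn);

	queue_work(example_wq, &example_work);
	queue_delayed_work(example_wq, &example_poll, 2 * HZ);	/* ~2 s, like the rfkill poll */
	return 0;
}

static void __exit example_wq_exit(void)
{
	cancel_delayed_work_sync(&example_poll);
	cancel_work_sync(&example_work);
	destroy_workqueue(example_wq);
}

module_init(example_wq_init);
module_exit(example_wq_exit);
MODULE_LICENSE("GPL");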
bb8c093b 6845static struct attribute *iwl3945_sysfs_entries[] = {
b481de9c
ZY
6846 &dev_attr_antenna.attr,
6847 &dev_attr_channels.attr,
6848 &dev_attr_dump_errors.attr,
6849 &dev_attr_dump_events.attr,
6850 &dev_attr_flags.attr,
6851 &dev_attr_filter_flags.attr,
c8b0e6e1 6852#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
b481de9c
ZY
6853 &dev_attr_measurement.attr,
6854#endif
6855 &dev_attr_power_level.attr,
b481de9c 6856 &dev_attr_retry_rate.attr,
b481de9c
ZY
6857 &dev_attr_statistics.attr,
6858 &dev_attr_status.attr,
6859 &dev_attr_temperature.attr,
b481de9c 6860 &dev_attr_tx_power.attr,
40b8ec0b
SO
6861#ifdef CONFIG_IWL3945_DEBUG
6862 &dev_attr_debug_level.attr,
6863#endif
b481de9c
ZY
6864 NULL
6865};
6866
bb8c093b 6867static struct attribute_group iwl3945_attribute_group = {
b481de9c 6868 .name = NULL, /* put in device directory */
bb8c093b 6869 .attrs = iwl3945_sysfs_entries,
b481de9c
ZY
6870};
6871
bb8c093b
CH
6872static struct ieee80211_ops iwl3945_hw_ops = {
6873 .tx = iwl3945_mac_tx,
6874 .start = iwl3945_mac_start,
6875 .stop = iwl3945_mac_stop,
6876 .add_interface = iwl3945_mac_add_interface,
6877 .remove_interface = iwl3945_mac_remove_interface,
6878 .config = iwl3945_mac_config,
6879 .config_interface = iwl3945_mac_config_interface,
6880 .configure_filter = iwl3945_configure_filter,
6881 .set_key = iwl3945_mac_set_key,
bb8c093b
CH
6882 .get_tx_stats = iwl3945_mac_get_tx_stats,
6883 .conf_tx = iwl3945_mac_conf_tx,
bb8c093b 6884 .reset_tsf = iwl3945_mac_reset_tsf,
cd56d331 6885 .bss_info_changed = iwl3945_bss_info_changed,
bb8c093b 6886 .hw_scan = iwl3945_mac_hw_scan
b481de9c
ZY
6887};
6888
e52119c5 6889static int iwl3945_init_drv(struct iwl_priv *priv)
90a30a02
KA
6890{
6891 int ret;
6892
6893 priv->retry_rate = 1;
6894 priv->ibss_beacon = NULL;
6895
6896 spin_lock_init(&priv->lock);
3dae0c42 6897 spin_lock_init(&priv->power_data.lock);
90a30a02
KA
6898 spin_lock_init(&priv->sta_lock);
6899 spin_lock_init(&priv->hcmd_lock);
6900
6901 INIT_LIST_HEAD(&priv->free_frames);
6902
6903 mutex_init(&priv->mutex);
6904
6905 /* Clear the driver's (not device's) station table */
6906 iwl3945_clear_stations_table(priv);
6907
6908 priv->data_retry_limit = -1;
6909 priv->ieee_channels = NULL;
6910 priv->ieee_rates = NULL;
6911 priv->band = IEEE80211_BAND_2GHZ;
6912
6913 priv->iw_mode = NL80211_IFTYPE_STATION;
6914
6915 iwl_reset_qos(priv);
6916
6917 priv->qos_data.qos_active = 0;
6918 priv->qos_data.qos_cap.val = 0;
6919
6920 priv->rates_mask = IWL_RATES_MASK;
6921 /* If power management is turned on, default to AC mode */
c7a7c8ec 6922 priv->power_mode = IWL39_POWER_AC;
62ea9c5b 6923 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
90a30a02
KA
6924
6925 ret = iwl3945_init_channel_map(priv);
6926 if (ret) {
6927 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
6928 goto err;
6929 }
6930
6931 ret = iwl3945_init_geos(priv);
6932 if (ret) {
6933 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
6934 goto err_free_channel_map;
6935 }
6936
6937 return 0;
6938
6939err_free_channel_map:
6940 iwl3945_free_channel_map(priv);
6941err:
6942 return ret;
6943}
6944
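/*
 * Editor's note: iwl3945_pci_probe() below is organised around the numbered
 * step markers in its body: (1) allocate the ieee80211_hw / iwl_priv pair
 * and validate module parameters, (2) enable the PCI device and set the DMA
 * masks, (3) map the registers and run APM init, (4) read the EEPROM and
 * MAC address, (5) set the hardware constants, (6) initialise driver state
 * via iwl3945_init_drv(), (7) apply the radio-disable module parameter,
 * (8) set up interrupts, sysfs and deferred work, and (9) register with
 * mac80211 and rfkill.  The error labels at the end unwind this sequence.
 */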
bb8c093b 6945static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
b481de9c
ZY
6946{
6947 int err = 0;
4a8a4322 6948 struct iwl_priv *priv;
b481de9c 6949 struct ieee80211_hw *hw;
c0f20d91 6950 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
0359facc 6951 unsigned long flags;
b481de9c 6952
cee53ddb
KA
6953 /***********************
6954 * 1. Allocating HW data
6955 * ********************/
6956
b481de9c
ZY
6957 /* mac80211 allocates memory for this device instance, including
6958 * space for this driver's private structure */
90a30a02 6959 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
b481de9c 6960 if (hw == NULL) {
a3139c59 6961 		printk(KERN_ERR DRV_NAME ": Cannot allocate network device\n");
b481de9c
ZY
6962 err = -ENOMEM;
6963 goto out;
6964 }
b481de9c 6965 priv = hw->priv;
90a30a02 6966 SET_IEEE80211_DEV(hw, &pdev->dev);
6440adb5 6967
df878d8f
KA
6968 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) ||
6969 (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) {
15b1687c
WT
6970 IWL_ERR(priv,
6971 "invalid queues_num, should be between %d and %d\n",
6972 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
a3139c59
SO
6973 err = -EINVAL;
6974 goto out;
6975 }
6976
90a30a02
KA
6977 /*
6978 * Disabling hardware scan means that mac80211 will perform scans
6979 * "the hard way", rather than using device's scan.
6980 */
df878d8f 6981 if (iwl3945_mod_params.disable_hw_scan) {
40b8ec0b
SO
6982 IWL_DEBUG_INFO("Disabling hw_scan\n");
6983 iwl3945_hw_ops.hw_scan = NULL;
6984 }
6985
90a30a02 6986
cee53ddb 6987 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
90a30a02
KA
6988 priv->cfg = cfg;
6989 priv->pci_dev = pdev;
cee53ddb 6990
c8b0e6e1 6991#ifdef CONFIG_IWL3945_DEBUG
df878d8f 6992 priv->debug_level = iwl3945_mod_params.debug;
b481de9c
ZY
6993 atomic_set(&priv->restrict_refcnt, 0);
6994#endif
90a30a02
KA
6995 hw->rate_control_algorithm = "iwl-3945-rs";
6996 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
6997
6998 /* Select antenna (may be helpful if only one antenna is connected) */
6999 priv->antenna = (enum iwl3945_antenna)iwl3945_mod_params.antenna;
b481de9c 7000
566bfe5a 7001 /* Tell mac80211 our characteristics */
605a0bd6 7002 hw->flags = IEEE80211_HW_SIGNAL_DBM |
566bfe5a 7003 IEEE80211_HW_NOISE_DBM;
b481de9c 7004
f59ac048 7005 hw->wiphy->interface_modes =
f59ac048
LR
7006 BIT(NL80211_IFTYPE_STATION) |
7007 BIT(NL80211_IFTYPE_ADHOC);
7008
ea4a82dc
LR
7009 hw->wiphy->fw_handles_regulatory = true;
7010
6440adb5 7011 /* 4 EDCA QOS priorities */
b481de9c
ZY
7012 hw->queues = 4;
7013
cee53ddb
KA
7014 /***************************
7015 * 2. Initializing PCI bus
7016 * *************************/
b481de9c
ZY
7017 if (pci_enable_device(pdev)) {
7018 err = -ENODEV;
7019 goto out_ieee80211_free_hw;
7020 }
7021
7022 pci_set_master(pdev);
7023
b481de9c
ZY
7024 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7025 if (!err)
7026 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
7027 if (err) {
978785a3 7028 IWL_WARN(priv, "No suitable DMA available.\n");
b481de9c
ZY
7029 goto out_pci_disable_device;
7030 }
7031
7032 pci_set_drvdata(pdev, priv);
7033 err = pci_request_regions(pdev, DRV_NAME);
7034 if (err)
7035 goto out_pci_disable_device;
6440adb5 7036
cee53ddb
KA
7037 /***********************
7038 * 3. Read REV Register
7039 * ********************/
b481de9c
ZY
7040 priv->hw_base = pci_iomap(pdev, 0, 0);
7041 if (!priv->hw_base) {
7042 err = -ENODEV;
7043 goto out_pci_release_regions;
7044 }
7045
7046 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
7047 (unsigned long long) pci_resource_len(pdev, 0));
7048 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
7049
cee53ddb
KA
7050 /* We disable the RETRY_TIMEOUT register (0x41) to keep
7051 * PCI Tx retries from interfering with C3 CPU state */
7052 pci_write_config_byte(pdev, 0x41, 0x00);
b481de9c 7053
90a30a02
KA
7054 /* amp init */
7055 err = priv->cfg->ops->lib->apm_ops.init(priv);
cee53ddb 7056 if (err < 0) {
90a30a02
KA
7057 IWL_DEBUG_INFO("Failed to init APMG\n");
7058 goto out_iounmap;
cee53ddb 7059 }
b481de9c 7060
cee53ddb
KA
7061 /***********************
7062 * 4. Read EEPROM
7063 * ********************/
90a30a02 7064
cee53ddb
KA
7065 /* Read the EEPROM */
7066 err = iwl3945_eeprom_init(priv);
7067 if (err) {
15b1687c 7068 IWL_ERR(priv, "Unable to init EEPROM\n");
cee53ddb
KA
 7069 		goto out_iounmap;
7070 }
7071 /* MAC Address location in EEPROM same for 3945/4965 */
7072 get_eeprom_mac(priv, priv->mac_addr);
7073 IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr);
7074 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
b481de9c 7075
cee53ddb
KA
7076 /***********************
7077 * 5. Setup HW Constants
7078 * ********************/
b481de9c 7079 /* Device-specific setup */
3832ec9d 7080 if (iwl3945_hw_set_hw_params(priv)) {
15b1687c 7081 IWL_ERR(priv, "failed to set hw settings\n");
b481de9c
ZY
7082 goto out_iounmap;
7083 }
7084
cee53ddb
KA
7085 /***********************
7086 * 6. Setup priv
7087 * ********************/
cee53ddb 7088
90a30a02 7089 err = iwl3945_init_drv(priv);
b481de9c 7090 if (err) {
90a30a02
KA
7091 IWL_ERR(priv, "initializing driver failed\n");
7092 goto out_free_geos;
b481de9c
ZY
7093 }
7094
978785a3
TW
7095 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
7096 priv->cfg->name);
cee53ddb
KA
7097
7098 /***********************************
7099 * 7. Initialize Module Parameters
7100 * **********************************/
7101
7102 /* Initialize module parameter values here */
7103 /* Disable radio (SW RF KILL) via parameter when loading driver */
df878d8f 7104 if (iwl3945_mod_params.disable) {
cee53ddb
KA
7105 set_bit(STATUS_RF_KILL_SW, &priv->status);
7106 IWL_DEBUG_INFO("Radio disabled.\n");
849e0dce
RC
7107 }
7108
cee53ddb
KA
7109
7110 /***********************
7111 * 8. Setup Services
7112 * ********************/
7113
7114 spin_lock_irqsave(&priv->lock, flags);
7115 iwl3945_disable_interrupts(priv);
7116 spin_unlock_irqrestore(&priv->lock, flags);
7117
2663516d
HS
7118 pci_enable_msi(priv->pci_dev);
7119
7120 err = request_irq(priv->pci_dev->irq, iwl3945_isr, IRQF_SHARED,
7121 DRV_NAME, priv);
7122 if (err) {
7123 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
7124 goto out_disable_msi;
7125 }
7126
cee53ddb 7127 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
849e0dce 7128 if (err) {
15b1687c 7129 IWL_ERR(priv, "failed to create sysfs device attributes\n");
90a30a02 7130 goto out_release_irq;
849e0dce 7131 }
849e0dce 7132
cee53ddb
KA
7133 iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
7134 iwl3945_setup_deferred_work(priv);
7135 iwl3945_setup_rx_handlers(priv);
7136
cee53ddb 7137 /*********************************
2663516d 7138 * 9. Setup and Register mac80211
cee53ddb
KA
7139 * *******************************/
7140
5a66926a
ZY
7141 err = ieee80211_register_hw(priv->hw);
7142 if (err) {
15b1687c 7143 IWL_ERR(priv, "Failed to register network device: %d\n", err);
cee53ddb 7144 goto out_remove_sysfs;
5a66926a 7145 }
b481de9c 7146
5a66926a
ZY
7147 priv->hw->conf.beacon_int = 100;
7148 priv->mac80211_registered = 1;
cee53ddb 7149
c0af96a6 7150 err = iwl_rfkill_init(priv);
ebef2008 7151 if (err)
15b1687c 7152 IWL_ERR(priv, "Unable to initialize RFKILL system. "
ebef2008
AK
7153 "Ignoring error: %d\n", err);
7154
2663516d
HS
7155 /* Start monitoring the killswitch */
7156 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
7157 2 * HZ);
7158
b481de9c
ZY
7159 return 0;
7160
cee53ddb
KA
7161 out_remove_sysfs:
7162 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
849e0dce
RC
7163 out_free_geos:
7164 iwl3945_free_geos(priv);
b481de9c
ZY
7165
7166 out_release_irq:
2663516d 7167 free_irq(priv->pci_dev->irq, priv);
b481de9c
ZY
7168 destroy_workqueue(priv->workqueue);
7169 priv->workqueue = NULL;
3832ec9d 7170 iwl3945_unset_hw_params(priv);
2663516d
HS
7171 out_disable_msi:
7172 pci_disable_msi(priv->pci_dev);
b481de9c
ZY
7173 out_iounmap:
7174 pci_iounmap(pdev, priv->hw_base);
7175 out_pci_release_regions:
7176 pci_release_regions(pdev);
7177 out_pci_disable_device:
7178 pci_disable_device(pdev);
7179 pci_set_drvdata(pdev, NULL);
7180 out_ieee80211_free_hw:
7181 ieee80211_free_hw(priv->hw);
7182 out:
7183 return err;
7184}
7185
c83dbf68 7186static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
b481de9c 7187{
4a8a4322 7188 struct iwl_priv *priv = pci_get_drvdata(pdev);
0359facc 7189 unsigned long flags;
b481de9c
ZY
7190
7191 if (!priv)
7192 return;
7193
7194 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
7195
b481de9c 7196 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 7197
d552bfb6
KA
7198 if (priv->mac80211_registered) {
7199 ieee80211_unregister_hw(priv->hw);
7200 priv->mac80211_registered = 0;
7201 } else {
7202 iwl3945_down(priv);
7203 }
b481de9c 7204
0359facc
MA
7205 /* make sure we flush any pending irq or
7206 * tasklet for the driver
7207 */
7208 spin_lock_irqsave(&priv->lock, flags);
7209 iwl3945_disable_interrupts(priv);
7210 spin_unlock_irqrestore(&priv->lock, flags);
7211
7212 iwl_synchronize_irq(priv);
7213
bb8c093b 7214 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
b481de9c 7215
c0af96a6 7216 iwl_rfkill_unregister(priv);
2663516d
HS
7217 cancel_delayed_work(&priv->rfkill_poll);
7218
bb8c093b 7219 iwl3945_dealloc_ucode_pci(priv);
b481de9c
ZY
7220
7221 if (priv->rxq.bd)
51af3d3f 7222 iwl_rx_queue_free(priv, &priv->rxq);
bb8c093b 7223 iwl3945_hw_txq_ctx_free(priv);
b481de9c 7224
3832ec9d 7225 iwl3945_unset_hw_params(priv);
bb8c093b 7226 iwl3945_clear_stations_table(priv);
b481de9c 7227
6ef89d0a
MA
7228 /*netif_stop_queue(dev); */
7229 flush_workqueue(priv->workqueue);
7230
bb8c093b 7231 /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
b481de9c
ZY
7232 * priv->workqueue... so we can't take down the workqueue
7233 * until now... */
7234 destroy_workqueue(priv->workqueue);
7235 priv->workqueue = NULL;
7236
2663516d
HS
7237 free_irq(pdev->irq, priv);
7238 pci_disable_msi(pdev);
7239
b481de9c
ZY
7240 pci_iounmap(pdev, priv->hw_base);
7241 pci_release_regions(pdev);
7242 pci_disable_device(pdev);
7243 pci_set_drvdata(pdev, NULL);
7244
849e0dce
RC
7245 iwl3945_free_channel_map(priv);
7246 iwl3945_free_geos(priv);
805cee5b 7247 kfree(priv->scan);
b481de9c
ZY
7248 if (priv->ibss_beacon)
7249 dev_kfree_skb(priv->ibss_beacon);
7250
7251 ieee80211_free_hw(priv->hw);
7252}
7253
7254#ifdef CONFIG_PM
7255
bb8c093b 7256static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 7257{
4a8a4322 7258 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c 7259
e655b9f0
ZY
7260 if (priv->is_open) {
7261 set_bit(STATUS_IN_SUSPEND, &priv->status);
7262 iwl3945_mac_stop(priv->hw);
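		/* re-assert is_open here (mac_stop may clear it) so that
		 * iwl3945_pci_resume() below knows to restart the interface */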
7263 priv->is_open = 1;
7264 }
2663516d
HS
7265 pci_save_state(pdev);
7266 pci_disable_device(pdev);
b481de9c
ZY
7267 pci_set_power_state(pdev, PCI_D3hot);
7268
b481de9c
ZY
7269 return 0;
7270}
7271
bb8c093b 7272static int iwl3945_pci_resume(struct pci_dev *pdev)
b481de9c 7273{
4a8a4322 7274 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c 7275
b481de9c 7276 pci_set_power_state(pdev, PCI_D0);
2663516d
HS
7277 pci_enable_device(pdev);
7278 pci_restore_state(pdev);
b481de9c 7279
e655b9f0
ZY
7280 if (priv->is_open)
7281 iwl3945_mac_start(priv->hw);
b481de9c 7282
e655b9f0 7283 clear_bit(STATUS_IN_SUSPEND, &priv->status);
b481de9c
ZY
7284 return 0;
7285}
7286
7287#endif /* CONFIG_PM */
7288
7289/*****************************************************************************
7290 *
7291 * driver and module entry point
7292 *
7293 *****************************************************************************/
7294
bb8c093b 7295static struct pci_driver iwl3945_driver = {
b481de9c 7296 .name = DRV_NAME,
bb8c093b
CH
7297 .id_table = iwl3945_hw_card_ids,
7298 .probe = iwl3945_pci_probe,
7299 .remove = __devexit_p(iwl3945_pci_remove),
b481de9c 7300#ifdef CONFIG_PM
bb8c093b
CH
7301 .suspend = iwl3945_pci_suspend,
7302 .resume = iwl3945_pci_resume,
b481de9c
ZY
7303#endif
7304};
7305
bb8c093b 7306static int __init iwl3945_init(void)
b481de9c
ZY
7307{
7308
7309 int ret;
7310 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
7311 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
897e1cf2
RC
7312
7313 ret = iwl3945_rate_control_register();
7314 if (ret) {
a3139c59
SO
 7315 		printk(KERN_ERR DRV_NAME
 7316 		       ": Unable to register rate control algorithm: %d\n", ret);
897e1cf2
RC
7317 return ret;
7318 }
7319
bb8c093b 7320 ret = pci_register_driver(&iwl3945_driver);
b481de9c 7321 if (ret) {
a3139c59 7322 		printk(KERN_ERR DRV_NAME ": Unable to initialize PCI module\n");
897e1cf2 7323 goto error_register;
b481de9c 7324 }
b481de9c
ZY
7325
7326 return ret;
897e1cf2 7327
897e1cf2
RC
7328error_register:
7329 iwl3945_rate_control_unregister();
7330 return ret;
b481de9c
ZY
7331}
7332
bb8c093b 7333static void __exit iwl3945_exit(void)
b481de9c 7334{
bb8c093b 7335 pci_unregister_driver(&iwl3945_driver);
897e1cf2 7336 iwl3945_rate_control_unregister();
b481de9c
ZY
7337}
7338
a0987a8d 7339MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
25cb6cad 7340
df878d8f 7341module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444);
b481de9c 7342MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
df878d8f 7343module_param_named(disable, iwl3945_mod_params.disable, int, 0444);
b481de9c 7344MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
9c74d9fb
SO
7345module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444);
7346MODULE_PARM_DESC(swcrypto,
 7347 		 "use software crypto (default 1 [software])");
df878d8f 7348module_param_named(debug, iwl3945_mod_params.debug, uint, 0444);
b481de9c 7349MODULE_PARM_DESC(debug, "debug output mask");
df878d8f 7350module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444);
b481de9c
ZY
7351MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
7352
df878d8f 7353module_param_named(queues_num, iwl3945_mod_params.num_of_queues, int, 0444);
b481de9c
ZY
7354MODULE_PARM_DESC(queues_num, "number of hw queues.");
7355
bb8c093b
CH
7356module_exit(iwl3945_exit);
7357module_init(iwl3945_init);
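
/*
 * Editor's note: a minimal, self-contained sketch (not driver code) of the
 * module_param_named() pattern used above.  The external parameter name is
 * decoupled from the C variable it fills, the 0444 permission exposes it
 * read-only under /sys/module/<module>/parameters/, and it can be set at
 * load time, e.g. "modprobe <module> antenna=1".  The example_ names are
 * invented for illustration.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_antenna;	/* 0 = both antennas, mirroring the default above */

module_param_named(antenna, example_antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");

static int __init example_param_init(void)
{
	pr_info("example: antenna=%d\n", example_antenna);
	return 0;
}

static void __exit example_param_exit(void)
{
}

module_init(example_param_init);
module_exit(example_param_exit);
MODULE_LICENSE("GPL");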