drivers/net/wireless/iwlwifi/iwl-core.c
1 /******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <net/mac80211.h>
32
33 #include "iwl-eeprom.h"
34 #include "iwl-dev.h" /* FIXME: remove */
35 #include "iwl-debug.h"
36 #include "iwl-core.h"
37 #include "iwl-io.h"
38 #include "iwl-rfkill.h"
39 #include "iwl-power.h"
40 #include "iwl-sta.h"
41
42
43 MODULE_DESCRIPTION("iwl core");
44 MODULE_VERSION(IWLWIFI_VERSION);
45 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
46 MODULE_LICENSE("GPL");
47
48 #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
49 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
50 IWL_RATE_SISO_##s##M_PLCP, \
51 IWL_RATE_MIMO2_##s##M_PLCP,\
52 IWL_RATE_MIMO3_##s##M_PLCP,\
53 IWL_RATE_##r##M_IEEE, \
54 IWL_RATE_##ip##M_INDEX, \
55 IWL_RATE_##in##M_INDEX, \
56 IWL_RATE_##rp##M_INDEX, \
57 IWL_RATE_##rn##M_INDEX, \
58 IWL_RATE_##pp##M_INDEX, \
59 IWL_RATE_##np##M_INDEX }
60
61 /*
62 * Parameter order:
63 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
64 *
65 * If there isn't a valid next or previous rate then INV is used which
66 * maps to IWL_RATE_INVALID
67 *
68 */
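/*
 * As an illustration (a sketch of the macro expansion, assuming the usual
 * IWL_RATE_* constant naming), the 6 Mbps entry below,
 * IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), expands to:
 *
 *   [IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP, IWL_RATE_SISO_6M_PLCP,
 *                           IWL_RATE_MIMO2_6M_PLCP, IWL_RATE_MIMO3_6M_PLCP,
 *                           IWL_RATE_6M_IEEE,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_9M_INDEX,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX },
 */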
69 const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
70 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
71 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
72         IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),    /* 5.5mbps */
73 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
74 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
75 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
76 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
77 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
78 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
79 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
80 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
81 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
82 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
83 /* FIXME:RS: ^^ should be INV (legacy) */
84 };
85 EXPORT_SYMBOL(iwl_rates);
86
87 /**
88  * iwl_hwrate_to_tx_control - translate ucode response to mac80211 tx status control values
89 */
90 void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
91 struct ieee80211_tx_info *info)
92 {
93 int rate_index;
94 struct ieee80211_tx_rate *r = &info->control.rates[0];
95
96 info->antenna_sel_tx =
97 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
98 if (rate_n_flags & RATE_MCS_HT_MSK)
99 r->flags |= IEEE80211_TX_RC_MCS;
100 if (rate_n_flags & RATE_MCS_GF_MSK)
101 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
102 if (rate_n_flags & RATE_MCS_FAT_MSK)
103 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
104 if (rate_n_flags & RATE_MCS_DUP_MSK)
105 r->flags |= IEEE80211_TX_RC_DUP_DATA;
106 if (rate_n_flags & RATE_MCS_SGI_MSK)
107 r->flags |= IEEE80211_TX_RC_SHORT_GI;
108 rate_index = iwl_hwrate_to_plcp_idx(rate_n_flags);
109 if (info->band == IEEE80211_BAND_5GHZ)
110 rate_index -= IWL_FIRST_OFDM_RATE;
111 r->idx = rate_index;
112 }
113 EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
114
115 int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
116 {
117 int idx = 0;
118
119 /* HT rate format */
120 if (rate_n_flags & RATE_MCS_HT_MSK) {
121 idx = (rate_n_flags & 0xff);
122
123 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
124 idx = idx - IWL_RATE_MIMO3_6M_PLCP;
125 else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
126 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
127
128 idx += IWL_FIRST_OFDM_RATE;
129                 /* skip 9M, not supported in HT */
130 if (idx >= IWL_RATE_9M_INDEX)
131 idx += 1;
132 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
133 return idx;
134
135 /* legacy rate format, search for match in table */
136 } else {
137 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
138 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
139 return idx;
140 }
141
142 return -1;
143 }
144 EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
145
146 u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
147 {
148 int i;
149 u8 ind = ant;
150 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
151 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
152 if (priv->hw_params.valid_tx_ant & BIT(ind))
153 return ind;
154 }
155 return ant;
156 }
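/*
 * Usage sketch for iwl_toggle_tx_ant() above, assuming RATE_ANT_NUM is 3
 * and indexes 0..2 correspond to antennas A..C: with
 * priv->hw_params.valid_tx_ant == (BIT(0) | BIT(2)) (antennas A and C),
 * iwl_toggle_tx_ant(priv, 0) returns 2, and iwl_toggle_tx_ant(priv, 2)
 * wraps around and returns 0. If no other antenna is valid, the current
 * antenna index is returned unchanged.
 */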
157
158 const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
159 EXPORT_SYMBOL(iwl_bcast_addr);
160
161
162 /* This function both allocates and initializes hw and priv. */
163 struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
164 struct ieee80211_ops *hw_ops)
165 {
166 struct iwl_priv *priv;
167
168 /* mac80211 allocates memory for this device instance, including
169 * space for this driver's private structure */
170 struct ieee80211_hw *hw =
171 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
172 if (hw == NULL) {
173 printk(KERN_ERR "%s: Can not allocate network device\n",
174 cfg->name);
175 goto out;
176 }
177
178 priv = hw->priv;
179 priv->hw = hw;
180
181 out:
182 return hw;
183 }
184 EXPORT_SYMBOL(iwl_alloc_all);
185
186 void iwl_hw_detect(struct iwl_priv *priv)
187 {
188 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
189 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
190 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
191 }
192 EXPORT_SYMBOL(iwl_hw_detect);
193
194 int iwl_hw_nic_init(struct iwl_priv *priv)
195 {
196 unsigned long flags;
197 struct iwl_rx_queue *rxq = &priv->rxq;
198 int ret;
199
200 /* nic_init */
201 spin_lock_irqsave(&priv->lock, flags);
202 priv->cfg->ops->lib->apm_ops.init(priv);
203 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
204 spin_unlock_irqrestore(&priv->lock, flags);
205
206 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
207
208 priv->cfg->ops->lib->apm_ops.config(priv);
209
210 /* Allocate the RX queue, or reset if it is already allocated */
211 if (!rxq->bd) {
212 ret = iwl_rx_queue_alloc(priv);
213 if (ret) {
214 IWL_ERR(priv, "Unable to initialize Rx queue\n");
215 return -ENOMEM;
216 }
217 } else
218 iwl_rx_queue_reset(priv, rxq);
219
220 iwl_rx_replenish(priv);
221
222 iwl_rx_init(priv, rxq);
223
224 spin_lock_irqsave(&priv->lock, flags);
225
226 rxq->need_update = 1;
227 iwl_rx_queue_update_write_ptr(priv, rxq);
228
229 spin_unlock_irqrestore(&priv->lock, flags);
230
231 /* Allocate and init all Tx and Command queues */
232 ret = iwl_txq_ctx_reset(priv);
233 if (ret)
234 return ret;
235
236 set_bit(STATUS_INIT, &priv->status);
237
238 return 0;
239 }
240 EXPORT_SYMBOL(iwl_hw_nic_init);
241
242 void iwl_reset_qos(struct iwl_priv *priv)
243 {
244 u16 cw_min = 15;
245 u16 cw_max = 1023;
246 u8 aifs = 2;
247 bool is_legacy = false;
248 unsigned long flags;
249 int i;
250
251 spin_lock_irqsave(&priv->lock, flags);
252 /* QoS always active in AP and ADHOC mode
253 * In STA mode wait for association
254 */
255 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
256 priv->iw_mode == NL80211_IFTYPE_AP)
257 priv->qos_data.qos_active = 1;
258 else
259 priv->qos_data.qos_active = 0;
260
261 /* check for legacy mode */
262 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
263 (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
264 (priv->iw_mode == NL80211_IFTYPE_STATION &&
265 (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
266 cw_min = 31;
267 is_legacy = 1;
268 }
269
270 if (priv->qos_data.qos_active)
271 aifs = 3;
272
273 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
274 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
275 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
276 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
277 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
278
279 if (priv->qos_data.qos_active) {
280 i = 1;
281 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
282 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
283 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
284 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
285 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
286
287 i = 2;
288 priv->qos_data.def_qos_parm.ac[i].cw_min =
289 cpu_to_le16((cw_min + 1) / 2 - 1);
290 priv->qos_data.def_qos_parm.ac[i].cw_max =
291 cpu_to_le16(cw_max);
292 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
293 if (is_legacy)
294 priv->qos_data.def_qos_parm.ac[i].edca_txop =
295 cpu_to_le16(6016);
296 else
297 priv->qos_data.def_qos_parm.ac[i].edca_txop =
298 cpu_to_le16(3008);
299 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
300
301 i = 3;
302 priv->qos_data.def_qos_parm.ac[i].cw_min =
303 cpu_to_le16((cw_min + 1) / 4 - 1);
304 priv->qos_data.def_qos_parm.ac[i].cw_max =
305 cpu_to_le16((cw_max + 1) / 2 - 1);
306 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
307 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
308 if (is_legacy)
309 priv->qos_data.def_qos_parm.ac[i].edca_txop =
310 cpu_to_le16(3264);
311 else
312 priv->qos_data.def_qos_parm.ac[i].edca_txop =
313 cpu_to_le16(1504);
314 } else {
315 for (i = 1; i < 4; i++) {
316 priv->qos_data.def_qos_parm.ac[i].cw_min =
317 cpu_to_le16(cw_min);
318 priv->qos_data.def_qos_parm.ac[i].cw_max =
319 cpu_to_le16(cw_max);
320 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
321 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
322 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
323 }
324 }
325 IWL_DEBUG_QOS("set QoS to default \n");
326
327 spin_unlock_irqrestore(&priv->lock, flags);
328 }
329 EXPORT_SYMBOL(iwl_reset_qos);
330
331 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
332 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
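/*
 * Worked example for iwlcore_init_ht_hw_capab() below: mcs.rx_highest is
 * max_bit_rate * rx_chains_num, so a band where 40 MHz ("fat") channels
 * are allowed and 2 Rx chains are present advertises 2 * 150 = 300 Mbps,
 * while a 20 MHz-only band with 2 chains advertises 2 * 72 = 144 Mbps.
 */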
333 static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
334 struct ieee80211_sta_ht_cap *ht_info,
335 enum ieee80211_band band)
336 {
337 u16 max_bit_rate = 0;
338 u8 rx_chains_num = priv->hw_params.rx_chains_num;
339 u8 tx_chains_num = priv->hw_params.tx_chains_num;
340
341 ht_info->cap = 0;
342 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
343
344 ht_info->ht_supported = true;
345
346 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
347 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
348 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
349 (WLAN_HT_CAP_SM_PS_DISABLED << 2));
350
351 max_bit_rate = MAX_BIT_RATE_20_MHZ;
352 if (priv->hw_params.fat_channel & BIT(band)) {
353 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
354 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
355 ht_info->mcs.rx_mask[4] = 0x01;
356 max_bit_rate = MAX_BIT_RATE_40_MHZ;
357 }
358
359 if (priv->cfg->mod_params->amsdu_size_8K)
360 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
361
362 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
363 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
364
365 ht_info->mcs.rx_mask[0] = 0xFF;
366 if (rx_chains_num >= 2)
367 ht_info->mcs.rx_mask[1] = 0xFF;
368 if (rx_chains_num >= 3)
369 ht_info->mcs.rx_mask[2] = 0xFF;
370
371 /* Highest supported Rx data rate */
372 max_bit_rate *= rx_chains_num;
373 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
374 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
375
376 /* Tx MCS capabilities */
377 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
378 if (tx_chains_num != rx_chains_num) {
379 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
380 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
381 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
382 }
383 }
384
385 static void iwlcore_init_hw_rates(struct iwl_priv *priv,
386 struct ieee80211_rate *rates)
387 {
388 int i;
389
390 for (i = 0; i < IWL_RATE_COUNT; i++) {
391 rates[i].bitrate = iwl_rates[i].ieee * 5;
392 rates[i].hw_value = i; /* Rate scaling will work on indexes */
393 rates[i].hw_value_short = i;
394 rates[i].flags = 0;
395 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
396 /*
397 * If CCK != 1M then set short preamble rate flag.
398 */
399 rates[i].flags |=
400 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
401 0 : IEEE80211_RATE_SHORT_PREAMBLE;
402 }
403 }
404 }
405
406 /**
407  * iwlcore_init_geos - Initialize mac80211's geo/channel info based on EEPROM
408 */
409 int iwlcore_init_geos(struct iwl_priv *priv)
410 {
411 struct iwl_channel_info *ch;
412 struct ieee80211_supported_band *sband;
413 struct ieee80211_channel *channels;
414 struct ieee80211_channel *geo_ch;
415 struct ieee80211_rate *rates;
416 int i = 0;
417
418 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
419 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
420 IWL_DEBUG_INFO("Geography modes already initialized.\n");
421 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
422 return 0;
423 }
424
425 channels = kzalloc(sizeof(struct ieee80211_channel) *
426 priv->channel_count, GFP_KERNEL);
427 if (!channels)
428 return -ENOMEM;
429
430 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
431 GFP_KERNEL);
432 if (!rates) {
433 kfree(channels);
434 return -ENOMEM;
435 }
436
437 /* 5.2GHz channels start after the 2.4GHz channels */
438 sband = &priv->bands[IEEE80211_BAND_5GHZ];
439 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
440 /* just OFDM */
441 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
442 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
443
444 if (priv->cfg->sku & IWL_SKU_N)
445 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
446 IEEE80211_BAND_5GHZ);
447
448 sband = &priv->bands[IEEE80211_BAND_2GHZ];
449 sband->channels = channels;
450 /* OFDM & CCK */
451 sband->bitrates = rates;
452 sband->n_bitrates = IWL_RATE_COUNT;
453
454 if (priv->cfg->sku & IWL_SKU_N)
455 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
456 IEEE80211_BAND_2GHZ);
457
458 priv->ieee_channels = channels;
459 priv->ieee_rates = rates;
460
461 for (i = 0; i < priv->channel_count; i++) {
462 ch = &priv->channel_info[i];
463
464 /* FIXME: might be removed if scan is OK */
465 if (!is_channel_valid(ch))
466 continue;
467
468 if (is_channel_a_band(ch))
469 sband = &priv->bands[IEEE80211_BAND_5GHZ];
470 else
471 sband = &priv->bands[IEEE80211_BAND_2GHZ];
472
473 geo_ch = &sband->channels[sband->n_channels++];
474
475 geo_ch->center_freq =
476 ieee80211_channel_to_frequency(ch->channel);
477 geo_ch->max_power = ch->max_power_avg;
478 geo_ch->max_antenna_gain = 0xff;
479 geo_ch->hw_value = ch->channel;
480
481 if (is_channel_valid(ch)) {
482 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
483 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
484
485 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
486 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
487
488 if (ch->flags & EEPROM_CHANNEL_RADAR)
489 geo_ch->flags |= IEEE80211_CHAN_RADAR;
490
491 geo_ch->flags |= ch->fat_extension_channel;
492
493 if (ch->max_power_avg > priv->tx_power_channel_lmt)
494 priv->tx_power_channel_lmt = ch->max_power_avg;
495 } else {
496 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
497 }
498
499 /* Save flags for reg domain usage */
500 geo_ch->orig_flags = geo_ch->flags;
501
502 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
503 ch->channel, geo_ch->center_freq,
504 is_channel_a_band(ch) ? "5.2" : "2.4",
505 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
506 "restricted" : "valid",
507 geo_ch->flags);
508 }
509
510 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
511 priv->cfg->sku & IWL_SKU_A) {
512 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
513 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
514 priv->pci_dev->device,
515 priv->pci_dev->subsystem_device);
516 priv->cfg->sku &= ~IWL_SKU_A;
517 }
518
519 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
520 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
521 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
522
523 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
524
525 return 0;
526 }
527 EXPORT_SYMBOL(iwlcore_init_geos);
528
529 /*
530 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
531 */
532 void iwlcore_free_geos(struct iwl_priv *priv)
533 {
534 kfree(priv->ieee_channels);
535 kfree(priv->ieee_rates);
536 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
537 }
538 EXPORT_SYMBOL(iwlcore_free_geos);
539
540 static bool is_single_rx_stream(struct iwl_priv *priv)
541 {
542 return !priv->current_ht_config.is_ht ||
543 ((priv->current_ht_config.mcs.rx_mask[1] == 0) &&
544 (priv->current_ht_config.mcs.rx_mask[2] == 0));
545 }
546
547 static u8 iwl_is_channel_extension(struct iwl_priv *priv,
548 enum ieee80211_band band,
549 u16 channel, u8 extension_chan_offset)
550 {
551 const struct iwl_channel_info *ch_info;
552
553 ch_info = iwl_get_channel_info(priv, band, channel);
554 if (!is_channel_valid(ch_info))
555 return 0;
556
557 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
558 return !(ch_info->fat_extension_channel &
559 IEEE80211_CHAN_NO_FAT_ABOVE);
560 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
561 return !(ch_info->fat_extension_channel &
562 IEEE80211_CHAN_NO_FAT_BELOW);
563
564 return 0;
565 }
566
567 u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
568 struct ieee80211_sta_ht_cap *sta_ht_inf)
569 {
570 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
571
572 if ((!iwl_ht_conf->is_ht) ||
573 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
574 (iwl_ht_conf->extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_NONE))
575 return 0;
576
577 if (sta_ht_inf) {
578 if ((!sta_ht_inf->ht_supported) ||
579 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)))
580 return 0;
581 }
582
583 return iwl_is_channel_extension(priv, priv->band,
584 le16_to_cpu(priv->staging_rxon.channel),
585 iwl_ht_conf->extension_chan_offset);
586 }
587 EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
588
589 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
590 {
591 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
592 u32 val;
593
594 if (!ht_info->is_ht) {
595 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
596 RXON_FLG_CHANNEL_MODE_PURE_40_MSK |
597 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
598 RXON_FLG_FAT_PROT_MSK |
599 RXON_FLG_HT_PROT_MSK);
600 return;
601 }
602
603 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
604 if (iwl_is_fat_tx_allowed(priv, NULL))
605 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
606 else
607 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
608 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
609
610 /* Note: control channel is opposite of extension channel */
611 switch (ht_info->extension_chan_offset) {
612 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
613 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
614 break;
615 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
616 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
617 break;
618 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
619 default:
620 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
621 break;
622 }
623
624 val = ht_info->ht_protection;
625
626 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
627
628 iwl_set_rxon_chain(priv);
629
630 IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X "
631 "rxon flags 0x%X operation mode :0x%X "
632 "extension channel offset 0x%x\n",
633 ht_info->mcs.rx_mask[0],
634 ht_info->mcs.rx_mask[1],
635 ht_info->mcs.rx_mask[2],
636 le32_to_cpu(rxon->flags), ht_info->ht_protection,
637 ht_info->extension_chan_offset);
638 return;
639 }
640 EXPORT_SYMBOL(iwl_set_rxon_ht);
641
642 #define IWL_NUM_RX_CHAINS_MULTIPLE 3
643 #define IWL_NUM_RX_CHAINS_SINGLE 2
644 #define IWL_NUM_IDLE_CHAINS_DUAL 2
645 #define IWL_NUM_IDLE_CHAINS_SINGLE 1
646
647 /* Determine how many receiver/antenna chains to use.
648 * More provides better reception via diversity. Fewer saves power.
649 * MIMO (dual stream) requires at least 2, but works better with 3.
650 * This does not determine *which* chains to use, just how many.
651 */
652 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
653 {
654 bool is_single = is_single_rx_stream(priv);
655 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
656
657 /* # of Rx chains to use when expecting MIMO. */
658 if (is_single || (!is_cam && (priv->current_ht_config.sm_ps ==
659 WLAN_HT_CAP_SM_PS_STATIC)))
660 return IWL_NUM_RX_CHAINS_SINGLE;
661 else
662 return IWL_NUM_RX_CHAINS_MULTIPLE;
663 }
664
665 static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
666 {
667 int idle_cnt;
668 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
669 /* # Rx chains when idling and maybe trying to save power */
670 switch (priv->current_ht_config.sm_ps) {
671 case WLAN_HT_CAP_SM_PS_STATIC:
672 case WLAN_HT_CAP_SM_PS_DYNAMIC:
673 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
674 IWL_NUM_IDLE_CHAINS_SINGLE;
675 break;
676 case WLAN_HT_CAP_SM_PS_DISABLED:
677 idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
678 break;
679 case WLAN_HT_CAP_SM_PS_INVALID:
680 default:
681 IWL_ERR(priv, "invalid mimo ps mode %d\n",
682 priv->current_ht_config.sm_ps);
683 WARN_ON(1);
684 idle_cnt = -1;
685 break;
686 }
687 return idle_cnt;
688 }
689
690 /* up to 4 chains */
691 static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
692 {
693 u8 res;
694 res = (chain_bitmap & BIT(0)) >> 0;
695 res += (chain_bitmap & BIT(1)) >> 1;
696 res += (chain_bitmap & BIT(2)) >> 2;
697         res += (chain_bitmap & BIT(3)) >> 3;
698 return res;
699 }
700
701 /**
702 * iwl_is_monitor_mode - Determine if interface in monitor mode
703 *
704 * priv->iw_mode is set in add_interface, but add_interface is
705 * never called for monitor mode. The only way mac80211 informs us about
706 * monitor mode is through configuring filters (call to configure_filter).
707 */
708 static bool iwl_is_monitor_mode(struct iwl_priv *priv)
709 {
710 return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
711 }
712
713 /**
714 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
715 *
716 * Selects how many and which Rx receivers/antennas/chains to use.
717  * This should not be used for the scan command ... it puts data in the wrong place.
718 */
719 void iwl_set_rxon_chain(struct iwl_priv *priv)
720 {
721 bool is_single = is_single_rx_stream(priv);
722 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
723 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
724 u32 active_chains;
725 u16 rx_chain;
726
727 /* Tell uCode which antennas are actually connected.
728 * Before first association, we assume all antennas are connected.
729 * Just after first association, iwl_chain_noise_calibration()
730 * checks which antennas actually *are* connected. */
731 if (priv->chain_noise_data.active_chains)
732 active_chains = priv->chain_noise_data.active_chains;
733 else
734 active_chains = priv->hw_params.valid_rx_ant;
735
736 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
737
738 /* How many receivers should we use? */
739 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
740 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
741
742
743         /* correct rx chain count according to hw settings
744 * and chain noise calibration
745 */
746 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
747 if (valid_rx_cnt < active_rx_cnt)
748 active_rx_cnt = valid_rx_cnt;
749
750 if (valid_rx_cnt < idle_rx_cnt)
751 idle_rx_cnt = valid_rx_cnt;
752
753 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
754 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
755
756 /* copied from 'iwl_bg_request_scan()' */
757 /* Force use of chains B and C (0x6) for Rx for 4965
758 * Avoid A (0x1) because of its off-channel reception on A-band.
759 * MIMO is not used here, but value is required */
760 if (iwl_is_monitor_mode(priv) &&
761 !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
762 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
763 rx_chain = 0x07 << RXON_RX_CHAIN_VALID_POS;
764 rx_chain |= 0x06 << RXON_RX_CHAIN_FORCE_SEL_POS;
765 rx_chain |= 0x07 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
766 rx_chain |= 0x01 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
767 }
768
769 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
770
771 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
772 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
773 else
774 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
775
776 IWL_DEBUG_ASSOC("rx_chain=0x%X active=%d idle=%d\n",
777 priv->staging_rxon.rx_chain,
778 active_rx_cnt, idle_rx_cnt);
779
780 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
781 active_rx_cnt < idle_rx_cnt);
782 }
783 EXPORT_SYMBOL(iwl_set_rxon_chain);
784
785 /**
786  * iwl_set_rxon_channel - Set the band and channel values in staging RXON
787  * @priv: driver private data
788  * @ch: requested channel; its band selects 5.2GHz vs. 2.4GHz operation
789  *
790  * In addition to setting the staging RXON, priv->band is also set.
791  *
792  * NOTE: Does not commit to the hardware; it sets appropriate bit fields
793  * in the staging RXON flag structure based on the band
794 */
795 int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
796 {
797 enum ieee80211_band band = ch->band;
798 u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
799
800 if (!iwl_get_channel_info(priv, band, channel)) {
801 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
802 channel, band);
803 return -EINVAL;
804 }
805
806 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
807 (priv->band == band))
808 return 0;
809
810 priv->staging_rxon.channel = cpu_to_le16(channel);
811 if (band == IEEE80211_BAND_5GHZ)
812 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
813 else
814 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
815
816 priv->band = band;
817
818 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);
819
820 return 0;
821 }
822 EXPORT_SYMBOL(iwl_set_rxon_channel);
823
824 int iwl_setup_mac(struct iwl_priv *priv)
825 {
826 int ret;
827 struct ieee80211_hw *hw = priv->hw;
828 hw->rate_control_algorithm = "iwl-agn-rs";
829
830 /* Tell mac80211 our characteristics */
831 hw->flags = IEEE80211_HW_SIGNAL_DBM |
832 IEEE80211_HW_NOISE_DBM |
833 IEEE80211_HW_AMPDU_AGGREGATION |
834 IEEE80211_HW_SUPPORTS_PS;
835 hw->wiphy->interface_modes =
836 BIT(NL80211_IFTYPE_STATION) |
837 BIT(NL80211_IFTYPE_ADHOC);
838
839 hw->wiphy->custom_regulatory = true;
840
841 /* Default value; 4 EDCA QOS priorities */
842 hw->queues = 4;
843 /* queues to support 11n aggregation */
844 if (priv->cfg->sku & IWL_SKU_N)
845 hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;
846
847 hw->conf.beacon_int = 100;
848 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
849
850 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
851 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
852 &priv->bands[IEEE80211_BAND_2GHZ];
853 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
854 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
855 &priv->bands[IEEE80211_BAND_5GHZ];
856
857 ret = ieee80211_register_hw(priv->hw);
858 if (ret) {
859 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
860 return ret;
861 }
862 priv->mac80211_registered = 1;
863
864 return 0;
865 }
866 EXPORT_SYMBOL(iwl_setup_mac);
867
868 int iwl_set_hw_params(struct iwl_priv *priv)
869 {
870 priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
871 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
872 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
873 if (priv->cfg->mod_params->amsdu_size_8K)
874 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
875 else
876 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
877 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
878
879 if (priv->cfg->mod_params->disable_11n)
880 priv->cfg->sku &= ~IWL_SKU_N;
881
882 /* Device-specific setup */
883 return priv->cfg->ops->lib->set_hw_params(priv);
884 }
885 EXPORT_SYMBOL(iwl_set_hw_params);
886
887 int iwl_init_drv(struct iwl_priv *priv)
888 {
889 int ret;
890
891 priv->ibss_beacon = NULL;
892
893 spin_lock_init(&priv->lock);
894 spin_lock_init(&priv->power_data.lock);
895 spin_lock_init(&priv->sta_lock);
896 spin_lock_init(&priv->hcmd_lock);
897
898 INIT_LIST_HEAD(&priv->free_frames);
899
900 mutex_init(&priv->mutex);
901
902 /* Clear the driver's (not device's) station table */
903 iwl_clear_stations_table(priv);
904
905 priv->data_retry_limit = -1;
906 priv->ieee_channels = NULL;
907 priv->ieee_rates = NULL;
908 priv->band = IEEE80211_BAND_2GHZ;
909
910 priv->iw_mode = NL80211_IFTYPE_STATION;
911
912 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
913
914 /* Choose which receivers/antennas to use */
915 iwl_set_rxon_chain(priv);
916 iwl_init_scan_params(priv);
917
918 iwl_reset_qos(priv);
919
920 priv->qos_data.qos_active = 0;
921 priv->qos_data.qos_cap.val = 0;
922
923 priv->rates_mask = IWL_RATES_MASK;
924 /* If power management is turned on, default to AC mode */
925 priv->power_mode = IWL_POWER_AC;
926 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MAX;
927
928 ret = iwl_init_channel_map(priv);
929 if (ret) {
930 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
931 goto err;
932 }
933
934 ret = iwlcore_init_geos(priv);
935 if (ret) {
936 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
937 goto err_free_channel_map;
938 }
939 iwlcore_init_hw_rates(priv, priv->ieee_rates);
940
941 return 0;
942
943 err_free_channel_map:
944 iwl_free_channel_map(priv);
945 err:
946 return ret;
947 }
948 EXPORT_SYMBOL(iwl_init_drv);
949
950 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
951 {
952 int ret = 0;
953 if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
954 IWL_WARN(priv, "Requested user TXPOWER %d below limit.\n",
955                          tx_power);
956 return -EINVAL;
957 }
958
959 if (tx_power > IWL_TX_POWER_TARGET_POWER_MAX) {
960 IWL_WARN(priv, "Requested user TXPOWER %d above limit.\n",
961                          tx_power);
962 return -EINVAL;
963 }
964
965 if (priv->tx_power_user_lmt != tx_power)
966 force = true;
967
968 priv->tx_power_user_lmt = tx_power;
969
970 if (force && priv->cfg->ops->lib->send_tx_power)
971 ret = priv->cfg->ops->lib->send_tx_power(priv);
972
973 return ret;
974 }
975 EXPORT_SYMBOL(iwl_set_tx_power);
976
977 void iwl_uninit_drv(struct iwl_priv *priv)
978 {
979 iwl_calib_free_results(priv);
980 iwlcore_free_geos(priv);
981 iwl_free_channel_map(priv);
982 kfree(priv->scan);
983 }
984 EXPORT_SYMBOL(iwl_uninit_drv);
985
986
987 void iwl_disable_interrupts(struct iwl_priv *priv)
988 {
989 clear_bit(STATUS_INT_ENABLED, &priv->status);
990
991 /* disable interrupts from uCode/NIC to host */
992 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
993
994 /* acknowledge/clear/reset any interrupts still pending
995 * from uCode or flow handler (Rx/Tx DMA) */
996 iwl_write32(priv, CSR_INT, 0xffffffff);
997 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
998 IWL_DEBUG_ISR("Disabled interrupts\n");
999 }
1000 EXPORT_SYMBOL(iwl_disable_interrupts);
1001
1002 void iwl_enable_interrupts(struct iwl_priv *priv)
1003 {
1004 IWL_DEBUG_ISR("Enabling interrupts\n");
1005 set_bit(STATUS_INT_ENABLED, &priv->status);
1006 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
1007 }
1008 EXPORT_SYMBOL(iwl_enable_interrupts);
1009
1010 int iwl_send_bt_config(struct iwl_priv *priv)
1011 {
1012 struct iwl_bt_cmd bt_cmd = {
1013 .flags = 3,
1014 .lead_time = 0xAA,
1015 .max_kill = 1,
1016 .kill_ack_mask = 0,
1017 .kill_cts_mask = 0,
1018 };
1019
1020 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1021 sizeof(struct iwl_bt_cmd), &bt_cmd);
1022 }
1023 EXPORT_SYMBOL(iwl_send_bt_config);
1024
1025 int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
1026 {
1027 u32 stat_flags = 0;
1028 struct iwl_host_cmd cmd = {
1029 .id = REPLY_STATISTICS_CMD,
1030 .meta.flags = flags,
1031 .len = sizeof(stat_flags),
1032 .data = (u8 *) &stat_flags,
1033 };
1034 return iwl_send_cmd(priv, &cmd);
1035 }
1036 EXPORT_SYMBOL(iwl_send_statistics_request);
1037
1038 /**
1039  * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
1040 * using sample data 100 bytes apart. If these sample points are good,
1041 * it's a pretty good bet that everything between them is good, too.
1042 */
1043 static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1044 {
1045 u32 val;
1046 int ret = 0;
1047 u32 errcnt = 0;
1048 u32 i;
1049
1050 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
1051
1052 ret = iwl_grab_nic_access(priv);
1053 if (ret)
1054 return ret;
1055
1056 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1057 /* read data comes through single port, auto-incr addr */
1058 /* NOTE: Use the debugless read so we don't flood kernel log
1059 * if IWL_DL_IO is set */
1060 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1061 i + IWL49_RTC_INST_LOWER_BOUND);
1062 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1063 if (val != le32_to_cpu(*image)) {
1064 ret = -EIO;
1065 errcnt++;
1066 if (errcnt >= 3)
1067 break;
1068 }
1069 }
1070
1071 iwl_release_nic_access(priv);
1072
1073 return ret;
1074 }
1075
1076 /**
1077  * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
1078 * looking at all data.
1079 */
1080 static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1081 u32 len)
1082 {
1083 u32 val;
1084 u32 save_len = len;
1085 int ret = 0;
1086 u32 errcnt;
1087
1088 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
1089
1090 ret = iwl_grab_nic_access(priv);
1091 if (ret)
1092 return ret;
1093
1094 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1095 IWL49_RTC_INST_LOWER_BOUND);
1096
1097 errcnt = 0;
1098 for (; len > 0; len -= sizeof(u32), image++) {
1099 /* read data comes through single port, auto-incr addr */
1100 /* NOTE: Use the debugless read so we don't flood kernel log
1101 * if IWL_DL_IO is set */
1102 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1103 if (val != le32_to_cpu(*image)) {
1104 IWL_ERR(priv, "uCode INST section is invalid at "
1105 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1106 save_len - len, val, le32_to_cpu(*image));
1107 ret = -EIO;
1108 errcnt++;
1109 if (errcnt >= 20)
1110 break;
1111 }
1112 }
1113
1114 iwl_release_nic_access(priv);
1115
1116 if (!errcnt)
1117 IWL_DEBUG_INFO
1118 ("ucode image in INSTRUCTION memory is good\n");
1119
1120 return ret;
1121 }
1122
1123 /**
1124 * iwl_verify_ucode - determine which instruction image is in SRAM,
1125 * and verify its contents
1126 */
1127 int iwl_verify_ucode(struct iwl_priv *priv)
1128 {
1129 __le32 *image;
1130 u32 len;
1131 int ret;
1132
1133 /* Try bootstrap */
1134 image = (__le32 *)priv->ucode_boot.v_addr;
1135 len = priv->ucode_boot.len;
1136 ret = iwlcore_verify_inst_sparse(priv, image, len);
1137 if (!ret) {
1138 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
1139 return 0;
1140 }
1141
1142 /* Try initialize */
1143 image = (__le32 *)priv->ucode_init.v_addr;
1144 len = priv->ucode_init.len;
1145 ret = iwlcore_verify_inst_sparse(priv, image, len);
1146 if (!ret) {
1147 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
1148 return 0;
1149 }
1150
1151 /* Try runtime/protocol */
1152 image = (__le32 *)priv->ucode_code.v_addr;
1153 len = priv->ucode_code.len;
1154 ret = iwlcore_verify_inst_sparse(priv, image, len);
1155 if (!ret) {
1156 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
1157 return 0;
1158 }
1159
1160 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1161
1162 /* Since nothing seems to match, show first several data entries in
1163 * instruction SRAM, so maybe visual inspection will give a clue.
1164 * Selection of bootstrap image (vs. other images) is arbitrary. */
1165 image = (__le32 *)priv->ucode_boot.v_addr;
1166 len = priv->ucode_boot.len;
1167 ret = iwl_verify_inst_full(priv, image, len);
1168
1169 return ret;
1170 }
1171 EXPORT_SYMBOL(iwl_verify_ucode);
1172
1173
1174 static const char *desc_lookup_text[] = {
1175 "OK",
1176 "FAIL",
1177 "BAD_PARAM",
1178 "BAD_CHECKSUM",
1179 "NMI_INTERRUPT_WDG",
1180 "SYSASSERT",
1181 "FATAL_ERROR",
1182 "BAD_COMMAND",
1183 "HW_ERROR_TUNE_LOCK",
1184 "HW_ERROR_TEMPERATURE",
1185 "ILLEGAL_CHAN_FREQ",
1186 "VCC_NOT_STABLE",
1187 "FH_ERROR",
1188 "NMI_INTERRUPT_HOST",
1189 "NMI_INTERRUPT_ACTION_PT",
1190 "NMI_INTERRUPT_UNKNOWN",
1191 "UCODE_VERSION_MISMATCH",
1192 "HW_ERROR_ABS_LOCK",
1193 "HW_ERROR_CAL_LOCK_FAIL",
1194 "NMI_INTERRUPT_INST_ACTION_PT",
1195 "NMI_INTERRUPT_DATA_ACTION_PT",
1196 "NMI_TRM_HW_ER",
1197 "NMI_INTERRUPT_TRM",
1198 "NMI_INTERRUPT_BREAK_POINT"
1199 "DEBUG_0",
1200 "DEBUG_1",
1201 "DEBUG_2",
1202 "DEBUG_3",
1203 "UNKNOWN"
1204 };
1205
1206 static const char *desc_lookup(int i)
1207 {
1208 int max = ARRAY_SIZE(desc_lookup_text) - 1;
1209
1210 if (i < 0 || i > max)
1211 i = max;
1212
1213 return desc_lookup_text[i];
1214 }
1215
1216 #define ERROR_START_OFFSET (1 * sizeof(u32))
1217 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
1218
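/*
 * A sketch of the error log layout in SRAM as read by
 * iwl_dump_nic_error_log() below (u32 word offsets from 'base'; words 2
 * and 10 are not read here):
 *
 *   word  0: count     word  1: desc      word  3: blink1
 *   word  4: blink2    word  5: ilink1    word  6: ilink2
 *   word  7: data1     word  8: data2     word  9: line
 *   word 11: time
 */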
1219 void iwl_dump_nic_error_log(struct iwl_priv *priv)
1220 {
1221 u32 data2, line;
1222 u32 desc, time, count, base, data1;
1223 u32 blink1, blink2, ilink1, ilink2;
1224 int ret;
1225
1226 if (priv->ucode_type == UCODE_INIT)
1227 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1228 else
1229 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1230
1231 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1232 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1233 return;
1234 }
1235
1236 ret = iwl_grab_nic_access(priv);
1237 if (ret) {
1238 IWL_WARN(priv, "Can not read from adapter at this time.\n");
1239 return;
1240 }
1241
1242 count = iwl_read_targ_mem(priv, base);
1243
1244 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1245 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1246 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1247 priv->status, count);
1248 }
1249
1250 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1251 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1252 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1253 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1254 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1255 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1256 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1257 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1258 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1259
1260 IWL_ERR(priv, "Desc Time "
1261 "data1 data2 line\n");
1262 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1263 desc_lookup(desc), desc, time, data1, data2, line);
1264 IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
1265 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1266 ilink1, ilink2);
1267
1268 iwl_release_nic_access(priv);
1269 }
1270 EXPORT_SYMBOL(iwl_dump_nic_error_log);
1271
1272 #define EVENT_START_OFFSET (4 * sizeof(u32))
1273
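/*
 * A sketch of the event log layout in SRAM as read by the dump functions
 * below (byte offsets from 'base'):
 *
 *   base + 0x00: capacity   (max # of entries the log can hold)
 *   base + 0x04: mode       (0 = 2 u32s per entry, else 3 u32s per entry)
 *   base + 0x08: num_wraps  (# of times uCode wrapped to the top of the log)
 *   base + 0x0c: next_entry (index of the next entry uCode will write)
 *   base + 0x10: event entries start here (EVENT_START_OFFSET)
 */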
1274 /**
1275  * iwl_print_event_log - Dump a range of uCode event log entries to syslog
1276 *
1277 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
1278 */
1279 static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1280 u32 num_events, u32 mode)
1281 {
1282 u32 i;
1283 u32 base; /* SRAM byte address of event log header */
1284 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1285 u32 ptr; /* SRAM byte address of log data */
1286 u32 ev, time, data; /* event log data */
1287
1288 if (num_events == 0)
1289 return;
1290 if (priv->ucode_type == UCODE_INIT)
1291 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1292 else
1293 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1294
1295 if (mode == 0)
1296 event_size = 2 * sizeof(u32);
1297 else
1298 event_size = 3 * sizeof(u32);
1299
1300 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1301
1302 /* "time" is actually "data" for mode 0 (no timestamp).
1303 * place event id # at far right for easier visual parsing. */
1304 for (i = 0; i < num_events; i++) {
1305 ev = iwl_read_targ_mem(priv, ptr);
1306 ptr += sizeof(u32);
1307 time = iwl_read_targ_mem(priv, ptr);
1308 ptr += sizeof(u32);
1309 if (mode == 0) {
1310 /* data, ev */
1311 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
1312 } else {
1313 data = iwl_read_targ_mem(priv, ptr);
1314 ptr += sizeof(u32);
1315 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1316 time, data, ev);
1317 }
1318 }
1319 }
1320
1321 void iwl_dump_nic_event_log(struct iwl_priv *priv)
1322 {
1323 int ret;
1324 u32 base; /* SRAM byte address of event log header */
1325 u32 capacity; /* event log capacity in # entries */
1326 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1327 u32 num_wraps; /* # times uCode wrapped to top of log */
1328 u32 next_entry; /* index of next entry to be written by uCode */
1329 u32 size; /* # entries that we'll print */
1330
1331 if (priv->ucode_type == UCODE_INIT)
1332 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1333 else
1334 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1335
1336 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1337 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1338 return;
1339 }
1340
1341 ret = iwl_grab_nic_access(priv);
1342 if (ret) {
1343 IWL_WARN(priv, "Can not read from adapter at this time.\n");
1344 return;
1345 }
1346
1347 /* event log header */
1348 capacity = iwl_read_targ_mem(priv, base);
1349 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1350 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1351 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1352
1353 size = num_wraps ? capacity : next_entry;
1354
1355 /* bail out if nothing in log */
1356 if (size == 0) {
1357 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1358 iwl_release_nic_access(priv);
1359 return;
1360 }
1361
1362 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
1363 size, num_wraps);
1364
1365 /* if uCode has wrapped back to top of log, start at the oldest entry,
1366          * i.e. the next one that uCode would fill. */
1367 if (num_wraps)
1368 iwl_print_event_log(priv, next_entry,
1369 capacity - next_entry, mode);
1370 /* (then/else) start at top of log */
1371 iwl_print_event_log(priv, 0, next_entry, mode);
1372
1373 iwl_release_nic_access(priv);
1374 }
1375 EXPORT_SYMBOL(iwl_dump_nic_event_log);
1376
1377 void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1378 {
1379 struct iwl_ct_kill_config cmd;
1380 unsigned long flags;
1381 int ret = 0;
1382
1383 spin_lock_irqsave(&priv->lock, flags);
1384 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1385 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1386 spin_unlock_irqrestore(&priv->lock, flags);
1387
1388 cmd.critical_temperature_R =
1389 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1390
1391 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1392 sizeof(cmd), &cmd);
1393 if (ret)
1394 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1395 else
1396 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, "
1397 "critical temperature is %d\n",
1398 cmd.critical_temperature_R);
1399 }
1400 EXPORT_SYMBOL(iwl_rf_kill_ct_config);
1401
1402
1403 /*
1404 * CARD_STATE_CMD
1405 *
1406 * Use: Sets the device's internal card state to enable, disable, or halt
1407 *
1408 * When in the 'enable' state the card operates as normal.
1409 * When in the 'disable' state, the card enters into a low power mode.
1410 * When in the 'halt' state, the card is shut down and must be fully
1411 * restarted to come back on.
1412 */
1413 int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1414 {
1415 struct iwl_host_cmd cmd = {
1416 .id = REPLY_CARD_STATE_CMD,
1417 .len = sizeof(u32),
1418 .data = &flags,
1419 .meta.flags = meta_flag,
1420 };
1421
1422 return iwl_send_cmd(priv, &cmd);
1423 }
1424 EXPORT_SYMBOL(iwl_send_card_state);
1425
1426 void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
1427 {
1428 unsigned long flags;
1429
1430 if (test_bit(STATUS_RF_KILL_SW, &priv->status))
1431 return;
1432
1433 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO OFF\n");
1434
1435 iwl_scan_cancel(priv);
1436 /* FIXME: This is a workaround for AP */
1437 if (priv->iw_mode != NL80211_IFTYPE_AP) {
1438 spin_lock_irqsave(&priv->lock, flags);
1439 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
1440 CSR_UCODE_SW_BIT_RFKILL);
1441 spin_unlock_irqrestore(&priv->lock, flags);
1442 /* call the host command only if no hw rf-kill set */
1443 if (!test_bit(STATUS_RF_KILL_HW, &priv->status) &&
1444 iwl_is_ready(priv))
1445 iwl_send_card_state(priv,
1446 CARD_STATE_CMD_DISABLE, 0);
1447 set_bit(STATUS_RF_KILL_SW, &priv->status);
1448                 /* make sure mac80211 stops sending Tx frames */
1449 if (priv->mac80211_registered)
1450 ieee80211_stop_queues(priv->hw);
1451 }
1452 }
1453 EXPORT_SYMBOL(iwl_radio_kill_sw_disable_radio);
1454
1455 int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
1456 {
1457 unsigned long flags;
1458
1459 if (!test_bit(STATUS_RF_KILL_SW, &priv->status))
1460 return 0;
1461
1462 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO ON\n");
1463
1464 spin_lock_irqsave(&priv->lock, flags);
1465 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1466
1467         /* If the driver is up, it will receive the CARD_STATE_NOTIFICATION
1468          * and the handler will clear the SW rfkill status there.
1469          * Setting it here would break that handler. Only if the
1470          * interface is down can we clear it here, since we will not
1471          * receive any further notification.
1472 */
1473 if (!priv->is_open)
1474 clear_bit(STATUS_RF_KILL_SW, &priv->status);
1475 spin_unlock_irqrestore(&priv->lock, flags);
1476
1477 /* wake up ucode */
1478 msleep(10);
1479
1480 spin_lock_irqsave(&priv->lock, flags);
1481 iwl_read32(priv, CSR_UCODE_DRV_GP1);
1482 if (!iwl_grab_nic_access(priv))
1483 iwl_release_nic_access(priv);
1484 spin_unlock_irqrestore(&priv->lock, flags);
1485
1486 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
1487 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
1488 "disabled by HW switch\n");
1489 return 0;
1490 }
1491
1492         /* when the driver is up while rfkill is on, it won't receive
1493          * any CARD_STATE_NOTIFICATION notifications, so we have to
1494          * restart it here
1495 */
1496 if (priv->is_open && !test_bit(STATUS_ALIVE, &priv->status)) {
1497 clear_bit(STATUS_RF_KILL_SW, &priv->status);
1498 if (!iwl_is_rfkill(priv))
1499 queue_work(priv->workqueue, &priv->up);
1500 }
1501
1502 /* If the driver is already loaded, it will receive
1503 * CARD_STATE_NOTIFICATION notifications and the handler will
1504 * call restart to reload the driver.
1505 */
1506 return 1;
1507 }
1508 EXPORT_SYMBOL(iwl_radio_kill_sw_enable_radio);
1509
1510 void iwl_bg_rf_kill(struct work_struct *work)
1511 {
1512 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
1513
1514 wake_up_interruptible(&priv->wait_command_queue);
1515
1516 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1517 return;
1518
1519 mutex_lock(&priv->mutex);
1520
1521 if (!iwl_is_rfkill(priv)) {
1522 IWL_DEBUG(IWL_DL_RF_KILL,
1523 "HW and/or SW RF Kill no longer active, restarting "
1524 "device\n");
1525 if (!test_bit(STATUS_EXIT_PENDING, &priv->status) &&
1526 test_bit(STATUS_ALIVE, &priv->status))
1527 queue_work(priv->workqueue, &priv->restart);
1528 } else {
1529                 /* make sure mac80211 stops sending Tx frames */
1530 if (priv->mac80211_registered)
1531 ieee80211_stop_queues(priv->hw);
1532
1533 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
1534 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
1535 "disabled by SW switch\n");
1536 else
1537 IWL_WARN(priv, "Radio Frequency Kill Switch is On:\n"
1538 "Kill switch must be turned off for "
1539 "wireless networking to work.\n");
1540 }
1541 mutex_unlock(&priv->mutex);
1542 iwl_rfkill_set_hw_state(priv);
1543 }
1544 EXPORT_SYMBOL(iwl_bg_rf_kill);