drivers/net/wireless/iwlwifi/mvm/nvm.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/firmware.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "iwl-eeprom-parse.h"
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"

/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 7000

#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	READ_NVM_CHUNK_SUCCEED = 0,
	READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

/*
 * Prepare the NVM host command with pointers to the NVM buffer and send it
 * to the firmware.
 */
static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
			       u16 offset, u16 length, const u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_WRITE_OPCODE,
	};
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.len = { sizeof(struct iwl_nvm_access_cmd), length },
		.flags = CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, data },
		/* data may come from vmalloc, so use _DUP */
		.dataflags = { 0, IWL_HCMD_DFL_DUP },
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}

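/*
 * Read one chunk of an NVM section from the firmware with NVM_ACCESS_CMD and
 * copy the returned bytes into the caller's buffer at the requested offset.
 * Returns the number of bytes read, or a negative error code.
 */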
static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
			      u16 offset, u16 length, u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_READ_OPCODE,
	};
	struct iwl_nvm_access_resp *nvm_resp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	u8 *resp_data;

	cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(mvm, "Bad return from NVM_ACCESS_COMMAND (0x%08X)\n",
			pkt->hdr.flags);
		ret = -EIO;
		goto exit;
	}

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16_to_cpu(nvm_resp->status);
	bytes_read = le16_to_cpu(nvm_resp->length);
	offset_read = le16_to_cpu(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk from an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver already read
			 * valid data from another chunk, so this case is not
			 * an error.
			 */
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
					 offset);
			ret = 0;
		} else {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed with status %d (device: %s)\n",
					 ret, mvm->cfg->name);
			ret = -EIO;
		}
		goto exit;
	}

	if (offset_read != offset) {
		IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
			offset_read);
		ret = -EINVAL;
		goto exit;
	}

	/* Copy the chunk data into the caller's NVM buffer */
	memcpy(data + offset, resp_data, bytes_read);
	ret = bytes_read;

exit:
	iwl_free_resp(&cmd);
	return ret;
}

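/*
 * Write an entire NVM section to the device, splitting the data into chunks
 * of at most 2 kB so that each one fits in a single NVM_ACCESS_CMD.
 */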
static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
				 const u8 *data, u16 length)
{
	int offset = 0;

	/* copy data in chunks of 2k (and remainder if any) */

	while (offset < length) {
		int chunk_size, ret;

		chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
				 length - offset);

		ret = iwl_nvm_write_chunk(mvm, section, offset,
					  chunk_size, data + offset);
		if (ret < 0)
			return ret;

		offset += chunk_size;
	}

	return 0;
}

/*
 * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM. Because EEPROM reading is not limited
 * by the uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as it can without
 * overflowing, so no check is needed.
 */
static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
				u8 *data)
{
	u16 length, offset = 0;
	int ret;

	/* Set nvm section read length */
	length = IWL_NVM_DEFAULT_CHUNK_SIZE;

	ret = length;

	/* Read the NVM until exhausted (reading less than requested) */
	while (ret == length) {
		ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
		if (ret < 0) {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "Cannot read NVM from section %d offset %d, length %d\n",
					 section, offset, length);
			return ret;
		}
		offset += ret;
	}

	IWL_DEBUG_EEPROM(mvm->trans->dev,
			 "NVM section %d read completed\n", section);
	return offset;
}

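/*
 * Validate that the NVM sections required for this device family are present
 * in mvm->nvm_sections and parse them into an iwl_nvm_data structure.
 */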
static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
	struct iwl_nvm_section *sections = mvm->nvm_sections;
	const __le16 *hw, *sw, *calib, *regulatory, *mac_override;

	/* Checking for required sections */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
			IWL_ERR(mvm, "Can't parse empty NVM sections\n");
			return NULL;
		}
	} else {
		/* SW and REGULATORY sections are mandatory */
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
			IWL_ERR(mvm,
				"Can't parse empty family 8000 NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
		    !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			IWL_ERR(mvm,
				"Can't parse mac_address, empty sections\n");
			return NULL;
		}
	}

	if (WARN_ON(!mvm->cfg))
		return NULL;

	hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
	sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
	calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
	mac_override =
		(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;

	return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
				  regulatory, mac_override,
				  mvm->fw->valid_tx_ant,
				  mvm->fw->valid_rx_ant);
}

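/* Maximum size, in bytes, accepted for an external NVM file */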
#define MAX_NVM_FILE_LEN 16384

/*
 * Reads external NVM from a file into mvm->nvm_sections
 *
 * HOW TO CREATE THE NVM FILE:
 * ------------------------------
 * 1. create a hex file in this format:
 * 3800 -> header
 * 0000 -> header
 * 5a40 -> data
 *
 * rev - 6 bit (word1)
 * len - 10 bit (word1)
 * id - 4 bit (word2)
 * rsv - 12 bit (word2)
 *
 * 2. byte-swap each 16-bit word (flip the two 8-bit halves) per line to get
 *    the right NVM file format
 *
 * 3. create a binary file from the hex file
 *
 * 4. save as "iNVM_xxx.bin" under /lib/firmware
 */
static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
	int ret, section_size;
	u16 section_id;
	const struct firmware *fw_entry;
	const struct {
		__le16 word1;
		__le16 word2;
		u8 data[];
	} *file_sec;
	const u8 *eof, *temp;

#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
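
	/*
	 * Illustrative decode with hypothetical header values (not taken from
	 * a real NVM file): on a pre-8000 family device, word1 = 0x0123 and
	 * word2 = 0x5000 would yield
	 * section_size = 2 * NVM_WORD1_LEN(0x0123) = 2 * 8 * 0x123 = 4656 bytes
	 * and section_id = NVM_WORD2_ID(0x5000) = 5.
	 */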

	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");

	/*
	 * Obtain NVM image via request_firmware. Since we already used
	 * request_firmware_nowait() for the firmware binary load and only
	 * get here after that we assume the NVM request can be satisfied
	 * synchronously.
	 */
	ret = request_firmware(&fw_entry, mvm->nvm_file_name,
			       mvm->trans->dev);
	if (ret) {
		IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
			mvm->nvm_file_name, ret);
		return ret;
	}

	IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
		 mvm->nvm_file_name, fw_entry->size);

	if (fw_entry->size < sizeof(*file_sec)) {
		IWL_ERR(mvm, "NVM file too small\n");
		ret = -EINVAL;
		goto out;
	}

	if (fw_entry->size > MAX_NVM_FILE_LEN) {
		IWL_ERR(mvm, "NVM file too large\n");
		ret = -EINVAL;
		goto out;
	}

	eof = fw_entry->data + fw_entry->size;

	file_sec = (void *)fw_entry->data;

	while (true) {
		if (file_sec->data > eof) {
			IWL_ERR(mvm,
				"ERROR - NVM file too short for section header\n");
			ret = -EINVAL;
			break;
		}

		/* check for EOF marker */
		if (!file_sec->word1 && !file_sec->word2) {
			ret = 0;
			break;
		}

		if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
			section_size =
				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
		} else {
			section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
						le16_to_cpu(file_sec->word2));
			section_id = NVM_WORD1_ID_FAMILY_8000(
						le16_to_cpu(file_sec->word1));
		}

		if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
			IWL_ERR(mvm, "ERROR - section too large (%d)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (!section_size) {
			IWL_ERR(mvm, "ERROR - section empty\n");
			ret = -EINVAL;
			break;
		}

		if (file_sec->data + section_size > eof) {
			IWL_ERR(mvm,
				"ERROR - NVM file too short for section (%d bytes)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
			 "Invalid NVM section ID %d\n", section_id)) {
			ret = -EINVAL;
			break;
		}

		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
		if (!temp) {
			ret = -ENOMEM;
			break;
		}
		mvm->nvm_sections[section_id].data = temp;
		mvm->nvm_sections[section_id].length = section_size;

		/* advance to the next section */
		file_sec = (void *)(file_sec->data + section_size);
	}
out:
	release_firmware(fw_entry);
	return ret;
}

/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
{
	int i, ret = 0;
	struct iwl_nvm_section *sections = mvm->nvm_sections;

	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");

	for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
		if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
			continue;
		ret = iwl_nvm_write_section(mvm, i, sections[i].data,
					    sections[i].length);
		if (ret < 0) {
			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
			break;
		}
	}
	return ret;
}

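/*
 * Initialize NVM data for the device: read all NVM sections from the NIC if
 * requested, load sections from an external NVM file if one is configured,
 * and parse the collected sections into mvm->nvm_data.
 */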
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
{
	int ret, section;
	u8 *nvm_buffer, *temp;

	if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
		return -EINVAL;

	/* load NVM values from nic */
	if (read_nvm_from_nic) {
		/* Read From FW NVM */
		IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");

		nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
				     GFP_KERNEL);
		if (!nvm_buffer)
			return -ENOMEM;
		for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
			/* we override the constness for initial read */
			ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
			if (ret < 0)
				continue;
			temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
			if (!temp) {
				ret = -ENOMEM;
				break;
			}
			mvm->nvm_sections[section].data = temp;
			mvm->nvm_sections[section].length = ret;

#ifdef CONFIG_IWLWIFI_DEBUGFS
			switch (section) {
			case NVM_SECTION_TYPE_SW:
				mvm->nvm_sw_blob.data = temp;
				mvm->nvm_sw_blob.size = ret;
				break;
			case NVM_SECTION_TYPE_CALIBRATION:
				mvm->nvm_calib_blob.data = temp;
				mvm->nvm_calib_blob.size = ret;
				break;
			case NVM_SECTION_TYPE_PRODUCTION:
				mvm->nvm_prod_blob.data = temp;
				mvm->nvm_prod_blob.size = ret;
				break;
			default:
				if (section == mvm->cfg->nvm_hw_section_num) {
					mvm->nvm_hw_blob.data = temp;
					mvm->nvm_hw_blob.size = ret;
					break;
				}
			}
#endif
		}
		kfree(nvm_buffer);
	}

	/* load external NVM if configured */
	if (mvm->nvm_file_name) {
		/* move to External NVM flow */
		ret = iwl_mvm_read_external_nvm(mvm);
		if (ret)
			return ret;
	}

	/* parse the relevant nvm sections */
	mvm->nvm_data = iwl_parse_nvm_sections(mvm);
	if (!mvm->nvm_data)
		return -ENODATA;

	return 0;
}