drivers/net/ethernet/intel/i40e/i40e_nvm.c
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_prototype.h"

/**
 * i40e_init_nvm - Initialize NVM
 * @hw: pointer to the HW structure
 *
 * Sets up the NVM info structure. Should be called once per NVM
 * initialization, e.g. inside i40e_init_shared_code().
 * Note that the term NVM is used here (and in all functions in this file)
 * for the FLASH part mapped into the Shadow RAM (SR); the FLASH is always
 * accessed through the Shadow RAM.
 **/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
        struct i40e_nvm_info *nvm = &hw->nvm;
        i40e_status ret_code = 0;
        u32 fla, gens;
        u8 sr_size;

        /* The SR size is stored regardless of the nvm programming mode
         * as the blank mode may be used in the factory line.
         */
        gens = rd32(hw, I40E_GLNVM_GENS);
        sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
                   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
        /* Switching to words (sr_size contains power of 2KB) */
        nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;

        /* Check if we are in the normal or blank NVM programming mode */
        fla = rd32(hw, I40E_GLNVM_FLA);
        if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
                /* Max NVM timeout */
                nvm->timeout = I40E_MAX_NVM_TIMEOUT;
                nvm->blank_nvm_mode = false;
        } else { /* Blank programming mode */
                nvm->blank_nvm_mode = true;
                ret_code = I40E_ERR_NVM_BLANK_MODE;
                hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
        }

        return ret_code;
}
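
/* For orientation (illustrative comment, not part of the original flow):
 * since I40E_SR_WORDS_IN_1KB is 512 sixteen-bit words, a GENS.SR_SIZE field
 * of 6 would give (1 << 6) * 512 = 32768 words, i.e. the 64kB Shadow RAM
 * that the checksum code later in this file assumes.
 */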

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership via the proper Admin Command.
 **/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
                             enum i40e_aq_resource_access_type access)
{
        i40e_status ret_code = 0;
        u64 gtime, timeout;
        u64 time = 0;

        if (hw->nvm.blank_nvm_mode)
                goto i40e_acquire_nvm_exit;

        ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
                                            0, &time, NULL);
        /* Reading the Global Device Timer */
        gtime = rd32(hw, I40E_GLVFGEN_TIMER);

        /* Store the timeout */
        hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;

        if (ret_code) {
                /* Set the polling timeout */
                if (time > I40E_MAX_NVM_TIMEOUT)
                        timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
                                  + gtime;
                else
                        timeout = hw->nvm.hw_semaphore_timeout;
                /* Poll until the current NVM owner times out */
                while (gtime < timeout) {
                        usleep_range(10000, 20000);
                        ret_code = i40e_aq_request_resource(hw,
                                                        I40E_NVM_RESOURCE_ID,
                                                        access, 0, &time,
                                                        NULL);
                        if (!ret_code) {
                                hw->nvm.hw_semaphore_timeout =
                                                I40E_MS_TO_GTIME(time) + gtime;
                                break;
                        }
                        gtime = rd32(hw, I40E_GLVFGEN_TIMER);
                }
                if (ret_code) {
                        hw->nvm.hw_semaphore_timeout = 0;
                        hw->nvm.hw_semaphore_wait =
                                                I40E_MS_TO_GTIME(time) + gtime;
                        hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
                               time);
                }
        }

i40e_acquire_nvm_exit:
        return ret_code;
}
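
/* Usage sketch (it mirrors the I40E_NVMUPD_READ_SA path handled later in this
 * file): take the resource, perform the Shadow RAM access, then release it.
 * The local variables below are illustrative only.
 *
 *	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 *	if (!ret_code) {
 *		ret_code = i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD,
 *					      &checksum_word);
 *		i40e_release_nvm(hw);
 *	}
 */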

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
        if (!hw->nvm.blank_nvm_mode)
                i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
        i40e_status ret_code = I40E_ERR_TIMEOUT;
        u32 srctl, wait_cnt;

        /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
        for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
                srctl = rd32(hw, I40E_GLNVM_SRCTL);
                if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
                        ret_code = 0;
                        break;
                }
                udelay(5);
        }
        if (ret_code == I40E_ERR_TIMEOUT)
                hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
        return ret_code;
}

/**
 * i40e_read_nvm_word - Reads Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                               u16 *data)
{
        i40e_status ret_code = I40E_ERR_TIMEOUT;
        u32 sr_reg;

        if (offset >= hw->nvm.sr_size) {
                hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
                ret_code = I40E_ERR_PARAM;
                goto read_nvm_exit;
        }

        /* Poll the done bit first */
        ret_code = i40e_poll_sr_srctl_done_bit(hw);
        if (!ret_code) {
                /* Write the address and start reading */
                sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
                         (1 << I40E_GLNVM_SRCTL_START_SHIFT);
                wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

                /* Poll I40E_GLNVM_SRCTL until the done bit is set */
                ret_code = i40e_poll_sr_srctl_done_bit(hw);
                if (!ret_code) {
                        sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
                        *data = (u16)((sr_reg &
                                       I40E_GLNVM_SRDATA_RDDATA_MASK)
                                      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
                }
        }
        if (ret_code)
                hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
                       offset);

read_nvm_exit:
        return ret_code;
}

/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_word()
 * method. NVM ownership is expected to be taken by the caller before the
 * buffer read and released afterwards.
 **/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                 u16 *words, u16 *data)
{
        i40e_status ret_code = 0;
        u16 index, word;

        /* Loop through the selected region */
        for (word = 0; word < *words; word++) {
                index = offset + word;
                ret_code = i40e_read_nvm_word(hw, index, &data[word]);
                if (ret_code)
                        break;
        }

        /* Update the number of words read from the Shadow RAM */
        *words = word;

        return ret_code;
}
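
/* Illustrative call (the local names are made up for the example): read two
 * words starting at the VPD pointer word; on return, words holds how many
 * words were actually read.
 *
 *	u16 words = 2, buf[2];
 *
 *	ret_code = i40e_read_nvm_buffer(hw, I40E_SR_VPD_PTR, &words, buf);
 */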

/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                     u32 offset, u16 words, void *data,
                                     bool last_command)
{
        i40e_status ret_code = I40E_ERR_NVM;

        /* Here we are checking the SR limit only for the flat memory model.
         * We cannot do it for the module-based model, as we did not acquire
         * the NVM resource yet (we cannot get the module pointer value).
         * Firmware will check the module-based model.
         */
        if ((offset + words) > hw->nvm.sr_size)
                hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
        else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
                /* We can write only up to 4KB (one sector) in one AQ write */
                hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
        else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
                 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
                /* A single write cannot spread over two sectors */
                hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
        else
                ret_code = i40e_aq_update_nvm(hw, module_pointer,
                                              2 * offset,  /*bytes*/
                                              2 * words,   /*bytes*/
                                              data, last_command, NULL);

        return ret_code;
}
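
/* Worked example for the sector check above, assuming a 4KB sector so that
 * I40E_SR_SECTOR_SIZE_IN_WORDS is 2048: offset = 2040 and words = 16 give
 * (2040 + 15) / 2048 = 1 but 2040 / 2048 = 0, so the write would straddle a
 * sector boundary and is rejected before any AQ command is issued.
 */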

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of the VPD module are customer specific and unknown, so this function skips
 * the maximum possible size of the VPD module (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
                                          u16 *checksum)
{
        i40e_status ret_code = 0;
        u16 pcie_alt_module = 0;
        u16 checksum_local = 0;
        u16 vpd_module = 0;
        u16 word = 0;
        u32 i = 0;

        /* read pointer to VPD area */
        ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
        if (ret_code) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
        }

        /* read pointer to PCIe Alt Auto-load module */
        ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
                                      &pcie_alt_module);
        if (ret_code) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
        }

        /* Calculate SW checksum that covers the whole 64kB shadow RAM
         * except the VPD and PCIe ALT Auto-load modules
         */
        for (i = 0; i < hw->nvm.sr_size; i++) {
                /* Skip Checksum word */
                if (i == I40E_SR_SW_CHECKSUM_WORD)
                        i++;
                /* Skip VPD module (convert byte size to word count) */
                if (i == (u32)vpd_module) {
                        i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
                        if (i >= hw->nvm.sr_size)
                                break;
                }
                /* Skip PCIe ALT module (convert byte size to word count) */
                if (i == (u32)pcie_alt_module) {
                        i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
                        if (i >= hw->nvm.sr_size)
                                break;
                }

                ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
                if (ret_code) {
                        ret_code = I40E_ERR_NVM_CHECKSUM;
                        goto i40e_calc_nvm_checksum_exit;
                }
                checksum_local += word;
        }

        *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
        return ret_code;
}
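
/* The computed checksum is I40E_SR_SW_CHECKSUM_BASE minus the 16-bit sum of
 * the covered words, so for an intact image the sum of the covered words plus
 * the stored checksum word equals I40E_SR_SW_CHECKSUM_BASE modulo 2^16.
 * i40e_validate_nvm_checksum() below recomputes this value and compares it
 * against the word stored at I40E_SR_SW_CHECKSUM_WORD.
 */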

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * by the caller on reception of the ARQ completion event.
 * This function will commit SR to NVM.
 **/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;
        u16 checksum;

        ret_code = i40e_calc_nvm_checksum(hw, &checksum);
        if (!ret_code)
                ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
                                             1, &checksum, true);

        return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum, @checksum can be NULL.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
                                       u16 *checksum)
{
        i40e_status ret_code = 0;
        u16 checksum_sr = 0;
        u16 checksum_local = 0;

        ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
        if (ret_code)
                goto i40e_validate_nvm_checksum_exit;

        /* Do not use i40e_read_nvm_word() because we do not want to take
         * the synchronization semaphores twice here.
         */
        i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

        /* Verify that the checksum read from the EEPROM is the same as the
         * calculated checksum
         */
        if (checksum_local != checksum_sr)
                ret_code = I40E_ERR_NVM_CHECKSUM;

        /* If the user cares, return the calculated checksum */
        if (checksum)
                *checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
        return ret_code;
}

static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                          struct i40e_nvm_access *cmd,
                                          u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
                                             struct i40e_nvm_access *cmd,
                                             u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
                                             struct i40e_nvm_access *cmd,
                                             u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
                                                         struct i40e_nvm_access *cmd,
                                                         int *errno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
                                         struct i40e_nvm_access *cmd,
                                         int *errno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
                                         struct i40e_nvm_access *cmd,
                                         u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
                                        struct i40e_nvm_access *cmd,
                                        u8 *bytes, int *errno);

static inline u8 i40e_nvmupd_get_module(u32 val)
{
        return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
        return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
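
/* These helpers split the caller-supplied config word into a module pointer
 * (the low bits covered by I40E_NVM_MOD_PNT_MASK) and a transaction type
 * (the I40E_NVM_TRANS_MASK bits shifted down by I40E_NVM_TRANS_SHIFT).
 * As a purely hypothetical example, assuming an 8-bit module-pointer field,
 * a config of 0x0103 would decode as transaction 0x01 and module 0x03.
 */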

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
                                struct i40e_nvm_access *cmd,
                                u8 *bytes, int *errno)
{
        i40e_status status;

        /* assume success */
        *errno = 0;

        switch (hw->nvmupd_state) {
        case I40E_NVMUPD_STATE_INIT:
                status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
                break;

        case I40E_NVMUPD_STATE_READING:
                status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
                break;

        case I40E_NVMUPD_STATE_WRITING:
                status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
                break;

        default:
                /* invalid state, should never happen */
                status = I40E_NOT_SUPPORTED;
                *errno = -ESRCH;
                break;
        }
        return status;
}
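
/* State machine summary (derived from the handlers below): READ_SNT and
 * WRITE_SNT move INIT to READING or WRITING while the NVM resource stays
 * held; READ_LCB, WRITE_LCB and CSUM_LCB return the machine to INIT,
 * releasing the resource either directly or via nvm_release_on_done; the
 * *_SA and WRITE_ERA commands acquire and finish within a single call, so
 * the state remains INIT.
 */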

/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                          struct i40e_nvm_access *cmd,
                                          u8 *bytes, int *errno)
{
        i40e_status status = 0;
        enum i40e_nvmupd_cmd upd_cmd;

        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

        switch (upd_cmd) {
        case I40E_NVMUPD_READ_SA:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
                if (status) {
                        *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
                } else {
                        status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
                        i40e_release_nvm(hw);
                }
                break;

        case I40E_NVMUPD_READ_SNT:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
                if (status) {
                        *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
                } else {
                        status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
                        hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
                }
                break;

        case I40E_NVMUPD_WRITE_ERA:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
                if (status) {
                        *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
                } else {
                        status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
                        if (status)
                                i40e_release_nvm(hw);
                        else
                                hw->aq.nvm_release_on_done = true;
                }
                break;

        case I40E_NVMUPD_WRITE_SA:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
                if (status) {
                        *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
                } else {
                        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
                        if (status)
                                i40e_release_nvm(hw);
                        else
                                hw->aq.nvm_release_on_done = true;
                }
                break;

        case I40E_NVMUPD_WRITE_SNT:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
                if (status) {
                        *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
                } else {
                        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
                }
                break;

        case I40E_NVMUPD_CSUM_SA:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
                if (status) {
                        *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
                } else {
                        status = i40e_update_nvm_checksum(hw);
                        if (status) {
                                *errno = hw->aq.asq_last_status ?
                                         i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
                                         -EIO;
                                i40e_release_nvm(hw);
                        } else {
                                hw->aq.nvm_release_on_done = true;
                        }
                }
                break;

        default:
                status = I40E_ERR_NVM;
                *errno = -ESRCH;
                break;
        }
        return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
                                             struct i40e_nvm_access *cmd,
                                             u8 *bytes, int *errno)
{
        i40e_status status;
        enum i40e_nvmupd_cmd upd_cmd;

        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

        switch (upd_cmd) {
        case I40E_NVMUPD_READ_SA:
        case I40E_NVMUPD_READ_CON:
                status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
                break;

        case I40E_NVMUPD_READ_LCB:
                status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
                i40e_release_nvm(hw);
                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                break;

        default:
                status = I40E_NOT_SUPPORTED;
                *errno = -ESRCH;
                break;
        }
        return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
                                             struct i40e_nvm_access *cmd,
                                             u8 *bytes, int *errno)
{
        i40e_status status;
        enum i40e_nvmupd_cmd upd_cmd;

        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

        switch (upd_cmd) {
        case I40E_NVMUPD_WRITE_CON:
                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
                break;

        case I40E_NVMUPD_WRITE_LCB:
                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
                if (!status) {
                        hw->aq.nvm_release_on_done = true;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                }
                break;

        case I40E_NVMUPD_CSUM_CON:
                status = i40e_update_nvm_checksum(hw);
                if (status)
                        *errno = hw->aq.asq_last_status ?
                                 i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
                                 -EIO;
                break;

        case I40E_NVMUPD_CSUM_LCB:
                status = i40e_update_nvm_checksum(hw);
                if (status) {
                        *errno = hw->aq.asq_last_status ?
                                 i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
                                 -EIO;
                } else {
                        hw->aq.nvm_release_on_done = true;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                }
                break;

        default:
                status = I40E_NOT_SUPPORTED;
                *errno = -ESRCH;
                break;
        }
        return status;
}

/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @errno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
                                                         struct i40e_nvm_access *cmd,
                                                         int *errno)
{
        enum i40e_nvmupd_cmd upd_cmd;
        u8 transaction, module;

        /* anything that doesn't match a recognized case is an error */
        upd_cmd = I40E_NVMUPD_INVALID;

        transaction = i40e_nvmupd_get_transaction(cmd->config);
        module = i40e_nvmupd_get_module(cmd->config);

        /* limits on data size */
        if ((cmd->data_size < 1) ||
            (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
                hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n",
                       cmd->data_size);
                *errno = -EFAULT;
                return I40E_NVMUPD_INVALID;
        }

        switch (cmd->command) {
        case I40E_NVM_READ:
                switch (transaction) {
                case I40E_NVM_CON:
                        upd_cmd = I40E_NVMUPD_READ_CON;
                        break;
                case I40E_NVM_SNT:
                        upd_cmd = I40E_NVMUPD_READ_SNT;
                        break;
                case I40E_NVM_LCB:
                        upd_cmd = I40E_NVMUPD_READ_LCB;
                        break;
                case I40E_NVM_SA:
                        upd_cmd = I40E_NVMUPD_READ_SA;
                        break;
                }
                break;

        case I40E_NVM_WRITE:
                switch (transaction) {
                case I40E_NVM_CON:
                        upd_cmd = I40E_NVMUPD_WRITE_CON;
                        break;
                case I40E_NVM_SNT:
                        upd_cmd = I40E_NVMUPD_WRITE_SNT;
                        break;
                case I40E_NVM_LCB:
                        upd_cmd = I40E_NVMUPD_WRITE_LCB;
                        break;
                case I40E_NVM_SA:
                        upd_cmd = I40E_NVMUPD_WRITE_SA;
                        break;
                case I40E_NVM_ERA:
                        upd_cmd = I40E_NVMUPD_WRITE_ERA;
                        break;
                case I40E_NVM_CSUM:
                        upd_cmd = I40E_NVMUPD_CSUM_CON;
                        break;
                case (I40E_NVM_CSUM|I40E_NVM_SA):
                        upd_cmd = I40E_NVMUPD_CSUM_SA;
                        break;
                case (I40E_NVM_CSUM|I40E_NVM_LCB):
                        upd_cmd = I40E_NVMUPD_CSUM_LCB;
                        break;
                }
                break;
        }

        if (upd_cmd == I40E_NVMUPD_INVALID) {
                *errno = -EFAULT;
                hw_dbg(hw,
                       "i40e_nvmupd_validate_command returns %d errno: %d\n",
                       upd_cmd, *errno);
        }
        return upd_cmd;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
                                        struct i40e_nvm_access *cmd,
                                        u8 *bytes, int *errno)
{
        i40e_status status;
        u8 module, transaction;
        bool last;

        transaction = i40e_nvmupd_get_transaction(cmd->config);
        module = i40e_nvmupd_get_module(cmd->config);
        last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
        hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
               module, cmd->offset, cmd->data_size);

        status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
                                  bytes, last, NULL);
        hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status);
        if (status)
                *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);

        return status;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @errno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
                                         struct i40e_nvm_access *cmd,
                                         int *errno)
{
        i40e_status status = 0;
        u8 module, transaction;
        bool last;

        transaction = i40e_nvmupd_get_transaction(cmd->config);
        module = i40e_nvmupd_get_module(cmd->config);
        last = (transaction & I40E_NVM_LCB);
        hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
               module, cmd->offset, cmd->data_size);
        status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
                                   last, NULL);
        hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status);
        if (status)
                *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);

        return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
                                         struct i40e_nvm_access *cmd,
                                         u8 *bytes, int *errno)
{
        i40e_status status = 0;
        u8 module, transaction;
        bool last;

        transaction = i40e_nvmupd_get_transaction(cmd->config);
        module = i40e_nvmupd_get_module(cmd->config);
        last = (transaction & I40E_NVM_LCB);
        hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
               module, cmd->offset, cmd->data_size);
        status = i40e_aq_update_nvm(hw, module, cmd->offset,
                                    (u16)cmd->data_size, bytes, last, NULL);
        hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status);
        if (status)
                *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);

        return status;
}