/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_prototype.h"

/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Setup the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
 * Please notice that the NVM term is used here (& in all methods covered
 * in this file) as an equivalent of the FLASH part mapped into the SR.
 * We are accessing FLASH always through the Shadow RAM.
 **/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	i40e_status ret_code = 0;
	u32 fla, gens;
	u8 sr_size;

	/* The SR size is stored regardless of the nvm programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for reading or writing
 * via the proper Admin Command.
 **/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
			     enum i40e_aq_resource_access_type access)
{
	i40e_status ret_code = 0;
	u64 gtime, timeout;
	u64 time_left = 0;

	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			usleep_range(10000, 20000);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
							NULL);
			if (!ret_code) {
				hw->nvm.hw_semaphore_timeout =
					    I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}

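/* Illustrative sketch (not part of the original driver): the expected usage
 * pattern around the acquire/release pair above. A caller takes NVM ownership
 * for the duration of the access and releases it afterwards. The helper name
 * i40e_example_read_checksum_word() and its placement here are hypothetical;
 * it only strings together functions defined in this file.
 */
static i40e_status __maybe_unused
i40e_example_read_checksum_word(struct i40e_hw *hw, u16 *csum)
{
	i40e_status status;

	/* Take NVM ownership for a read access */
	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status)
		return status;

	/* Read the SW checksum word from the Shadow RAM */
	status = i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, csum);

	/* Always give the ownership back, even on failure */
	i40e_release_nvm(hw);

	return status;
}
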
/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	if (!hw->nvm.blank_nvm_mode)
		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = 0;
			break;
		}
		udelay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer for words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 **/
static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				    u32 offset, u16 words, void *data,
				    bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector), in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset, /*bytes*/
					    2 * words,  /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ.
 **/
static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = le16_to_cpu(*(__le16 *)data);

	return ret_code;
}

/**
 * i40e_read_nvm_word - Reads Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, using the AdminQ on X722
 * parts and the GLNVM_SRCTL register on all other parts.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	if (hw->mac.type == I40E_MAC_X722)
		return i40e_read_nvm_word_aq(hw, offset, data);
	return i40e_read_nvm_word_srctl(hw, offset, data);
}

/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR one word at a time using
 * the i40e_read_nvm_word_srctl() method. The buffer read is preceded by the
 * NVM ownership take and followed by the release.
 **/
static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	i40e_status ret_code = 0;
	u16 index, word;

	/* Loop thru the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The buffer read is preceded by the NVM ownership take
 * and followed by the release.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* Calculate number of words we should read in this step.
		 * The FVL AQ does not allow reading more than one page at
		 * a time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using either the AdminQ
 * (X722) or the SRCTL register method. The buffer read is preceded by the
 * NVM ownership take and followed by the release.
 **/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
				 u16 *words, u16 *data)
{
	if (hw->mac.type == I40E_MAC_X722)
		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}

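/* Illustrative sketch (not part of the original driver): how a caller might
 * use i40e_read_nvm_buffer() and honour its in/out @words semantics. The
 * helper name i40e_example_dump_sr_start() and the 16-word length are
 * hypothetical and exist only to show the calling convention.
 */
static i40e_status __maybe_unused
i40e_example_dump_sr_start(struct i40e_hw *hw)
{
	u16 buf[16];
	u16 words = ARRAY_SIZE(buf);	/* in: number of words requested */
	i40e_status status;
	u16 i;

	status = i40e_read_nvm_buffer(hw, 0, &words, buf);
	if (status)
		return status;

	/* out: 'words' now holds the number of words actually read */
	for (i = 0; i < words; i++)
		i40e_debug(hw, I40E_DEBUG_NVM, "SR[0x%04x] = 0x%04x\n",
			   i, buf[i]);

	return 0;
}
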
/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset, /*bytes*/
					      2 * words,  /*bytes*/
					      data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips the maximum
 * possible size of the VPD module (1kB).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code = 0;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
				      &pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

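/* Editorial note (illustrative, not from the original sources): the checksum
 * computed above is defined so that, for a correctly programmed image, the
 * 16-bit sum of every non-skipped Shadow RAM word plus the stored checksum
 * word equals I40E_SR_SW_CHECKSUM_BASE modulo 2^16. With sum denoting the
 * checksum_local value from the loop above:
 *
 *   checksum              = (u16)(I40E_SR_SW_CHECKSUM_BASE - sum)
 *   (u16)(sum + checksum) = I40E_SR_SW_CHECKSUM_BASE
 *
 * i40e_validate_nvm_checksum() checks the same property by recomputing the
 * value and comparing it with the word stored at I40E_SR_SW_CHECKSUM_WORD.
 */
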
/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 **/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u16 checksum;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	if (!ret_code)
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &checksum, true);

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	if (ret_code)
		goto i40e_validate_nvm_checksum_exit;

	/* Do not use i40e_read_nvm_word() because we do not want to take
	 * the synchronization semaphores twice here.
	 */
	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
	return ret_code;
}

static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}

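/* Illustrative sketch (not part of the original driver): how the nvmupd
 * 'config' word is packed and unpacked with the helpers above. The module
 * pointer lives in the low bits covered by I40E_NVM_MOD_PNT_MASK and the
 * transaction type sits at I40E_NVM_TRANS_SHIFT. The helper name and the
 * sample values are hypothetical.
 */
static void __maybe_unused i40e_example_decode_config(struct i40e_hw *hw)
{
	/* Pack a "read, single action" (SA) transaction for module 0x06 */
	u32 config = ((u32)I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) | 0x06;

	/* Unpack it again with the helpers used by the state machine */
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "example config 0x%08x -> module 0x%02x transaction 0x%x\n",
		   config, i40e_nvmupd_get_module(config),
		   i40e_nvmupd_get_transaction(config));
}
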
static char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
};

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->aq.nvm_release_on_done);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		bytes[0] = hw->nvmupd_state;
		return 0;
	}

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

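/* Illustrative sketch (not part of the original driver): how an upper layer
 * (for example an ethtool NVM access path) might drive the state machine
 * above with a single-shot "read, start and finish" (SA) request while the
 * adapter is in the idle I40E_NVMUPD_STATE_INIT state. The helper name,
 * offset and length are hypothetical.
 */
static i40e_status __maybe_unused
i40e_example_nvmupd_read_sa(struct i40e_hw *hw, u8 *buf, u32 len)
{
	struct i40e_nvm_access cmd = {};
	int perrno = 0;

	cmd.command = I40E_NVM_READ;
	/* SA = single transaction: acquire, read, release in one call */
	cmd.config = (u32)I40E_NVM_SA << I40E_NVM_TRANS_SHIFT;
	cmd.offset = 0;			/* byte offset into the NVM */
	cmd.data_size = len;		/* byte count to read into 'buf' */

	return i40e_nvmupd_command(hw, &cmd, buf, &perrno);
}
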
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					   i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
					   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status)
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->aq.nvm_release_on_done = true;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->aq.nvm_release_on_done = true;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		}
		break;
	}

	return upd_cmd;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}