]> git.proxmox.com Git - ceph.git/blame - ceph/src/dpdk/drivers/net/i40e/base/i40e_nvm.c
bump version to 12.2.12-pve1
[ceph.git] / ceph / src / dpdk / drivers / net / i40e / base / i40e_nvm.c
CommitLineData
7c673cae
FG
1/*******************************************************************************
2
3Copyright (c) 2013 - 2015, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34#include "i40e_prototype.h"
35
36enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
37 u16 *data);
38enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
39 u16 *data);
40enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
41 u16 *words, u16 *data);
42enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
43 u16 *words, u16 *data);
44enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
45 u32 offset, u16 words, void *data,
46 bool last_command);
47
/**
 * i40e_init_nvm - Initialize the NVM info structure
 * @hw: pointer to the HW structure
 *
 * Reads the Shadow RAM size and the NVM programming mode from hardware
 * and fills in hw->nvm.  Should be called once per NVM initialization,
 * e.g. inside the i40e_init_shared_code().
 * Please notice that the NVM term is used here (& in all methods covered
 * in this file) as an equivalent of the FLASH part mapped into the SR.
 * We are accessing FLASH always through the Shadow RAM.
 *
 * Returns I40E_SUCCESS, or I40E_ERR_NVM_BLANK_MODE when the part is in
 * the (unsupported) blank programming mode.
 **/
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 fla, gens;
	u8 sr_size;

	DEBUGFUNC("i40e_init_nvm");

	/* The SR size is stored regardless of the nvm programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}
90
91/**
92 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
93 * @hw: pointer to the HW structure
94 * @access: NVM access type (read or write)
95 *
96 * This function will request NVM ownership for reading
97 * via the proper Admin Command.
98 **/
99enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
100 enum i40e_aq_resource_access_type access)
101{
102 enum i40e_status_code ret_code = I40E_SUCCESS;
103 u64 gtime, timeout;
104 u64 time_left = 0;
105
106 DEBUGFUNC("i40e_acquire_nvm");
107
108 if (hw->nvm.blank_nvm_mode)
109 goto i40e_i40e_acquire_nvm_exit;
110
111 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
112 0, &time_left, NULL);
113 /* Reading the Global Device Timer */
114 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
115
116 /* Store the timeout */
117 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
118
119 if (ret_code)
120 i40e_debug(hw, I40E_DEBUG_NVM,
121 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
122 access, time_left, ret_code, hw->aq.asq_last_status);
123
124 if (ret_code && time_left) {
125 /* Poll until the current NVM owner timeouts */
126 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
127 while ((gtime < timeout) && time_left) {
128 i40e_msec_delay(10);
129 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
130 ret_code = i40e_aq_request_resource(hw,
131 I40E_NVM_RESOURCE_ID,
132 access, 0, &time_left,
133 NULL);
134 if (ret_code == I40E_SUCCESS) {
135 hw->nvm.hw_semaphore_timeout =
136 I40E_MS_TO_GTIME(time_left) + gtime;
137 break;
138 }
139 }
140 if (ret_code != I40E_SUCCESS) {
141 hw->nvm.hw_semaphore_timeout = 0;
142 i40e_debug(hw, I40E_DEBUG_NVM,
143 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
144 time_left, ret_code, hw->aq.asq_last_status);
145 }
146 }
147
148i40e_i40e_acquire_nvm_exit:
149 return ret_code;
150}
151
152/**
153 * i40e_release_nvm - Generic request for releasing the NVM ownership
154 * @hw: pointer to the HW structure
155 *
156 * This function will release NVM resource via the proper Admin Command.
157 **/
158void i40e_release_nvm(struct i40e_hw *hw)
159{
160 enum i40e_status_code ret_code = I40E_SUCCESS;
161 u32 total_delay = 0;
162
163 DEBUGFUNC("i40e_release_nvm");
164
165 if (hw->nvm.blank_nvm_mode)
166 return;
167
168 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
169
170 /* there are some rare cases when trying to release the resource
171 * results in an admin Q timeout, so handle them correctly
172 */
173 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
174 (total_delay < hw->aq.asq_cmd_timeout)) {
175 i40e_msec_delay(1);
176 ret_code = i40e_aq_release_resource(hw,
177 I40E_NVM_RESOURCE_ID, 0, NULL);
178 total_delay++;
179 }
180}
181
182/**
183 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
184 * @hw: pointer to the HW structure
185 *
186 * Polls the SRCTL Shadow RAM register done bit.
187 **/
188static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
189{
190 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
191 u32 srctl, wait_cnt;
192
193 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
194
195 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
196 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
197 srctl = rd32(hw, I40E_GLNVM_SRCTL);
198 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
199 ret_code = I40E_SUCCESS;
200 break;
201 }
202 i40e_usec_delay(5);
203 }
204 if (ret_code == I40E_ERR_TIMEOUT)
205 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
206 return ret_code;
207}
208
209/**
210 * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
211 * @hw: pointer to the HW structure
212 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
213 * @data: word read from the Shadow RAM
214 *
215 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
216 **/
217enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
218 u16 *data)
219{
220 enum i40e_status_code ret_code = I40E_SUCCESS;
221
222#ifdef X722_SUPPORT
223 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
224 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
225 if (!ret_code) {
226 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
227 i40e_release_nvm(hw);
228 }
229 } else {
230 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
231 }
232#else
233 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
234#endif
235 return ret_code;
236}
237
238/**
239 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
240 * @hw: pointer to the HW structure
241 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
242 * @data: word read from the Shadow RAM
243 *
244 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
245 **/
246enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
247 u16 offset,
248 u16 *data)
249{
250 enum i40e_status_code ret_code = I40E_SUCCESS;
251
252#ifdef X722_SUPPORT
253 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
254 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
255 else
256 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
257#else
258 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
259#endif
260 return ret_code;
261}
262
263/**
264 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
265 * @hw: pointer to the HW structure
266 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
267 * @data: word read from the Shadow RAM
268 *
269 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
270 **/
271enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
272 u16 *data)
273{
274 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
275 u32 sr_reg;
276
277 DEBUGFUNC("i40e_read_nvm_word_srctl");
278
279 if (offset >= hw->nvm.sr_size) {
280 i40e_debug(hw, I40E_DEBUG_NVM,
281 "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
282 offset, hw->nvm.sr_size);
283 ret_code = I40E_ERR_PARAM;
284 goto read_nvm_exit;
285 }
286
287 /* Poll the done bit first */
288 ret_code = i40e_poll_sr_srctl_done_bit(hw);
289 if (ret_code == I40E_SUCCESS) {
290 /* Write the address and start reading */
291 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
292 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
293 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
294
295 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
296 ret_code = i40e_poll_sr_srctl_done_bit(hw);
297 if (ret_code == I40E_SUCCESS) {
298 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
299 *data = (u16)((sr_reg &
300 I40E_GLNVM_SRDATA_RDDATA_MASK)
301 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
302 }
303 }
304 if (ret_code != I40E_SUCCESS)
305 i40e_debug(hw, I40E_DEBUG_NVM,
306 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
307 offset);
308
309read_nvm_exit:
310 return ret_code;
311}
312
313/**
314 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
315 * @hw: pointer to the HW structure
316 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
317 * @data: word read from the Shadow RAM
318 *
319 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
320 **/
321enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
322 u16 *data)
323{
324 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
325
326 DEBUGFUNC("i40e_read_nvm_word_aq");
327
328 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
329 *data = LE16_TO_CPU(*(__le16 *)data);
330
331 return ret_code;
332}
333
334/**
335 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
336 * @hw: pointer to the HW structure
337 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
338 * @words: (in) number of words to read; (out) number of words actually read
339 * @data: words read from the Shadow RAM
340 *
341 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
342 * method. The buffer read is preceded by the NVM ownership take
343 * and followed by the release.
344 **/
345enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
346 u16 offset,
347 u16 *words, u16 *data)
348{
349 enum i40e_status_code ret_code = I40E_SUCCESS;
350
351#ifdef X722_SUPPORT
352 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
353 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
354 else
355 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
356#else
357 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
358#endif
359 return ret_code;
360}
361
362/**
363 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acuire lock if necessary
364 * @hw: pointer to the HW structure
365 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
366 * @words: (in) number of words to read; (out) number of words actually read
367 * @data: words read from the Shadow RAM
368 *
369 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
370 * method. The buffer read is preceded by the NVM ownership take
371 * and followed by the release.
372 **/
373enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
374 u16 *words, u16 *data)
375{
376 enum i40e_status_code ret_code = I40E_SUCCESS;
377
378#ifdef X722_SUPPORT
379 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
380 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
381 if (!ret_code) {
382 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
383 data);
384 i40e_release_nvm(hw);
385 }
386 } else {
387 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
388 }
389#else
390 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
391#endif
392 return ret_code;
393}
394
395/**
396 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
397 * @hw: pointer to the HW structure
398 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
399 * @words: (in) number of words to read; (out) number of words actually read
400 * @data: words read from the Shadow RAM
401 *
402 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
403 * method. The buffer read is preceded by the NVM ownership take
404 * and followed by the release.
405 **/
406enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
407 u16 *words, u16 *data)
408{
409 enum i40e_status_code ret_code = I40E_SUCCESS;
410 u16 index, word;
411
412 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
413
414 /* Loop through the selected region */
415 for (word = 0; word < *words; word++) {
416 index = offset + word;
417 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
418 if (ret_code != I40E_SUCCESS)
419 break;
420 }
421
422 /* Update the number of words read from the Shadow RAM */
423 *words = word;
424
425 return ret_code;
426}
427
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method, splitting the request into sector-sized chunks because the AQ
 * cannot cross a 4KB sector boundary in one command.  The caller is
 * expected to hold the NVM resource — no locking is done here.  On any AQ
 * failure, *words is set to the count read before the failure and the raw
 * little-endian data is NOT byte-swapped.
 **/
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* Unaligned start: only happens on the first chunk
			 * (offset becomes sector-aligned afterwards), so
			 * read just up to the end of the current sector.
			 */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* AQ delivers little-endian data; convert in place to CPU order */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}
486
487/**
488 * i40e_read_nvm_aq - Read Shadow RAM.
489 * @hw: pointer to the HW structure.
490 * @module_pointer: module pointer location in words from the NVM beginning
491 * @offset: offset in words from module start
492 * @words: number of words to write
493 * @data: buffer with words to write to the Shadow RAM
494 * @last_command: tells the AdminQ that this is the last command
495 *
496 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
497 **/
498enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
499 u32 offset, u16 words, void *data,
500 bool last_command)
501{
502 enum i40e_status_code ret_code = I40E_ERR_NVM;
503 struct i40e_asq_cmd_details cmd_details;
504
505 DEBUGFUNC("i40e_read_nvm_aq");
506
507 memset(&cmd_details, 0, sizeof(cmd_details));
508 cmd_details.wb_desc = &hw->nvm_wb_desc;
509
510 /* Here we are checking the SR limit only for the flat memory model.
511 * We cannot do it for the module-based model, as we did not acquire
512 * the NVM resource yet (we cannot get the module pointer value).
513 * Firmware will check the module-based model.
514 */
515 if ((offset + words) > hw->nvm.sr_size)
516 i40e_debug(hw, I40E_DEBUG_NVM,
517 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
518 (offset + words), hw->nvm.sr_size);
519 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
520 /* We can write only up to 4KB (one sector), in one AQ write */
521 i40e_debug(hw, I40E_DEBUG_NVM,
522 "NVM write fail error: tried to write %d words, limit is %d.\n",
523 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
524 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
525 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
526 /* A single write cannot spread over two sectors */
527 i40e_debug(hw, I40E_DEBUG_NVM,
528 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
529 offset, words);
530 else
531 ret_code = i40e_aq_read_nvm(hw, module_pointer,
532 2 * offset, /*bytes*/
533 2 * words, /*bytes*/
534 data, last_command, &cmd_details);
535
536 return ret_code;
537}
538
539/**
540 * i40e_write_nvm_aq - Writes Shadow RAM.
541 * @hw: pointer to the HW structure.
542 * @module_pointer: module pointer location in words from the NVM beginning
543 * @offset: offset in words from module start
544 * @words: number of words to write
545 * @data: buffer with words to write to the Shadow RAM
546 * @last_command: tells the AdminQ that this is the last command
547 *
548 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
549 **/
550enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
551 u32 offset, u16 words, void *data,
552 bool last_command)
553{
554 enum i40e_status_code ret_code = I40E_ERR_NVM;
555 struct i40e_asq_cmd_details cmd_details;
556
557 DEBUGFUNC("i40e_write_nvm_aq");
558
559 memset(&cmd_details, 0, sizeof(cmd_details));
560 cmd_details.wb_desc = &hw->nvm_wb_desc;
561
562 /* Here we are checking the SR limit only for the flat memory model.
563 * We cannot do it for the module-based model, as we did not acquire
564 * the NVM resource yet (we cannot get the module pointer value).
565 * Firmware will check the module-based model.
566 */
567 if ((offset + words) > hw->nvm.sr_size)
568 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
569 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
570 /* We can write only up to 4KB (one sector), in one AQ write */
571 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
572 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
573 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
574 /* A single write cannot spread over two sectors */
575 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
576 else
577 ret_code = i40e_aq_update_nvm(hw, module_pointer,
578 2 * offset, /*bytes*/
579 2 * words, /*bytes*/
580 data, last_command, &cmd_details);
581
582 return ret_code;
583}
584
585/**
586 * __i40e_write_nvm_word - Writes Shadow RAM word
587 * @hw: pointer to the HW structure
588 * @offset: offset of the Shadow RAM word to write
589 * @data: word to write to the Shadow RAM
590 *
591 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
592 * NVM ownership have to be acquired and released (on ARQ completion event
593 * reception) by caller. To commit SR to NVM update checksum function
594 * should be called.
595 **/
596enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
597 void *data)
598{
599 DEBUGFUNC("i40e_write_nvm_word");
600
601 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
602
603 /* Value 0x00 below means that we treat SR as a flat mem */
604 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
605}
606
607/**
608 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
609 * @hw: pointer to the HW structure
610 * @module_pointer: module pointer location in words from the NVM beginning
611 * @offset: offset of the Shadow RAM buffer to write
612 * @words: number of words to write
613 * @data: words to write to the Shadow RAM
614 *
615 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
616 * NVM ownership must be acquired before calling this function and released
617 * on ARQ completion event reception by caller. To commit SR to NVM update
618 * checksum function should be called.
619 **/
620enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
621 u8 module_pointer, u32 offset,
622 u16 words, void *data)
623{
624 __le16 *le_word_ptr = (__le16 *)data;
625 u16 *word_ptr = (u16 *)data;
626 u32 i = 0;
627
628 DEBUGFUNC("i40e_write_nvm_buffer");
629
630 for (i = 0; i < words; i++)
631 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
632
633 /* Here we will only write one buffer as the size of the modules
634 * mirrored in the Shadow RAM is always less than 4K.
635 */
636 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
637 data, false);
638}
639
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 *
 * Reads go through the lock-free __i40e_read_nvm_* helpers, so the caller
 * is expected to hold the NVM resource where the AQ path is in use.
 * Returns I40E_ERR_NVM_CHECKSUM if any underlying SR read fails.
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	/* Scratch buffer large enough for one full SR sector */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
					&vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw,
					I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Refill the sector buffer on every sector boundary */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Final checksum: base value minus the accumulated sum */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
725
726/**
727 * i40e_update_nvm_checksum - Updates the NVM checksum
728 * @hw: pointer to hardware structure
729 *
730 * NVM ownership must be acquired before calling this function and released
731 * on ARQ completion event reception by caller.
732 * This function will commit SR to NVM.
733 **/
734enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
735{
736 enum i40e_status_code ret_code = I40E_SUCCESS;
737 u16 checksum;
738 __le16 le_sum;
739
740 DEBUGFUNC("i40e_update_nvm_checksum");
741
742 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
743 le_sum = CPU_TO_LE16(checksum);
744 if (ret_code == I40E_SUCCESS)
745 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
746 1, &le_sum, true);
747
748 return ret_code;
749}
750
751/**
752 * i40e_validate_nvm_checksum - Validate EEPROM checksum
753 * @hw: pointer to hardware structure
754 * @checksum: calculated checksum
755 *
756 * Performs checksum calculation and validates the NVM SW checksum. If the
757 * caller does not need checksum, the value can be NULL.
758 **/
759enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
760 u16 *checksum)
761{
762 enum i40e_status_code ret_code = I40E_SUCCESS;
763 u16 checksum_sr = 0;
764 u16 checksum_local = 0;
765
766 DEBUGFUNC("i40e_validate_nvm_checksum");
767
768 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
769 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
770 if (!ret_code) {
771 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
772 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
773 i40e_release_nvm(hw);
774 if (ret_code != I40E_SUCCESS)
775 goto i40e_validate_nvm_checksum_exit;
776 } else {
777 goto i40e_validate_nvm_checksum_exit;
778 }
779
780 i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
781
782 /* Verify read checksum from EEPROM is the same as
783 * calculated checksum
784 */
785 if (checksum_local != checksum_sr)
786 ret_code = I40E_ERR_NVM_CHECKSUM;
787
788 /* If the user cares, return the calculated checksum */
789 if (checksum)
790 *checksum = checksum_local;
791
792i40e_validate_nvm_checksum_exit:
793 return ret_code;
794}
795
796STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
797 struct i40e_nvm_access *cmd,
798 u8 *bytes, int *perrno);
799STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
800 struct i40e_nvm_access *cmd,
801 u8 *bytes, int *perrno);
802STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
803 struct i40e_nvm_access *cmd,
804 u8 *bytes, int *perrno);
805STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
806 struct i40e_nvm_access *cmd,
807 int *perrno);
808STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
809 struct i40e_nvm_access *cmd,
810 int *perrno);
811STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
812 struct i40e_nvm_access *cmd,
813 u8 *bytes, int *perrno);
814STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
815 struct i40e_nvm_access *cmd,
816 u8 *bytes, int *perrno);
817STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
818 struct i40e_nvm_access *cmd,
819 u8 *bytes, int *perrno);
820STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
821 struct i40e_nvm_access *cmd,
822 u8 *bytes, int *perrno);
823STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
824{
825 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
826}
827STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
828{
829 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
830}
831
/* Debug names for NVM update commands.  Indexed directly by the
 * enum i40e_nvmupd_cmd value (see i40e_nvmupd_command), so the order
 * here must match the enum declaration exactly.
 */
STATIC const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
};
850
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current.
 * STATUS requests are answered immediately; all other commands go
 * through the nvmupd state machine (INIT / READING / WRITING / *_WAIT).
 **/
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	enum i40e_status_code status;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_command");

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	/* NOTE(review): the format string says "nvm_release_on_hold" but the
	 * argument is hw->nvm_release_on_done — likely a stale label.
	 */
	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/* byte 0: current state machine state */
		bytes[0] = hw->nvmupd_state;

		/* bytes 2-3: the AQ opcode being waited on, if any */
		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		return I40E_SUCCESS;
	}

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
			return I40E_SUCCESS;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
944
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 *
 * Ownership discipline: every path that acquires the NVM semaphore either
 * releases it before returning, or arranges for a deferred release by
 * setting nvm_release_on_done and moving to a *_WAIT state (the release
 * then happens in i40e_nvmupd_check_wait_event when the expected AQ
 * completion arrives).
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* standalone read: acquire, read, release in one call */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a multi-part read: keep the semaphore held and
		 * enter READING state; a later READ_LCB releases it
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* standalone erase: release is deferred until the erase
		 * completion event is seen
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* standalone write: deferred release on update completion */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a multi-part write: semaphore stays held across
		 * the WRITE_WAIT state (no nvm_release_on_done here)
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* standalone checksum update; asq_last_status may be clear
		 * when the failure is local, hence the -EIO fallback
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		/* pass-through AQ command; no NVM semaphore involved here */
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1081
1082/**
1083 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1084 * @hw: pointer to hardware structure
1085 * @cmd: pointer to nvm update command buffer
1086 * @bytes: pointer to the data buffer
1087 * @perrno: pointer to return error code
1088 *
1089 * NVM ownership is already held. Process legitimate commands and set any
1090 * change in state; reject all other commands.
1091 **/
1092STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1093 struct i40e_nvm_access *cmd,
1094 u8 *bytes, int *perrno)
1095{
1096 enum i40e_status_code status = I40E_SUCCESS;
1097 enum i40e_nvmupd_cmd upd_cmd;
1098
1099 DEBUGFUNC("i40e_nvmupd_state_reading");
1100
1101 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1102
1103 switch (upd_cmd) {
1104 case I40E_NVMUPD_READ_SA:
1105 case I40E_NVMUPD_READ_CON:
1106 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1107 break;
1108
1109 case I40E_NVMUPD_READ_LCB:
1110 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1111 i40e_release_nvm(hw);
1112 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1113 break;
1114
1115 default:
1116 i40e_debug(hw, I40E_DEBUG_NVM,
1117 "NVMUPD: bad cmd %s in reading state.\n",
1118 i40e_nvm_update_state_str[upd_cmd]);
1119 status = I40E_NOT_SUPPORTED;
1120 *perrno = -ESRCH;
1121 break;
1122 }
1123 return status;
1124}
1125
/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 *
 * Includes a one-shot retry: if the hardware write semaphore timed out
 * mid-transaction (AQ returns EBUSY), the semaphore is reacquired and the
 * same command is attempted once more.
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	DEBUGFUNC("i40e_nvmupd_state_writing");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* continue multi-part write; wait for completion event */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last command in block: on success, release is deferred to
		 * the completion event; on failure, drop straight to INIT
		 * (-EIO fallback when no AQ error code is available)
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			/* checksum was the last command: defer release */
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		enum i40e_status_code old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		/* only retry when the semaphore has actually expired */
		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				/* reacquire failed: report the original
				 * error, not the reacquire error
				 */
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}
1247
1248/**
1249 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1250 * @hw: pointer to the hardware structure
1251 * @opcode: the event that just happened
1252 **/
1253void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
1254{
1255 if (opcode == hw->nvm_wait_opcode) {
1256 i40e_debug(hw, I40E_DEBUG_NVM,
1257 "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
1258 if (hw->nvm_release_on_done) {
1259 i40e_release_nvm(hw);
1260 hw->nvm_release_on_done = false;
1261 }
1262 hw->nvm_wait_opcode = 0;
1263
1264 switch (hw->nvmupd_state) {
1265 case I40E_NVMUPD_STATE_INIT_WAIT:
1266 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1267 break;
1268
1269 case I40E_NVMUPD_STATE_WRITE_WAIT:
1270 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1271 break;
1272
1273 default:
1274 break;
1275 }
1276 }
1277}
1278
1279/**
1280 * i40e_nvmupd_validate_command - Validate given command
1281 * @hw: pointer to hardware structure
1282 * @cmd: pointer to nvm update command buffer
1283 * @perrno: pointer to return error code
1284 *
1285 * Return one of the valid command types or I40E_NVMUPD_INVALID
1286 **/
1287STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1288 struct i40e_nvm_access *cmd,
1289 int *perrno)
1290{
1291 enum i40e_nvmupd_cmd upd_cmd;
1292 u8 module, transaction;
1293
1294 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1295
1296 /* anything that doesn't match a recognized case is an error */
1297 upd_cmd = I40E_NVMUPD_INVALID;
1298
1299 transaction = i40e_nvmupd_get_transaction(cmd->config);
1300 module = i40e_nvmupd_get_module(cmd->config);
1301
1302 /* limits on data size */
1303 if ((cmd->data_size < 1) ||
1304 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1305 i40e_debug(hw, I40E_DEBUG_NVM,
1306 "i40e_nvmupd_validate_command data_size %d\n",
1307 cmd->data_size);
1308 *perrno = -EFAULT;
1309 return I40E_NVMUPD_INVALID;
1310 }
1311
1312 switch (cmd->command) {
1313 case I40E_NVM_READ:
1314 switch (transaction) {
1315 case I40E_NVM_CON:
1316 upd_cmd = I40E_NVMUPD_READ_CON;
1317 break;
1318 case I40E_NVM_SNT:
1319 upd_cmd = I40E_NVMUPD_READ_SNT;
1320 break;
1321 case I40E_NVM_LCB:
1322 upd_cmd = I40E_NVMUPD_READ_LCB;
1323 break;
1324 case I40E_NVM_SA:
1325 upd_cmd = I40E_NVMUPD_READ_SA;
1326 break;
1327 case I40E_NVM_EXEC:
1328 if (module == 0xf)
1329 upd_cmd = I40E_NVMUPD_STATUS;
1330 else if (module == 0)
1331 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1332 break;
1333 }
1334 break;
1335
1336 case I40E_NVM_WRITE:
1337 switch (transaction) {
1338 case I40E_NVM_CON:
1339 upd_cmd = I40E_NVMUPD_WRITE_CON;
1340 break;
1341 case I40E_NVM_SNT:
1342 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1343 break;
1344 case I40E_NVM_LCB:
1345 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1346 break;
1347 case I40E_NVM_SA:
1348 upd_cmd = I40E_NVMUPD_WRITE_SA;
1349 break;
1350 case I40E_NVM_ERA:
1351 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1352 break;
1353 case I40E_NVM_CSUM:
1354 upd_cmd = I40E_NVMUPD_CSUM_CON;
1355 break;
1356 case (I40E_NVM_CSUM|I40E_NVM_SA):
1357 upd_cmd = I40E_NVMUPD_CSUM_SA;
1358 break;
1359 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1360 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1361 break;
1362 case I40E_NVM_EXEC:
1363 if (module == 0)
1364 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1365 break;
1366 }
1367 break;
1368 }
1369
1370 return upd_cmd;
1371}
1372
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.
 *
 * The caller-supplied bytes buffer holds a raw AQ descriptor, optionally
 * followed by an AQ data payload. The writeback descriptor is captured in
 * hw->nvm_wb_desc so it can be retrieved later via
 * i40e_nvmupd_get_aq_result. cmd->offset, if nonzero, is repurposed as the
 * AQ opcode whose completion event the state machine should wait for.
 **/
STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	/* NOTE(review): reinterprets the byte buffer as a descriptor --
	 * assumes the caller provides adequately-aligned data
	 */
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
	if (buff_size) {
		/* lazily allocate a reusable bounce buffer, kept in hw for
		 * the lifetime of the adapter
		 */
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
						hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	/* should we wait for a followup event?
	 * cmd->offset carries the AQ opcode to wait for, not an NVM offset
	 */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1449
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.
 *
 * The result is logically the writeback descriptor (hw->nvm_wb_desc)
 * immediately followed by the AQ data buffer (hw->nvm_buff); cmd->offset
 * and cmd->data_size select a window into that concatenation, so a single
 * request may copy from the descriptor, the data buffer, or both.
 **/
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range; silently trim rather than fail */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* window starts inside the descriptor: copy the descriptor
		 * part first, then fall through to the data buffer
		 */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* window lies entirely within the data buffer */
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		/* start_byte computed only for the debug message */
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return I40E_SUCCESS;
}
1518
1519/**
1520 * i40e_nvmupd_nvm_read - Read NVM
1521 * @hw: pointer to hardware structure
1522 * @cmd: pointer to nvm update command buffer
1523 * @bytes: pointer to the data buffer
1524 * @perrno: pointer to return error code
1525 *
1526 * cmd structure contains identifiers and data buffer
1527 **/
1528STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1529 struct i40e_nvm_access *cmd,
1530 u8 *bytes, int *perrno)
1531{
1532 struct i40e_asq_cmd_details cmd_details;
1533 enum i40e_status_code status;
1534 u8 module, transaction;
1535 bool last;
1536
1537 transaction = i40e_nvmupd_get_transaction(cmd->config);
1538 module = i40e_nvmupd_get_module(cmd->config);
1539 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1540
1541 memset(&cmd_details, 0, sizeof(cmd_details));
1542 cmd_details.wb_desc = &hw->nvm_wb_desc;
1543
1544 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1545 bytes, last, &cmd_details);
1546 if (status) {
1547 i40e_debug(hw, I40E_DEBUG_NVM,
1548 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1549 module, cmd->offset, cmd->data_size);
1550 i40e_debug(hw, I40E_DEBUG_NVM,
1551 "i40e_nvmupd_nvm_read status %d aq %d\n",
1552 status, hw->aq.asq_last_status);
1553 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1554 }
1555
1556 return status;
1557}
1558
1559/**
1560 * i40e_nvmupd_nvm_erase - Erase an NVM module
1561 * @hw: pointer to hardware structure
1562 * @cmd: pointer to nvm update command buffer
1563 * @perrno: pointer to return error code
1564 *
1565 * module, offset, data_size and data are in cmd structure
1566 **/
1567STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1568 struct i40e_nvm_access *cmd,
1569 int *perrno)
1570{
1571 enum i40e_status_code status = I40E_SUCCESS;
1572 struct i40e_asq_cmd_details cmd_details;
1573 u8 module, transaction;
1574 bool last;
1575
1576 transaction = i40e_nvmupd_get_transaction(cmd->config);
1577 module = i40e_nvmupd_get_module(cmd->config);
1578 last = (transaction & I40E_NVM_LCB);
1579
1580 memset(&cmd_details, 0, sizeof(cmd_details));
1581 cmd_details.wb_desc = &hw->nvm_wb_desc;
1582
1583 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1584 last, &cmd_details);
1585 if (status) {
1586 i40e_debug(hw, I40E_DEBUG_NVM,
1587 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1588 module, cmd->offset, cmd->data_size);
1589 i40e_debug(hw, I40E_DEBUG_NVM,
1590 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1591 status, hw->aq.asq_last_status);
1592 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1593 }
1594
1595 return status;
1596}
1597
1598/**
1599 * i40e_nvmupd_nvm_write - Write NVM
1600 * @hw: pointer to hardware structure
1601 * @cmd: pointer to nvm update command buffer
1602 * @bytes: pointer to the data buffer
1603 * @perrno: pointer to return error code
1604 *
1605 * module, offset, data_size and data are in cmd structure
1606 **/
1607STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1608 struct i40e_nvm_access *cmd,
1609 u8 *bytes, int *perrno)
1610{
1611 enum i40e_status_code status = I40E_SUCCESS;
1612 struct i40e_asq_cmd_details cmd_details;
1613 u8 module, transaction;
1614 bool last;
1615
1616 transaction = i40e_nvmupd_get_transaction(cmd->config);
1617 module = i40e_nvmupd_get_module(cmd->config);
1618 last = (transaction & I40E_NVM_LCB);
1619
1620 memset(&cmd_details, 0, sizeof(cmd_details));
1621 cmd_details.wb_desc = &hw->nvm_wb_desc;
1622
1623 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1624 (u16)cmd->data_size, bytes, last,
1625 &cmd_details);
1626 if (status) {
1627 i40e_debug(hw, I40E_DEBUG_NVM,
1628 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1629 module, cmd->offset, cmd->data_size);
1630 i40e_debug(hw, I40E_DEBUG_NVM,
1631 "i40e_nvmupd_nvm_write status %d aq %d\n",
1632 status, hw->aq.asq_last_status);
1633 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1634 }
1635
1636 return status;
1637}