]>
Commit | Line | Data |
---|---|---|
9f95a23c TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2001-2018 | |
3 | */ | |
7c673cae FG |
4 | |
5 | #include "i40e_prototype.h" | |
6 | ||
7c673cae FG |
7 | /** |
8 | * i40e_init_nvm_ops - Initialize NVM function pointers | |
9 | * @hw: pointer to the HW structure | |
10 | * | |
11 | * Setup the function pointers and the NVM info structure. Should be called | |
12 | * once per NVM initialization, e.g. inside the i40e_init_shared_code(). | |
13 | * Please notice that the NVM term is used here (& in all methods covered | |
14 | * in this file) as an equivalent of the FLASH part mapped into the SR. | |
15 | * We are accessing FLASH always through the Shadow RAM. | |
16 | **/ | |
17 | enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw) | |
18 | { | |
19 | struct i40e_nvm_info *nvm = &hw->nvm; | |
20 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
21 | u32 fla, gens; | |
22 | u8 sr_size; | |
23 | ||
24 | DEBUGFUNC("i40e_init_nvm"); | |
25 | ||
26 | /* The SR size is stored regardless of the nvm programming mode | |
27 | * as the blank mode may be used in the factory line. | |
28 | */ | |
29 | gens = rd32(hw, I40E_GLNVM_GENS); | |
30 | sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> | |
31 | I40E_GLNVM_GENS_SR_SIZE_SHIFT); | |
32 | /* Switching to words (sr_size contains power of 2KB) */ | |
33 | nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; | |
34 | ||
35 | /* Check if we are in the normal or blank NVM programming mode */ | |
36 | fla = rd32(hw, I40E_GLNVM_FLA); | |
37 | if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ | |
38 | /* Max NVM timeout */ | |
39 | nvm->timeout = I40E_MAX_NVM_TIMEOUT; | |
40 | nvm->blank_nvm_mode = false; | |
41 | } else { /* Blank programming mode */ | |
42 | nvm->blank_nvm_mode = true; | |
43 | ret_code = I40E_ERR_NVM_BLANK_MODE; | |
44 | i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); | |
45 | } | |
46 | ||
47 | return ret_code; | |
48 | } | |
49 | ||
50 | /** | |
51 | * i40e_acquire_nvm - Generic request for acquiring the NVM ownership | |
52 | * @hw: pointer to the HW structure | |
53 | * @access: NVM access type (read or write) | |
54 | * | |
55 | * This function will request NVM ownership for reading | |
56 | * via the proper Admin Command. | |
57 | **/ | |
58 | enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, | |
59 | enum i40e_aq_resource_access_type access) | |
60 | { | |
61 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
62 | u64 gtime, timeout; | |
63 | u64 time_left = 0; | |
64 | ||
65 | DEBUGFUNC("i40e_acquire_nvm"); | |
66 | ||
67 | if (hw->nvm.blank_nvm_mode) | |
68 | goto i40e_i40e_acquire_nvm_exit; | |
69 | ||
70 | ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, | |
71 | 0, &time_left, NULL); | |
72 | /* Reading the Global Device Timer */ | |
73 | gtime = rd32(hw, I40E_GLVFGEN_TIMER); | |
74 | ||
75 | /* Store the timeout */ | |
76 | hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; | |
77 | ||
78 | if (ret_code) | |
79 | i40e_debug(hw, I40E_DEBUG_NVM, | |
80 | "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", | |
81 | access, time_left, ret_code, hw->aq.asq_last_status); | |
82 | ||
83 | if (ret_code && time_left) { | |
84 | /* Poll until the current NVM owner timeouts */ | |
85 | timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; | |
86 | while ((gtime < timeout) && time_left) { | |
87 | i40e_msec_delay(10); | |
88 | gtime = rd32(hw, I40E_GLVFGEN_TIMER); | |
89 | ret_code = i40e_aq_request_resource(hw, | |
90 | I40E_NVM_RESOURCE_ID, | |
91 | access, 0, &time_left, | |
92 | NULL); | |
93 | if (ret_code == I40E_SUCCESS) { | |
94 | hw->nvm.hw_semaphore_timeout = | |
95 | I40E_MS_TO_GTIME(time_left) + gtime; | |
96 | break; | |
97 | } | |
98 | } | |
99 | if (ret_code != I40E_SUCCESS) { | |
100 | hw->nvm.hw_semaphore_timeout = 0; | |
101 | i40e_debug(hw, I40E_DEBUG_NVM, | |
102 | "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", | |
103 | time_left, ret_code, hw->aq.asq_last_status); | |
104 | } | |
105 | } | |
106 | ||
107 | i40e_i40e_acquire_nvm_exit: | |
108 | return ret_code; | |
109 | } | |
110 | ||
111 | /** | |
112 | * i40e_release_nvm - Generic request for releasing the NVM ownership | |
113 | * @hw: pointer to the HW structure | |
114 | * | |
115 | * This function will release NVM resource via the proper Admin Command. | |
116 | **/ | |
117 | void i40e_release_nvm(struct i40e_hw *hw) | |
118 | { | |
119 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
120 | u32 total_delay = 0; | |
121 | ||
122 | DEBUGFUNC("i40e_release_nvm"); | |
123 | ||
124 | if (hw->nvm.blank_nvm_mode) | |
125 | return; | |
126 | ||
127 | ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); | |
128 | ||
129 | /* there are some rare cases when trying to release the resource | |
130 | * results in an admin Q timeout, so handle them correctly | |
131 | */ | |
132 | while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && | |
133 | (total_delay < hw->aq.asq_cmd_timeout)) { | |
134 | i40e_msec_delay(1); | |
135 | ret_code = i40e_aq_release_resource(hw, | |
136 | I40E_NVM_RESOURCE_ID, 0, NULL); | |
137 | total_delay++; | |
138 | } | |
139 | } | |
140 | ||
141 | /** | |
142 | * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit | |
143 | * @hw: pointer to the HW structure | |
144 | * | |
145 | * Polls the SRCTL Shadow RAM register done bit. | |
146 | **/ | |
147 | static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) | |
148 | { | |
149 | enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; | |
150 | u32 srctl, wait_cnt; | |
151 | ||
152 | DEBUGFUNC("i40e_poll_sr_srctl_done_bit"); | |
153 | ||
154 | /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ | |
155 | for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { | |
156 | srctl = rd32(hw, I40E_GLNVM_SRCTL); | |
157 | if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { | |
158 | ret_code = I40E_SUCCESS; | |
159 | break; | |
160 | } | |
161 | i40e_usec_delay(5); | |
162 | } | |
163 | if (ret_code == I40E_ERR_TIMEOUT) | |
164 | i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); | |
165 | return ret_code; | |
166 | } | |
167 | ||
7c673cae FG |
168 | /** |
169 | * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register | |
170 | * @hw: pointer to the HW structure | |
171 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) | |
172 | * @data: word read from the Shadow RAM | |
173 | * | |
174 | * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. | |
175 | **/ | |
11fdf7f2 TL |
176 | STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, |
177 | u16 offset, | |
178 | u16 *data) | |
7c673cae FG |
179 | { |
180 | enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; | |
181 | u32 sr_reg; | |
182 | ||
183 | DEBUGFUNC("i40e_read_nvm_word_srctl"); | |
184 | ||
185 | if (offset >= hw->nvm.sr_size) { | |
186 | i40e_debug(hw, I40E_DEBUG_NVM, | |
187 | "NVM read error: Offset %d beyond Shadow RAM limit %d\n", | |
188 | offset, hw->nvm.sr_size); | |
189 | ret_code = I40E_ERR_PARAM; | |
190 | goto read_nvm_exit; | |
191 | } | |
192 | ||
193 | /* Poll the done bit first */ | |
194 | ret_code = i40e_poll_sr_srctl_done_bit(hw); | |
195 | if (ret_code == I40E_SUCCESS) { | |
196 | /* Write the address and start reading */ | |
197 | sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | | |
198 | BIT(I40E_GLNVM_SRCTL_START_SHIFT); | |
199 | wr32(hw, I40E_GLNVM_SRCTL, sr_reg); | |
200 | ||
201 | /* Poll I40E_GLNVM_SRCTL until the done bit is set */ | |
202 | ret_code = i40e_poll_sr_srctl_done_bit(hw); | |
203 | if (ret_code == I40E_SUCCESS) { | |
204 | sr_reg = rd32(hw, I40E_GLNVM_SRDATA); | |
205 | *data = (u16)((sr_reg & | |
206 | I40E_GLNVM_SRDATA_RDDATA_MASK) | |
207 | >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); | |
208 | } | |
209 | } | |
210 | if (ret_code != I40E_SUCCESS) | |
211 | i40e_debug(hw, I40E_DEBUG_NVM, | |
212 | "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", | |
213 | offset); | |
214 | ||
215 | read_nvm_exit: | |
216 | return ret_code; | |
217 | } | |
218 | ||
11fdf7f2 TL |
219 | /** |
220 | * i40e_read_nvm_aq - Read Shadow RAM. | |
221 | * @hw: pointer to the HW structure. | |
222 | * @module_pointer: module pointer location in words from the NVM beginning | |
223 | * @offset: offset in words from module start | |
224 | * @words: number of words to write | |
225 | * @data: buffer with words to write to the Shadow RAM | |
226 | * @last_command: tells the AdminQ that this is the last command | |
227 | * | |
228 | * Writes a 16 bit words buffer to the Shadow RAM using the admin command. | |
229 | **/ | |
230 | STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, | |
231 | u8 module_pointer, u32 offset, | |
232 | u16 words, void *data, | |
233 | bool last_command) | |
234 | { | |
235 | enum i40e_status_code ret_code = I40E_ERR_NVM; | |
236 | struct i40e_asq_cmd_details cmd_details; | |
237 | ||
238 | DEBUGFUNC("i40e_read_nvm_aq"); | |
239 | ||
240 | memset(&cmd_details, 0, sizeof(cmd_details)); | |
241 | cmd_details.wb_desc = &hw->nvm_wb_desc; | |
242 | ||
243 | /* Here we are checking the SR limit only for the flat memory model. | |
244 | * We cannot do it for the module-based model, as we did not acquire | |
245 | * the NVM resource yet (we cannot get the module pointer value). | |
246 | * Firmware will check the module-based model. | |
247 | */ | |
248 | if ((offset + words) > hw->nvm.sr_size) | |
249 | i40e_debug(hw, I40E_DEBUG_NVM, | |
250 | "NVM write error: offset %d beyond Shadow RAM limit %d\n", | |
251 | (offset + words), hw->nvm.sr_size); | |
252 | else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) | |
253 | /* We can write only up to 4KB (one sector), in one AQ write */ | |
254 | i40e_debug(hw, I40E_DEBUG_NVM, | |
255 | "NVM write fail error: tried to write %d words, limit is %d.\n", | |
256 | words, I40E_SR_SECTOR_SIZE_IN_WORDS); | |
257 | else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) | |
258 | != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) | |
259 | /* A single write cannot spread over two sectors */ | |
260 | i40e_debug(hw, I40E_DEBUG_NVM, | |
261 | "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", | |
262 | offset, words); | |
263 | else | |
264 | ret_code = i40e_aq_read_nvm(hw, module_pointer, | |
265 | 2 * offset, /*bytes*/ | |
266 | 2 * words, /*bytes*/ | |
267 | data, last_command, &cmd_details); | |
268 | ||
269 | return ret_code; | |
270 | } | |
271 | ||
7c673cae FG |
272 | /** |
273 | * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ | |
274 | * @hw: pointer to the HW structure | |
275 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) | |
276 | * @data: word read from the Shadow RAM | |
277 | * | |
11fdf7f2 | 278 | * Reads one 16 bit word from the Shadow RAM using the AdminQ |
7c673cae | 279 | **/ |
11fdf7f2 TL |
280 | STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, |
281 | u16 *data) | |
7c673cae FG |
282 | { |
283 | enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; | |
284 | ||
285 | DEBUGFUNC("i40e_read_nvm_word_aq"); | |
286 | ||
287 | ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); | |
288 | *data = LE16_TO_CPU(*(__le16 *)data); | |
289 | ||
290 | return ret_code; | |
291 | } | |
292 | ||
293 | /** | |
11fdf7f2 | 294 | * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking |
7c673cae | 295 | * @hw: pointer to the HW structure |
11fdf7f2 TL |
296 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) |
297 | * @data: word read from the Shadow RAM | |
7c673cae | 298 | * |
11fdf7f2 TL |
299 | * Reads one 16 bit word from the Shadow RAM. |
300 | * | |
301 | * Do not use this function except in cases where the nvm lock is already | |
302 | * taken via i40e_acquire_nvm(). | |
7c673cae | 303 | **/ |
11fdf7f2 TL |
304 | enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, |
305 | u16 offset, | |
306 | u16 *data) | |
7c673cae | 307 | { |
7c673cae | 308 | |
7c673cae | 309 | if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) |
11fdf7f2 TL |
310 | return i40e_read_nvm_word_aq(hw, offset, data); |
311 | ||
312 | return i40e_read_nvm_word_srctl(hw, offset, data); | |
7c673cae FG |
313 | } |
314 | ||
315 | /** | |
11fdf7f2 | 316 | * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary |
7c673cae | 317 | * @hw: pointer to the HW structure |
11fdf7f2 TL |
318 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) |
319 | * @data: word read from the Shadow RAM | |
7c673cae | 320 | * |
11fdf7f2 | 321 | * Reads one 16 bit word from the Shadow RAM. |
7c673cae | 322 | **/ |
11fdf7f2 TL |
323 | enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, |
324 | u16 *data) | |
7c673cae FG |
325 | { |
326 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
327 | ||
11fdf7f2 | 328 | if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) |
7c673cae | 329 | ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); |
11fdf7f2 TL |
330 | |
331 | if (ret_code) | |
332 | return ret_code; | |
333 | ret_code = __i40e_read_nvm_word(hw, offset, data); | |
334 | ||
335 | if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) | |
336 | i40e_release_nvm(hw); | |
7c673cae FG |
337 | return ret_code; |
338 | } | |
339 | ||
340 | /** | |
341 | * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register | |
342 | * @hw: pointer to the HW structure | |
343 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). | |
344 | * @words: (in) number of words to read; (out) number of words actually read | |
345 | * @data: words read from the Shadow RAM | |
346 | * | |
347 | * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() | |
348 | * method. The buffer read is preceded by the NVM ownership take | |
349 | * and followed by the release. | |
350 | **/ | |
11fdf7f2 TL |
351 | STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, |
352 | u16 *words, u16 *data) | |
7c673cae FG |
353 | { |
354 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
355 | u16 index, word; | |
356 | ||
357 | DEBUGFUNC("i40e_read_nvm_buffer_srctl"); | |
358 | ||
359 | /* Loop through the selected region */ | |
360 | for (word = 0; word < *words; word++) { | |
361 | index = offset + word; | |
362 | ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); | |
363 | if (ret_code != I40E_SUCCESS) | |
364 | break; | |
365 | } | |
366 | ||
367 | /* Update the number of words read from the Shadow RAM */ | |
368 | *words = word; | |
369 | ||
370 | return ret_code; | |
371 | } | |
372 | ||
373 | /** | |
374 | * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ | |
375 | * @hw: pointer to the HW structure | |
376 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). | |
377 | * @words: (in) number of words to read; (out) number of words actually read | |
378 | * @data: words read from the Shadow RAM | |
379 | * | |
380 | * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq() | |
381 | * method. The buffer read is preceded by the NVM ownership take | |
382 | * and followed by the release. | |
383 | **/ | |
11fdf7f2 TL |
384 | STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, |
385 | u16 *words, u16 *data) | |
7c673cae FG |
386 | { |
387 | enum i40e_status_code ret_code; | |
388 | u16 read_size = *words; | |
389 | bool last_cmd = false; | |
390 | u16 words_read = 0; | |
391 | u16 i = 0; | |
392 | ||
393 | DEBUGFUNC("i40e_read_nvm_buffer_aq"); | |
394 | ||
395 | do { | |
396 | /* Calculate number of bytes we should read in this step. | |
397 | * FVL AQ do not allow to read more than one page at a time or | |
398 | * to cross page boundaries. | |
399 | */ | |
400 | if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS) | |
401 | read_size = min(*words, | |
402 | (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS - | |
403 | (offset % I40E_SR_SECTOR_SIZE_IN_WORDS))); | |
404 | else | |
405 | read_size = min((*words - words_read), | |
406 | I40E_SR_SECTOR_SIZE_IN_WORDS); | |
407 | ||
408 | /* Check if this is last command, if so set proper flag */ | |
409 | if ((words_read + read_size) >= *words) | |
410 | last_cmd = true; | |
411 | ||
412 | ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, | |
413 | data + words_read, last_cmd); | |
414 | if (ret_code != I40E_SUCCESS) | |
415 | goto read_nvm_buffer_aq_exit; | |
416 | ||
417 | /* Increment counter for words already read and move offset to | |
418 | * new read location | |
419 | */ | |
420 | words_read += read_size; | |
421 | offset += read_size; | |
422 | } while (words_read < *words); | |
423 | ||
424 | for (i = 0; i < *words; i++) | |
425 | data[i] = LE16_TO_CPU(((__le16 *)data)[i]); | |
426 | ||
427 | read_nvm_buffer_aq_exit: | |
428 | *words = words_read; | |
429 | return ret_code; | |
430 | } | |
431 | ||
432 | /** | |
11fdf7f2 TL |
433 | * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock |
434 | * @hw: pointer to the HW structure | |
435 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). | |
436 | * @words: (in) number of words to read; (out) number of words actually read | |
437 | * @data: words read from the Shadow RAM | |
7c673cae | 438 | * |
11fdf7f2 TL |
439 | * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() |
440 | * method. | |
7c673cae | 441 | **/ |
11fdf7f2 TL |
442 | enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, |
443 | u16 offset, | |
444 | u16 *words, u16 *data) | |
7c673cae | 445 | { |
11fdf7f2 TL |
446 | if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) |
447 | return i40e_read_nvm_buffer_aq(hw, offset, words, data); | |
7c673cae | 448 | |
11fdf7f2 TL |
449 | return i40e_read_nvm_buffer_srctl(hw, offset, words, data); |
450 | } | |
7c673cae | 451 | |
11fdf7f2 TL |
452 | /** |
453 | * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary | |
454 | * @hw: pointer to the HW structure | |
455 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). | |
456 | * @words: (in) number of words to read; (out) number of words actually read | |
457 | * @data: words read from the Shadow RAM | |
458 | * | |
459 | * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() | |
460 | * method. The buffer read is preceded by the NVM ownership take | |
461 | * and followed by the release. | |
462 | **/ | |
463 | enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, | |
464 | u16 *words, u16 *data) | |
465 | { | |
466 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
7c673cae | 467 | |
11fdf7f2 TL |
468 | if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { |
469 | ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); | |
470 | if (!ret_code) { | |
471 | ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, | |
472 | data); | |
473 | i40e_release_nvm(hw); | |
474 | } | |
475 | } else { | |
476 | ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); | |
477 | } | |
7c673cae FG |
478 | |
479 | return ret_code; | |
480 | } | |
481 | ||
482 | /** | |
483 | * i40e_write_nvm_aq - Writes Shadow RAM. | |
484 | * @hw: pointer to the HW structure. | |
485 | * @module_pointer: module pointer location in words from the NVM beginning | |
486 | * @offset: offset in words from module start | |
487 | * @words: number of words to write | |
488 | * @data: buffer with words to write to the Shadow RAM | |
489 | * @last_command: tells the AdminQ that this is the last command | |
490 | * | |
491 | * Writes a 16 bit words buffer to the Shadow RAM using the admin command. | |
492 | **/ | |
493 | enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, | |
494 | u32 offset, u16 words, void *data, | |
495 | bool last_command) | |
496 | { | |
497 | enum i40e_status_code ret_code = I40E_ERR_NVM; | |
498 | struct i40e_asq_cmd_details cmd_details; | |
499 | ||
500 | DEBUGFUNC("i40e_write_nvm_aq"); | |
501 | ||
502 | memset(&cmd_details, 0, sizeof(cmd_details)); | |
503 | cmd_details.wb_desc = &hw->nvm_wb_desc; | |
504 | ||
505 | /* Here we are checking the SR limit only for the flat memory model. | |
506 | * We cannot do it for the module-based model, as we did not acquire | |
507 | * the NVM resource yet (we cannot get the module pointer value). | |
508 | * Firmware will check the module-based model. | |
509 | */ | |
510 | if ((offset + words) > hw->nvm.sr_size) | |
511 | DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n"); | |
512 | else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) | |
513 | /* We can write only up to 4KB (one sector), in one AQ write */ | |
514 | DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n"); | |
515 | else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) | |
516 | != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) | |
517 | /* A single write cannot spread over two sectors */ | |
518 | DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n"); | |
519 | else | |
520 | ret_code = i40e_aq_update_nvm(hw, module_pointer, | |
521 | 2 * offset, /*bytes*/ | |
522 | 2 * words, /*bytes*/ | |
11fdf7f2 TL |
523 | data, last_command, 0, |
524 | &cmd_details); | |
7c673cae FG |
525 | |
526 | return ret_code; | |
527 | } | |
528 | ||
529 | /** | |
530 | * __i40e_write_nvm_word - Writes Shadow RAM word | |
531 | * @hw: pointer to the HW structure | |
532 | * @offset: offset of the Shadow RAM word to write | |
533 | * @data: word to write to the Shadow RAM | |
534 | * | |
535 | * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method. | |
536 | * NVM ownership have to be acquired and released (on ARQ completion event | |
537 | * reception) by caller. To commit SR to NVM update checksum function | |
538 | * should be called. | |
539 | **/ | |
540 | enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset, | |
541 | void *data) | |
542 | { | |
543 | DEBUGFUNC("i40e_write_nvm_word"); | |
544 | ||
545 | *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data)); | |
546 | ||
547 | /* Value 0x00 below means that we treat SR as a flat mem */ | |
548 | return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false); | |
549 | } | |
550 | ||
551 | /** | |
552 | * __i40e_write_nvm_buffer - Writes Shadow RAM buffer | |
553 | * @hw: pointer to the HW structure | |
554 | * @module_pointer: module pointer location in words from the NVM beginning | |
555 | * @offset: offset of the Shadow RAM buffer to write | |
556 | * @words: number of words to write | |
557 | * @data: words to write to the Shadow RAM | |
558 | * | |
559 | * Writes a 16 bit words buffer to the Shadow RAM using the admin command. | |
560 | * NVM ownership must be acquired before calling this function and released | |
561 | * on ARQ completion event reception by caller. To commit SR to NVM update | |
562 | * checksum function should be called. | |
563 | **/ | |
564 | enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, | |
565 | u8 module_pointer, u32 offset, | |
566 | u16 words, void *data) | |
567 | { | |
568 | __le16 *le_word_ptr = (__le16 *)data; | |
569 | u16 *word_ptr = (u16 *)data; | |
570 | u32 i = 0; | |
571 | ||
572 | DEBUGFUNC("i40e_write_nvm_buffer"); | |
573 | ||
574 | for (i = 0; i < words; i++) | |
575 | le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]); | |
576 | ||
577 | /* Here we will only write one buffer as the size of the modules | |
578 | * mirrored in the Shadow RAM is always less than 4K. | |
579 | */ | |
580 | return i40e_write_nvm_aq(hw, module_pointer, offset, words, | |
581 | data, false); | |
582 | } | |
583 | ||
584 | /** | |
585 | * i40e_calc_nvm_checksum - Calculates and returns the checksum | |
586 | * @hw: pointer to hardware structure | |
587 | * @checksum: pointer to the checksum | |
588 | * | |
589 | * This function calculates SW Checksum that covers the whole 64kB shadow RAM | |
590 | * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD | |
591 | * is customer specific and unknown. Therefore, this function skips all maximum | |
592 | * possible size of VPD (1kB). | |
593 | **/ | |
594 | enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) | |
595 | { | |
596 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
597 | struct i40e_virt_mem vmem; | |
598 | u16 pcie_alt_module = 0; | |
599 | u16 checksum_local = 0; | |
600 | u16 vpd_module = 0; | |
601 | u16 *data; | |
602 | u16 i = 0; | |
603 | ||
604 | DEBUGFUNC("i40e_calc_nvm_checksum"); | |
605 | ||
606 | ret_code = i40e_allocate_virt_mem(hw, &vmem, | |
607 | I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); | |
608 | if (ret_code) | |
609 | goto i40e_calc_nvm_checksum_exit; | |
610 | data = (u16 *)vmem.va; | |
611 | ||
612 | /* read pointer to VPD area */ | |
11fdf7f2 | 613 | ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); |
7c673cae FG |
614 | if (ret_code != I40E_SUCCESS) { |
615 | ret_code = I40E_ERR_NVM_CHECKSUM; | |
616 | goto i40e_calc_nvm_checksum_exit; | |
617 | } | |
618 | ||
619 | /* read pointer to PCIe Alt Auto-load module */ | |
11fdf7f2 | 620 | ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, |
7c673cae FG |
621 | &pcie_alt_module); |
622 | if (ret_code != I40E_SUCCESS) { | |
623 | ret_code = I40E_ERR_NVM_CHECKSUM; | |
624 | goto i40e_calc_nvm_checksum_exit; | |
625 | } | |
626 | ||
627 | /* Calculate SW checksum that covers the whole 64kB shadow RAM | |
628 | * except the VPD and PCIe ALT Auto-load modules | |
629 | */ | |
630 | for (i = 0; i < hw->nvm.sr_size; i++) { | |
631 | /* Read SR page */ | |
632 | if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { | |
633 | u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; | |
634 | ||
635 | ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); | |
636 | if (ret_code != I40E_SUCCESS) { | |
637 | ret_code = I40E_ERR_NVM_CHECKSUM; | |
638 | goto i40e_calc_nvm_checksum_exit; | |
639 | } | |
640 | } | |
641 | ||
642 | /* Skip Checksum word */ | |
643 | if (i == I40E_SR_SW_CHECKSUM_WORD) | |
644 | continue; | |
645 | /* Skip VPD module (convert byte size to word count) */ | |
646 | if ((i >= (u32)vpd_module) && | |
647 | (i < ((u32)vpd_module + | |
648 | (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { | |
649 | continue; | |
650 | } | |
651 | /* Skip PCIe ALT module (convert byte size to word count) */ | |
652 | if ((i >= (u32)pcie_alt_module) && | |
653 | (i < ((u32)pcie_alt_module + | |
654 | (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { | |
655 | continue; | |
656 | } | |
657 | ||
658 | checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; | |
659 | } | |
660 | ||
661 | *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; | |
662 | ||
663 | i40e_calc_nvm_checksum_exit: | |
664 | i40e_free_virt_mem(hw, &vmem); | |
665 | return ret_code; | |
666 | } | |
667 | ||
668 | /** | |
669 | * i40e_update_nvm_checksum - Updates the NVM checksum | |
670 | * @hw: pointer to hardware structure | |
671 | * | |
672 | * NVM ownership must be acquired before calling this function and released | |
673 | * on ARQ completion event reception by caller. | |
674 | * This function will commit SR to NVM. | |
675 | **/ | |
676 | enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw) | |
677 | { | |
678 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
679 | u16 checksum; | |
680 | __le16 le_sum; | |
681 | ||
682 | DEBUGFUNC("i40e_update_nvm_checksum"); | |
683 | ||
684 | ret_code = i40e_calc_nvm_checksum(hw, &checksum); | |
685 | le_sum = CPU_TO_LE16(checksum); | |
686 | if (ret_code == I40E_SUCCESS) | |
687 | ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, | |
688 | 1, &le_sum, true); | |
689 | ||
690 | return ret_code; | |
691 | } | |
692 | ||
693 | /** | |
694 | * i40e_validate_nvm_checksum - Validate EEPROM checksum | |
695 | * @hw: pointer to hardware structure | |
696 | * @checksum: calculated checksum | |
697 | * | |
698 | * Performs checksum calculation and validates the NVM SW checksum. If the | |
699 | * caller does not need checksum, the value can be NULL. | |
700 | **/ | |
701 | enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, | |
702 | u16 *checksum) | |
703 | { | |
704 | enum i40e_status_code ret_code = I40E_SUCCESS; | |
705 | u16 checksum_sr = 0; | |
706 | u16 checksum_local = 0; | |
707 | ||
708 | DEBUGFUNC("i40e_validate_nvm_checksum"); | |
709 | ||
11fdf7f2 TL |
710 | /* We must acquire the NVM lock in order to correctly synchronize the |
711 | * NVM accesses across multiple PFs. Without doing so it is possible | |
712 | * for one of the PFs to read invalid data potentially indicating that | |
713 | * the checksum is invalid. | |
714 | */ | |
715 | ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); | |
716 | if (ret_code) | |
717 | return ret_code; | |
718 | ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); | |
719 | __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); | |
720 | i40e_release_nvm(hw); | |
721 | if (ret_code) | |
722 | return ret_code; | |
7c673cae FG |
723 | |
724 | /* Verify read checksum from EEPROM is the same as | |
725 | * calculated checksum | |
726 | */ | |
727 | if (checksum_local != checksum_sr) | |
728 | ret_code = I40E_ERR_NVM_CHECKSUM; | |
729 | ||
730 | /* If the user cares, return the calculated checksum */ | |
731 | if (checksum) | |
732 | *checksum = checksum_local; | |
733 | ||
7c673cae FG |
734 | return ret_code; |
735 | } | |
736 | ||
737 | STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, | |
738 | struct i40e_nvm_access *cmd, | |
739 | u8 *bytes, int *perrno); | |
740 | STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw, | |
741 | struct i40e_nvm_access *cmd, | |
742 | u8 *bytes, int *perrno); | |
743 | STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw, | |
744 | struct i40e_nvm_access *cmd, | |
745 | u8 *bytes, int *perrno); | |
746 | STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, | |
747 | struct i40e_nvm_access *cmd, | |
748 | int *perrno); | |
749 | STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw, | |
750 | struct i40e_nvm_access *cmd, | |
751 | int *perrno); | |
752 | STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw, | |
753 | struct i40e_nvm_access *cmd, | |
754 | u8 *bytes, int *perrno); | |
755 | STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw, | |
756 | struct i40e_nvm_access *cmd, | |
757 | u8 *bytes, int *perrno); | |
758 | STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, | |
759 | struct i40e_nvm_access *cmd, | |
760 | u8 *bytes, int *perrno); | |
761 | STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, | |
762 | struct i40e_nvm_access *cmd, | |
763 | u8 *bytes, int *perrno); | |
11fdf7f2 TL |
764 | STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw, |
765 | struct i40e_nvm_access *cmd, | |
766 | u8 *bytes, int *perrno); | |
7c673cae FG |
767 | STATIC INLINE u8 i40e_nvmupd_get_module(u32 val) |
768 | { | |
769 | return (u8)(val & I40E_NVM_MOD_PNT_MASK); | |
770 | } | |
771 | STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val) | |
772 | { | |
773 | return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); | |
774 | } | |
775 | ||
11fdf7f2 TL |
776 | STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val) |
777 | { | |
778 | return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >> | |
779 | I40E_NVM_PRESERVATION_FLAGS_SHIFT); | |
780 | } | |
781 | ||
7c673cae FG |
/* Human-readable names for the NVM update commands, used only for debug
 * logging. NOTE: the entry order must match the enum i40e_nvmupd_cmd
 * definition exactly — callers index this table directly with the enum
 * value (see i40e_nvmupd_command).
 */
STATIC const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};
801 | ||
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 **/
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	enum i40e_status_code status;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_command");

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	/* An invalid command does NOT return here; it falls through to the
	 * state handlers below, whose default cases reject it. perrno is
	 * set now so the debug message reflects the failure.
	 */
	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		/* byte 0: current state machine state */
		bytes[0] = hw->nvmupd_state;

		/* bytes 2-3: opcode being waited on, if the buffer is big
		 * enough to carry it.
		 * NOTE(review): assumes &bytes[2] is suitably aligned for a
		 * u16 store — confirm caller guarantees alignment.
		 */
		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return I40E_SUCCESS;
	}

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = I40E_SUCCESS;
			break;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return status;
}
918 | ||
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 *
 * Ownership discipline: every case that acquires the NVM resource either
 * releases it on failure, releases it after a completed one-shot operation,
 * or keeps it held across a state transition (setting nvm_release_on_done
 * and/or nvm_wait_opcode so a later event releases it).
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* single-shot read: acquire, read, release */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* start of a multi-read session: keep NVM held and enter
		 * READING state on success
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase completes asynchronously: wait for the AQ event,
		 * release NVM when it arrives
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* single-shot write: wait for completion event, then release */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* start of a multi-write session: NVM stays held (no
		 * nvm_release_on_done) until the closing LCB arrives
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* standalone checksum update */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				/* prefer the AQ error translation; fall back
				 * to -EIO when no AQ error was recorded
				 */
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1059 | ||
/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_reading");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		/* continue the read session; NVM stays held */
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		/* last command in buffer: read, release NVM ownership and
		 * drop back to Init regardless of read status
		 */
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1103 | ||
/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 *
 * On an EBUSY AQ error the write semaphore may have expired mid-session;
 * one reacquire-and-retry attempt is made (see comment before the retry
 * block below).
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	DEBUGFUNC("i40e_nvmupd_state_writing");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* continue the write session; on success wait for the
		 * completion event before accepting the next chunk
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last command in buffer: on failure bail out to Init; on
		 * success wait for the event and release NVM when it lands
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		enum i40e_status_code old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				/* reacquire failed: report the original
				 * error, not the reacquire error
				 */
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}
1225 | ||
/**
 * i40e_nvmupd_clear_wait_state - clear wait state on hw
 * @hw: pointer to the hardware structure
 *
 * Drops any pending wait-for-AQ-event bookkeeping: releases the NVM
 * resource if it was scheduled for release-on-completion, clears the
 * awaited opcode, and advances the state machine out of its *_WAIT state.
 * If the last ARQ event carried an error, the state machine is parked in
 * I40E_NVMUPD_STATE_ERROR instead (cleared on the next status read).
 **/
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
	i40e_debug(hw, I40E_DEBUG_NVM,
		   "NVMUPD: clearing wait on opcode 0x%04x\n",
		   hw->nvm_wait_opcode);

	if (hw->nvm_release_on_done) {
		i40e_release_nvm(hw);
		hw->nvm_release_on_done = false;
	}
	hw->nvm_wait_opcode = 0;

	/* an ARQ error supersedes the normal state transition */
	if (hw->aq.arq_last_status) {
		hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
		return;
	}

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT_WAIT:
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	case I40E_NVMUPD_STATE_WRITE_WAIT:
		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
		break;

	default:
		break;
	}
}
1260 | ||
/**
 * i40e_nvmupd_check_wait_event - handle NVM update operation events
 * @hw: pointer to the hardware structure
 * @opcode: the event that just happened
 * @desc: AdminQ descriptor
 *
 * Called on ARQ events. If the event's opcode matches the one the NVM
 * update state machine is waiting on, the descriptor is saved for a later
 * GET_AQ_EVENT query and the wait state is cleared.
 **/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
				  struct i40e_aq_desc *desc)
{
	u32 aq_desc_len = sizeof(struct i40e_aq_desc);

	if (opcode == hw->nvm_wait_opcode) {
		/* stash the event descriptor so userspace can fetch it */
		i40e_memcpy(&hw->nvm_aq_event_desc, desc,
			    aq_desc_len, I40E_NONDMA_TO_NONDMA);
		i40e_nvmupd_clear_wait_state(hw);
	}
}
1278 | ||
1279 | /** | |
1280 | * i40e_nvmupd_validate_command - Validate given command | |
1281 | * @hw: pointer to hardware structure | |
1282 | * @cmd: pointer to nvm update command buffer | |
1283 | * @perrno: pointer to return error code | |
1284 | * | |
1285 | * Return one of the valid command types or I40E_NVMUPD_INVALID | |
1286 | **/ | |
1287 | STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, | |
1288 | struct i40e_nvm_access *cmd, | |
1289 | int *perrno) | |
1290 | { | |
1291 | enum i40e_nvmupd_cmd upd_cmd; | |
1292 | u8 module, transaction; | |
1293 | ||
1294 | DEBUGFUNC("i40e_nvmupd_validate_command\n"); | |
1295 | ||
1296 | /* anything that doesn't match a recognized case is an error */ | |
1297 | upd_cmd = I40E_NVMUPD_INVALID; | |
1298 | ||
1299 | transaction = i40e_nvmupd_get_transaction(cmd->config); | |
1300 | module = i40e_nvmupd_get_module(cmd->config); | |
1301 | ||
1302 | /* limits on data size */ | |
1303 | if ((cmd->data_size < 1) || | |
1304 | (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { | |
1305 | i40e_debug(hw, I40E_DEBUG_NVM, | |
1306 | "i40e_nvmupd_validate_command data_size %d\n", | |
1307 | cmd->data_size); | |
1308 | *perrno = -EFAULT; | |
1309 | return I40E_NVMUPD_INVALID; | |
1310 | } | |
1311 | ||
1312 | switch (cmd->command) { | |
1313 | case I40E_NVM_READ: | |
1314 | switch (transaction) { | |
1315 | case I40E_NVM_CON: | |
1316 | upd_cmd = I40E_NVMUPD_READ_CON; | |
1317 | break; | |
1318 | case I40E_NVM_SNT: | |
1319 | upd_cmd = I40E_NVMUPD_READ_SNT; | |
1320 | break; | |
1321 | case I40E_NVM_LCB: | |
1322 | upd_cmd = I40E_NVMUPD_READ_LCB; | |
1323 | break; | |
1324 | case I40E_NVM_SA: | |
1325 | upd_cmd = I40E_NVMUPD_READ_SA; | |
1326 | break; | |
1327 | case I40E_NVM_EXEC: | |
1328 | if (module == 0xf) | |
1329 | upd_cmd = I40E_NVMUPD_STATUS; | |
1330 | else if (module == 0) | |
1331 | upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; | |
1332 | break; | |
11fdf7f2 TL |
1333 | case I40E_NVM_AQE: |
1334 | upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; | |
1335 | break; | |
7c673cae FG |
1336 | } |
1337 | break; | |
1338 | ||
1339 | case I40E_NVM_WRITE: | |
1340 | switch (transaction) { | |
1341 | case I40E_NVM_CON: | |
1342 | upd_cmd = I40E_NVMUPD_WRITE_CON; | |
1343 | break; | |
1344 | case I40E_NVM_SNT: | |
1345 | upd_cmd = I40E_NVMUPD_WRITE_SNT; | |
1346 | break; | |
1347 | case I40E_NVM_LCB: | |
1348 | upd_cmd = I40E_NVMUPD_WRITE_LCB; | |
1349 | break; | |
1350 | case I40E_NVM_SA: | |
1351 | upd_cmd = I40E_NVMUPD_WRITE_SA; | |
1352 | break; | |
1353 | case I40E_NVM_ERA: | |
1354 | upd_cmd = I40E_NVMUPD_WRITE_ERA; | |
1355 | break; | |
1356 | case I40E_NVM_CSUM: | |
1357 | upd_cmd = I40E_NVMUPD_CSUM_CON; | |
1358 | break; | |
1359 | case (I40E_NVM_CSUM|I40E_NVM_SA): | |
1360 | upd_cmd = I40E_NVMUPD_CSUM_SA; | |
1361 | break; | |
1362 | case (I40E_NVM_CSUM|I40E_NVM_LCB): | |
1363 | upd_cmd = I40E_NVMUPD_CSUM_LCB; | |
1364 | break; | |
1365 | case I40E_NVM_EXEC: | |
1366 | if (module == 0) | |
1367 | upd_cmd = I40E_NVMUPD_EXEC_AQ; | |
1368 | break; | |
1369 | } | |
1370 | break; | |
1371 | } | |
1372 | ||
1373 | return upd_cmd; | |
1374 | } | |
1375 | ||
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.
 *
 * The caller's buffer holds a raw AQ descriptor followed by optional
 * indirect data. cmd->offset doubles as the "wait for this AQ opcode"
 * request (0 = don't wait; 0xffff = no-op sentinel used to clear waits).
 **/
STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	/* 0xffff is the clear-wait sentinel (see i40e_nvmupd_command);
	 * nothing to execute
	 */
	if (cmd->offset == 0xffff)
		return I40E_SUCCESS;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	/* NOTE(review): reinterprets the user byte buffer as a descriptor —
	 * assumes 'bytes' is adequately aligned for struct i40e_aq_desc
	 */
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
	if (buff_size) {
		/* lazily allocate the persistent exec buffer on first use */
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
						hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
				I40E_NONDMA_TO_NONDMA);
		}
	}

	/* a nonzero offset means a follow-up event is expected; clear the
	 * stale event descriptor before sending
	 */
	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1460 | ||
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.
 *
 * The result is logically the writeback descriptor (nvm_wb_desc)
 * immediately followed by the indirect data buffer (nvm_buff); cmd->offset
 * and cmd->data_size select a window into that concatenation, copied in up
 * to two pieces (descriptor part, then buffer part).
 **/
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* first piece: the portion that lies within the descriptor */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* window starts inside the data buffer */
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		/* second piece: the portion from the indirect data buffer */
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
	}

	return I40E_SUCCESS;
}
1529 | ||
11fdf7f2 TL |
1530 | /** |
1531 | * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq | |
1532 | * @hw: pointer to hardware structure | |
1533 | * @cmd: pointer to nvm update command buffer | |
1534 | * @bytes: pointer to the data buffer | |
1535 | * @perrno: pointer to return error code | |
1536 | * | |
1537 | * cmd structure contains identifiers and data buffer | |
1538 | **/ | |
1539 | STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw, | |
1540 | struct i40e_nvm_access *cmd, | |
1541 | u8 *bytes, int *perrno) | |
1542 | { | |
1543 | u32 aq_total_len; | |
1544 | u32 aq_desc_len; | |
1545 | ||
1546 | i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); | |
1547 | ||
1548 | aq_desc_len = sizeof(struct i40e_aq_desc); | |
1549 | aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen); | |
1550 | ||
1551 | /* check copylength range */ | |
1552 | if (cmd->data_size > aq_total_len) { | |
1553 | i40e_debug(hw, I40E_DEBUG_NVM, | |
1554 | "%s: copy length %d too big, trimming to %d\n", | |
1555 | __func__, cmd->data_size, aq_total_len); | |
1556 | cmd->data_size = aq_total_len; | |
1557 | } | |
1558 | ||
1559 | i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size, | |
1560 | I40E_NONDMA_TO_NONDMA); | |
1561 | ||
1562 | return I40E_SUCCESS; | |
1563 | } | |
1564 | ||
7c673cae FG |
1565 | /** |
1566 | * i40e_nvmupd_nvm_read - Read NVM | |
1567 | * @hw: pointer to hardware structure | |
1568 | * @cmd: pointer to nvm update command buffer | |
1569 | * @bytes: pointer to the data buffer | |
1570 | * @perrno: pointer to return error code | |
1571 | * | |
1572 | * cmd structure contains identifiers and data buffer | |
1573 | **/ | |
1574 | STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw, | |
1575 | struct i40e_nvm_access *cmd, | |
1576 | u8 *bytes, int *perrno) | |
1577 | { | |
1578 | struct i40e_asq_cmd_details cmd_details; | |
1579 | enum i40e_status_code status; | |
1580 | u8 module, transaction; | |
1581 | bool last; | |
1582 | ||
1583 | transaction = i40e_nvmupd_get_transaction(cmd->config); | |
1584 | module = i40e_nvmupd_get_module(cmd->config); | |
1585 | last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); | |
1586 | ||
1587 | memset(&cmd_details, 0, sizeof(cmd_details)); | |
1588 | cmd_details.wb_desc = &hw->nvm_wb_desc; | |
1589 | ||
1590 | status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, | |
1591 | bytes, last, &cmd_details); | |
1592 | if (status) { | |
1593 | i40e_debug(hw, I40E_DEBUG_NVM, | |
1594 | "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", | |
1595 | module, cmd->offset, cmd->data_size); | |
1596 | i40e_debug(hw, I40E_DEBUG_NVM, | |
1597 | "i40e_nvmupd_nvm_read status %d aq %d\n", | |
1598 | status, hw->aq.asq_last_status); | |
1599 | *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); | |
1600 | } | |
1601 | ||
1602 | return status; | |
1603 | } | |
1604 | ||
/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure. Issues an AQ
 * NVM erase; 'last' is derived from the LCB bit of the transaction field.
 **/
STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
						   struct i40e_nvm_access *cmd,
						   int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	/* capture the writeback descriptor for later GET_AQ_RESULT queries */
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}
1643 | ||
/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure. Issues an AQ
 * NVM update; 'last' is derived from the LCB bit of the transaction field,
 * and the preservation flags are decoded from the config word and passed
 * through to firmware.
 **/
STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
						   struct i40e_nvm_access *cmd,
						   u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	u8 preservation_flags;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);
	preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);

	memset(&cmd_details, 0, sizeof(cmd_details));
	/* capture the writeback descriptor for later GET_AQ_RESULT queries */
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    preservation_flags, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}