]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /******************************************************************************* |
2 | ||
3 | Copyright (c) 2001-2015, Intel Corporation | |
4 | All rights reserved. | |
5 | ||
6 | Redistribution and use in source and binary forms, with or without | |
7 | modification, are permitted provided that the following conditions are met: | |
8 | ||
9 | 1. Redistributions of source code must retain the above copyright notice, | |
10 | this list of conditions and the following disclaimer. | |
11 | ||
12 | 2. Redistributions in binary form must reproduce the above copyright | |
13 | notice, this list of conditions and the following disclaimer in the | |
14 | documentation and/or other materials provided with the distribution. | |
15 | ||
16 | 3. Neither the name of the Intel Corporation nor the names of its | |
17 | contributors may be used to endorse or promote products derived from | |
18 | this software without specific prior written permission. | |
19 | ||
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
23 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | |
24 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
25 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
26 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
27 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
28 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
29 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
30 | POSSIBILITY OF SUCH DAMAGE. | |
31 | ||
32 | ***************************************************************************/ | |
33 | ||
34 | #include "e1000_api.h" | |
35 | ||
36 | ||
37 | STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); | |
38 | STATIC void e1000_release_nvm_i210(struct e1000_hw *hw); | |
39 | STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); | |
40 | STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, | |
41 | u16 *data); | |
42 | STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); | |
43 | STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); | |
44 | ||
45 | /** | |
46 | * e1000_acquire_nvm_i210 - Request for access to EEPROM | |
47 | * @hw: pointer to the HW structure | |
48 | * | |
49 | * Acquire the necessary semaphores for exclusive access to the EEPROM. | |
50 | * Set the EEPROM access request bit and wait for EEPROM access grant bit. | |
51 | * Return successful if access grant bit set, else clear the request for | |
52 | * EEPROM access and return -E1000_ERR_NVM (-1). | |
53 | **/ | |
54 | STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) | |
55 | { | |
56 | s32 ret_val; | |
57 | ||
58 | DEBUGFUNC("e1000_acquire_nvm_i210"); | |
59 | ||
60 | ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); | |
61 | ||
62 | return ret_val; | |
63 | } | |
64 | ||
/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
STATIC void e1000_release_nvm_i210(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_i210");

	/* Counterpart of e1000_acquire_nvm_i210: give back the EEPROM
	 * resource bit in SW_FW_SYNC. */
	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
78 | ||
/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 *
 *  Returns E1000_SUCCESS once the requested bits are set in SW_FW_SYNC,
 *  or -E1000_ERR_SWFW_SYNC if the HW semaphore cannot be taken or the
 *  resource stays owned by firmware/another thread for the whole timeout.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* FW ownership bits sit in the upper 16 bits */
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		/* SW_FW_SYNC itself is protected by the HW semaphore; take it
		 * before every read-modify-write attempt. */
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;	/* resource free; HW semaphore still held */

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource bit, then drop the HW semaphore taken above. */
	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
130 | ||
/**
 *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	/* Spin until the HW semaphore is obtained; the read-modify-write of
	 * SW_FW_SYNC below must not race with firmware or another thread.
	 * NOTE(review): this loop has no timeout and will spin forever if
	 * the HW semaphore can never be taken. */
	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	/* Clear our ownership bit(s), then drop the HW semaphore. */
	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}
154 | ||
/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM: first wait for the
 *  SW semaphore bit (SMBI) to be clear, then latch the FW semaphore bit
 *  (SWESMBI).  Returns E1000_SUCCESS with both held, or -E1000_ERR_NVM
 *  on timeout.
 **/
STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	/* Timeout scales with NVM size; each poll waits 50 us. */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			e1000_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = E1000_READ_REG(hw, E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}
223 | ||
224 | /** | |
225 | * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register | |
226 | * @hw: pointer to the HW structure | |
227 | * @offset: offset of word in the Shadow Ram to read | |
228 | * @words: number of words to read | |
229 | * @data: word read from the Shadow Ram | |
230 | * | |
231 | * Reads a 16 bit word from the Shadow Ram using the EERD register. | |
232 | * Uses necessary synchronization semaphores. | |
233 | **/ | |
234 | s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, | |
235 | u16 *data) | |
236 | { | |
237 | s32 status = E1000_SUCCESS; | |
238 | u16 i, count; | |
239 | ||
240 | DEBUGFUNC("e1000_read_nvm_srrd_i210"); | |
241 | ||
242 | /* We cannot hold synchronization semaphores for too long, | |
243 | * because of forceful takeover procedure. However it is more efficient | |
244 | * to read in bursts than synchronizing access for each word. */ | |
245 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { | |
246 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | |
247 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | |
248 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | |
249 | status = e1000_read_nvm_eerd(hw, offset, count, | |
250 | data + i); | |
251 | hw->nvm.ops.release(hw); | |
252 | } else { | |
253 | status = E1000_ERR_SWFW_SYNC; | |
254 | } | |
255 | ||
256 | if (status != E1000_SUCCESS) | |
257 | break; | |
258 | } | |
259 | ||
260 | return status; | |
261 | } | |
262 | ||
263 | /** | |
264 | * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR | |
265 | * @hw: pointer to the HW structure | |
266 | * @offset: offset within the Shadow RAM to be written to | |
267 | * @words: number of words to write | |
268 | * @data: 16 bit word(s) to be written to the Shadow RAM | |
269 | * | |
270 | * Writes data to Shadow RAM at offset using EEWR register. | |
271 | * | |
272 | * If e1000_update_nvm_checksum is not called after this function , the | |
273 | * data will not be committed to FLASH and also Shadow RAM will most likely | |
274 | * contain an invalid checksum. | |
275 | * | |
276 | * If error code is returned, data and Shadow RAM may be inconsistent - buffer | |
277 | * partially written. | |
278 | **/ | |
279 | s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, | |
280 | u16 *data) | |
281 | { | |
282 | s32 status = E1000_SUCCESS; | |
283 | u16 i, count; | |
284 | ||
285 | DEBUGFUNC("e1000_write_nvm_srwr_i210"); | |
286 | ||
287 | /* We cannot hold synchronization semaphores for too long, | |
288 | * because of forceful takeover procedure. However it is more efficient | |
289 | * to write in bursts than synchronizing access for each word. */ | |
290 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { | |
291 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | |
292 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | |
293 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | |
294 | status = e1000_write_nvm_srwr(hw, offset, count, | |
295 | data + i); | |
296 | hw->nvm.ops.release(hw); | |
297 | } else { | |
298 | status = E1000_ERR_SWFW_SYNC; | |
299 | } | |
300 | ||
301 | if (status != E1000_SUCCESS) | |
302 | break; | |
303 | } | |
304 | ||
305 | return status; | |
306 | } | |
307 | ||
/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 *
 *  Caller must already hold the NVM synchronization semaphore.
 *  Returns E1000_SUCCESS, or -E1000_ERR_NVM on bad parameters or when a
 *  word write does not complete within the polling budget.
 **/
STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;	/* x 5 us delay = up to 500 ms per word */
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Pack address, data and the START command into one SRWR
		 * register write. */
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		/* Poll for the DONE bit before issuing the next word. */
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
366 | ||
/** e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 *
 *  Walks the iNVM records, skipping over CSR-autoload and RSA-key
 *  structures, until a word-autoload record matching @address is found.
 *  Returns E1000_SUCCESS or -E1000_ERR_INVM_VALUE_NOT_FOUND.
 **/
STATIC s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_word_i210");

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		/* An uninitialized record marks the end of valid data. */
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		/* Multi-dword structures: advance i past their payload so the
		 * next iteration lands on the following record header. */
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
409 | ||
/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word address (aka eeprom offset) to read
 *  @words: unused; present to match the nvm.ops.read signature
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 *  Known EEPROM words are served from the iNVM (with documented defaults
 *  when absent); ID words come from the PCI config copies in @hw;
 *  anything else yields NVM_RESERVED_WORD.
 **/
STATIC s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 E1000_UNUSEDARG words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;
	UNREFERENCED_1PARAMETER(words);

	DEBUGFUNC("e1000_read_invm_i210");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		/* Three consecutive words; OR-ing the results leaves ret_val
		 * non-zero if any of the sub-reads failed. */
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
						     &data[1]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
						     &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		/* Fall back to the I211 default when not programmed. */
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	/* The ID words are mirrored from PCI config space in hw. */
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
490 | ||
/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 *
 *  Snapshots the whole iNVM into a local buffer, then scans the version
 *  and image-type records backwards (from the highest non-ULT block down)
 *  to find the last-programmed entry.  Returns E1000_SUCCESS when found,
 *  -E1000_ERR_INVM_VALUE_NOT_FOUND otherwise.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
			    struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Blocks available for version info: total size minus the ULT area. */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	DEBUGFUNC("e1000_read_invm_version");

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number: walk from the last block toward the first,
	 * looking for the most recently programmed version field. */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			  (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	/* Split the raw version word into major/minor. */
	if (status == E1000_SUCCESS) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type: same backwards scan for the image-type field. */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			  ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}
589 | ||
/**
 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	/* Saved copy of the original read op while it is swapped out below. */
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	DEBUGFUNC("e1000_validate_nvm_checksum_i210");

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

		/*
		 * Replace the read function with semaphore grabbing with
		 * the one that skips this for a while.
		 * We have semaphore taken already here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}
626 | ||
627 | ||
/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum all words that precede the checksum word. */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				/* Release before bailing out on any error. */
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* Checksum word is chosen so the full sum equals NVM_SUM. */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the updated Shadow RAM contents to flash. */
		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
689 | ||
690 | /** | |
691 | * e1000_get_flash_presence_i210 - Check if flash device is detected. | |
692 | * @hw: pointer to the HW structure | |
693 | * | |
694 | **/ | |
695 | bool e1000_get_flash_presence_i210(struct e1000_hw *hw) | |
696 | { | |
697 | u32 eec = 0; | |
698 | bool ret_val = false; | |
699 | ||
700 | DEBUGFUNC("e1000_get_flash_presence_i210"); | |
701 | ||
702 | eec = E1000_READ_REG(hw, E1000_EECD); | |
703 | ||
704 | if (eec & E1000_EECD_FLASH_DETECTED_I210) | |
705 | ret_val = true; | |
706 | ||
707 | return ret_val; | |
708 | } | |
709 | ||
/**
 *  e1000_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 *  Waits for any in-progress flash cycle to finish, triggers a flash
 *  update via the FLUPD bit in EECD, then polls for completion.
 *  Returns E1000_SUCCESS, or -E1000_ERR_NVM on timeout.
 **/
s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 flup;

	DEBUGFUNC("e1000_update_flash_i210");

	/* Make sure no previous flash update is still pending. */
	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		DEBUGOUT("Flash update time out\n");
		goto out;
	}

	/* Kick off the update by setting FLUPD in EECD. */
	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
	E1000_WRITE_REG(hw, E1000_EECD, flup);

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == E1000_SUCCESS)
		DEBUGOUT("Flash update complete\n");
	else
		DEBUGOUT("Flash update time out\n");

out:
	return ret_val;
}
740 | ||
741 | /** | |
742 | * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. | |
743 | * @hw: pointer to the HW structure | |
744 | * | |
745 | **/ | |
746 | s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) | |
747 | { | |
748 | s32 ret_val = -E1000_ERR_NVM; | |
749 | u32 i, reg; | |
750 | ||
751 | DEBUGFUNC("e1000_pool_flash_update_done_i210"); | |
752 | ||
753 | for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { | |
754 | reg = E1000_READ_REG(hw, E1000_EECD); | |
755 | if (reg & E1000_EECD_FLUDONE_I210) { | |
756 | ret_val = E1000_SUCCESS; | |
757 | break; | |
758 | } | |
759 | usec_delay(5); | |
760 | } | |
761 | ||
762 | return ret_val; | |
763 | } | |
764 | ||
/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.
 *  Starts from the 82575 defaults, then overrides the ops depending on
 *  whether an external flash is present (Shadow-RAM ops) or not (iNVM
 *  ops with null write/validate/update).
 *  Returns the status of the 82575 parameter initialization.
 **/
STATIC s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i210");

	/* NOTE(review): ret_val is captured here and returned at the end,
	 * but the ops below are installed regardless of its value. */
	ret_val = e1000_init_nvm_params_82575(hw);
	nvm->ops.acquire = e1000_acquire_nvm_i210;
	nvm->ops.release = e1000_release_nvm_i210;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
	if (e1000_get_flash_presence_i210(hw)) {
		/* Flash-backed Shadow RAM: full read/write/checksum support. */
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = e1000_read_nvm_srrd_i210;
		nvm->ops.write = e1000_write_nvm_srwr_i210;
		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
		nvm->ops.update = e1000_update_nvm_checksum_i210;
	} else {
		/* iNVM (OTP) only: reads are emulated, writes are no-ops. */
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = e1000_read_invm_i210;
		nvm->ops.write = e1000_null_write_nvm;
		nvm->ops.validate = e1000_null_ops_generic;
		nvm->ops.update = e1000_null_ops_generic;
	}
	return ret_val;
}
797 | ||
798 | /** | |
799 | * e1000_init_function_pointers_i210 - Init func ptrs. | |
800 | * @hw: pointer to the HW structure | |
801 | * | |
802 | * Called to initialize all function pointers and parameters. | |
803 | **/ | |
804 | void e1000_init_function_pointers_i210(struct e1000_hw *hw) | |
805 | { | |
806 | e1000_init_function_pointers_82575(hw); | |
807 | hw->nvm.ops.init_params = e1000_init_nvm_params_i210; | |
808 | ||
809 | return; | |
810 | } | |
811 | ||
812 | /** | |
813 | * e1000_valid_led_default_i210 - Verify a valid default LED config | |
814 | * @hw: pointer to the HW structure | |
815 | * @data: pointer to the NVM (EEPROM) | |
816 | * | |
817 | * Read the EEPROM for the current default LED configuration. If the | |
818 | * LED configuration is not valid, set to a valid LED configuration. | |
819 | **/ | |
820 | STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) | |
821 | { | |
822 | s32 ret_val; | |
823 | ||
824 | DEBUGFUNC("e1000_valid_led_default_i210"); | |
825 | ||
826 | ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); | |
827 | if (ret_val) { | |
828 | DEBUGOUT("NVM Read Error\n"); | |
829 | goto out; | |
830 | } | |
831 | ||
832 | if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { | |
833 | switch (hw->phy.media_type) { | |
834 | case e1000_media_type_internal_serdes: | |
835 | *data = ID_LED_DEFAULT_I210_SERDES; | |
836 | break; | |
837 | case e1000_media_type_copper: | |
838 | default: | |
839 | *data = ID_LED_DEFAULT_I210; | |
840 | break; | |
841 | } | |
842 | } | |
843 | out: | |
844 | return ret_val; | |
845 | } | |
846 | ||
/**
 *  __e1000_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 *
 *  Performs the indirect access sequence through the MMDAC/MMDAAD PHY
 *  registers: select the device, program the register address, switch
 *  to data mode, transfer the data, then reset MMDAC to 0.
 *  Returns the first PHY access error encountered, or E1000_SUCCESS.
 **/
STATIC s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				    u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_xmdio_reg");

	/* Select the target MMD device (address-function mode). */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	/* Program the register address within that device. */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	/* Switch MMDAC to data-function mode for the same device. */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	/* Transfer the data in the requested direction. */
	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}
889 | ||
/**
 *  e1000_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the EMI address
 *
 *  Thin wrapper around __e1000_access_xmdio_reg with read = true.
 **/
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	DEBUGFUNC("e1000_read_xmdio_reg");

	return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
903 | ||
904 | /** | |
905 | * e1000_write_xmdio_reg - Write XMDIO register | |
906 | * @hw: pointer to the HW structure | |
907 | * @addr: XMDIO address to program | |
908 | * @dev_addr: device address to program | |
909 | * @data: value to be written to the XMDIO address | |
910 | **/ | |
911 | s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) | |
912 | { | |
913 | DEBUGFUNC("e1000_read_xmdio_reg"); | |
914 | ||
915 | return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); | |
916 | } | |
917 | ||
/**
 *  e1000_pll_workaround_i210
 *  @hw: pointer to the HW structure
 *
 *  Works around an errata in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.  Up to
 *  E1000_MAX_PLL_TRIES attempts are made; each attempt resets the
 *  internal PHY, overrides the NVM autoload word, and bounces the
 *  device through PCI power state D3 to force a PLL re-lock.
 *  Returns E1000_SUCCESS once the PHY reports a configured PLL,
 *  otherwise -E1000_ERR_PHY after the last attempt.
 **/
STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values.
	 * MDICNFG is forced to internal-MDIO mode (EXT_MDIO cleared) so the
	 * PHY reads below talk to the internal PHY; WUC and MDICNFG are
	 * saved for restoration afterwards.
	 */
	wuc = E1000_READ_REG(hw, E1000_WUC);
	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default on read failure */
	ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					    &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	/* tmp_nvm carries the workaround bit(s) OR'd into the autoload word */
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
					 E1000_PHY_PLL_FREQ_REG), &phy_word);
		/* PLL already configured -> done */
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		/* Power down PHY / enable serdes low-power while cycling.
		 * NOTE(review): exact PHYPDEN/SDLPE semantics come from the
		 * i210 datasheet errata procedure — confirm against it.
		 */
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		/* Clear wake-up control, then stage the modified autoload
		 * word (address in low bits, data in high 16) via EEARBC.
		 */
		E1000_WRITE_REG(hw, E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* Bounce through D3: enter, wait 1 ms, leave — forces the
		 * PLL to restart with the staged autoload value.
		 */
		e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		msec_delay(1);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		/* Restore the original (unmodified) autoload word */
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		E1000_WRITE_REG(hw, E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
	return ret_val;
}
983 | ||
984 | /** | |
985 | * e1000_get_cfg_done_i210 - Read config done bit | |
986 | * @hw: pointer to the HW structure | |
987 | * | |
988 | * Read the management control register for the config done bit for | |
989 | * completion status. NOTE: silicon which is EEPROM-less will fail trying | |
990 | * to read the config done bit, so an error is *ONLY* logged and returns | |
991 | * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon | |
992 | * would not be able to be reset or change link. | |
993 | **/ | |
994 | STATIC s32 e1000_get_cfg_done_i210(struct e1000_hw *hw) | |
995 | { | |
996 | s32 timeout = PHY_CFG_TIMEOUT; | |
997 | u32 mask = E1000_NVM_CFG_DONE_PORT_0; | |
998 | ||
999 | DEBUGFUNC("e1000_get_cfg_done_i210"); | |
1000 | ||
1001 | while (timeout) { | |
1002 | if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask) | |
1003 | break; | |
1004 | msec_delay(1); | |
1005 | timeout--; | |
1006 | } | |
1007 | if (!timeout) | |
1008 | DEBUGOUT("MNG configuration cycle has not completed.\n"); | |
1009 | ||
1010 | return E1000_SUCCESS; | |
1011 | } | |
1012 | ||
1013 | /** | |
1014 | * e1000_init_hw_i210 - Init hw for I210/I211 | |
1015 | * @hw: pointer to the HW structure | |
1016 | * | |
1017 | * Called to initialize hw for i210 hw family. | |
1018 | **/ | |
1019 | s32 e1000_init_hw_i210(struct e1000_hw *hw) | |
1020 | { | |
1021 | s32 ret_val; | |
1022 | ||
1023 | DEBUGFUNC("e1000_init_hw_i210"); | |
1024 | if ((hw->mac.type >= e1000_i210) && | |
1025 | !(e1000_get_flash_presence_i210(hw))) { | |
1026 | ret_val = e1000_pll_workaround_i210(hw); | |
1027 | if (ret_val != E1000_SUCCESS) | |
1028 | return ret_val; | |
1029 | } | |
1030 | hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210; | |
1031 | ret_val = e1000_init_hw_82575(hw); | |
1032 | return ret_val; | |
1033 | } |