]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /******************************************************************************* |
2 | ||
3 | Intel(R) Gigabit Ethernet Linux driver | |
4 | Copyright(c) 2007-2013 Intel Corporation. | |
5 | ||
6 | This program is free software; you can redistribute it and/or modify it | |
7 | under the terms and conditions of the GNU General Public License, | |
8 | version 2, as published by the Free Software Foundation. | |
9 | ||
10 | This program is distributed in the hope it will be useful, but WITHOUT | |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | more details. | |
14 | ||
15 | You should have received a copy of the GNU General Public License along with | |
16 | this program; if not, write to the Free Software Foundation, Inc., | |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
18 | ||
19 | The full GNU General Public License is included in this distribution in | |
20 | the file called "LICENSE.GPL". | |
21 | ||
22 | Contact Information: | |
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
25 | ||
26 | *******************************************************************************/ | |
27 | ||
28 | #include "e1000_api.h" | |
29 | ||
30 | ||
31 | static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); | |
32 | static void e1000_release_nvm_i210(struct e1000_hw *hw); | |
33 | static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); | |
34 | static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, | |
35 | u16 *data); | |
36 | static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); | |
37 | static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); | |
38 | ||
39 | /** | |
40 | * e1000_acquire_nvm_i210 - Request for access to EEPROM | |
41 | * @hw: pointer to the HW structure | |
42 | * | |
43 | * Acquire the necessary semaphores for exclusive access to the EEPROM. | |
44 | * Set the EEPROM access request bit and wait for EEPROM access grant bit. | |
45 | * Return successful if access grant bit set, else clear the request for | |
46 | * EEPROM access and return -E1000_ERR_NVM (-1). | |
47 | **/ | |
48 | static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) | |
49 | { | |
50 | s32 ret_val; | |
51 | ||
52 | DEBUGFUNC("e1000_acquire_nvm_i210"); | |
53 | ||
54 | ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); | |
55 | ||
56 | return ret_val; | |
57 | } | |
58 | ||
59 | /** | |
60 | * e1000_release_nvm_i210 - Release exclusive access to EEPROM | |
61 | * @hw: pointer to the HW structure | |
62 | * | |
63 | * Stop any current commands to the EEPROM and clear the EEPROM request bit, | |
64 | * then release the semaphores acquired. | |
65 | **/ | |
66 | static void e1000_release_nvm_i210(struct e1000_hw *hw) | |
67 | { | |
68 | DEBUGFUNC("e1000_release_nvm_i210"); | |
69 | ||
70 | e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); | |
71 | } | |
72 | ||
/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 *
 *  Returns E1000_SUCCESS with the mask bit set in SW_FW_SYNC, or
 *  -E1000_ERR_SWFW_SYNC if the HW semaphore could not be taken or the
 *  resource stayed busy for the whole timeout.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;		/* software ownership bits (low 16) */
	u32 fwmask = mask << 16;	/* firmware ownership bits (high 16) */
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		/* SW_FW_SYNC itself is protected by the HW semaphore; take
		 * it before every read-modify-write attempt. */
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;	/* resource is free - claim it below */

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Mark the resource as software-owned.  The HW semaphore is still
	 * held at this point, so the read-modify-write is safe. */
	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
124 | ||
/**
 *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	/* Release cannot fail, so wait (without bound) until the HW
	 * semaphore protecting SW_FW_SYNC is obtained. */
	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	/* Clear our ownership bit(s) under the HW semaphore. */
	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}
148 | ||
/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.  First waits for the
 *  SMBI (software) bit to clear, then attempts to latch the SWESMBI
 *  (software/firmware) bit.  Returns E1000_SUCCESS, or -E1000_ERR_NVM if
 *  either stage times out.
 **/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	/* Timeout scales with NVM size; each poll below sleeps 50 us. */
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			e1000_put_hw_semaphore_generic(hw);
			/* Retry the SMBI wait once after the forced clear. */
			for (i = 0; i < timeout; i++) {
				swsm = E1000_READ_REG(hw, E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}
217 | ||
218 | /** | |
219 | * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register | |
220 | * @hw: pointer to the HW structure | |
221 | * @offset: offset of word in the Shadow Ram to read | |
222 | * @words: number of words to read | |
223 | * @data: word read from the Shadow Ram | |
224 | * | |
225 | * Reads a 16 bit word from the Shadow Ram using the EERD register. | |
226 | * Uses necessary synchronization semaphores. | |
227 | **/ | |
228 | s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, | |
229 | u16 *data) | |
230 | { | |
231 | s32 status = E1000_SUCCESS; | |
232 | u16 i, count; | |
233 | ||
234 | DEBUGFUNC("e1000_read_nvm_srrd_i210"); | |
235 | ||
236 | /* We cannot hold synchronization semaphores for too long, | |
237 | * because of forceful takeover procedure. However it is more efficient | |
238 | * to read in bursts than synchronizing access for each word. */ | |
239 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { | |
240 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | |
241 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | |
242 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | |
243 | status = e1000_read_nvm_eerd(hw, offset, count, | |
244 | data + i); | |
245 | hw->nvm.ops.release(hw); | |
246 | } else { | |
247 | status = E1000_ERR_SWFW_SYNC; | |
248 | } | |
249 | ||
250 | if (status != E1000_SUCCESS) | |
251 | break; | |
252 | } | |
253 | ||
254 | return status; | |
255 | } | |
256 | ||
257 | /** | |
258 | * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR | |
259 | * @hw: pointer to the HW structure | |
260 | * @offset: offset within the Shadow RAM to be written to | |
261 | * @words: number of words to write | |
262 | * @data: 16 bit word(s) to be written to the Shadow RAM | |
263 | * | |
264 | * Writes data to Shadow RAM at offset using EEWR register. | |
265 | * | |
266 | * If e1000_update_nvm_checksum is not called after this function , the | |
267 | * data will not be committed to FLASH and also Shadow RAM will most likely | |
268 | * contain an invalid checksum. | |
269 | * | |
270 | * If error code is returned, data and Shadow RAM may be inconsistent - buffer | |
271 | * partially written. | |
272 | **/ | |
273 | s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, | |
274 | u16 *data) | |
275 | { | |
276 | s32 status = E1000_SUCCESS; | |
277 | u16 i, count; | |
278 | ||
279 | DEBUGFUNC("e1000_write_nvm_srwr_i210"); | |
280 | ||
281 | /* We cannot hold synchronization semaphores for too long, | |
282 | * because of forceful takeover procedure. However it is more efficient | |
283 | * to write in bursts than synchronizing access for each word. */ | |
284 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { | |
285 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | |
286 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | |
287 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | |
288 | status = e1000_write_nvm_srwr(hw, offset, count, | |
289 | data + i); | |
290 | hw->nvm.ops.release(hw); | |
291 | } else { | |
292 | status = E1000_ERR_SWFW_SYNC; | |
293 | } | |
294 | ||
295 | if (status != E1000_SUCCESS) | |
296 | break; | |
297 | } | |
298 | ||
299 | return status; | |
300 | } | |
301 | ||
302 | /** | |
303 | * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR | |
304 | * @hw: pointer to the HW structure | |
305 | * @offset: offset within the Shadow Ram to be written to | |
306 | * @words: number of words to write | |
307 | * @data: 16 bit word(s) to be written to the Shadow Ram | |
308 | * | |
309 | * Writes data to Shadow Ram at offset using EEWR register. | |
310 | * | |
311 | * If e1000_update_nvm_checksum is not called after this function , the | |
312 | * Shadow Ram will most likely contain an invalid checksum. | |
313 | **/ | |
314 | static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, | |
315 | u16 *data) | |
316 | { | |
317 | struct e1000_nvm_info *nvm = &hw->nvm; | |
318 | u32 i, k, eewr = 0; | |
319 | u32 attempts = 100000; | |
320 | s32 ret_val = E1000_SUCCESS; | |
321 | ||
322 | DEBUGFUNC("e1000_write_nvm_srwr"); | |
323 | ||
324 | /* | |
325 | * A check for invalid values: offset too large, too many words, | |
326 | * too many words for the offset, and not enough words. | |
327 | */ | |
328 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | |
329 | (words == 0)) { | |
330 | DEBUGOUT("nvm parameter(s) out of bounds\n"); | |
331 | ret_val = -E1000_ERR_NVM; | |
332 | goto out; | |
333 | } | |
334 | ||
335 | for (i = 0; i < words; i++) { | |
336 | eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | | |
337 | (data[i] << E1000_NVM_RW_REG_DATA) | | |
338 | E1000_NVM_RW_REG_START; | |
339 | ||
340 | E1000_WRITE_REG(hw, E1000_SRWR, eewr); | |
341 | ||
342 | for (k = 0; k < attempts; k++) { | |
343 | if (E1000_NVM_RW_REG_DONE & | |
344 | E1000_READ_REG(hw, E1000_SRWR)) { | |
345 | ret_val = E1000_SUCCESS; | |
346 | break; | |
347 | } | |
348 | usec_delay(5); | |
349 | } | |
350 | ||
351 | if (ret_val != E1000_SUCCESS) { | |
352 | DEBUGOUT("Shadow RAM write EEWR timed out\n"); | |
353 | break; | |
354 | } | |
355 | } | |
356 | ||
357 | out: | |
358 | return ret_val; | |
359 | } | |
360 | ||
/** e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP (-E1000_ERR_INVM_VALUE_NOT_FOUND).
 **/
static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_word_i210");

	/* Walk the iNVM records.  Autoload structures carry multi-dword
	 * payloads, so i is advanced past them inside the loop body. */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;	/* end of programmed records */
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
403 | ||
/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word offset (aka eeprom address) to read
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.  Words that
 *  are absent from the iNVM fall back to per-field defaults; only the MAC
 *  address is required to actually be programmed.
 **/
static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 E1000_UNUSEDARG words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_invm_i210");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		/* MAC address spans three consecutive words; the three
		 * results are OR-ed and only tested against E1000_SUCCESS */
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
						     &data[1]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
						     &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		/* Missing word is not an error: substitute the default */
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	/* These identification words are served straight from hw state */
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		/* Unmapped words still return success with a filler value */
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
483 | ||
/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.  The iNVM is snapshotted into a
 *  local buffer and then scanned backwards (from the last non-ULT block)
 *  for the most recently written version and image-type fields.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
			    struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Number of scannable blocks: total size minus the ULT area */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	DEBUGFUNC("e1000_read_invm_version");

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number: scan from the highest block downwards so the
	 * latest programmed version location wins. */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			  (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	if (status == E1000_SUCCESS) {
		/* Split the packed version word into major/minor */
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type: same backwards scan as for the version */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}
582 | ||
583 | /** | |
584 | * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum | |
585 | * @hw: pointer to the HW structure | |
586 | * | |
587 | * Calculates the EEPROM checksum by reading/adding each word of the EEPROM | |
588 | * and then verifies that the sum of the EEPROM is equal to 0xBABA. | |
589 | **/ | |
590 | s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) | |
591 | { | |
592 | s32 status = E1000_SUCCESS; | |
593 | s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); | |
594 | ||
595 | DEBUGFUNC("e1000_validate_nvm_checksum_i210"); | |
596 | ||
597 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | |
598 | ||
599 | /* | |
600 | * Replace the read function with semaphore grabbing with | |
601 | * the one that skips this for a while. | |
602 | * We have semaphore taken already here. | |
603 | */ | |
604 | read_op_ptr = hw->nvm.ops.read; | |
605 | hw->nvm.ops.read = e1000_read_nvm_eerd; | |
606 | ||
607 | status = e1000_validate_nvm_checksum_generic(hw); | |
608 | ||
609 | /* Revert original read operation. */ | |
610 | hw->nvm.ops.read = read_op_ptr; | |
611 | ||
612 | hw->nvm.ops.release(hw); | |
613 | } else { | |
614 | status = E1000_ERR_SWFW_SYNC; | |
615 | } | |
616 | ||
617 | return status; | |
618 | } | |
619 | ||
620 | ||
/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.  Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum every word preceding the checksum word. */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				/* semaphore must be dropped on every exit */
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* Pick the checksum word so the full sum equals NVM_SUM. */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					       &checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the updated shadow RAM contents to flash. */
		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
682 | ||
683 | /** | |
684 | * e1000_get_flash_presence_i210 - Check if flash device is detected. | |
685 | * @hw: pointer to the HW structure | |
686 | * | |
687 | **/ | |
688 | bool e1000_get_flash_presence_i210(struct e1000_hw *hw) | |
689 | { | |
690 | u32 eec = 0; | |
691 | bool ret_val = false; | |
692 | ||
693 | DEBUGFUNC("e1000_get_flash_presence_i210"); | |
694 | ||
695 | eec = E1000_READ_REG(hw, E1000_EECD); | |
696 | ||
697 | if (eec & E1000_EECD_FLASH_DETECTED_I210) | |
698 | ret_val = true; | |
699 | ||
700 | return ret_val; | |
701 | } | |
702 | ||
703 | /** | |
704 | * e1000_update_flash_i210 - Commit EEPROM to the flash | |
705 | * @hw: pointer to the HW structure | |
706 | * | |
707 | **/ | |
708 | s32 e1000_update_flash_i210(struct e1000_hw *hw) | |
709 | { | |
710 | s32 ret_val = E1000_SUCCESS; | |
711 | u32 flup; | |
712 | ||
713 | DEBUGFUNC("e1000_update_flash_i210"); | |
714 | ||
715 | ret_val = e1000_pool_flash_update_done_i210(hw); | |
716 | if (ret_val == -E1000_ERR_NVM) { | |
717 | DEBUGOUT("Flash update time out\n"); | |
718 | goto out; | |
719 | } | |
720 | ||
721 | flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; | |
722 | E1000_WRITE_REG(hw, E1000_EECD, flup); | |
723 | ||
724 | ret_val = e1000_pool_flash_update_done_i210(hw); | |
725 | if (ret_val == E1000_SUCCESS) | |
726 | DEBUGOUT("Flash update complete\n"); | |
727 | else | |
728 | DEBUGOUT("Flash update time out\n"); | |
729 | ||
730 | out: | |
731 | return ret_val; | |
732 | } | |
733 | ||
734 | /** | |
735 | * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. | |
736 | * @hw: pointer to the HW structure | |
737 | * | |
738 | **/ | |
739 | s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) | |
740 | { | |
741 | s32 ret_val = -E1000_ERR_NVM; | |
742 | u32 i, reg; | |
743 | ||
744 | DEBUGFUNC("e1000_pool_flash_update_done_i210"); | |
745 | ||
746 | for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { | |
747 | reg = E1000_READ_REG(hw, E1000_EECD); | |
748 | if (reg & E1000_EECD_FLUDONE_I210) { | |
749 | ret_val = E1000_SUCCESS; | |
750 | break; | |
751 | } | |
752 | usec_delay(5); | |
753 | } | |
754 | ||
755 | return ret_val; | |
756 | } | |
757 | ||
/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.  Starts
 *  from the 82575 defaults, then overrides the ops depending on whether an
 *  external flash is present (i210) or only iNVM/OTP is available (i211).
 **/
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i210");

	/* NOTE(review): ret_val from the 82575 init is returned as-is, yet
	 * the i210 ops below are installed unconditionally - confirm that a
	 * failure here is treated as fatal by callers. */
	ret_val = e1000_init_nvm_params_82575(hw);
	nvm->ops.acquire = e1000_acquire_nvm_i210;
	nvm->ops.release = e1000_release_nvm_i210;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
	if (e1000_get_flash_presence_i210(hw)) {
		/* Flash-backed NVM: full shadow-RAM read/write support */
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = e1000_read_nvm_srrd_i210;
		nvm->ops.write = e1000_write_nvm_srwr_i210;
		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
		nvm->ops.update = e1000_update_nvm_checksum_i210;
	} else {
		/* iNVM (OTP) only: reads come from OTP, writes are no-ops */
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = e1000_read_invm_i210;
		nvm->ops.write = e1000_null_write_nvm;
		nvm->ops.validate = e1000_null_ops_generic;
		nvm->ops.update = e1000_null_ops_generic;
	}
	return ret_val;
}
790 | ||
791 | /** | |
792 | * e1000_init_function_pointers_i210 - Init func ptrs. | |
793 | * @hw: pointer to the HW structure | |
794 | * | |
795 | * Called to initialize all function pointers and parameters. | |
796 | **/ | |
797 | void e1000_init_function_pointers_i210(struct e1000_hw *hw) | |
798 | { | |
799 | e1000_init_function_pointers_82575(hw); | |
800 | hw->nvm.ops.init_params = e1000_init_nvm_params_i210; | |
801 | ||
802 | return; | |
803 | } | |
804 | ||
805 | /** | |
806 | * e1000_valid_led_default_i210 - Verify a valid default LED config | |
807 | * @hw: pointer to the HW structure | |
808 | * @data: pointer to the NVM (EEPROM) | |
809 | * | |
810 | * Read the EEPROM for the current default LED configuration. If the | |
811 | * LED configuration is not valid, set to a valid LED configuration. | |
812 | **/ | |
813 | static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) | |
814 | { | |
815 | s32 ret_val; | |
816 | ||
817 | DEBUGFUNC("e1000_valid_led_default_i210"); | |
818 | ||
819 | ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); | |
820 | if (ret_val) { | |
821 | DEBUGOUT("NVM Read Error\n"); | |
822 | goto out; | |
823 | } | |
824 | ||
825 | if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { | |
826 | switch (hw->phy.media_type) { | |
827 | case e1000_media_type_internal_serdes: | |
828 | *data = ID_LED_DEFAULT_I210_SERDES; | |
829 | break; | |
830 | case e1000_media_type_copper: | |
831 | default: | |
832 | *data = ID_LED_DEFAULT_I210; | |
833 | break; | |
834 | } | |
835 | } | |
836 | out: | |
837 | return ret_val; | |
838 | } | |
839 | ||
/**
 *  __e1000_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 *
 *  Accesses an MMD register through the MMDAC/MMDAAD indirection
 *  registers.  The writes below are order-sensitive: first the device is
 *  selected, then the register address is latched, then MMDAC is switched
 *  to data mode before the actual read or write takes place.
 **/
static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				    u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("__e1000_access_xmdio_reg");

	/* Select the MMD device (address function). */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	/* Latch the register address within that device. */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	/* Switch MMDAC to data mode, keeping the same device address. */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}
882 | ||
883 | /** | |
884 | * e1000_read_xmdio_reg - Read XMDIO register | |
885 | * @hw: pointer to the HW structure | |
886 | * @addr: XMDIO address to program | |
887 | * @dev_addr: device address to program | |
888 | * @data: value to be read from the EMI address | |
889 | **/ | |
890 | s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) | |
891 | { | |
892 | DEBUGFUNC("e1000_read_xmdio_reg"); | |
893 | ||
894 | return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); | |
895 | } | |
896 | ||
897 | /** | |
898 | * e1000_write_xmdio_reg - Write XMDIO register | |
899 | * @hw: pointer to the HW structure | |
900 | * @addr: XMDIO address to program | |
901 | * @dev_addr: device address to program | |
902 | * @data: value to be written to the XMDIO address | |
903 | **/ | |
904 | s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) | |
905 | { | |
906 | DEBUGFUNC("e1000_read_xmdio_reg"); | |
907 | ||
908 | return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); | |
909 | } |