drivers/net/ethernet/intel/igb/e1000_82575.c (mirror_ubuntu-zesty-kernel.git, git.proxmox.com)
UBUNTU: SAUCE: igb: add support for using Broadcom 54616 as PHY
1 /* Intel(R) Gigabit Ethernet Linux driver
2 * Copyright(c) 2007-2015 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 */
23
24 /* e1000_82575
25 * e1000_82576
26 */
27
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
30 #include <linux/types.h>
31 #include <linux/if_ether.h>
32 #include <linux/i2c.h>
33
34 #include "e1000_mac.h"
35 #include "e1000_82575.h"
36 #include "e1000_i210.h"
37 #include "igb.h"
38
39 static s32 igb_get_invariants_82575(struct e1000_hw *);
40 static s32 igb_acquire_phy_82575(struct e1000_hw *);
41 static void igb_release_phy_82575(struct e1000_hw *);
42 static s32 igb_acquire_nvm_82575(struct e1000_hw *);
43 static void igb_release_nvm_82575(struct e1000_hw *);
44 static s32 igb_check_for_link_82575(struct e1000_hw *);
45 static s32 igb_get_cfg_done_82575(struct e1000_hw *);
46 static s32 igb_init_hw_82575(struct e1000_hw *);
47 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
48 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
49 static s32 igb_reset_hw_82575(struct e1000_hw *);
50 static s32 igb_reset_hw_82580(struct e1000_hw *);
51 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
52 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
53 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
54 static s32 igb_setup_copper_link_82575(struct e1000_hw *);
55 static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
56 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
57 static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
58 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
59 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
60 u16 *);
61 static s32 igb_get_phy_id_82575(struct e1000_hw *);
62 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
63 static bool igb_sgmii_active_82575(struct e1000_hw *);
64 static s32 igb_reset_init_script_82575(struct e1000_hw *);
65 static s32 igb_read_mac_addr_82575(struct e1000_hw *);
66 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
67 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
68 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
69 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
70 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
71 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
72 static const u16 e1000_82580_rxpbs_table[] = {
73 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
74
75 /* Due to a hw erratum, if the host tries to configure the VFTA register
76 * while performing queries from the BMC or DMA, then the VFTA in some
77 * cases won't be written.
78 */
79
80 /**
81 * igb_write_vfta_i350 - Write value to VLAN filter table
82 * @hw: pointer to the HW structure
83 * @offset: register offset in VLAN filter table
84 * @value: register value written to VLAN filter table
85 *
86 * Writes value at the given offset in the register array which stores
87 * the VLAN filter table.
88 **/
89 static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
90 {
91 struct igb_adapter *adapter = hw->back;
92 int i;
93
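/* Repeat the write so it takes effect despite the VFTA erratum described above */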
94 for (i = 10; i--;)
95 array_wr32(E1000_VFTA, offset, value);
96
97 wrfl();
98 adapter->shadow_vfta[offset] = value;
99 }
100
101 /**
102 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
103 * @hw: pointer to the HW structure
104 *
105 * Called to determine if the I2C pins are being used for I2C or as an
106 * external MDIO interface since the two options are mutually exclusive.
107 **/
108 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
109 {
110 u32 reg = 0;
111 bool ext_mdio = false;
112
113 switch (hw->mac.type) {
114 case e1000_82575:
115 case e1000_82576:
116 reg = rd32(E1000_MDIC);
117 ext_mdio = !!(reg & E1000_MDIC_DEST);
118 break;
119 case e1000_82580:
120 case e1000_i350:
121 case e1000_i354:
122 case e1000_i210:
123 case e1000_i211:
124 reg = rd32(E1000_MDICNFG);
125 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
126 break;
127 default:
128 break;
129 }
130 return ext_mdio;
131 }
132
133 /**
134 * igb_check_for_link_media_swap - Check which M88E1112 interface linked
135 * @hw: pointer to the HW structure
136 *
137 * Poll the M88E1112 interfaces to see which interface achieved link.
138 */
139 static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
140 {
141 struct e1000_phy_info *phy = &hw->phy;
142 s32 ret_val;
143 u16 data;
144 u8 port = 0;
145
146 /* Check the copper medium. */
147 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
148 if (ret_val)
149 return ret_val;
150
151 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
152 if (ret_val)
153 return ret_val;
154
155 if (data & E1000_M88E1112_STATUS_LINK)
156 port = E1000_MEDIA_PORT_COPPER;
157
158 /* Check the other medium. */
159 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
160 if (ret_val)
161 return ret_val;
162
163 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
164 if (ret_val)
165 return ret_val;
166
167
168 if (data & E1000_M88E1112_STATUS_LINK)
169 port = E1000_MEDIA_PORT_OTHER;
170
171 /* Determine if a swap needs to happen. */
172 if (port && (hw->dev_spec._82575.media_port != port)) {
173 hw->dev_spec._82575.media_port = port;
174 hw->dev_spec._82575.media_changed = true;
175 }
176
177 if (port == E1000_MEDIA_PORT_COPPER) {
178 /* reset page to 0 */
179 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
180 if (ret_val)
181 return ret_val;
182 igb_check_for_link_82575(hw);
183 } else {
184 igb_check_for_link_82575(hw);
185 /* reset page to 0 */
186 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
187 if (ret_val)
188 return ret_val;
189 }
190
191 return 0;
192 }
193
194 /**
195 * igb_init_phy_params_82575 - Init PHY func ptrs.
196 * @hw: pointer to the HW structure
197 **/
198 static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
199 {
200 struct e1000_phy_info *phy = &hw->phy;
201 s32 ret_val = 0;
202 u32 ctrl_ext;
203
204 if (hw->phy.media_type != e1000_media_type_copper) {
205 phy->type = e1000_phy_none;
206 goto out;
207 }
208
209 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
210 phy->reset_delay_us = 100;
211
212 ctrl_ext = rd32(E1000_CTRL_EXT);
213
214 if (igb_sgmii_active_82575(hw)) {
215 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
216 ctrl_ext |= E1000_CTRL_I2C_ENA;
217 } else {
218 phy->ops.reset = igb_phy_hw_reset;
219 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
220 }
221
222 wr32(E1000_CTRL_EXT, ctrl_ext);
223 igb_reset_mdicnfg_82580(hw);
224
225 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
226 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
227 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
228 } else {
229 switch (hw->mac.type) {
230 case e1000_82580:
231 case e1000_i350:
232 case e1000_i354:
233 case e1000_i210:
234 case e1000_i211:
235 phy->ops.read_reg = igb_read_phy_reg_82580;
236 phy->ops.write_reg = igb_write_phy_reg_82580;
237 break;
238 default:
239 phy->ops.read_reg = igb_read_phy_reg_igp;
240 phy->ops.write_reg = igb_write_phy_reg_igp;
241 }
242 }
243
244 /* set lan id */
245 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
246 E1000_STATUS_FUNC_SHIFT;
247
248 /* Set phy->phy_addr and phy->id. */
249 ret_val = igb_get_phy_id_82575(hw);
250 if (ret_val)
251 return ret_val;
252
253 /* Verify phy id and set remaining function pointers */
254 switch (phy->id) {
255 case M88E1543_E_PHY_ID:
256 case M88E1512_E_PHY_ID:
257 case I347AT4_E_PHY_ID:
258 case M88E1112_E_PHY_ID:
259 case M88E1111_I_PHY_ID:
260 phy->type = e1000_phy_m88;
261 phy->ops.check_polarity = igb_check_polarity_m88;
262 phy->ops.get_phy_info = igb_get_phy_info_m88;
263 if (phy->id != M88E1111_I_PHY_ID)
264 phy->ops.get_cable_length =
265 igb_get_cable_length_m88_gen2;
266 else
267 phy->ops.get_cable_length = igb_get_cable_length_m88;
268 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
269 /* Check if this PHY is configured for media swap. */
270 if (phy->id == M88E1112_E_PHY_ID) {
271 u16 data;
272
273 ret_val = phy->ops.write_reg(hw,
274 E1000_M88E1112_PAGE_ADDR,
275 2);
276 if (ret_val)
277 goto out;
278
279 ret_val = phy->ops.read_reg(hw,
280 E1000_M88E1112_MAC_CTRL_1,
281 &data);
282 if (ret_val)
283 goto out;
284
285 data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
286 E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
287 if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
288 data == E1000_M88E1112_AUTO_COPPER_BASEX)
289 hw->mac.ops.check_for_link =
290 igb_check_for_link_media_swap;
291 }
292 if (phy->id == M88E1512_E_PHY_ID) {
293 ret_val = igb_initialize_M88E1512_phy(hw);
294 if (ret_val)
295 goto out;
296 }
297 if (phy->id == M88E1543_E_PHY_ID) {
298 ret_val = igb_initialize_M88E1543_phy(hw);
299 if (ret_val)
300 goto out;
301 }
302 break;
303 case IGP03E1000_E_PHY_ID:
304 phy->type = e1000_phy_igp_3;
305 phy->ops.get_phy_info = igb_get_phy_info_igp;
306 phy->ops.get_cable_length = igb_get_cable_length_igp_2;
307 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
308 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
309 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
310 break;
311 case I82580_I_PHY_ID:
312 case I350_I_PHY_ID:
313 phy->type = e1000_phy_82580;
314 phy->ops.force_speed_duplex =
315 igb_phy_force_speed_duplex_82580;
316 phy->ops.get_cable_length = igb_get_cable_length_82580;
317 phy->ops.get_phy_info = igb_get_phy_info_82580;
318 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
319 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
320 break;
321 case I210_I_PHY_ID:
322 phy->type = e1000_phy_i210;
323 phy->ops.check_polarity = igb_check_polarity_m88;
324 phy->ops.get_cfg_done = igb_get_cfg_done_i210;
325 phy->ops.get_phy_info = igb_get_phy_info_m88;
326 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
327 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
328 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
329 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
330 break;
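/* BCM54616 (UBUNTU SAUCE addition): only the PHY type is recorded here;
 * no PHY-specific function pointers are installed for this PHY.
 */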
331 case BCM54616_E_PHY_ID:
332 phy->type = e1000_phy_bcm54616;
333 break;
334 default:
335 ret_val = -E1000_ERR_PHY;
336 goto out;
337 }
338
339 out:
340 return ret_val;
341 }
342
343 /**
344 * igb_init_nvm_params_82575 - Init NVM func ptrs.
345 * @hw: pointer to the HW structure
346 **/
347 static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
348 {
349 struct e1000_nvm_info *nvm = &hw->nvm;
350 u32 eecd = rd32(E1000_EECD);
351 u16 size;
352
353 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
354 E1000_EECD_SIZE_EX_SHIFT);
355
356 /* Added to a constant, "size" becomes the left-shift value
357 * for setting word_size.
358 */
359 size += NVM_WORD_SIZE_BASE_SHIFT;
360
361 /* Just in case size is out of range, cap it to the largest
362 * EEPROM size supported
363 */
364 if (size > 15)
365 size = 15;
366
367 nvm->word_size = BIT(size);
368 nvm->opcode_bits = 8;
369 nvm->delay_usec = 1;
370
371 switch (nvm->override) {
372 case e1000_nvm_override_spi_large:
373 nvm->page_size = 32;
374 nvm->address_bits = 16;
375 break;
376 case e1000_nvm_override_spi_small:
377 nvm->page_size = 8;
378 nvm->address_bits = 8;
379 break;
380 default:
381 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
382 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
383 16 : 8;
384 break;
385 }
386 if (nvm->word_size == BIT(15))
387 nvm->page_size = 128;
388
389 nvm->type = e1000_nvm_eeprom_spi;
390
391 /* NVM Function Pointers */
392 nvm->ops.acquire = igb_acquire_nvm_82575;
393 nvm->ops.release = igb_release_nvm_82575;
394 nvm->ops.write = igb_write_nvm_spi;
395 nvm->ops.validate = igb_validate_nvm_checksum;
396 nvm->ops.update = igb_update_nvm_checksum;
397 if (nvm->word_size < BIT(15))
398 nvm->ops.read = igb_read_nvm_eerd;
399 else
400 nvm->ops.read = igb_read_nvm_spi;
401
402 /* override generic family function pointers for specific descendants */
403 switch (hw->mac.type) {
404 case e1000_82580:
405 nvm->ops.validate = igb_validate_nvm_checksum_82580;
406 nvm->ops.update = igb_update_nvm_checksum_82580;
407 break;
408 case e1000_i354:
409 case e1000_i350:
410 nvm->ops.validate = igb_validate_nvm_checksum_i350;
411 nvm->ops.update = igb_update_nvm_checksum_i350;
412 break;
413 default:
414 break;
415 }
416
417 return 0;
418 }
419
420 /**
421 * igb_init_mac_params_82575 - Init MAC func ptrs.
422 * @hw: pointer to the HW structure
423 **/
424 static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
425 {
426 struct e1000_mac_info *mac = &hw->mac;
427 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
428
429 /* Set mta register count */
430 mac->mta_reg_count = 128;
431 /* Set uta register count */
432 mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
433 /* Set rar entry count */
434 switch (mac->type) {
435 case e1000_82576:
436 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
437 break;
438 case e1000_82580:
439 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
440 break;
441 case e1000_i350:
442 case e1000_i354:
443 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
444 break;
445 default:
446 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
447 break;
448 }
449 /* reset */
450 if (mac->type >= e1000_82580)
451 mac->ops.reset_hw = igb_reset_hw_82580;
452 else
453 mac->ops.reset_hw = igb_reset_hw_82575;
454
455 if (mac->type >= e1000_i210) {
456 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
457 mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
458
459 } else {
460 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
461 mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
462 }
463
464 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
465 mac->ops.write_vfta = igb_write_vfta_i350;
466 else
467 mac->ops.write_vfta = igb_write_vfta;
468
469 /* Set if part includes ASF firmware */
470 mac->asf_firmware_present = true;
471 /* Set if manageability features are enabled. */
472 mac->arc_subsystem_valid =
473 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
474 ? true : false;
475 /* enable EEE on i350 parts and later parts */
476 if (mac->type >= e1000_i350)
477 dev_spec->eee_disable = false;
478 else
479 dev_spec->eee_disable = true;
480 /* Allow a single clear of the SW semaphore on I210 and newer */
481 if (mac->type >= e1000_i210)
482 dev_spec->clear_semaphore_once = true;
483 /* physical interface link setup */
484 mac->ops.setup_physical_interface =
485 (hw->phy.media_type == e1000_media_type_copper)
486 ? igb_setup_copper_link_82575
487 : igb_setup_serdes_link_82575;
488
489 if (mac->type == e1000_82580) {
490 switch (hw->device_id) {
491 /* feature not supported on these id's */
492 case E1000_DEV_ID_DH89XXCC_SGMII:
493 case E1000_DEV_ID_DH89XXCC_SERDES:
494 case E1000_DEV_ID_DH89XXCC_BACKPLANE:
495 case E1000_DEV_ID_DH89XXCC_SFP:
496 break;
497 default:
498 hw->dev_spec._82575.mas_capable = true;
499 break;
500 }
501 }
502 return 0;
503 }
504
505 /**
506 * igb_set_sfp_media_type_82575 - derives SFP module media type.
507 * @hw: pointer to the HW structure
508 *
509 * The media type is chosen based on SFP module
510 * compatibility flags retrieved from SFP ID EEPROM.
511 **/
512 static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
513 {
514 s32 ret_val = E1000_ERR_CONFIG;
515 u32 ctrl_ext = 0;
516 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
517 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
518 u8 transceiver_type = 0;
519 s32 timeout = 3;
520
521 /* Turn I2C interface ON and power on sfp cage */
522 ctrl_ext = rd32(E1000_CTRL_EXT);
523 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
524 wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
525
526 wrfl();
527
528 /* Read SFP module data */
529 while (timeout) {
530 ret_val = igb_read_sfp_data_byte(hw,
531 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
532 &transceiver_type);
533 if (ret_val == 0)
534 break;
535 msleep(100);
536 timeout--;
537 }
538 if (ret_val != 0)
539 goto out;
540
541 ret_val = igb_read_sfp_data_byte(hw,
542 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
543 (u8 *)eth_flags);
544 if (ret_val != 0)
545 goto out;
546
547 /* Check if there is some SFP module plugged and powered */
548 if ((transceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
549 (transceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
550 dev_spec->module_plugged = true;
551 if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
552 hw->phy.media_type = e1000_media_type_internal_serdes;
553 } else if (eth_flags->e100_base_fx) {
554 dev_spec->sgmii_active = true;
555 hw->phy.media_type = e1000_media_type_internal_serdes;
556 } else if (eth_flags->e1000_base_t) {
557 dev_spec->sgmii_active = true;
558 hw->phy.media_type = e1000_media_type_copper;
559 } else {
560 hw->phy.media_type = e1000_media_type_unknown;
561 hw_dbg("PHY module has not been recognized\n");
562 goto out;
563 }
564 } else {
565 hw->phy.media_type = e1000_media_type_unknown;
566 }
567 ret_val = 0;
568 out:
569 /* Restore I2C interface setting */
570 wr32(E1000_CTRL_EXT, ctrl_ext);
571 return ret_val;
572 }
573
574 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
575 {
576 struct e1000_mac_info *mac = &hw->mac;
577 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
578 s32 ret_val;
579 u32 ctrl_ext = 0;
580 u32 link_mode = 0;
581
582 switch (hw->device_id) {
583 case E1000_DEV_ID_82575EB_COPPER:
584 case E1000_DEV_ID_82575EB_FIBER_SERDES:
585 case E1000_DEV_ID_82575GB_QUAD_COPPER:
586 mac->type = e1000_82575;
587 break;
588 case E1000_DEV_ID_82576:
589 case E1000_DEV_ID_82576_NS:
590 case E1000_DEV_ID_82576_NS_SERDES:
591 case E1000_DEV_ID_82576_FIBER:
592 case E1000_DEV_ID_82576_SERDES:
593 case E1000_DEV_ID_82576_QUAD_COPPER:
594 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
595 case E1000_DEV_ID_82576_SERDES_QUAD:
596 mac->type = e1000_82576;
597 break;
598 case E1000_DEV_ID_82580_COPPER:
599 case E1000_DEV_ID_82580_FIBER:
600 case E1000_DEV_ID_82580_QUAD_FIBER:
601 case E1000_DEV_ID_82580_SERDES:
602 case E1000_DEV_ID_82580_SGMII:
603 case E1000_DEV_ID_82580_COPPER_DUAL:
604 case E1000_DEV_ID_DH89XXCC_SGMII:
605 case E1000_DEV_ID_DH89XXCC_SERDES:
606 case E1000_DEV_ID_DH89XXCC_BACKPLANE:
607 case E1000_DEV_ID_DH89XXCC_SFP:
608 mac->type = e1000_82580;
609 break;
610 case E1000_DEV_ID_I350_COPPER:
611 case E1000_DEV_ID_I350_FIBER:
612 case E1000_DEV_ID_I350_SERDES:
613 case E1000_DEV_ID_I350_SGMII:
614 mac->type = e1000_i350;
615 break;
616 case E1000_DEV_ID_I210_COPPER:
617 case E1000_DEV_ID_I210_FIBER:
618 case E1000_DEV_ID_I210_SERDES:
619 case E1000_DEV_ID_I210_SGMII:
620 case E1000_DEV_ID_I210_COPPER_FLASHLESS:
621 case E1000_DEV_ID_I210_SERDES_FLASHLESS:
622 mac->type = e1000_i210;
623 break;
624 case E1000_DEV_ID_I211_COPPER:
625 mac->type = e1000_i211;
626 break;
627 case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
628 case E1000_DEV_ID_I354_SGMII:
629 case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
630 mac->type = e1000_i354;
631 break;
632 default:
633 return -E1000_ERR_MAC_INIT;
634 }
635
636 /* Set media type */
637 /* The 82575 uses bits 22:23 for link mode. The mode can be changed
638 * based on the EEPROM. We cannot rely upon device ID. There
639 * is no distinguishable difference between fiber and internal
640 * SerDes mode on the 82575. There can be an external PHY attached
641 * on the SGMII interface. For this, we'll set sgmii_active to true.
642 */
643 hw->phy.media_type = e1000_media_type_copper;
644 dev_spec->sgmii_active = false;
645 dev_spec->module_plugged = false;
646
647 ctrl_ext = rd32(E1000_CTRL_EXT);
648
649 link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
650 switch (link_mode) {
651 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
652 hw->phy.media_type = e1000_media_type_internal_serdes;
653 break;
654 case E1000_CTRL_EXT_LINK_MODE_SGMII:
655 /* Get phy control interface type set (MDIO vs. I2C)*/
656 if (igb_sgmii_uses_mdio_82575(hw)) {
657 hw->phy.media_type = e1000_media_type_copper;
658 dev_spec->sgmii_active = true;
659 break;
660 }
661 /* fall through for I2C based SGMII */
662 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
663 /* read media type from SFP EEPROM */
664 ret_val = igb_set_sfp_media_type_82575(hw);
665 if ((ret_val != 0) ||
666 (hw->phy.media_type == e1000_media_type_unknown)) {
667 /* If media type was not identified then return media
668 * type defined by the CTRL_EXT settings.
669 */
670 hw->phy.media_type = e1000_media_type_internal_serdes;
671
672 if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
673 hw->phy.media_type = e1000_media_type_copper;
674 dev_spec->sgmii_active = true;
675 }
676
677 break;
678 }
679
680 /* do not change link mode for 100BaseFX */
681 if (dev_spec->eth_flags.e100_base_fx)
682 break;
683
684 /* change current link mode setting */
685 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
686
687 if (hw->phy.media_type == e1000_media_type_copper)
688 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
689 else
690 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
691
692 wr32(E1000_CTRL_EXT, ctrl_ext);
693
694 break;
695 default:
696 break;
697 }
698
699 /* mac initialization and operations */
700 ret_val = igb_init_mac_params_82575(hw);
701 if (ret_val)
702 goto out;
703
704 /* NVM initialization */
705 ret_val = igb_init_nvm_params_82575(hw);
706 switch (hw->mac.type) {
707 case e1000_i210:
708 case e1000_i211:
709 ret_val = igb_init_nvm_params_i210(hw);
710 break;
711 default:
712 break;
713 }
714
715 if (ret_val)
716 goto out;
717
718 /* if part supports SR-IOV then initialize mailbox parameters */
719 switch (mac->type) {
720 case e1000_82576:
721 case e1000_i350:
722 igb_init_mbx_params_pf(hw);
723 break;
724 default:
725 break;
726 }
727
728 /* setup PHY parameters */
729 ret_val = igb_init_phy_params_82575(hw);
730
731 out:
732 return ret_val;
733 }
734
735 /**
736 * igb_acquire_phy_82575 - Acquire rights to access PHY
737 * @hw: pointer to the HW structure
738 *
739 * Acquire access rights to the correct PHY. This is a
740 * function pointer entry point called by the api module.
741 **/
742 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
743 {
744 u16 mask = E1000_SWFW_PHY0_SM;
745
746 if (hw->bus.func == E1000_FUNC_1)
747 mask = E1000_SWFW_PHY1_SM;
748 else if (hw->bus.func == E1000_FUNC_2)
749 mask = E1000_SWFW_PHY2_SM;
750 else if (hw->bus.func == E1000_FUNC_3)
751 mask = E1000_SWFW_PHY3_SM;
752
753 return hw->mac.ops.acquire_swfw_sync(hw, mask);
754 }
755
756 /**
757 * igb_release_phy_82575 - Release rights to access PHY
758 * @hw: pointer to the HW structure
759 *
760 * A wrapper to release access rights to the correct PHY. This is a
761 * function pointer entry point called by the api module.
762 **/
763 static void igb_release_phy_82575(struct e1000_hw *hw)
764 {
765 u16 mask = E1000_SWFW_PHY0_SM;
766
767 if (hw->bus.func == E1000_FUNC_1)
768 mask = E1000_SWFW_PHY1_SM;
769 else if (hw->bus.func == E1000_FUNC_2)
770 mask = E1000_SWFW_PHY2_SM;
771 else if (hw->bus.func == E1000_FUNC_3)
772 mask = E1000_SWFW_PHY3_SM;
773
774 hw->mac.ops.release_swfw_sync(hw, mask);
775 }
776
777 /**
778 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
779 * @hw: pointer to the HW structure
780 * @offset: register offset to be read
781 * @data: pointer to the read data
782 *
783 * Reads the PHY register at offset using the serial gigabit media independent
784 * interface and stores the retrieved information in data.
785 **/
786 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
787 u16 *data)
788 {
789 s32 ret_val = -E1000_ERR_PARAM;
790
791 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
792 hw_dbg("PHY Address %u is out of range\n", offset);
793 goto out;
794 }
795
796 ret_val = hw->phy.ops.acquire(hw);
797 if (ret_val)
798 goto out;
799
800 ret_val = igb_read_phy_reg_i2c(hw, offset, data);
801
802 hw->phy.ops.release(hw);
803
804 out:
805 return ret_val;
806 }
807
808 /**
809 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
810 * @hw: pointer to the HW structure
811 * @offset: register offset to write to
812 * @data: data to write at register offset
813 *
814 * Writes the data to PHY register at the offset using the serial gigabit
815 * media independent interface.
816 **/
817 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
818 u16 data)
819 {
820 s32 ret_val = -E1000_ERR_PARAM;
821
822
823 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
824 hw_dbg("PHY Address %d is out of range\n", offset);
825 goto out;
826 }
827
828 ret_val = hw->phy.ops.acquire(hw);
829 if (ret_val)
830 goto out;
831
832 ret_val = igb_write_phy_reg_i2c(hw, offset, data);
833
834 hw->phy.ops.release(hw);
835
836 out:
837 return ret_val;
838 }
839
840 /**
841 * igb_get_phy_id_82575 - Retrieve PHY addr and id
842 * @hw: pointer to the HW structure
843 *
844 * Retrieves the PHY address and ID for both PHYs which do and do not use
845 * the sgmii interface.
846 **/
847 static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
848 {
849 struct e1000_phy_info *phy = &hw->phy;
850 s32 ret_val = 0;
851 u16 phy_id;
852 u32 ctrl_ext;
853 u32 mdic;
854
855 /* Extra read required for some PHYs on i354 */
856 if (hw->mac.type == e1000_i354)
857 igb_get_phy_id(hw);
858
859 /* For SGMII PHYs, we try the list of possible addresses until
860 * we find one that works. For non-SGMII PHYs
861 * (e.g. integrated copper PHYs), an address of 1 should
862 * work. The result of this function should mean phy->phy_addr
863 * and phy->id are set correctly.
864 */
865 if (!(igb_sgmii_active_82575(hw))) {
866 phy->addr = 1;
867 ret_val = igb_get_phy_id(hw);
868 goto out;
869 }
870
871 if (igb_sgmii_uses_mdio_82575(hw)) {
872 switch (hw->mac.type) {
873 case e1000_82575:
874 case e1000_82576:
875 mdic = rd32(E1000_MDIC);
876 mdic &= E1000_MDIC_PHY_MASK;
877 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
878 break;
879 case e1000_82580:
880 case e1000_i350:
881 case e1000_i354:
882 case e1000_i210:
883 case e1000_i211:
884 mdic = rd32(E1000_MDICNFG);
885 mdic &= E1000_MDICNFG_PHY_MASK;
886 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
887 break;
888 default:
889 ret_val = -E1000_ERR_PHY;
890 goto out;
891 }
892 ret_val = igb_get_phy_id(hw);
893 goto out;
894 }
895
896 /* Power on sgmii phy if it is disabled */
897 ctrl_ext = rd32(E1000_CTRL_EXT);
898 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
899 wrfl();
900 msleep(300);
901
902 /* The address field in the I2CCMD register is 3 bits and 0 is invalid.
903 * Therefore, we need to test 1-7
904 */
905 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
906 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
907 if (ret_val == 0) {
908 hw_dbg("Vendor ID 0x%08X read at address %u\n",
909 phy_id, phy->addr);
910 /* At the time of this writing, the M88 part is
911 * the only supported SGMII PHY product.
912 */
913 if (phy_id == M88_VENDOR)
914 break;
915 } else {
916 hw_dbg("PHY address %u was unreadable\n", phy->addr);
917 }
918 }
919
920 /* A valid PHY type couldn't be found. */
921 if (phy->addr == 8) {
922 phy->addr = 0;
923 ret_val = -E1000_ERR_PHY;
924 goto out;
925 } else {
926 ret_val = igb_get_phy_id(hw);
927 }
928
929 /* restore previous sfp cage power state */
930 wr32(E1000_CTRL_EXT, ctrl_ext);
931
932 out:
933 return ret_val;
934 }
935
936 /**
937 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
938 * @hw: pointer to the HW structure
939 *
940 * Resets the PHY using the serial gigabit media independent interface.
941 **/
942 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
943 {
944 struct e1000_phy_info *phy = &hw->phy;
945 s32 ret_val;
946
947 /* This isn't a true "hard" reset, but is the only reset
948 * available to us at this time.
949 */
950
951 hw_dbg("Soft resetting SGMII attached PHY...\n");
952
953 /* SFP documentation requires the following to configure the SFP module
954 * to work on SGMII. No further documentation is given.
955 */
956 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
957 if (ret_val)
958 goto out;
959
960 ret_val = igb_phy_sw_reset(hw);
961 if (ret_val)
962 goto out;
963
964 if (phy->id == M88E1512_E_PHY_ID)
965 ret_val = igb_initialize_M88E1512_phy(hw);
966 if (phy->id == M88E1543_E_PHY_ID)
967 ret_val = igb_initialize_M88E1543_phy(hw);
968 out:
969 return ret_val;
970 }
971
972 /**
973 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
974 * @hw: pointer to the HW structure
975 * @active: true to enable LPLU, false to disable
976 *
977 * Sets the LPLU D0 state according to the active flag. When
978 * activating LPLU this function also disables smart speed
979 * and vice versa. LPLU will not be activated unless the
980 * device autonegotiation advertisement meets standards of
981 * either 10 or 10/100 or 10/100/1000 at all duplexes.
982 * This is a function pointer entry point only called by
983 * PHY setup routines.
984 **/
985 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
986 {
987 struct e1000_phy_info *phy = &hw->phy;
988 s32 ret_val;
989 u16 data;
990
991 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
992 if (ret_val)
993 goto out;
994
995 if (active) {
996 data |= IGP02E1000_PM_D0_LPLU;
997 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
998 data);
999 if (ret_val)
1000 goto out;
1001
1002 /* When LPLU is enabled, we should disable SmartSpeed */
1003 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1004 &data);
1005 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1006 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1007 data);
1008 if (ret_val)
1009 goto out;
1010 } else {
1011 data &= ~IGP02E1000_PM_D0_LPLU;
1012 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1013 data);
1014 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1015 * during Dx states where the power conservation is most
1016 * important. During driver activity we should enable
1017 * SmartSpeed, so performance is maintained.
1018 */
1019 if (phy->smart_speed == e1000_smart_speed_on) {
1020 ret_val = phy->ops.read_reg(hw,
1021 IGP01E1000_PHY_PORT_CONFIG, &data);
1022 if (ret_val)
1023 goto out;
1024
1025 data |= IGP01E1000_PSCFR_SMART_SPEED;
1026 ret_val = phy->ops.write_reg(hw,
1027 IGP01E1000_PHY_PORT_CONFIG, data);
1028 if (ret_val)
1029 goto out;
1030 } else if (phy->smart_speed == e1000_smart_speed_off) {
1031 ret_val = phy->ops.read_reg(hw,
1032 IGP01E1000_PHY_PORT_CONFIG, &data);
1033 if (ret_val)
1034 goto out;
1035
1036 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1037 ret_val = phy->ops.write_reg(hw,
1038 IGP01E1000_PHY_PORT_CONFIG, data);
1039 if (ret_val)
1040 goto out;
1041 }
1042 }
1043
1044 out:
1045 return ret_val;
1046 }
1047
1048 /**
1049 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
1050 * @hw: pointer to the HW structure
1051 * @active: true to enable LPLU, false to disable
1052 *
1053 * Sets the LPLU D0 state according to the active flag. When
1054 * activating LPLU this function also disables smart speed
1055 * and vice versa. LPLU will not be activated unless the
1056 * device autonegotiation advertisement meets standards of
1057 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1058 * This is a function pointer entry point only called by
1059 * PHY setup routines.
1060 **/
1061 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1062 {
1063 struct e1000_phy_info *phy = &hw->phy;
1064 u16 data;
1065
1066 data = rd32(E1000_82580_PHY_POWER_MGMT);
1067
1068 if (active) {
1069 data |= E1000_82580_PM_D0_LPLU;
1070
1071 /* When LPLU is enabled, we should disable SmartSpeed */
1072 data &= ~E1000_82580_PM_SPD;
1073 } else {
1074 data &= ~E1000_82580_PM_D0_LPLU;
1075
1076 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1077 * during Dx states where the power conservation is most
1078 * important. During driver activity we should enable
1079 * SmartSpeed, so performance is maintained.
1080 */
1081 if (phy->smart_speed == e1000_smart_speed_on)
1082 data |= E1000_82580_PM_SPD;
1083 else if (phy->smart_speed == e1000_smart_speed_off)
1084 data &= ~E1000_82580_PM_SPD;
}
1085
1086 wr32(E1000_82580_PHY_POWER_MGMT, data);
1087 return 0;
1088 }
1089
1090 /**
1091 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
1092 * @hw: pointer to the HW structure
1093 * @active: boolean used to enable/disable lplu
1094 *
1095 * Always returns 0.
1096 *
1097 * The low power link up (lplu) state is set to the power management level D3
1098 * and SmartSpeed is disabled when active is true, else clear lplu for D3
1099 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
1100 * is used during Dx states where the power conservation is most important.
1101 * During driver activity, SmartSpeed should be enabled so performance is
1102 * maintained.
1103 **/
1104 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1105 {
1106 struct e1000_phy_info *phy = &hw->phy;
1107 u16 data;
1108
1109 data = rd32(E1000_82580_PHY_POWER_MGMT);
1110
1111 if (!active) {
1112 data &= ~E1000_82580_PM_D3_LPLU;
1113 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1114 * during Dx states where the power conservation is most
1115 * important. During driver activity we should enable
1116 * SmartSpeed, so performance is maintained.
1117 */
1118 if (phy->smart_speed == e1000_smart_speed_on)
1119 data |= E1000_82580_PM_SPD;
1120 else if (phy->smart_speed == e1000_smart_speed_off)
1121 data &= ~E1000_82580_PM_SPD;
1122 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
1123 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1124 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1125 data |= E1000_82580_PM_D3_LPLU;
1126 /* When LPLU is enabled, we should disable SmartSpeed */
1127 data &= ~E1000_82580_PM_SPD;
1128 }
1129
1130 wr32(E1000_82580_PHY_POWER_MGMT, data);
1131 return 0;
1132 }
1133
1134 /**
1135 * igb_acquire_nvm_82575 - Request for access to EEPROM
1136 * @hw: pointer to the HW structure
1137 *
1138 * Acquire the necessary semaphores for exclusive access to the EEPROM.
1139 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1140 * Return successful if access grant bit set, else clear the request for
1141 * EEPROM access and return -E1000_ERR_NVM (-1).
1142 **/
1143 static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
1144 {
1145 s32 ret_val;
1146
1147 ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
1148 if (ret_val)
1149 goto out;
1150
1151 ret_val = igb_acquire_nvm(hw);
1152
1153 if (ret_val)
1154 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
1155
1156 out:
1157 return ret_val;
1158 }
1159
1160 /**
1161 * igb_release_nvm_82575 - Release exclusive access to EEPROM
1162 * @hw: pointer to the HW structure
1163 *
1164 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
1165 * then release the semaphores acquired.
1166 **/
1167 static void igb_release_nvm_82575(struct e1000_hw *hw)
1168 {
1169 igb_release_nvm(hw);
1170 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
1171 }
1172
1173 /**
1174 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
1175 * @hw: pointer to the HW structure
1176 * @mask: specifies which semaphore to acquire
1177 *
1178 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
1179 * will also specify which port we're acquiring the lock for.
1180 **/
1181 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1182 {
1183 u32 swfw_sync;
1184 u32 swmask = mask;
1185 u32 fwmask = mask << 16;
1186 s32 ret_val = 0;
1187 s32 i = 0, timeout = 200;
1188
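/* Poll for the SW/FW resource: up to 200 attempts with a 5 ms delay
 * between busy retries, roughly one second overall.
 */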
1189 while (i < timeout) {
1190 if (igb_get_hw_semaphore(hw)) {
1191 ret_val = -E1000_ERR_SWFW_SYNC;
1192 goto out;
1193 }
1194
1195 swfw_sync = rd32(E1000_SW_FW_SYNC);
1196 if (!(swfw_sync & (fwmask | swmask)))
1197 break;
1198
1199 /* Firmware currently using resource (fwmask)
1200 * or other software thread using resource (swmask)
1201 */
1202 igb_put_hw_semaphore(hw);
1203 mdelay(5);
1204 i++;
1205 }
1206
1207 if (i == timeout) {
1208 hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
1209 ret_val = -E1000_ERR_SWFW_SYNC;
1210 goto out;
1211 }
1212
1213 swfw_sync |= swmask;
1214 wr32(E1000_SW_FW_SYNC, swfw_sync);
1215
1216 igb_put_hw_semaphore(hw);
1217
1218 out:
1219 return ret_val;
1220 }
1221
1222 /**
1223 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
1224 * @hw: pointer to the HW structure
1225 * @mask: specifies which semaphore to acquire
1226 *
1227 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
1228 * will also specify which port we're releasing the lock for.
1229 **/
1230 static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1231 {
1232 u32 swfw_sync;
1233
1234 while (igb_get_hw_semaphore(hw) != 0)
1235 ; /* Empty */
1236
1237 swfw_sync = rd32(E1000_SW_FW_SYNC);
1238 swfw_sync &= ~mask;
1239 wr32(E1000_SW_FW_SYNC, swfw_sync);
1240
1241 igb_put_hw_semaphore(hw);
1242 }
1243
1244 /**
1245 * igb_get_cfg_done_82575 - Read config done bit
1246 * @hw: pointer to the HW structure
1247 *
1248 * Read the management control register for the config done bit for
1249 * completion status. NOTE: silicon which is EEPROM-less will fail trying
1250 * to read the config done bit, so the error is *ONLY* logged and 0 is
1251 * returned. If we were to return with an error, EEPROM-less silicon
1252 * would not be able to be reset or change link.
1253 **/
1254 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1255 {
1256 s32 timeout = PHY_CFG_TIMEOUT;
1257 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1258
1259 if (hw->bus.func == 1)
1260 mask = E1000_NVM_CFG_DONE_PORT_1;
1261 else if (hw->bus.func == E1000_FUNC_2)
1262 mask = E1000_NVM_CFG_DONE_PORT_2;
1263 else if (hw->bus.func == E1000_FUNC_3)
1264 mask = E1000_NVM_CFG_DONE_PORT_3;
1265
1266 while (timeout) {
1267 if (rd32(E1000_EEMNGCTL) & mask)
1268 break;
1269 usleep_range(1000, 2000);
1270 timeout--;
1271 }
1272 if (!timeout)
1273 hw_dbg("MNG configuration cycle has not completed.\n");
1274
1275 /* If EEPROM is not marked present, init the PHY manually */
1276 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
1277 (hw->phy.type == e1000_phy_igp_3))
1278 igb_phy_init_script_igp3(hw);
1279
1280 return 0;
1281 }
1282
1283 /**
1284 * igb_get_link_up_info_82575 - Get link speed/duplex info
1285 * @hw: pointer to the HW structure
1286 * @speed: stores the current speed
1287 * @duplex: stores the current duplex
1288 *
1289 * This is a wrapper function, if using the serial gigabit media independent
1290 * interface, use PCS to retrieve the link speed and duplex information.
1291 * Otherwise, use the generic function to get the link speed and duplex info.
1292 **/
1293 static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
1294 u16 *duplex)
1295 {
1296 s32 ret_val;
1297
1298 if (hw->phy.media_type != e1000_media_type_copper)
1299 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
1300 duplex);
1301 else
1302 ret_val = igb_get_speed_and_duplex_copper(hw, speed,
1303 duplex);
1304
1305 return ret_val;
1306 }
1307
1308 /**
1309 * igb_check_for_link_82575 - Check for link
1310 * @hw: pointer to the HW structure
1311 *
1312 * If sgmii is enabled, then use the pcs register to determine link, otherwise
1313 * use the generic interface for determining link.
1314 **/
1315 static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1316 {
1317 s32 ret_val;
1318 u16 speed, duplex;
1319
1320 if (hw->phy.media_type != e1000_media_type_copper) {
1321 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1322 &duplex);
1323 /* Use this flag to determine if link needs to be checked or
1324 * not. If we have link, clear the flag so that we do not
1325 * continue to check for link.
1326 */
1327 hw->mac.get_link_status = !hw->mac.serdes_has_link;
1328
1329 /* Configure Flow Control now that Auto-Neg has completed.
1330 * First, we need to restore the desired flow control
1331 * settings because we may have had to re-autoneg with a
1332 * different link partner.
1333 */
1334 ret_val = igb_config_fc_after_link_up(hw);
1335 if (ret_val)
1336 hw_dbg("Error configuring flow control\n");
1337 } else {
1338 ret_val = igb_check_for_copper_link(hw);
1339 }
1340
1341 return ret_val;
1342 }
1343
1344 /**
1345 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1346 * @hw: pointer to the HW structure
1347 **/
1348 void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1349 {
1350 u32 reg;
1351
1352
1353 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1354 !igb_sgmii_active_82575(hw))
1355 return;
1356
1357 /* Enable PCS to turn on link */
1358 reg = rd32(E1000_PCS_CFG0);
1359 reg |= E1000_PCS_CFG_PCS_EN;
1360 wr32(E1000_PCS_CFG0, reg);
1361
1362 /* Power up the laser */
1363 reg = rd32(E1000_CTRL_EXT);
1364 reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1365 wr32(E1000_CTRL_EXT, reg);
1366
1367 /* flush the write to verify completion */
1368 wrfl();
1369 usleep_range(1000, 2000);
1370 }
1371
1372 /**
1373 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1374 * @hw: pointer to the HW structure
1375 * @speed: stores the current speed
1376 * @duplex: stores the current duplex
1377 *
1378 * Using the physical coding sub-layer (PCS), retrieve the current speed and
1379 * duplex, then store the values in the pointers provided.
1380 **/
1381 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1382 u16 *duplex)
1383 {
1384 struct e1000_mac_info *mac = &hw->mac;
1385 u32 pcs, status;
1386
1387 /* Set up defaults for the return values of this function */
1388 mac->serdes_has_link = false;
1389 *speed = 0;
1390 *duplex = 0;
1391
1392 /* Read the PCS Status register for link state. For non-copper mode,
1393 * the status register is not accurate. The PCS status register is
1394 * used instead.
1395 */
1396 pcs = rd32(E1000_PCS_LSTAT);
1397
1398 /* The link up bit determines when link is up on autoneg. The sync ok
1399 * gets set once both sides sync up and agree upon link. Stable link
1400 * can be determined by checking for both link up and link sync ok
1401 */
1402 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1403 mac->serdes_has_link = true;
1404
1405 /* Detect and store PCS speed */
1406 if (pcs & E1000_PCS_LSTS_SPEED_1000)
1407 *speed = SPEED_1000;
1408 else if (pcs & E1000_PCS_LSTS_SPEED_100)
1409 *speed = SPEED_100;
1410 else
1411 *speed = SPEED_10;
1412
1413 /* Detect and store PCS duplex */
1414 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
1415 *duplex = FULL_DUPLEX;
1416 else
1417 *duplex = HALF_DUPLEX;
1418
1419 /* Check if it is an I354 2.5Gb backplane connection. */
1420 if (mac->type == e1000_i354) {
1421 status = rd32(E1000_STATUS);
1422 if ((status & E1000_STATUS_2P5_SKU) &&
1423 !(status & E1000_STATUS_2P5_SKU_OVER)) {
1424 *speed = SPEED_2500;
1425 *duplex = FULL_DUPLEX;
1426 hw_dbg("2500 Mbs, ");
1427 hw_dbg("Full Duplex\n");
1428 }
1429 }
1430
1431 }
1432
1433 return 0;
1434 }
1435
1436 /**
1437 * igb_shutdown_serdes_link_82575 - Remove link during power down
1438 * @hw: pointer to the HW structure
1439 *
1440 * In the case of fiber serdes, shut down optics and PCS on driver unload
1441 * when management pass thru is not enabled.
1442 **/
1443 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1444 {
1445 u32 reg;
1446
1447 if (hw->phy.media_type != e1000_media_type_internal_serdes &&
1448 igb_sgmii_active_82575(hw))
1449 return;
1450
1451 if (!igb_enable_mng_pass_thru(hw)) {
1452 /* Disable PCS to turn off link */
1453 reg = rd32(E1000_PCS_CFG0);
1454 reg &= ~E1000_PCS_CFG_PCS_EN;
1455 wr32(E1000_PCS_CFG0, reg);
1456
1457 /* shutdown the laser */
1458 reg = rd32(E1000_CTRL_EXT);
1459 reg |= E1000_CTRL_EXT_SDP3_DATA;
1460 wr32(E1000_CTRL_EXT, reg);
1461
1462 /* flush the write to verify completion */
1463 wrfl();
1464 usleep_range(1000, 2000);
1465 }
1466 }
1467
1468 /**
1469 * igb_reset_hw_82575 - Reset hardware
1470 * @hw: pointer to the HW structure
1471 *
1472 * This resets the hardware into a known state. This is a
1473 * function pointer entry point called by the api module.
1474 **/
1475 static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1476 {
1477 u32 ctrl;
1478 s32 ret_val;
1479
1480 /* Prevent the PCI-E bus from sticking if there is no TLP connection
1481 * on the last TLP read/write transaction when MAC is reset.
1482 */
1483 ret_val = igb_disable_pcie_master(hw);
1484 if (ret_val)
1485 hw_dbg("PCI-E Master disable polling has failed.\n");
1486
1487 /* set the completion timeout for interface */
1488 ret_val = igb_set_pcie_completion_timeout(hw);
1489 if (ret_val)
1490 hw_dbg("PCI-E Set completion timeout has failed.\n");
1491
1492 hw_dbg("Masking off all interrupts\n");
1493 wr32(E1000_IMC, 0xffffffff);
1494
1495 wr32(E1000_RCTL, 0);
1496 wr32(E1000_TCTL, E1000_TCTL_PSP);
1497 wrfl();
1498
1499 usleep_range(10000, 20000);
1500
1501 ctrl = rd32(E1000_CTRL);
1502
1503 hw_dbg("Issuing a global reset to MAC\n");
1504 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
1505
1506 ret_val = igb_get_auto_rd_done(hw);
1507 if (ret_val) {
1508 /* When auto config read does not complete, do not
1509 * return with an error. This can happen in situations
1510 * where there is no eeprom and prevents getting link.
1511 */
1512 hw_dbg("Auto Read Done did not complete\n");
1513 }
1514
1515 /* If EEPROM is not present, run manual init scripts */
1516 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1517 igb_reset_init_script_82575(hw);
1518
1519 /* Clear any pending interrupt events. */
1520 wr32(E1000_IMC, 0xffffffff);
1521 rd32(E1000_ICR);
1522
1523 /* Install any alternate MAC address into RAR0 */
1524 ret_val = igb_check_alt_mac_addr(hw);
1525
1526 return ret_val;
1527 }
1528
1529 /**
1530 * igb_init_hw_82575 - Initialize hardware
1531 * @hw: pointer to the HW structure
1532 *
1533 * This inits the hardware readying it for operation.
1534 **/
1535 static s32 igb_init_hw_82575(struct e1000_hw *hw)
1536 {
1537 struct e1000_mac_info *mac = &hw->mac;
1538 s32 ret_val;
1539 u16 i, rar_count = mac->rar_entry_count;
1540
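/* Flashless i210-family parts need the PLL workaround applied before the
 * rest of hardware init.
 */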
1541 if ((hw->mac.type >= e1000_i210) &&
1542 !(igb_get_flash_presence_i210(hw))) {
1543 ret_val = igb_pll_workaround_i210(hw);
1544 if (ret_val)
1545 return ret_val;
1546 }
1547
1548 /* Initialize identification LED */
1549 ret_val = igb_id_led_init(hw);
1550 if (ret_val) {
1551 hw_dbg("Error initializing identification LED\n");
1552 /* This is not fatal and we should not stop init due to this */
1553 }
1554
1555 /* Disabling VLAN filtering */
1556 hw_dbg("Initializing the IEEE VLAN\n");
1557 igb_clear_vfta(hw);
1558
1559 /* Setup the receive address */
1560 igb_init_rx_addrs(hw, rar_count);
1561
1562 /* Zero out the Multicast HASH table */
1563 hw_dbg("Zeroing the MTA\n");
1564 for (i = 0; i < mac->mta_reg_count; i++)
1565 array_wr32(E1000_MTA, i, 0);
1566
1567 /* Zero out the Unicast HASH table */
1568 hw_dbg("Zeroing the UTA\n");
1569 for (i = 0; i < mac->uta_reg_count; i++)
1570 array_wr32(E1000_UTA, i, 0);
1571
1572 /* Setup link and flow control */
1573 ret_val = igb_setup_link(hw);
1574
1575 /* Clear all of the statistics registers (clear on read). It is
1576 * important that we do this after we have tried to establish link
1577 * because the symbol error count will increment wildly if there
1578 * is no link.
1579 */
1580 igb_clear_hw_cntrs_82575(hw);
1581 return ret_val;
1582 }
1583
1584 /**
1585 * igb_setup_copper_link_82575 - Configure copper link settings
1586 * @hw: pointer to the HW structure
1587 *
1588 * Configures the link for auto-neg or forced speed and duplex. Then we check
1589 * for link, once link is established calls to configure collision distance
1590 * and flow control are called.
1591 **/
1592 static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1593 {
1594 u32 ctrl;
1595 s32 ret_val;
1596 u32 phpm_reg;
1597
1598 ctrl = rd32(E1000_CTRL);
1599 ctrl |= E1000_CTRL_SLU;
1600 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1601 wr32(E1000_CTRL, ctrl);
1602
1603 /* Clear Go Link Disconnect bit on supported devices */
1604 switch (hw->mac.type) {
1605 case e1000_82580:
1606 case e1000_i350:
1607 case e1000_i210:
1608 case e1000_i211:
1609 case e1000_i354:
1610 phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
1611 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1612 wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
1613 break;
1614 default:
1615 break;
1616 }
1617
1618 ret_val = igb_setup_serdes_link_82575(hw);
1619 if (ret_val)
1620 goto out;
1621
1622 if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1623 /* allow time for the SFP cage to power up the phy */
1624 msleep(300);
1625
1626 ret_val = hw->phy.ops.reset(hw);
1627 if (ret_val) {
1628 hw_dbg("Error resetting the PHY.\n");
1629 goto out;
1630 }
1631 }
1632 switch (hw->phy.type) {
1633 case e1000_phy_i210:
1634 case e1000_phy_m88:
1635 switch (hw->phy.id) {
1636 case I347AT4_E_PHY_ID:
1637 case M88E1112_E_PHY_ID:
1638 case M88E1543_E_PHY_ID:
1639 case M88E1512_E_PHY_ID:
1640 case I210_I_PHY_ID:
1641 ret_val = igb_copper_link_setup_m88_gen2(hw);
1642 break;
1643 default:
1644 ret_val = igb_copper_link_setup_m88(hw);
1645 break;
1646 }
1647 break;
1648 case e1000_phy_igp_3:
1649 ret_val = igb_copper_link_setup_igp(hw);
1650 break;
1651 case e1000_phy_82580:
1652 ret_val = igb_copper_link_setup_82580(hw);
1653 break;
1654 case e1000_phy_bcm54616:
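/* No PHY-specific copper link setup is performed for the BCM54616 */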
1655 break;
1656 default:
1657 ret_val = -E1000_ERR_PHY;
1658 break;
1659 }
1660
1661 if (ret_val)
1662 goto out;
1663
1664 ret_val = igb_setup_copper_link(hw);
1665 out:
1666 return ret_val;
1667 }
1668
1669 /**
1670 * igb_setup_serdes_link_82575 - Setup link for serdes
1671 * @hw: pointer to the HW structure
1672 *
1673 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1674 * used on copper connections where the serial gigabit media independent
1675 * interface (sgmii) or serdes fiber is being used. Configures the link
1676 * for auto-negotiation or forces speed/duplex.
1677 **/
1678 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1679 {
1680 u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1681 bool pcs_autoneg;
1682 s32 ret_val = 0;
1683 u16 data;
1684
1685 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1686 !igb_sgmii_active_82575(hw))
1687 return ret_val;
1688
1689
1690 /* On the 82575, SerDes loopback mode persists until it is
1691 * explicitly turned off or a power cycle is performed. A read to
1692 * the register does not indicate its status. Therefore, we ensure
1693 * loopback mode is disabled during initialization.
1694 */
1695 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1696
1697 /* power on the sfp cage if present and turn on I2C */
1698 ctrl_ext = rd32(E1000_CTRL_EXT);
1699 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1700 ctrl_ext |= E1000_CTRL_I2C_ENA;
1701 wr32(E1000_CTRL_EXT, ctrl_ext);
1702
1703 ctrl_reg = rd32(E1000_CTRL);
1704 ctrl_reg |= E1000_CTRL_SLU;
1705
1706 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1707 /* set both sw defined pins */
1708 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1709
1710 /* Set switch control to serdes energy detect */
1711 reg = rd32(E1000_CONNSW);
1712 reg |= E1000_CONNSW_ENRGSRC;
1713 wr32(E1000_CONNSW, reg);
1714 }
1715
1716 reg = rd32(E1000_PCS_LCTL);
1717
1718 /* default pcs_autoneg to the same setting as mac autoneg */
1719 pcs_autoneg = hw->mac.autoneg;
1720
1721 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1722 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1723 /* sgmii mode lets the phy handle forcing speed/duplex */
1724 pcs_autoneg = true;
1725 /* autoneg time out should be disabled for SGMII mode */
1726 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1727 break;
1728 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1729 /* disable PCS autoneg and support parallel detect only */
1730 pcs_autoneg = false;
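/* fall through - 1000BASE-KX also uses the forced 1000/Full setup below */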
1731 default:
1732 if (hw->mac.type == e1000_82575 ||
1733 hw->mac.type == e1000_82576) {
1734 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1735 if (ret_val) {
1736 hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
1737 return ret_val;
1738 }
1739
1740 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1741 pcs_autoneg = false;
1742 }
1743
1744 /* non-SGMII modes only supports a speed of 1000/Full for the
1745 * link so it is best to just force the MAC and let the pcs
1746 * link either autoneg or be forced to 1000/Full
1747 */
1748 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1749 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1750
1751 /* set speed of 1000/Full if speed/duplex is forced */
1752 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1753 break;
1754 }
1755
1756 wr32(E1000_CTRL, ctrl_reg);
1757
1758 /* New SerDes mode allows for forcing speed or autonegotiating speed
1759 * at 1gb. Autoneg should be the default set by most drivers. This is the
1760 * mode that will be compatible with older link partners and switches.
1761 * However, both are supported by the hardware and some drivers/tools.
1762 */
1763 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1764 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1765
1766 if (pcs_autoneg) {
1767 /* Set PCS register for autoneg */
1768 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1769 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1770
1771 /* Disable force flow control for autoneg */
1772 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1773
1774 /* Configure flow control advertisement for autoneg */
1775 anadv_reg = rd32(E1000_PCS_ANADV);
1776 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1777 switch (hw->fc.requested_mode) {
1778 case e1000_fc_full:
1779 case e1000_fc_rx_pause:
1780 anadv_reg |= E1000_TXCW_ASM_DIR;
1781 anadv_reg |= E1000_TXCW_PAUSE;
1782 break;
1783 case e1000_fc_tx_pause:
1784 anadv_reg |= E1000_TXCW_ASM_DIR;
1785 break;
1786 default:
1787 break;
1788 }
1789 wr32(E1000_PCS_ANADV, anadv_reg);
1790
1791 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1792 } else {
1793 /* Set PCS register for forced link */
1794 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1795
1796 /* Force flow control for forced link */
1797 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1798
1799 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1800 }
1801
1802 wr32(E1000_PCS_LCTL, reg);
1803
1804 if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
1805 igb_force_mac_fc(hw);
1806
1807 return ret_val;
1808 }
1809
1810 /**
1811 * igb_sgmii_active_82575 - Return sgmii state
1812 * @hw: pointer to the HW structure
1813 *
1814 * 82575 silicon has a serial gigabit media independent interface (sgmii)
1815 * which can be enabled for use in embedded applications. Simply
1816 * return the current state of the sgmii interface.
1817 **/
1818 static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1819 {
1820 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1821 return dev_spec->sgmii_active;
1822 }
1823
1824 /**
1825 * igb_reset_init_script_82575 - Inits HW defaults after reset
1826 * @hw: pointer to the HW structure
1827 *
1828 * Inits recommended HW defaults after a reset when there is no EEPROM
1829 * detected. This is only for the 82575.
1830 **/
1831 static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1832 {
1833 if (hw->mac.type == e1000_82575) {
1834 hw_dbg("Running reset init script for 82575\n");
1835 /* SerDes configuration via SERDESCTRL */
1836 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1837 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1838 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1839 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1840
1841 /* CCM configuration via CCMCTL register */
1842 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1843 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1844
1845 /* PCIe lanes configuration */
1846 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1847 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1848 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1849 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1850
1851 /* PCIe PLL Configuration */
1852 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1853 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1854 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1855 }
1856
1857 return 0;
1858 }
1859
1860 /**
1861 * igb_read_mac_addr_82575 - Read device MAC address
1862 * @hw: pointer to the HW structure
1863 **/
1864 static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1865 {
1866 s32 ret_val = 0;
1867
1868 /* If there's an alternate MAC address place it in RAR0
1869 * so that it will override the Si installed default perm
1870 * address.
1871 */
1872 ret_val = igb_check_alt_mac_addr(hw);
1873 if (ret_val)
1874 goto out;
1875
1876 ret_val = igb_read_mac_addr(hw);
1877
1878 out:
1879 return ret_val;
1880 }
1881
1882 /**
1883 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1884 * @hw: pointer to the HW structure
1885 *
1886 * In the case of a PHY power down to save power, to turn off link during a
1887 * driver unload, or when wake on LAN is not enabled, remove the link.
1888 **/
1889 void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1890 {
1891 /* Power down only if management pass-through is disabled and the PHY is not reset-blocked */
1892 if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1893 igb_power_down_phy_copper(hw);
1894 }
1895
1896 /**
1897 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
1898 * @hw: pointer to the HW structure
1899 *
1900 * Clears the hardware counters by reading the counter registers.
1901 **/
1902 static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1903 {
1904 igb_clear_hw_cntrs_base(hw);
1905
1906 rd32(E1000_PRC64);
1907 rd32(E1000_PRC127);
1908 rd32(E1000_PRC255);
1909 rd32(E1000_PRC511);
1910 rd32(E1000_PRC1023);
1911 rd32(E1000_PRC1522);
1912 rd32(E1000_PTC64);
1913 rd32(E1000_PTC127);
1914 rd32(E1000_PTC255);
1915 rd32(E1000_PTC511);
1916 rd32(E1000_PTC1023);
1917 rd32(E1000_PTC1522);
1918
1919 rd32(E1000_ALGNERRC);
1920 rd32(E1000_RXERRC);
1921 rd32(E1000_TNCRS);
1922 rd32(E1000_CEXTERR);
1923 rd32(E1000_TSCTC);
1924 rd32(E1000_TSCTFC);
1925
1926 rd32(E1000_MGTPRC);
1927 rd32(E1000_MGTPDC);
1928 rd32(E1000_MGTPTC);
1929
1930 rd32(E1000_IAC);
1931 rd32(E1000_ICRXOC);
1932
1933 rd32(E1000_ICRXPTC);
1934 rd32(E1000_ICRXATC);
1935 rd32(E1000_ICTXPTC);
1936 rd32(E1000_ICTXATC);
1937 rd32(E1000_ICTXQEC);
1938 rd32(E1000_ICTXQMTC);
1939 rd32(E1000_ICRXDMTC);
1940
1941 rd32(E1000_CBTMPC);
1942 rd32(E1000_HTDPMC);
1943 rd32(E1000_CBRMPC);
1944 rd32(E1000_RPTHC);
1945 rd32(E1000_HGPTC);
1946 rd32(E1000_HTCBDPC);
1947 rd32(E1000_HGORCL);
1948 rd32(E1000_HGORCH);
1949 rd32(E1000_HGOTCL);
1950 rd32(E1000_HGOTCH);
1951 rd32(E1000_LENERRS);
1952
1953 /* This register should not be read in copper configurations */
1954 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1955 igb_sgmii_active_82575(hw))
1956 rd32(E1000_SCVPC);
1957 }
1958
1959 /**
1960 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1961 * @hw: pointer to the HW structure
1962 *
1963 * After rx enable, if manageability is enabled, then there is likely some
1964 * bad data at the start of the fifo and possibly in the DMA fifo. This
1965 * function clears the fifos and flushes any packets that came in as rx was
1966 * being enabled.
1967 **/
1968 void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1969 {
1970 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1971 int i, ms_wait;
1972
1973 /* disable IPv6 options as per hardware errata */
1974 rfctl = rd32(E1000_RFCTL);
1975 rfctl |= E1000_RFCTL_IPV6_EX_DIS;
1976 wr32(E1000_RFCTL, rfctl);
1977
1978 if (hw->mac.type != e1000_82575 ||
1979 !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1980 return;
1981
1982 /* Disable all RX queues */
1983 for (i = 0; i < 4; i++) {
1984 rxdctl[i] = rd32(E1000_RXDCTL(i));
1985 wr32(E1000_RXDCTL(i),
1986 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1987 }
1988 /* Poll all queues to verify they have shut down */
1989 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1990 usleep_range(1000, 2000);
1991 rx_enabled = 0;
1992 for (i = 0; i < 4; i++)
1993 rx_enabled |= rd32(E1000_RXDCTL(i));
1994 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1995 break;
1996 }
1997
1998 if (ms_wait == 10)
1999 hw_dbg("Queue disable timed out after 10ms\n");
2000
2001 /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
2002 * incoming packets are rejected. Set enable and wait 2ms so that
2003 * any packet that was coming in while RCTL.EN was set is flushed
2004 */
2005 wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
2006
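/* With RCTL.LPE set and RLPML forced to zero, every incoming frame
* exceeds the long-packet limit and is discarded while the FIFO drains.
*/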
2007 rlpml = rd32(E1000_RLPML);
2008 wr32(E1000_RLPML, 0);
2009
2010 rctl = rd32(E1000_RCTL);
2011 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
2012 temp_rctl |= E1000_RCTL_LPE;
2013
2014 wr32(E1000_RCTL, temp_rctl);
2015 wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
2016 wrfl();
2017 usleep_range(2000, 3000);
2018
2019 /* Enable RX queues that were previously enabled and restore our
2020 * previous state
2021 */
2022 for (i = 0; i < 4; i++)
2023 wr32(E1000_RXDCTL(i), rxdctl[i]);
2024 wr32(E1000_RCTL, rctl);
2025 wrfl();
2026
2027 wr32(E1000_RLPML, rlpml);
2028 wr32(E1000_RFCTL, rfctl);
2029
2030 /* Flush receive errors generated by workaround */
2031 rd32(E1000_ROC);
2032 rd32(E1000_RNBC);
2033 rd32(E1000_MPC);
2034 }
2035
2036 /**
2037 * igb_set_pcie_completion_timeout - set pci-e completion timeout
2038 * @hw: pointer to the HW structure
2039 *
2040 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
2041 * however, the hardware default for these parts is 500us to 1ms, which is
2042 * less than the 10ms recommended by the pci-e spec. To address this we need
2043 * to increase the value to either 10ms to 200ms for capability version 1
2044 * config, or 16ms to 55ms for version 2.
2045 **/
2046 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
2047 {
2048 u32 gcr = rd32(E1000_GCR);
2049 s32 ret_val = 0;
2050 u16 pcie_devctl2;
2051
2052 /* only take action if timeout value is defaulted to 0 */
2053 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
2054 goto out;
2055
2056 /* if capabilities version is type 1 we can write the
2057 * timeout of 10ms to 200ms through the GCR register
2058 */
2059 if (!(gcr & E1000_GCR_CAP_VER2)) {
2060 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
2061 goto out;
2062 }
2063
2064 /* for version 2 capabilities we need to write the config space
2065 * directly in order to set the completion timeout value for
2066 * 16ms to 55ms
2067 */
2068 ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2069 &pcie_devctl2);
2070 if (ret_val)
2071 goto out;
2072
2073 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
2074
2075 ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2076 &pcie_devctl2);
2077 out:
2078 /* disable completion timeout resend */
2079 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
2080
2081 wr32(E1000_GCR, gcr);
2082 return ret_val;
2083 }
2084
2085 /**
2086 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
2087 * @hw: pointer to the hardware struct
2088 * @enable: state to enter, either enabled or disabled
2089 * @pf: Physical Function pool - do not set anti-spoofing for the PF
2090 *
2091 * enables/disables L2 switch anti-spoofing functionality.
2092 **/
2093 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
2094 {
2095 u32 reg_val, reg_offset;
2096
2097 switch (hw->mac.type) {
2098 case e1000_82576:
2099 reg_offset = E1000_DTXSWC;
2100 break;
2101 case e1000_i350:
2102 case e1000_i354:
2103 reg_offset = E1000_TXSWC;
2104 break;
2105 default:
2106 return;
2107 }
2108
2109 reg_val = rd32(reg_offset);
2110 if (enable) {
2111 reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
2112 E1000_DTXSWC_VLAN_SPOOF_MASK);
2113 /* The PF can spoof - it has to in order to
2114 * support emulation mode NICs
2115 */
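/* The XOR clears the MAC (BIT(pf)) and VLAN (BIT(pf + MAX_NUM_VFS))
* spoof-check bits for the PF's pool that the mask above just set,
* leaving spoof checking enabled only for the VFs.
*/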
2116 reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
2117 } else {
2118 reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
2119 E1000_DTXSWC_VLAN_SPOOF_MASK);
2120 }
2121 wr32(reg_offset, reg_val);
2122 }
2123
2124 /**
2125 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
2126 * @hw: pointer to the hardware struct
2127 * @enable: state to enter, either enabled or disabled
2128 *
2129 * enables/disables L2 switch loopback functionality.
2130 **/
2131 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
2132 {
2133 u32 dtxswc;
2134
2135 switch (hw->mac.type) {
2136 case e1000_82576:
2137 dtxswc = rd32(E1000_DTXSWC);
2138 if (enable)
2139 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2140 else
2141 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2142 wr32(E1000_DTXSWC, dtxswc);
2143 break;
2144 case e1000_i354:
2145 case e1000_i350:
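/* i350/i354 expose this control in TXSWC; the same VMDQ loopback
* enable bit is used, so the DTXSWC define is reused here.
*/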
2146 dtxswc = rd32(E1000_TXSWC);
2147 if (enable)
2148 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2149 else
2150 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2151 wr32(E1000_TXSWC, dtxswc);
2152 break;
2153 default:
2154 /* Currently no other hardware supports loopback */
2155 break;
2156 }
2157
2158 }
2159
2160 /**
2161 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
2162 * @hw: pointer to the hardware struct
2163 * @enable: state to enter, either enabled or disabled
2164 *
2165 * enables/disables replication of packets across multiple pools.
2166 **/
2167 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
2168 {
2169 u32 vt_ctl = rd32(E1000_VT_CTL);
2170
2171 if (enable)
2172 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
2173 else
2174 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
2175
2176 wr32(E1000_VT_CTL, vt_ctl);
2177 }
2178
2179 /**
2180 * igb_read_phy_reg_82580 - Read 82580 MDI control register
2181 * @hw: pointer to the HW structure
2182 * @offset: register offset to be read
2183 * @data: pointer to the read data
2184 *
2185 * Reads the MDI control register in the PHY at offset and stores the
2186 * information read to data.
2187 **/
2188 s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
2189 {
2190 s32 ret_val;
2191
2192 ret_val = hw->phy.ops.acquire(hw);
2193 if (ret_val)
2194 goto out;
2195
2196 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
2197
2198 hw->phy.ops.release(hw);
2199
2200 out:
2201 return ret_val;
2202 }
2203
2204 /**
2205 * igb_write_phy_reg_82580 - Write 82580 MDI control register
2206 * @hw: pointer to the HW structure
2207 * @offset: register offset to write to
2208 * @data: data to write to register at offset
2209 *
2210 * Writes data to MDI control register in the PHY at offset.
2211 **/
2212 s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
2213 {
2214 s32 ret_val;
2215 
2217 ret_val = hw->phy.ops.acquire(hw);
2218 if (ret_val)
2219 goto out;
2220
2221 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
2222
2223 hw->phy.ops.release(hw);
2224
2225 out:
2226 return ret_val;
2227 }
2228
2229 /**
2230 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
2231 * @hw: pointer to the HW structure
2232 *
2233 * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
2234 * the values found in the EEPROM. This addresses an issue in which these
2235 * bits are not restored from EEPROM after reset.
2236 **/
2237 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
2238 {
2239 s32 ret_val = 0;
2240 u32 mdicnfg;
2241 u16 nvm_data = 0;
2242
2243 if (hw->mac.type != e1000_82580)
2244 goto out;
2245 if (!igb_sgmii_active_82575(hw))
2246 goto out;
2247
2248 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2249 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2250 &nvm_data);
2251 if (ret_val) {
2252 hw_dbg("NVM Read Error\n");
2253 goto out;
2254 }
2255
2256 mdicnfg = rd32(E1000_MDICNFG);
2257 if (nvm_data & NVM_WORD24_EXT_MDIO)
2258 mdicnfg |= E1000_MDICNFG_EXT_MDIO;
2259 if (nvm_data & NVM_WORD24_COM_MDIO)
2260 mdicnfg |= E1000_MDICNFG_COM_MDIO;
2261 wr32(E1000_MDICNFG, mdicnfg);
2262 out:
2263 return ret_val;
2264 }
2265
2266 /**
2267 * igb_reset_hw_82580 - Reset hardware
2268 * @hw: pointer to the HW structure
2269 *
2270 * This resets the function or the entire device (all ports, etc.)
2271 * to a known state.
2272 **/
2273 static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2274 {
2275 s32 ret_val = 0;
2276 /* BH SW mailbox bit in SW_FW_SYNC */
2277 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
2278 u32 ctrl;
2279 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
2280
2281 hw->dev_spec._82575.global_device_reset = false;
2282
2283 /* due to hw errata, global device reset doesn't always
2284 * work on 82580
2285 */
2286 if (hw->mac.type == e1000_82580)
2287 global_device_reset = false;
2288
2289 /* Get current control state. */
2290 ctrl = rd32(E1000_CTRL);
2291
2292 /* Prevent the PCI-E bus from sticking if there is no TLP connection
2293 * on the last TLP read/write transaction when MAC is reset.
2294 */
2295 ret_val = igb_disable_pcie_master(hw);
2296 if (ret_val)
2297 hw_dbg("PCI-E Master disable polling has failed.\n");
2298
2299 hw_dbg("Masking off all interrupts\n");
2300 wr32(E1000_IMC, 0xffffffff);
2301 wr32(E1000_RCTL, 0);
2302 wr32(E1000_TCTL, E1000_TCTL_PSP);
2303 wrfl();
2304
2305 usleep_range(10000, 11000);
2306
2307 /* Determine whether or not a global dev reset is requested */
2308 if (global_device_reset &&
2309 hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
2310 global_device_reset = false;
2311
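/* Fall back to a normal port reset if a global reset was not requested,
* the semaphore could not be taken, or a device reset is already
* pending (DEV_RST_SET).
*/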
2312 if (global_device_reset &&
2313 !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
2314 ctrl |= E1000_CTRL_DEV_RST;
2315 else
2316 ctrl |= E1000_CTRL_RST;
2317
2318 wr32(E1000_CTRL, ctrl);
2319 wrfl();
2320
2321 /* Add delay to ensure DEV_RST has time to complete */
2322 if (global_device_reset)
2323 usleep_range(5000, 6000);
2324
2325 ret_val = igb_get_auto_rd_done(hw);
2326 if (ret_val) {
2327 /* When auto config read does not complete, do not
2328 * return with an error. This can happen in situations
2329 * where there is no eeprom and prevents getting link.
2330 */
2331 hw_dbg("Auto Read Done did not complete\n");
2332 }
2333
2334 /* clear global device reset status bit */
2335 wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
2336
2337 /* Clear any pending interrupt events. */
2338 wr32(E1000_IMC, 0xffffffff);
2339 rd32(E1000_ICR);
2340
2341 ret_val = igb_reset_mdicnfg_82580(hw);
2342 if (ret_val)
2343 hw_dbg("Could not reset MDICNFG based on EEPROM\n");
2344
2345 /* Install any alternate MAC address into RAR0 */
2346 ret_val = igb_check_alt_mac_addr(hw);
2347
2348 /* Release semaphore */
2349 if (global_device_reset)
2350 hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
2351
2352 return ret_val;
2353 }
2354
2355 /**
2356 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
2357 * @data: data received by reading RXPBS register
2358 *
2359 * The 82580 uses a table based approach for packet buffer allocation sizes.
2360 * This function converts the retrieved value into the correct table value:
2361 *         0x0  0x1  0x2  0x3  0x4  0x5  0x6  0x7
2362 *   0x0    36   72  144    1    2    4    8   16
2363 *   0x8    35   70  140  rsv  rsv  rsv  rsv  rsv
2364 */
2365 u16 igb_rxpbs_adjust_82580(u32 data)
2366 {
2367 u16 ret_val = 0;
2368
2369 if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
2370 ret_val = e1000_82580_rxpbs_table[data];
2371
2372 return ret_val;
2373 }
2374
2375 /**
2376 * igb_validate_nvm_checksum_with_offset - Validate EEPROM
2377 * checksum
2378 * @hw: pointer to the HW structure
2379 * @offset: offset in words of the checksum protected region
2380 *
2381 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2382 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2383 **/
2384 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2385 u16 offset)
2386 {
2387 s32 ret_val = 0;
2388 u16 checksum = 0;
2389 u16 i, nvm_data;
2390
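/* Sum every word up to and including the checksum word itself;
* a valid image sums to NVM_SUM (0xBABA).
*/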
2391 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2392 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2393 if (ret_val) {
2394 hw_dbg("NVM Read Error\n");
2395 goto out;
2396 }
2397 checksum += nvm_data;
2398 }
2399
2400 if (checksum != (u16) NVM_SUM) {
2401 hw_dbg("NVM Checksum Invalid\n");
2402 ret_val = -E1000_ERR_NVM;
2403 goto out;
2404 }
2405
2406 out:
2407 return ret_val;
2408 }
2409
2410 /**
2411 * igb_update_nvm_checksum_with_offset - Update EEPROM
2412 * checksum
2413 * @hw: pointer to the HW structure
2414 * @offset: offset in words of the checksum protected region
2415 *
2416 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2417 * up to the checksum. Then calculates the EEPROM checksum and writes the
2418 * value to the EEPROM.
2419 **/
2420 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2421 {
2422 s32 ret_val;
2423 u16 checksum = 0;
2424 u16 i, nvm_data;
2425
2426 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2427 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2428 if (ret_val) {
2429 hw_dbg("NVM Read Error while updating checksum.\n");
2430 goto out;
2431 }
2432 checksum += nvm_data;
2433 }
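/* Store the checksum word value that makes the sum of all words,
* including the checksum word itself, equal NVM_SUM.
*/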
2434 checksum = (u16) NVM_SUM - checksum;
2435 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2436 &checksum);
2437 if (ret_val)
2438 hw_dbg("NVM Write Error while updating checksum.\n");
2439
2440 out:
2441 return ret_val;
2442 }
2443
2444 /**
2445 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
2446 * @hw: pointer to the HW structure
2447 *
2448 * Calculates the EEPROM section checksum by reading/adding each word of
2449 * the EEPROM and then verifies that the sum of the EEPROM is
2450 * equal to 0xBABA.
2451 **/
2452 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2453 {
2454 s32 ret_val = 0;
2455 u16 eeprom_regions_count = 1;
2456 u16 j, nvm_data;
2457 u16 nvm_offset;
2458
2459 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2460 if (ret_val) {
2461 hw_dbg("NVM Read Error\n");
2462 goto out;
2463 }
2464
2465 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2466 /* if checksums compatibility bit is set validate checksums
2467 * for all 4 ports.
2468 */
2469 eeprom_regions_count = 4;
2470 }
2471
2472 for (j = 0; j < eeprom_regions_count; j++) {
2473 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2474 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2475 nvm_offset);
2476 if (ret_val != 0)
2477 goto out;
2478 }
2479
2480 out:
2481 return ret_val;
2482 }
2483
2484 /**
2485 * igb_update_nvm_checksum_82580 - Update EEPROM checksum
2486 * @hw: pointer to the HW structure
2487 *
2488 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2489 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2490 * checksum and writes the value to the EEPROM.
2491 **/
2492 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2493 {
2494 s32 ret_val;
2495 u16 j, nvm_data;
2496 u16 nvm_offset;
2497
2498 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2499 if (ret_val) {
2500 hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
2501 goto out;
2502 }
2503
2504 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2505 /* set compatibility bit to validate checksums appropriately */
2506 nvm_data |= NVM_COMPATIBILITY_BIT_MASK;
2507 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2508 &nvm_data);
2509 if (ret_val) {
2510 hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
2511 goto out;
2512 }
2513 }
2514
2515 for (j = 0; j < 4; j++) {
2516 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2517 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2518 if (ret_val)
2519 goto out;
2520 }
2521
2522 out:
2523 return ret_val;
2524 }
2525
2526 /**
2527 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
2528 * @hw: pointer to the HW structure
2529 *
2530 * Calculates the EEPROM section checksum by reading/adding each word of
2531 * the EEPROM and then verifies that the sum of the EEPROM is
2532 * equal to 0xBABA.
2533 **/
2534 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2535 {
2536 s32 ret_val = 0;
2537 u16 j;
2538 u16 nvm_offset;
2539
2540 for (j = 0; j < 4; j++) {
2541 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2542 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2543 nvm_offset);
2544 if (ret_val != 0)
2545 goto out;
2546 }
2547
2548 out:
2549 return ret_val;
2550 }
2551
2552 /**
2553 * igb_update_nvm_checksum_i350 - Update EEPROM checksum
2554 * @hw: pointer to the HW structure
2555 *
2556 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2557 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2558 * checksum and writes the value to the EEPROM.
2559 **/
2560 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2561 {
2562 s32 ret_val = 0;
2563 u16 j;
2564 u16 nvm_offset;
2565
2566 for (j = 0; j < 4; j++) {
2567 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2568 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2569 if (ret_val != 0)
2570 goto out;
2571 }
2572
2573 out:
2574 return ret_val;
2575 }
2576
2577 /**
2578 * __igb_access_emi_reg - Read/write EMI register
2579 * @hw: pointer to the HW structure
2580 * @address: EMI address to program
2581 * @data: pointer to value to read/write from/to the EMI address
2582 * @read: boolean flag to indicate read or write
2583 **/
2584 static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2585 u16 *data, bool read)
2586 {
2587 s32 ret_val = 0;
2588
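/* EMI access is indirect: latch the target address in EMIADD, then
* read or write the value through EMIDATA.
*/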
2589 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2590 if (ret_val)
2591 return ret_val;
2592
2593 if (read)
2594 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
2595 else
2596 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
2597
2598 return ret_val;
2599 }
2600
2601 /**
2602 * igb_read_emi_reg - Read Extended Management Interface register
2603 * @hw: pointer to the HW structure
2604 * @addr: EMI address to program
2605 * @data: value to be read from the EMI address
2606 **/
2607 s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2608 {
2609 return __igb_access_emi_reg(hw, addr, data, true);
2610 }
2611
2612 /**
2613 * igb_set_eee_i350 - Enable/disable EEE support
2614 * @hw: pointer to the HW structure
2615 * @adv1G: boolean flag enabling 1G EEE advertisement
2616 * @adv100M: boolean flag enabling 100M EEE advertisement
2617 *
2618 * Enable/disable EEE based on setting in dev_spec structure.
2619 *
2620 **/
2621 s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
2622 {
2623 u32 ipcnfg, eeer;
2624
2625 if ((hw->mac.type < e1000_i350) ||
2626 (hw->phy.media_type != e1000_media_type_copper))
2627 goto out;
2628 ipcnfg = rd32(E1000_IPCNFG);
2629 eeer = rd32(E1000_EEER);
2630
2631 /* enable or disable per user setting */
2632 if (!(hw->dev_spec._82575.eee_disable)) {
2633 u32 eee_su = rd32(E1000_EEE_SU);
2634
2635 if (adv100M)
2636 ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
2637 else
2638 ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
2639
2640 if (adv1G)
2641 ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
2642 else
2643 ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
2644
2645 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2646 E1000_EEER_LPI_FC);
2647
2648 /* This bit should not be set in normal operation. */
2649 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2650 hw_dbg("LPI Clock Stop Bit should not be set!\n");
2651
2652 } else {
2653 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2654 E1000_IPCNFG_EEE_100M_AN);
2655 eeer &= ~(E1000_EEER_TX_LPI_EN |
2656 E1000_EEER_RX_LPI_EN |
2657 E1000_EEER_LPI_FC);
2658 }
2659 wr32(E1000_IPCNFG, ipcnfg);
2660 wr32(E1000_EEER, eeer);
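/* Read the registers back, presumably to flush the preceding writes. */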
2661 rd32(E1000_IPCNFG);
2662 rd32(E1000_EEER);
2663 out:
2664
2665 return 0;
2666 }
2667
2668 /**
2669 * igb_set_eee_i354 - Enable/disable EEE support
2670 * @hw: pointer to the HW structure
2671 * @adv1G: boolean flag enabling 1G EEE advertisement
2672 * @adv100M: boolean flag enabling 100M EEE advertisement
2673 *
2674 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
2675 *
2676 **/
2677 s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
2678 {
2679 struct e1000_phy_info *phy = &hw->phy;
2680 s32 ret_val = 0;
2681 u16 phy_data;
2682
2683 if ((hw->phy.media_type != e1000_media_type_copper) ||
2684 ((phy->id != M88E1543_E_PHY_ID) &&
2685 (phy->id != M88E1512_E_PHY_ID)))
2686 goto out;
2687
2688 if (!hw->dev_spec._82575.eee_disable) {
2689 /* Switch to PHY page 18. */
2690 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
2691 if (ret_val)
2692 goto out;
2693
2694 ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2695 &phy_data);
2696 if (ret_val)
2697 goto out;
2698
2699 phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
2700 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2701 phy_data);
2702 if (ret_val)
2703 goto out;
2704
2705 /* Return the PHY to page 0. */
2706 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
2707 if (ret_val)
2708 goto out;
2709
2710 /* Turn on EEE advertisement. */
2711 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2712 E1000_EEE_ADV_DEV_I354,
2713 &phy_data);
2714 if (ret_val)
2715 goto out;
2716
2717 if (adv100M)
2718 phy_data |= E1000_EEE_ADV_100_SUPPORTED;
2719 else
2720 phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
2721
2722 if (adv1G)
2723 phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
2724 else
2725 phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
2726
2727 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2728 E1000_EEE_ADV_DEV_I354,
2729 phy_data);
2730 } else {
2731 /* Turn off EEE advertisement. */
2732 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2733 E1000_EEE_ADV_DEV_I354,
2734 &phy_data);
2735 if (ret_val)
2736 goto out;
2737
2738 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
2739 E1000_EEE_ADV_1000_SUPPORTED);
2740 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2741 E1000_EEE_ADV_DEV_I354,
2742 phy_data);
2743 }
2744
2745 out:
2746 return ret_val;
2747 }
2748
2749 /**
2750 * igb_get_eee_status_i354 - Get EEE status
2751 * @hw: pointer to the HW structure
2752 * @status: EEE status
2753 *
2754 * Determine EEE status heuristically, based on whether Tx or Rx LPI
2755 * indications have been received.
2756 **/
2757 s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2758 {
2759 struct e1000_phy_info *phy = &hw->phy;
2760 s32 ret_val = 0;
2761 u16 phy_data;
2762
2763 /* Check if EEE is supported on this device. */
2764 if ((hw->phy.media_type != e1000_media_type_copper) ||
2765 ((phy->id != M88E1543_E_PHY_ID) &&
2766 (phy->id != M88E1512_E_PHY_ID)))
2767 goto out;
2768
2769 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
2770 E1000_PCS_STATUS_DEV_I354,
2771 &phy_data);
2772 if (ret_val)
2773 goto out;
2774
2775 *status = !!(phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
2776 E1000_PCS_STATUS_RX_LPI_RCVD));
2777
2778 out:
2779 return ret_val;
2780 }
2781
2782 static const u8 e1000_emc_temp_data[4] = {
2783 E1000_EMC_INTERNAL_DATA,
2784 E1000_EMC_DIODE1_DATA,
2785 E1000_EMC_DIODE2_DATA,
2786 E1000_EMC_DIODE3_DATA
2787 };
2788 static const u8 e1000_emc_therm_limit[4] = {
2789 E1000_EMC_INTERNAL_THERM_LIMIT,
2790 E1000_EMC_DIODE1_THERM_LIMIT,
2791 E1000_EMC_DIODE2_THERM_LIMIT,
2792 E1000_EMC_DIODE3_THERM_LIMIT
2793 };
2794
2795 #ifdef CONFIG_IGB_HWMON
2796 /**
2797 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
2798 * @hw: pointer to hardware structure
2799 *
2800 * Updates the temperatures in mac.thermal_sensor_data
2801 **/
2802 static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2803 {
2804 u16 ets_offset;
2805 u16 ets_cfg;
2806 u16 ets_sensor;
2807 u8 num_sensors;
2808 u8 sensor_index;
2809 u8 sensor_location;
2810 u8 i;
2811 struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2812
2813 if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2814 return E1000_NOT_IMPLEMENTED;
2815
2816 data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
2817
2818 /* Return the internal sensor only if ETS is unsupported */
2819 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2820 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2821 return 0;
2822
2823 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2824 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2825 != NVM_ETS_TYPE_EMC)
2826 return E1000_NOT_IMPLEMENTED;
2827
2828 num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2829 if (num_sensors > E1000_MAX_SENSORS)
2830 num_sensors = E1000_MAX_SENSORS;
2831
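/* sensor[0] is the on-die sensor read above from THMJT; the remaining
* entries come from the external EMC sensors described by the ETS words.
*/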
2832 for (i = 1; i < num_sensors; i++) {
2833 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2834 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2835 NVM_ETS_DATA_INDEX_SHIFT);
2836 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2837 NVM_ETS_DATA_LOC_SHIFT);
2838
2839 if (sensor_location != 0)
2840 hw->phy.ops.read_i2c_byte(hw,
2841 e1000_emc_temp_data[sensor_index],
2842 E1000_I2C_THERMAL_SENSOR_ADDR,
2843 &data->sensor[i].temp);
2844 }
2845 return 0;
2846 }
2847
2848 /**
2849 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
2850 * @hw: pointer to hardware structure
2851 *
2852 * Sets the thermal sensor thresholds according to the NVM map
2853 * and saves off the threshold and location values into mac.thermal_sensor_data
2854 **/
2855 static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2856 {
2857 u16 ets_offset;
2858 u16 ets_cfg;
2859 u16 ets_sensor;
2860 u8 low_thresh_delta;
2861 u8 num_sensors;
2862 u8 sensor_index;
2863 u8 sensor_location;
2864 u8 therm_limit;
2865 u8 i;
2866 struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2867
2868 if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2869 return E1000_NOT_IMPLEMENTED;
2870
2871 memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
2872
2873 data->sensor[0].location = 0x1;
2874 data->sensor[0].caution_thresh =
2875 (rd32(E1000_THHIGHTC) & 0xFF);
2876 data->sensor[0].max_op_thresh =
2877 (rd32(E1000_THLOWTC) & 0xFF);
2878
2879 /* Return the internal sensor only if ETS is unsupported */
2880 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2881 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2882 return 0;
2883
2884 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2885 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2886 != NVM_ETS_TYPE_EMC)
2887 return E1000_NOT_IMPLEMENTED;
2888
2889 low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
2890 NVM_ETS_LTHRES_DELTA_SHIFT);
2891 num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2892
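/* sensor[0] already holds the on-die thresholds set above; program each
* external EMC sensor's high limit over I2C and record it, together with
* its location, for at most E1000_MAX_SENSORS entries.
*/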
2893 for (i = 1; i <= num_sensors; i++) {
2894 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2895 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2896 NVM_ETS_DATA_INDEX_SHIFT);
2897 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2898 NVM_ETS_DATA_LOC_SHIFT);
2899 therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
2900
2901 hw->phy.ops.write_i2c_byte(hw,
2902 e1000_emc_therm_limit[sensor_index],
2903 E1000_I2C_THERMAL_SENSOR_ADDR,
2904 therm_limit);
2905
2906 if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
2907 data->sensor[i].location = sensor_location;
2908 data->sensor[i].caution_thresh = therm_limit;
2909 data->sensor[i].max_op_thresh = therm_limit -
2910 low_thresh_delta;
2911 }
2912 }
2913 return 0;
2914 }
2915
2916 #endif
2917 static struct e1000_mac_operations e1000_mac_ops_82575 = {
2918 .init_hw = igb_init_hw_82575,
2919 .check_for_link = igb_check_for_link_82575,
2920 .rar_set = igb_rar_set,
2921 .read_mac_addr = igb_read_mac_addr_82575,
2922 .get_speed_and_duplex = igb_get_link_up_info_82575,
2923 #ifdef CONFIG_IGB_HWMON
2924 .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
2925 .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
2926 #endif
2927 };
2928
2929 static const struct e1000_phy_operations e1000_phy_ops_82575 = {
2930 .acquire = igb_acquire_phy_82575,
2931 .get_cfg_done = igb_get_cfg_done_82575,
2932 .release = igb_release_phy_82575,
2933 .write_i2c_byte = igb_write_i2c_byte,
2934 .read_i2c_byte = igb_read_i2c_byte,
2935 };
2936
2937 static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
2938 .acquire = igb_acquire_nvm_82575,
2939 .read = igb_read_nvm_eerd,
2940 .release = igb_release_nvm_82575,
2941 .write = igb_write_nvm_spi,
2942 };
2943
2944 const struct e1000_info e1000_82575_info = {
2945 .get_invariants = igb_get_invariants_82575,
2946 .mac_ops = &e1000_mac_ops_82575,
2947 .phy_ops = &e1000_phy_ops_82575,
2948 .nvm_ops = &e1000_nvm_ops_82575,
2949 };
2950