1 /*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 /*
30 * 82562G 10/100 Network Connection
31 * 82562G-2 10/100 Network Connection
32 * 82562GT 10/100 Network Connection
33 * 82562GT-2 10/100 Network Connection
34 * 82562V 10/100 Network Connection
35 * 82562V-2 10/100 Network Connection
36 * 82566DC-2 Gigabit Network Connection
37 * 82566DC Gigabit Network Connection
38 * 82566DM-2 Gigabit Network Connection
39 * 82566DM Gigabit Network Connection
40 * 82566MC Gigabit Network Connection
41 * 82566MM Gigabit Network Connection
42 * 82567LM Gigabit Network Connection
43 * 82567LF Gigabit Network Connection
44 * 82567V Gigabit Network Connection
45 * 82567LM-2 Gigabit Network Connection
46 * 82567LF-2 Gigabit Network Connection
47 * 82567V-2 Gigabit Network Connection
48 * 82567LF-3 Gigabit Network Connection
49 * 82567LM-3 Gigabit Network Connection
50 * 82567LM-4 Gigabit Network Connection
51 * 82577LM Gigabit Network Connection
52 * 82577LC Gigabit Network Connection
53 * 82578DM Gigabit Network Connection
54 * 82578DC Gigabit Network Connection
55 * 82579LM Gigabit Network Connection
56 * 82579V Gigabit Network Connection
57 */
58
59 #include "e1000.h"
60
61 #define ICH_FLASH_GFPREG 0x0000
62 #define ICH_FLASH_HSFSTS 0x0004
63 #define ICH_FLASH_HSFCTL 0x0006
64 #define ICH_FLASH_FADDR 0x0008
65 #define ICH_FLASH_FDATA0 0x0010
66 #define ICH_FLASH_PR0 0x0074
67
68 #define ICH_FLASH_READ_COMMAND_TIMEOUT 500
69 #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
70 #define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
71 #define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
72 #define ICH_FLASH_CYCLE_REPEAT_COUNT 10
73
74 #define ICH_CYCLE_READ 0
75 #define ICH_CYCLE_WRITE 2
76 #define ICH_CYCLE_ERASE 3
77
78 #define FLASH_GFPREG_BASE_MASK 0x1FFF
79 #define FLASH_SECTOR_ADDR_SHIFT 12
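/* a "sector" is 4096 bytes, so shifting a sector number left by
 * FLASH_SECTOR_ADDR_SHIFT converts it to a byte address (see
 * e1000_init_nvm_params_ich8lan() below) */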
80
81 #define ICH_FLASH_SEG_SIZE_256 256
82 #define ICH_FLASH_SEG_SIZE_4K 4096
83 #define ICH_FLASH_SEG_SIZE_8K 8192
84 #define ICH_FLASH_SEG_SIZE_64K 65536
85
86
87 #define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
88 /* FW established a valid mode */
89 #define E1000_ICH_FWSM_FW_VALID 0x00008000
90
91 #define E1000_ICH_MNG_IAMT_MODE 0x2
92
93 #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
94 (ID_LED_DEF1_OFF2 << 8) | \
95 (ID_LED_DEF1_ON2 << 4) | \
96 (ID_LED_DEF1_DEF2))
97
98 #define E1000_ICH_NVM_SIG_WORD 0x13
99 #define E1000_ICH_NVM_SIG_MASK 0xC000
100 #define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
101 #define E1000_ICH_NVM_SIG_VALUE 0x80
102
103 #define E1000_ICH8_LAN_INIT_TIMEOUT 1500
104
105 #define E1000_FEXTNVM_SW_CONFIG 1
106 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107
108 #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
111
112 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
113
114 #define E1000_ICH_RAR_ENTRIES 7
115
116 #define PHY_PAGE_SHIFT 5
117 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
118 ((reg) & MAX_PHY_REG_ADDRESS))
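/* e.g. PHY_REG(770, 17) = (770 << PHY_PAGE_SHIFT) | 17 = 0x6051 (HV_PM_CTRL below) */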
119 #define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
120 #define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
121
122 #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
123 #define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
124 #define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
125
126 #define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
127
128 #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
129
130 /* SMBus Address Phy Register */
131 #define HV_SMB_ADDR PHY_REG(768, 26)
132 #define HV_SMB_ADDR_MASK 0x007F
133 #define HV_SMB_ADDR_PEC_EN 0x0200
134 #define HV_SMB_ADDR_VALID 0x0080
135
136 /* PHY Power Management Control */
137 #define HV_PM_CTRL PHY_REG(770, 17)
138
139 /* PHY Low Power Idle Control */
140 #define I82579_LPI_CTRL PHY_REG(772, 20)
141 #define I82579_LPI_CTRL_ENABLE_MASK 0x6000
142 #define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
143
144 /* EMI Registers */
145 #define I82579_EMI_ADDR 0x10
146 #define I82579_EMI_DATA 0x11
147 #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
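/* e.g. a value of 0x1387 (4999) gives (4999 + 1) * 40ns = 200us, as
 * programmed in e1000_post_phy_reset_ich8lan() below */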
148
149 /* Strapping Option Register - RO */
150 #define E1000_STRAP 0x0000C
151 #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
152 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
153
154 /* OEM Bits Phy Register */
155 #define HV_OEM_BITS PHY_REG(768, 25)
156 #define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
157 #define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
158 #define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
159
160 #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
161 #define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
162
163 /* KMRN Mode Control */
164 #define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
165 #define HV_KMRN_MDIO_SLOW 0x0400
166
167 /* KMRN FIFO Control and Status */
168 #define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
169 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
170 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
171
172 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
173 /* Offset 04h HSFSTS */
174 union ich8_hws_flash_status {
175 struct ich8_hsfsts {
176 u16 flcdone :1; /* bit 0 Flash Cycle Done */
177 u16 flcerr :1; /* bit 1 Flash Cycle Error */
178 u16 dael :1; /* bit 2 Direct Access error Log */
179 u16 berasesz :2; /* bit 4:3 Sector Erase Size */
180 u16 flcinprog :1; /* bit 5 flash cycle in Progress */
181 u16 reserved1 :2; /* bit 7:6 Reserved */
182 u16 reserved2 :6; /* bit 13:8 Reserved */
183 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
184 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
185 } hsf_status;
186 u16 regval;
187 };
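/*
 * Typical use (sketch): read the 16-bit HSFSTS register into the union and
 * test the individual bitfields, e.g.
 *
 *	union ich8_hws_flash_status hsfsts;
 *	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcinprog)
 *		return -E1000_ERR_NVM;	(a flash cycle is already in progress)
 */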
188
189 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
190 /* Offset 06h FLCTL */
191 union ich8_hws_flash_ctrl {
192 struct ich8_hsflctl {
193 u16 flcgo :1; /* 0 Flash Cycle Go */
194 u16 flcycle :2; /* 2:1 Flash Cycle */
195 u16 reserved :5; /* 7:3 Reserved */
196 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
197 u16 flockdn :6; /* 15:10 Reserved */
198 } hsf_ctrl;
199 u16 regval;
200 };
201
202 /* ICH Flash Region Access Permissions */
203 union ich8_hws_flash_regacc {
204 struct ich8_flracc {
205 u32 grra :8; /* 0:7 GbE region Read Access */
206 u32 grwa :8; /* 8:15 GbE region Write Access */
207 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
208 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
209 } hsf_flregacc;
210 u16 regval;
211 };
212
213 /* ICH Flash Protected Region */
214 union ich8_flash_protected_range {
215 struct ich8_pr {
216 u32 base:13; /* 0:12 Protected Range Base */
217 u32 reserved1:2; /* 13:14 Reserved */
218 u32 rpe:1; /* 15 Read Protection Enable */
219 u32 limit:13; /* 16:28 Protected Range Limit */
220 u32 reserved2:2; /* 29:30 Reserved */
221 u32 wpe:1; /* 31 Write Protection Enable */
222 } range;
223 u32 regval;
224 };
225
226 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
227 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
228 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
229 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
230 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
231 u32 offset, u8 byte);
232 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
233 u8 *data);
234 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
235 u16 *data);
236 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
237 u8 size, u16 *data);
238 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
239 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
240 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
241 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
242 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
243 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
244 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
245 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
246 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
247 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
248 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
249 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
250 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
251 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
252 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
253 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
254 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
255 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
256 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
257 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
258
259 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
260 {
261 return readw(hw->flash_address + reg);
262 }
263
264 static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
265 {
266 return readl(hw->flash_address + reg);
267 }
268
269 static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
270 {
271 writew(val, hw->flash_address + reg);
272 }
273
274 static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
275 {
276 writel(val, hw->flash_address + reg);
277 }
278
279 #define er16flash(reg) __er16flash(hw, (reg))
280 #define er32flash(reg) __er32flash(hw, (reg))
281 #define ew16flash(reg,val) __ew16flash(hw, (reg), (val))
282 #define ew32flash(reg,val) __ew32flash(hw, (reg), (val))
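/*
 * These wrappers access the GbE flash register space mapped at
 * hw->flash_address, e.g. gfpreg = er32flash(ICH_FLASH_GFPREG) in
 * e1000_init_nvm_params_ich8lan() below.
 */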
283
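/**
 * e1000_toggle_lanphypc_value_ich8lan - toggle the LANPHYPC pin value
 * @hw: pointer to the HW structure
 *
 * Drives the LANPHYPC pin value low under software override for ~10us and
 * then releases the override; used to force the MAC-PHY interconnect out of
 * SMBus mode when the ME is disabled (see e1000_init_phy_params_pchlan()).
 */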
284 static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
285 {
286 u32 ctrl;
287
288 ctrl = er32(CTRL);
289 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
290 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
291 ew32(CTRL, ctrl);
292 e1e_flush();
293 udelay(10);
294 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
295 ew32(CTRL, ctrl);
296 }
297
298 /**
299 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
300 * @hw: pointer to the HW structure
301 *
302 * Initialize family-specific PHY parameters and function pointers.
303 **/
304 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
305 {
306 struct e1000_phy_info *phy = &hw->phy;
307 u32 fwsm;
308 s32 ret_val = 0;
309
310 phy->addr = 1;
311 phy->reset_delay_us = 100;
312
313 phy->ops.set_page = e1000_set_page_igp;
314 phy->ops.read_reg = e1000_read_phy_reg_hv;
315 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
316 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
317 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
318 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
319 phy->ops.write_reg = e1000_write_phy_reg_hv;
320 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
321 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
322 phy->ops.power_up = e1000_power_up_phy_copper;
323 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
324 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
325
326 /*
327 * The MAC-PHY interconnect may still be in SMBus mode
328 * after Sx->S0. If the manageability engine (ME) is
329 * disabled, then toggle the LANPHYPC Value bit to force
330 * the interconnect to PCIe mode.
331 */
332 fwsm = er32(FWSM);
333 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
334 e1000_toggle_lanphypc_value_ich8lan(hw);
335 msleep(50);
336
337 /*
338 * Gate automatic PHY configuration by hardware on
339 * non-managed 82579
340 */
341 if (hw->mac.type == e1000_pch2lan)
342 e1000_gate_hw_phy_config_ich8lan(hw, true);
343 }
344
345 /*
346 * Reset the PHY before any access to it. Doing so ensures that
347 * the PHY is in a known good state before we read/write PHY registers.
348 * The generic reset is sufficient here, because we haven't determined
349 * the PHY type yet.
350 */
351 ret_val = e1000e_phy_hw_reset_generic(hw);
352 if (ret_val)
353 goto out;
354
355 /* Ungate automatic PHY configuration on non-managed 82579 */
356 if ((hw->mac.type == e1000_pch2lan) &&
357 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
358 usleep_range(10000, 20000);
359 e1000_gate_hw_phy_config_ich8lan(hw, false);
360 }
361
362 phy->id = e1000_phy_unknown;
363 switch (hw->mac.type) {
364 default:
365 ret_val = e1000e_get_phy_id(hw);
366 if (ret_val)
367 goto out;
368 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
369 break;
370 /* fall-through */
371 case e1000_pch2lan:
372 /*
373 * In case the PHY needs to be in mdio slow mode,
374 * set slow mode and try to get the PHY id again.
375 */
376 ret_val = e1000_set_mdio_slow_mode_hv(hw);
377 if (ret_val)
378 goto out;
379 ret_val = e1000e_get_phy_id(hw);
380 if (ret_val)
381 goto out;
382 break;
383 }
384 phy->type = e1000e_get_phy_type_from_id(phy->id);
385
386 switch (phy->type) {
387 case e1000_phy_82577:
388 case e1000_phy_82579:
389 phy->ops.check_polarity = e1000_check_polarity_82577;
390 phy->ops.force_speed_duplex =
391 e1000_phy_force_speed_duplex_82577;
392 phy->ops.get_cable_length = e1000_get_cable_length_82577;
393 phy->ops.get_info = e1000_get_phy_info_82577;
394 phy->ops.commit = e1000e_phy_sw_reset;
395 break;
396 case e1000_phy_82578:
397 phy->ops.check_polarity = e1000_check_polarity_m88;
398 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
399 phy->ops.get_cable_length = e1000e_get_cable_length_m88;
400 phy->ops.get_info = e1000e_get_phy_info_m88;
401 break;
402 default:
403 ret_val = -E1000_ERR_PHY;
404 break;
405 }
406
407 out:
408 return ret_val;
409 }
410
411 /**
412 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
413 * @hw: pointer to the HW structure
414 *
415 * Initialize family-specific PHY parameters and function pointers.
416 **/
417 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
418 {
419 struct e1000_phy_info *phy = &hw->phy;
420 s32 ret_val;
421 u16 i = 0;
422
423 phy->addr = 1;
424 phy->reset_delay_us = 100;
425
426 phy->ops.power_up = e1000_power_up_phy_copper;
427 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
428
429 /*
430 * We may need to do this twice - once for IGP and if that fails,
431 * we'll set BM func pointers and try again
432 */
433 ret_val = e1000e_determine_phy_address(hw);
434 if (ret_val) {
435 phy->ops.write_reg = e1000e_write_phy_reg_bm;
436 phy->ops.read_reg = e1000e_read_phy_reg_bm;
437 ret_val = e1000e_determine_phy_address(hw);
438 if (ret_val) {
439 e_dbg("Cannot determine PHY addr. Erroring out\n");
440 return ret_val;
441 }
442 }
443
444 phy->id = 0;
445 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
446 (i++ < 100)) {
447 usleep_range(1000, 2000);
448 ret_val = e1000e_get_phy_id(hw);
449 if (ret_val)
450 return ret_val;
451 }
452
453 /* Verify phy id */
454 switch (phy->id) {
455 case IGP03E1000_E_PHY_ID:
456 phy->type = e1000_phy_igp_3;
457 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
458 phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
459 phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
460 phy->ops.get_info = e1000e_get_phy_info_igp;
461 phy->ops.check_polarity = e1000_check_polarity_igp;
462 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
463 break;
464 case IFE_E_PHY_ID:
465 case IFE_PLUS_E_PHY_ID:
466 case IFE_C_E_PHY_ID:
467 phy->type = e1000_phy_ife;
468 phy->autoneg_mask = E1000_ALL_NOT_GIG;
469 phy->ops.get_info = e1000_get_phy_info_ife;
470 phy->ops.check_polarity = e1000_check_polarity_ife;
471 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
472 break;
473 case BME1000_E_PHY_ID:
474 phy->type = e1000_phy_bm;
475 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
476 phy->ops.read_reg = e1000e_read_phy_reg_bm;
477 phy->ops.write_reg = e1000e_write_phy_reg_bm;
478 phy->ops.commit = e1000e_phy_sw_reset;
479 phy->ops.get_info = e1000e_get_phy_info_m88;
480 phy->ops.check_polarity = e1000_check_polarity_m88;
481 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
482 break;
483 default:
484 return -E1000_ERR_PHY;
485 break;
486 }
487
488 return 0;
489 }
490
491 /**
492 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
493 * @hw: pointer to the HW structure
494 *
495 * Initialize family-specific NVM parameters and function
496 * pointers.
497 **/
498 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
499 {
500 struct e1000_nvm_info *nvm = &hw->nvm;
501 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
502 u32 gfpreg, sector_base_addr, sector_end_addr;
503 u16 i;
504
505 /* Can't read flash registers if the register set isn't mapped. */
506 if (!hw->flash_address) {
507 e_dbg("ERROR: Flash registers not mapped\n");
508 return -E1000_ERR_CONFIG;
509 }
510
511 nvm->type = e1000_nvm_flash_sw;
512
513 gfpreg = er32flash(ICH_FLASH_GFPREG);
514
515 /*
516 * sector_X_addr is a "sector"-aligned address (4096 bytes)
517 * Add 1 to sector_end_addr since this sector is included in
518 * the overall size.
519 */
520 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
521 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
522
523 /* flash_base_addr is byte-aligned */
524 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
525
526 /*
527 * find total size of the NVM, then cut in half since the total
528 * size represents two separate NVM banks.
529 */
530 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
531 << FLASH_SECTOR_ADDR_SHIFT;
532 nvm->flash_bank_size /= 2;
533 /* Adjust to word count */
534 nvm->flash_bank_size /= sizeof(u16);
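/*
 * Example (hypothetical gfpreg = 0x00200001): base sector 1 and end
 * sector 0x20 + 1 give flash_base_addr = 0x1000 bytes and
 * flash_bank_size = ((0x21 - 1) << 12) / 2 / sizeof(u16) = 0x8000 words.
 */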
535
536 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
537
538 /* Clear shadow ram */
539 for (i = 0; i < nvm->word_size; i++) {
540 dev_spec->shadow_ram[i].modified = false;
541 dev_spec->shadow_ram[i].value = 0xFFFF;
542 }
543
544 return 0;
545 }
546
547 /**
548 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
549 * @adapter: pointer to the adapter structure
550 *
551 * Initialize family-specific MAC parameters and function
552 * pointers.
553 **/
554 static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
555 {
556 struct e1000_hw *hw = &adapter->hw;
557 struct e1000_mac_info *mac = &hw->mac;
558
559 /* Set media type function pointer */
560 hw->phy.media_type = e1000_media_type_copper;
561
562 /* Set mta register count */
563 mac->mta_reg_count = 32;
564 /* Set rar entry count */
565 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
566 if (mac->type == e1000_ich8lan)
567 mac->rar_entry_count--;
568 /* FWSM register */
569 mac->has_fwsm = true;
570 /* ARC subsystem not supported */
571 mac->arc_subsystem_valid = false;
572 /* Adaptive IFS supported */
573 mac->adaptive_ifs = true;
574
575 /* LED operations */
576 switch (mac->type) {
577 case e1000_ich8lan:
578 case e1000_ich9lan:
579 case e1000_ich10lan:
580 /* check management mode */
581 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
582 /* ID LED init */
583 mac->ops.id_led_init = e1000e_id_led_init;
584 /* blink LED */
585 mac->ops.blink_led = e1000e_blink_led_generic;
586 /* setup LED */
587 mac->ops.setup_led = e1000e_setup_led_generic;
588 /* cleanup LED */
589 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
590 /* turn on/off LED */
591 mac->ops.led_on = e1000_led_on_ich8lan;
592 mac->ops.led_off = e1000_led_off_ich8lan;
593 break;
594 case e1000_pchlan:
595 case e1000_pch2lan:
596 /* check management mode */
597 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
598 /* ID LED init */
599 mac->ops.id_led_init = e1000_id_led_init_pchlan;
600 /* setup LED */
601 mac->ops.setup_led = e1000_setup_led_pchlan;
602 /* cleanup LED */
603 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
604 /* turn on/off LED */
605 mac->ops.led_on = e1000_led_on_pchlan;
606 mac->ops.led_off = e1000_led_off_pchlan;
607 break;
608 default:
609 break;
610 }
611
612 /* Enable PCS Lock-loss workaround for ICH8 */
613 if (mac->type == e1000_ich8lan)
614 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
615
616 /* Gate automatic PHY configuration by hardware on managed 82579 */
617 if ((mac->type == e1000_pch2lan) &&
618 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
619 e1000_gate_hw_phy_config_ich8lan(hw, true);
620
621 return 0;
622 }
623
624 /**
625 * e1000_set_eee_pchlan - Enable/disable EEE support
626 * @hw: pointer to the HW structure
627 *
628 * Enable/disable EEE based on setting in dev_spec structure. The bits in
629 * the LPI Control register will remain set only if/when link is up.
630 **/
631 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
632 {
633 s32 ret_val = 0;
634 u16 phy_reg;
635
636 if (hw->phy.type != e1000_phy_82579)
637 goto out;
638
639 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
640 if (ret_val)
641 goto out;
642
643 if (hw->dev_spec.ich8lan.eee_disable)
644 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
645 else
646 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
647
648 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
649 out:
650 return ret_val;
651 }
652
653 /**
654 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
655 * @hw: pointer to the HW structure
656 *
657 * Checks to see if the link status of the hardware has changed. If a
658 * change in link status has been detected, then we read the PHY registers
659 * to get the current speed/duplex if link exists.
660 **/
661 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
662 {
663 struct e1000_mac_info *mac = &hw->mac;
664 s32 ret_val;
665 bool link;
666 u16 phy_reg;
667
668 /*
669 * We only want to go out to the PHY registers to see if Auto-Neg
670 * has completed and/or if our link status has changed. The
671 * get_link_status flag is set upon receiving a Link Status
672 * Change or Rx Sequence Error interrupt.
673 */
674 if (!mac->get_link_status) {
675 ret_val = 0;
676 goto out;
677 }
678
679 /*
680 * First we want to see if the MII Status Register reports
681 * link. If so, then we want to get the current speed/duplex
682 * of the PHY.
683 */
684 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
685 if (ret_val)
686 goto out;
687
688 if (hw->mac.type == e1000_pchlan) {
689 ret_val = e1000_k1_gig_workaround_hv(hw, link);
690 if (ret_val)
691 goto out;
692 }
693
694 if (!link)
695 goto out; /* No link detected */
696
697 mac->get_link_status = false;
698
699 switch (hw->mac.type) {
700 case e1000_pch2lan:
701 ret_val = e1000_k1_workaround_lv(hw);
702 if (ret_val)
703 goto out;
704 /* fall-thru */
705 case e1000_pchlan:
706 if (hw->phy.type == e1000_phy_82578) {
707 ret_val = e1000_link_stall_workaround_hv(hw);
708 if (ret_val)
709 goto out;
710 }
711
712 /*
713 * Workaround for PCHx parts in half-duplex:
714 * Set the number of preambles removed from the packet
715 * when it is passed from the PHY to the MAC to prevent
716 * the MAC from misinterpreting the packet type.
717 */
718 e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
719 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
720
721 if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
722 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
723
724 e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
725 break;
726 default:
727 break;
728 }
729
730 /*
731 * Check if there was DownShift; this must be checked
732 * immediately after link-up
733 */
734 e1000e_check_downshift(hw);
735
736 /* Enable/Disable EEE after link up */
737 ret_val = e1000_set_eee_pchlan(hw);
738 if (ret_val)
739 goto out;
740
741 /*
742 * If we are forcing speed/duplex, then we simply return since
743 * we have already determined whether we have link or not.
744 */
745 if (!mac->autoneg) {
746 ret_val = -E1000_ERR_CONFIG;
747 goto out;
748 }
749
750 /*
751 * Auto-Neg is enabled. Auto Speed Detection takes care
752 * of MAC speed/duplex configuration. So we only need to
753 * configure Collision Distance in the MAC.
754 */
755 e1000e_config_collision_dist(hw);
756
757 /*
758 * Configure Flow Control now that Auto-Neg has completed.
759 * First, we need to restore the desired flow control
760 * settings because we may have had to re-autoneg with a
761 * different link partner.
762 */
763 ret_val = e1000e_config_fc_after_link_up(hw);
764 if (ret_val)
765 e_dbg("Error configuring flow control\n");
766
767 out:
768 return ret_val;
769 }
770
771 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
772 {
773 struct e1000_hw *hw = &adapter->hw;
774 s32 rc;
775
776 rc = e1000_init_mac_params_ich8lan(adapter);
777 if (rc)
778 return rc;
779
780 rc = e1000_init_nvm_params_ich8lan(hw);
781 if (rc)
782 return rc;
783
784 switch (hw->mac.type) {
785 case e1000_ich8lan:
786 case e1000_ich9lan:
787 case e1000_ich10lan:
788 rc = e1000_init_phy_params_ich8lan(hw);
789 break;
790 case e1000_pchlan:
791 case e1000_pch2lan:
792 rc = e1000_init_phy_params_pchlan(hw);
793 break;
794 default:
795 break;
796 }
797 if (rc)
798 return rc;
799
800 /*
801 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
802 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
803 */
804 if ((adapter->hw.phy.type == e1000_phy_ife) ||
805 ((adapter->hw.mac.type >= e1000_pch2lan) &&
806 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
807 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
808 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
809
810 hw->mac.ops.blink_led = NULL;
811 }
812
813 if ((adapter->hw.mac.type == e1000_ich8lan) &&
814 (adapter->hw.phy.type == e1000_phy_igp_3))
815 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
816
817 /* Enable workaround for 82579 w/ ME enabled */
818 if ((adapter->hw.mac.type == e1000_pch2lan) &&
819 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
820 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
821
822 /* Disable EEE by default until IEEE802.3az spec is finalized */
823 if (adapter->flags2 & FLAG2_HAS_EEE)
824 adapter->hw.dev_spec.ich8lan.eee_disable = true;
825
826 return 0;
827 }
828
829 static DEFINE_MUTEX(nvm_mutex);
830
831 /**
832 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
833 * @hw: pointer to the HW structure
834 *
835 * Acquires the mutex for performing NVM operations.
836 **/
837 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
838 {
839 mutex_lock(&nvm_mutex);
840
841 return 0;
842 }
843
844 /**
845 * e1000_release_nvm_ich8lan - Release NVM mutex
846 * @hw: pointer to the HW structure
847 *
848 * Releases the mutex used while performing NVM operations.
849 **/
850 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
851 {
852 mutex_unlock(&nvm_mutex);
853 }
854
855 static DEFINE_MUTEX(swflag_mutex);
856
857 /**
858 * e1000_acquire_swflag_ich8lan - Acquire software control flag
859 * @hw: pointer to the HW structure
860 *
861 * Acquires the software control flag for performing PHY and select
862 * MAC CSR accesses.
863 **/
864 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
865 {
866 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
867 s32 ret_val = 0;
868
869 mutex_lock(&swflag_mutex);
870
871 while (timeout) {
872 extcnf_ctrl = er32(EXTCNF_CTRL);
873 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
874 break;
875
876 mdelay(1);
877 timeout--;
878 }
879
880 if (!timeout) {
881 e_dbg("SW/FW/HW has locked the resource for too long.\n");
882 ret_val = -E1000_ERR_CONFIG;
883 goto out;
884 }
885
886 timeout = SW_FLAG_TIMEOUT;
887
888 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
889 ew32(EXTCNF_CTRL, extcnf_ctrl);
890
891 while (timeout) {
892 extcnf_ctrl = er32(EXTCNF_CTRL);
893 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
894 break;
895
896 mdelay(1);
897 timeout--;
898 }
899
900 if (!timeout) {
901 e_dbg("Failed to acquire the semaphore.\n");
902 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
903 ew32(EXTCNF_CTRL, extcnf_ctrl);
904 ret_val = -E1000_ERR_CONFIG;
905 goto out;
906 }
907
908 out:
909 if (ret_val)
910 mutex_unlock(&swflag_mutex);
911
912 return ret_val;
913 }
914
915 /**
916 * e1000_release_swflag_ich8lan - Release software control flag
917 * @hw: pointer to the HW structure
918 *
919 * Releases the software control flag for performing PHY and select
920 * MAC CSR accesses.
921 **/
922 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
923 {
924 u32 extcnf_ctrl;
925
926 extcnf_ctrl = er32(EXTCNF_CTRL);
927
928 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
929 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
930 ew32(EXTCNF_CTRL, extcnf_ctrl);
931 } else {
932 e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
933 }
934
935 mutex_unlock(&swflag_mutex);
936 }
937
938 /**
939 * e1000_check_mng_mode_ich8lan - Checks management mode
940 * @hw: pointer to the HW structure
941 *
942 * This checks if the adapter has any manageability enabled.
943 * This is a function pointer entry point only called by read/write
944 * routines for the PHY and NVM parts.
945 **/
946 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
947 {
948 u32 fwsm;
949
950 fwsm = er32(FWSM);
951 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
952 ((fwsm & E1000_FWSM_MODE_MASK) ==
953 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
954 }
955
956 /**
957 * e1000_check_mng_mode_pchlan - Checks management mode
958 * @hw: pointer to the HW structure
959 *
960 * This checks if the adapter has iAMT enabled.
961 * This is a function pointer entry point only called by read/write
962 * routines for the PHY and NVM parts.
963 **/
964 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
965 {
966 u32 fwsm;
967
968 fwsm = er32(FWSM);
969 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
970 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
971 }
972
973 /**
974 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
975 * @hw: pointer to the HW structure
976 *
977 * Checks if firmware is blocking the reset of the PHY.
978 * This is a function pointer entry point only called by
979 * reset routines.
980 **/
981 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
982 {
983 u32 fwsm;
984
985 fwsm = er32(FWSM);
986
987 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
988 }
989
990 /**
991 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
992 * @hw: pointer to the HW structure
993 *
994 * Assumes semaphore already acquired.
995 *
996 **/
997 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
998 {
999 u16 phy_data;
1000 u32 strap = er32(STRAP);
1001 s32 ret_val = 0;
1002
1003 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1004
1005 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1006 if (ret_val)
1007 goto out;
1008
1009 phy_data &= ~HV_SMB_ADDR_MASK;
1010 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1011 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1012 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1013
1014 out:
1015 return ret_val;
1016 }
1017
1018 /**
1019 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1020 * @hw: pointer to the HW structure
1021 *
1022 * SW should configure the LCD from the NVM extended configuration region
1023 * as a workaround for certain parts.
1024 **/
1025 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1026 {
1027 struct e1000_phy_info *phy = &hw->phy;
1028 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1029 s32 ret_val = 0;
1030 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1031
1032 /*
1033 * Initialize the PHY from the NVM on ICH platforms. This
1034 * is needed due to an issue where the NVM configuration is
1035 * not properly autoloaded after power transitions.
1036 * Therefore, after each PHY reset, we will load the
1037 * configuration data out of the NVM manually.
1038 */
1039 switch (hw->mac.type) {
1040 case e1000_ich8lan:
1041 if (phy->type != e1000_phy_igp_3)
1042 return ret_val;
1043
1044 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
1045 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
1046 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1047 break;
1048 }
1049 /* Fall-thru */
1050 case e1000_pchlan:
1051 case e1000_pch2lan:
1052 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1053 break;
1054 default:
1055 return ret_val;
1056 }
1057
1058 ret_val = hw->phy.ops.acquire(hw);
1059 if (ret_val)
1060 return ret_val;
1061
1062 data = er32(FEXTNVM);
1063 if (!(data & sw_cfg_mask))
1064 goto out;
1065
1066 /*
1067 * Make sure HW does not configure LCD from PHY
1068 * extended configuration before SW configuration
1069 */
1070 data = er32(EXTCNF_CTRL);
1071 if (!(hw->mac.type == e1000_pch2lan)) {
1072 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1073 goto out;
1074 }
1075
1076 cnf_size = er32(EXTCNF_SIZE);
1077 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1078 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1079 if (!cnf_size)
1080 goto out;
1081
1082 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1083 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1084
1085 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1086 (hw->mac.type == e1000_pchlan)) ||
1087 (hw->mac.type == e1000_pch2lan)) {
1088 /*
1089 * HW configures the SMBus address and LEDs when the
1090 * OEM and LCD Write Enable bits are set in the NVM.
1091 * When both NVM bits are cleared, SW will configure
1092 * them instead.
1093 */
1094 ret_val = e1000_write_smbus_addr(hw);
1095 if (ret_val)
1096 goto out;
1097
1098 data = er32(LEDCTL);
1099 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1100 (u16)data);
1101 if (ret_val)
1102 goto out;
1103 }
1104
1105 /* Configure LCD from extended configuration region. */
1106
1107 /* cnf_base_addr is in DWORD */
1108 word_addr = (u16)(cnf_base_addr << 1);
1109
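/*
 * Each extended configuration entry occupies two NVM words: the value
 * to write followed by the target PHY register address.
 */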
1110 for (i = 0; i < cnf_size; i++) {
1111 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
1112 &reg_data);
1113 if (ret_val)
1114 goto out;
1115
1116 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
1117 1, &reg_addr);
1118 if (ret_val)
1119 goto out;
1120
1121 /* Save off the PHY page for future writes. */
1122 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1123 phy_page = reg_data;
1124 continue;
1125 }
1126
1127 reg_addr &= PHY_REG_MASK;
1128 reg_addr |= phy_page;
1129
1130 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1131 reg_data);
1132 if (ret_val)
1133 goto out;
1134 }
1135
1136 out:
1137 hw->phy.ops.release(hw);
1138 return ret_val;
1139 }
1140
1141 /**
1142 * e1000_k1_gig_workaround_hv - K1 Si workaround
1143 * @hw: pointer to the HW structure
1144 * @link: link up bool flag
1145 *
1146 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1147 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
1148 * If link is down, the function will restore the default K1 setting located
1149 * in the NVM.
1150 **/
1151 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1152 {
1153 s32 ret_val = 0;
1154 u16 status_reg = 0;
1155 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1156
1157 if (hw->mac.type != e1000_pchlan)
1158 goto out;
1159
1160 /* Wrap the whole flow with the sw flag */
1161 ret_val = hw->phy.ops.acquire(hw);
1162 if (ret_val)
1163 goto out;
1164
1165 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1166 if (link) {
1167 if (hw->phy.type == e1000_phy_82578) {
1168 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1169 &status_reg);
1170 if (ret_val)
1171 goto release;
1172
1173 status_reg &= BM_CS_STATUS_LINK_UP |
1174 BM_CS_STATUS_RESOLVED |
1175 BM_CS_STATUS_SPEED_MASK;
1176
1177 if (status_reg == (BM_CS_STATUS_LINK_UP |
1178 BM_CS_STATUS_RESOLVED |
1179 BM_CS_STATUS_SPEED_1000))
1180 k1_enable = false;
1181 }
1182
1183 if (hw->phy.type == e1000_phy_82577) {
1184 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1185 &status_reg);
1186 if (ret_val)
1187 goto release;
1188
1189 status_reg &= HV_M_STATUS_LINK_UP |
1190 HV_M_STATUS_AUTONEG_COMPLETE |
1191 HV_M_STATUS_SPEED_MASK;
1192
1193 if (status_reg == (HV_M_STATUS_LINK_UP |
1194 HV_M_STATUS_AUTONEG_COMPLETE |
1195 HV_M_STATUS_SPEED_1000))
1196 k1_enable = false;
1197 }
1198
1199 /* Link stall fix for link up */
1200 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1201 0x0100);
1202 if (ret_val)
1203 goto release;
1204
1205 } else {
1206 /* Link stall fix for link down */
1207 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1208 0x4100);
1209 if (ret_val)
1210 goto release;
1211 }
1212
1213 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1214
1215 release:
1216 hw->phy.ops.release(hw);
1217 out:
1218 return ret_val;
1219 }
1220
1221 /**
1222 * e1000_configure_k1_ich8lan - Configure K1 power state
1223 * @hw: pointer to the HW structure
1224 * @k1_enable: K1 state to configure
1225 *
1226 * Configure the K1 power state based on the provided parameter.
1227 * Assumes semaphore already acquired.
1228 *
1229 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1230 **/
1231 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1232 {
1233 s32 ret_val = 0;
1234 u32 ctrl_reg = 0;
1235 u32 ctrl_ext = 0;
1236 u32 reg = 0;
1237 u16 kmrn_reg = 0;
1238
1239 ret_val = e1000e_read_kmrn_reg_locked(hw,
1240 E1000_KMRNCTRLSTA_K1_CONFIG,
1241 &kmrn_reg);
1242 if (ret_val)
1243 goto out;
1244
1245 if (k1_enable)
1246 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1247 else
1248 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1249
1250 ret_val = e1000e_write_kmrn_reg_locked(hw,
1251 E1000_KMRNCTRLSTA_K1_CONFIG,
1252 kmrn_reg);
1253 if (ret_val)
1254 goto out;
1255
1256 udelay(20);
1257 ctrl_ext = er32(CTRL_EXT);
1258 ctrl_reg = er32(CTRL);
1259
1260 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1261 reg |= E1000_CTRL_FRCSPD;
1262 ew32(CTRL, reg);
1263
1264 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1265 e1e_flush();
1266 udelay(20);
1267 ew32(CTRL, ctrl_reg);
1268 ew32(CTRL_EXT, ctrl_ext);
1269 e1e_flush();
1270 udelay(20);
1271
1272 out:
1273 return ret_val;
1274 }
1275
1276 /**
1277 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1278 * @hw: pointer to the HW structure
1279 * @d0_state: boolean if entering d0 or d3 device state
1280 *
1281 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1282 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1283 * in NVM determine whether HW should configure LPLU and Gbe Disable.
1284 **/
1285 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1286 {
1287 s32 ret_val = 0;
1288 u32 mac_reg;
1289 u16 oem_reg;
1290
1291 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1292 return ret_val;
1293
1294 ret_val = hw->phy.ops.acquire(hw);
1295 if (ret_val)
1296 return ret_val;
1297
1298 if (!(hw->mac.type == e1000_pch2lan)) {
1299 mac_reg = er32(EXTCNF_CTRL);
1300 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1301 goto out;
1302 }
1303
1304 mac_reg = er32(FEXTNVM);
1305 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1306 goto out;
1307
1308 mac_reg = er32(PHY_CTRL);
1309
1310 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1311 if (ret_val)
1312 goto out;
1313
1314 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1315
1316 if (d0_state) {
1317 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1318 oem_reg |= HV_OEM_BITS_GBE_DIS;
1319
1320 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1321 oem_reg |= HV_OEM_BITS_LPLU;
1322 } else {
1323 if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1324 oem_reg |= HV_OEM_BITS_GBE_DIS;
1325
1326 if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1327 oem_reg |= HV_OEM_BITS_LPLU;
1328 }
1329 /* Restart auto-neg to activate the bits */
1330 if (!e1000_check_reset_block(hw))
1331 oem_reg |= HV_OEM_BITS_RESTART_AN;
1332 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1333
1334 out:
1335 hw->phy.ops.release(hw);
1336
1337 return ret_val;
1338 }
1339
1340
1341 /**
1342 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1343 * @hw: pointer to the HW structure
1344 **/
1345 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1346 {
1347 s32 ret_val;
1348 u16 data;
1349
1350 ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
1351 if (ret_val)
1352 return ret_val;
1353
1354 data |= HV_KMRN_MDIO_SLOW;
1355
1356 ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
1357
1358 return ret_val;
1359 }
1360
1361 /**
1362 * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1363 * done after every PHY reset.
 * @hw: pointer to the HW structure
1364 **/
1365 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1366 {
1367 s32 ret_val = 0;
1368 u16 phy_data;
1369
1370 if (hw->mac.type != e1000_pchlan)
1371 return ret_val;
1372
1373 /* Set MDIO slow mode before any other MDIO access */
1374 if (hw->phy.type == e1000_phy_82577) {
1375 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1376 if (ret_val)
1377 goto out;
1378 }
1379
1380 if (((hw->phy.type == e1000_phy_82577) &&
1381 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1382 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1383 /* Disable generation of early preamble */
1384 ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
1385 if (ret_val)
1386 return ret_val;
1387
1388 /* Preamble tuning for SSC */
1389 ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1390 if (ret_val)
1391 return ret_val;
1392 }
1393
1394 if (hw->phy.type == e1000_phy_82578) {
1395 /*
1396 * Return registers to default by doing a soft reset then
1397 * writing 0x3140 to the control register.
1398 */
1399 if (hw->phy.revision < 2) {
1400 e1000e_phy_sw_reset(hw);
1401 ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
1402 }
1403 }
1404
1405 /* Select page 0 */
1406 ret_val = hw->phy.ops.acquire(hw);
1407 if (ret_val)
1408 return ret_val;
1409
1410 hw->phy.addr = 1;
1411 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1412 hw->phy.ops.release(hw);
1413 if (ret_val)
1414 goto out;
1415
1416 /*
1417 * Configure the K1 Si workaround during phy reset assuming there is
1418 * link so that it disables K1 if link is in 1Gbps.
1419 */
1420 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1421 if (ret_val)
1422 goto out;
1423
1424 /* Workaround for link disconnects on a busy hub in half duplex */
1425 ret_val = hw->phy.ops.acquire(hw);
1426 if (ret_val)
1427 goto out;
1428 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1429 if (ret_val)
1430 goto release;
1431 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
1432 phy_data & 0x00FF);
1433 release:
1434 hw->phy.ops.release(hw);
1435 out:
1436 return ret_val;
1437 }
1438
1439 /**
1440 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1441 * @hw: pointer to the HW structure
1442 **/
1443 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1444 {
1445 u32 mac_reg;
1446 u16 i, phy_reg = 0;
1447 s32 ret_val;
1448
1449 ret_val = hw->phy.ops.acquire(hw);
1450 if (ret_val)
1451 return;
1452 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1453 if (ret_val)
1454 goto release;
1455
1456 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1457 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1458 mac_reg = er32(RAL(i));
1459 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1460 (u16)(mac_reg & 0xFFFF));
1461 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1462 (u16)((mac_reg >> 16) & 0xFFFF));
1463
1464 mac_reg = er32(RAH(i));
1465 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1466 (u16)(mac_reg & 0xFFFF));
1467 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1468 (u16)((mac_reg & E1000_RAH_AV)
1469 >> 16));
1470 }
1471
1472 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1473
1474 release:
1475 hw->phy.ops.release(hw);
1476 }
1477
1478 /**
1479 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1480 * with 82579 PHY
1481 * @hw: pointer to the HW structure
1482 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1483 **/
1484 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1485 {
1486 s32 ret_val = 0;
1487 u16 phy_reg, data;
1488 u32 mac_reg;
1489 u16 i;
1490
1491 if (hw->mac.type != e1000_pch2lan)
1492 goto out;
1493
1494 /* disable Rx path while enabling/disabling workaround */
1495 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1496 ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1497 if (ret_val)
1498 goto out;
1499
1500 if (enable) {
1501 /*
1502 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1503 * SHRAL/H) and initial CRC values to the MAC
1504 */
1505 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1506 u8 mac_addr[ETH_ALEN] = {0};
1507 u32 addr_high, addr_low;
1508
1509 addr_high = er32(RAH(i));
1510 if (!(addr_high & E1000_RAH_AV))
1511 continue;
1512 addr_low = er32(RAL(i));
1513 mac_addr[0] = (addr_low & 0xFF);
1514 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1515 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1516 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1517 mac_addr[4] = (addr_high & 0xFF);
1518 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1519
1520 ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
1521 }
1522
1523 /* Write Rx addresses to the PHY */
1524 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1525
1526 /* Enable jumbo frame workaround in the MAC */
1527 mac_reg = er32(FFLT_DBG);
1528 mac_reg &= ~(1 << 14);
1529 mac_reg |= (7 << 15);
1530 ew32(FFLT_DBG, mac_reg);
1531
1532 mac_reg = er32(RCTL);
1533 mac_reg |= E1000_RCTL_SECRC;
1534 ew32(RCTL, mac_reg);
1535
1536 ret_val = e1000e_read_kmrn_reg(hw,
1537 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1538 &data);
1539 if (ret_val)
1540 goto out;
1541 ret_val = e1000e_write_kmrn_reg(hw,
1542 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1543 data | (1 << 0));
1544 if (ret_val)
1545 goto out;
1546 ret_val = e1000e_read_kmrn_reg(hw,
1547 E1000_KMRNCTRLSTA_HD_CTRL,
1548 &data);
1549 if (ret_val)
1550 goto out;
1551 data &= ~(0xF << 8);
1552 data |= (0xB << 8);
1553 ret_val = e1000e_write_kmrn_reg(hw,
1554 E1000_KMRNCTRLSTA_HD_CTRL,
1555 data);
1556 if (ret_val)
1557 goto out;
1558
1559 /* Enable jumbo frame workaround in the PHY */
1560 e1e_rphy(hw, PHY_REG(769, 23), &data);
1561 data &= ~(0x7F << 5);
1562 data |= (0x37 << 5);
1563 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1564 if (ret_val)
1565 goto out;
1566 e1e_rphy(hw, PHY_REG(769, 16), &data);
1567 data &= ~(1 << 13);
1568 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1569 if (ret_val)
1570 goto out;
1571 e1e_rphy(hw, PHY_REG(776, 20), &data);
1572 data &= ~(0x3FF << 2);
1573 data |= (0x1A << 2);
1574 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1575 if (ret_val)
1576 goto out;
1577 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
1578 if (ret_val)
1579 goto out;
1580 e1e_rphy(hw, HV_PM_CTRL, &data);
1581 ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
1582 if (ret_val)
1583 goto out;
1584 } else {
1585 /* Write MAC register values back to h/w defaults */
1586 mac_reg = er32(FFLT_DBG);
1587 mac_reg &= ~(0xF << 14);
1588 ew32(FFLT_DBG, mac_reg);
1589
1590 mac_reg = er32(RCTL);
1591 mac_reg &= ~E1000_RCTL_SECRC;
1592 ew32(RCTL, mac_reg);
1593
1594 ret_val = e1000e_read_kmrn_reg(hw,
1595 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1596 &data);
1597 if (ret_val)
1598 goto out;
1599 ret_val = e1000e_write_kmrn_reg(hw,
1600 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1601 data & ~(1 << 0));
1602 if (ret_val)
1603 goto out;
1604 ret_val = e1000e_read_kmrn_reg(hw,
1605 E1000_KMRNCTRLSTA_HD_CTRL,
1606 &data);
1607 if (ret_val)
1608 goto out;
1609 data &= ~(0xF << 8);
1610 data |= (0xB << 8);
1611 ret_val = e1000e_write_kmrn_reg(hw,
1612 E1000_KMRNCTRLSTA_HD_CTRL,
1613 data);
1614 if (ret_val)
1615 goto out;
1616
1617 /* Write PHY register values back to h/w defaults */
1618 e1e_rphy(hw, PHY_REG(769, 23), &data);
1619 data &= ~(0x7F << 5);
1620 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1621 if (ret_val)
1622 goto out;
1623 e1e_rphy(hw, PHY_REG(769, 16), &data);
1624 data |= (1 << 13);
1625 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1626 if (ret_val)
1627 goto out;
1628 e1e_rphy(hw, PHY_REG(776, 20), &data);
1629 data &= ~(0x3FF << 2);
1630 data |= (0x8 << 2);
1631 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1632 if (ret_val)
1633 goto out;
1634 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
1635 if (ret_val)
1636 goto out;
1637 e1e_rphy(hw, HV_PM_CTRL, &data);
1638 ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
1639 if (ret_val)
1640 goto out;
1641 }
1642
1643 /* re-enable Rx path after enabling/disabling workaround */
1644 ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1645
1646 out:
1647 return ret_val;
1648 }
1649
1650 /**
1651 * e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1652 * done after every PHY reset.
 * @hw: pointer to the HW structure
1653 **/
1654 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1655 {
1656 s32 ret_val = 0;
1657
1658 if (hw->mac.type != e1000_pch2lan)
1659 goto out;
1660
1661 /* Set MDIO slow mode before any other MDIO access */
1662 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1663
1664 out:
1665 return ret_val;
1666 }
1667
1668 /**
1669 * e1000_k1_workaround_lv - K1 Si workaround
1670 * @hw: pointer to the HW structure
1671 *
1672 * Workaround to set the K1 beacon duration for 82579 parts
1673 **/
1674 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1675 {
1676 s32 ret_val = 0;
1677 u16 status_reg = 0;
1678 u32 mac_reg;
1679 u16 phy_reg;
1680
1681 if (hw->mac.type != e1000_pch2lan)
1682 goto out;
1683
1684 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1685 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
1686 if (ret_val)
1687 goto out;
1688
1689 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1690 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1691 mac_reg = er32(FEXTNVM4);
1692 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1693
1694 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
1695 if (ret_val)
1696 goto out;
1697
1698 if (status_reg & HV_M_STATUS_SPEED_1000) {
1699 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1700 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1701 } else {
1702 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1703 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1704 }
1705 ew32(FEXTNVM4, mac_reg);
1706 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
1707 }
1708
1709 out:
1710 return ret_val;
1711 }
1712
1713 /**
1714 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1715 * @hw: pointer to the HW structure
1716 * @gate: boolean set to true to gate, false to ungate
1717 *
1718 * Gate/ungate the automatic PHY configuration via hardware; perform
1719 * the configuration via software instead.
1720 **/
1721 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1722 {
1723 u32 extcnf_ctrl;
1724
1725 if (hw->mac.type != e1000_pch2lan)
1726 return;
1727
1728 extcnf_ctrl = er32(EXTCNF_CTRL);
1729
1730 if (gate)
1731 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1732 else
1733 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1734
1735 ew32(EXTCNF_CTRL, extcnf_ctrl);
1736 return;
1737 }
1738
1739 /**
1740 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1741 * @hw: pointer to the HW structure
1742 *
1743 * Check the appropriate indication that the MAC has finished configuring the
1744 * PHY after a software reset.
1745 **/
1746 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1747 {
1748 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1749
1750 /* Wait for basic configuration to complete before proceeding */
1751 do {
1752 data = er32(STATUS);
1753 data &= E1000_STATUS_LAN_INIT_DONE;
1754 udelay(100);
1755 } while ((!data) && --loop);
1756
1757 /*
1758 * If basic configuration is incomplete before the above loop
1759 * count reaches 0, loading the configuration from NVM will
1760 * leave the PHY in a bad state possibly resulting in no link.
1761 */
1762 if (loop == 0)
1763 e_dbg("LAN_INIT_DONE not set, increase timeout\n");
1764
1765 /* Clear the Init Done bit for the next init event */
1766 data = er32(STATUS);
1767 data &= ~E1000_STATUS_LAN_INIT_DONE;
1768 ew32(STATUS, data);
1769 }
1770
1771 /**
1772 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1773 * @hw: pointer to the HW structure
1774 **/
1775 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1776 {
1777 s32 ret_val = 0;
1778 u16 reg;
1779
1780 if (e1000_check_reset_block(hw))
1781 goto out;
1782
1783 /* Allow time for h/w to get to quiescent state after reset */
1784 usleep_range(10000, 20000);
1785
1786 /* Perform any necessary post-reset workarounds */
1787 switch (hw->mac.type) {
1788 case e1000_pchlan:
1789 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1790 if (ret_val)
1791 goto out;
1792 break;
1793 case e1000_pch2lan:
1794 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1795 if (ret_val)
1796 goto out;
1797 break;
1798 default:
1799 break;
1800 }
1801
1802 /* Clear the host wakeup bit after lcd reset */
1803 if (hw->mac.type >= e1000_pchlan) {
1804 e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
1805 reg &= ~BM_WUC_HOST_WU_BIT;
1806 e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
1807 }
1808
1809 /* Configure the LCD with the extended configuration region in NVM */
1810 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1811 if (ret_val)
1812 goto out;
1813
1814 /* Configure the LCD with the OEM bits in NVM */
1815 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1816
1817 if (hw->mac.type == e1000_pch2lan) {
1818 /* Ungate automatic PHY configuration on non-managed 82579 */
1819 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1820 usleep_range(10000, 20000);
1821 e1000_gate_hw_phy_config_ich8lan(hw, false);
1822 }
1823
1824 /* Set EEE LPI Update Timer to 200usec */
1825 ret_val = hw->phy.ops.acquire(hw);
1826 if (ret_val)
1827 goto out;
1828 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1829 I82579_LPI_UPDATE_TIMER);
1830 if (ret_val)
1831 goto release;
1832 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1833 0x1387);
1834 release:
1835 hw->phy.ops.release(hw);
1836 }
1837
1838 out:
1839 return ret_val;
1840 }
1841
1842 /**
1843 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1844 * @hw: pointer to the HW structure
1845 *
1846 * Resets the PHY
1847 * This is a function pointer entry point called by drivers
1848 * or other shared routines.
1849 **/
1850 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1851 {
1852 s32 ret_val = 0;
1853
1854 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1855 if ((hw->mac.type == e1000_pch2lan) &&
1856 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1857 e1000_gate_hw_phy_config_ich8lan(hw, true);
1858
1859 ret_val = e1000e_phy_hw_reset_generic(hw);
1860 if (ret_val)
1861 goto out;
1862
1863 ret_val = e1000_post_phy_reset_ich8lan(hw);
1864
1865 out:
1866 return ret_val;
1867 }
1868
1869 /**
1870 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1871 * @hw: pointer to the HW structure
1872 * @active: true to enable LPLU, false to disable
1873 *
1874 * Sets the LPLU state according to the active flag. For PCH, if the OEM
1875 * write bit is disabled in the NVM, writing the LPLU bits in the MAC will not set
1876 * the phy speed. This function will manually set the LPLU bit and restart
1877 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1878 * since it configures the same bit.
1879 **/
1880 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1881 {
1882 s32 ret_val = 0;
1883 u16 oem_reg;
1884
1885 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
1886 if (ret_val)
1887 goto out;
1888
1889 if (active)
1890 oem_reg |= HV_OEM_BITS_LPLU;
1891 else
1892 oem_reg &= ~HV_OEM_BITS_LPLU;
1893
1894 oem_reg |= HV_OEM_BITS_RESTART_AN;
1895 ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
1896
1897 out:
1898 return ret_val;
1899 }
1900
1901 /**
1902 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1903 * @hw: pointer to the HW structure
1904 * @active: true to enable LPLU, false to disable
1905 *
1906 * Sets the LPLU D0 state according to the active flag. When
1907 * activating LPLU this function also disables smart speed
1908 * and vice versa. LPLU will not be activated unless the
1909 * device autonegotiation advertisement meets standards of
1910 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1911 * This is a function pointer entry point only called by
1912 * PHY setup routines.
1913 **/
1914 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1915 {
1916 struct e1000_phy_info *phy = &hw->phy;
1917 u32 phy_ctrl;
1918 s32 ret_val = 0;
1919 u16 data;
1920
1921 if (phy->type == e1000_phy_ife)
1922 return ret_val;
1923
1924 phy_ctrl = er32(PHY_CTRL);
1925
1926 if (active) {
1927 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
1928 ew32(PHY_CTRL, phy_ctrl);
1929
1930 if (phy->type != e1000_phy_igp_3)
1931 return 0;
1932
1933 /*
1934 * Call gig speed drop workaround on LPLU before accessing
1935 * any PHY registers
1936 */
1937 if (hw->mac.type == e1000_ich8lan)
1938 e1000e_gig_downshift_workaround_ich8lan(hw);
1939
1940 /* When LPLU is enabled, we should disable SmartSpeed */
1941 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
1942 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1943 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
1944 if (ret_val)
1945 return ret_val;
1946 } else {
1947 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
1948 ew32(PHY_CTRL, phy_ctrl);
1949
1950 if (phy->type != e1000_phy_igp_3)
1951 return 0;
1952
1953 /*
1954 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1955 * during Dx states where the power conservation is most
1956 * important. During driver activity we should enable
1957 * SmartSpeed, so performance is maintained.
1958 */
1959 if (phy->smart_speed == e1000_smart_speed_on) {
1960 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1961 &data);
1962 if (ret_val)
1963 return ret_val;
1964
1965 data |= IGP01E1000_PSCFR_SMART_SPEED;
1966 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1967 data);
1968 if (ret_val)
1969 return ret_val;
1970 } else if (phy->smart_speed == e1000_smart_speed_off) {
1971 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1972 &data);
1973 if (ret_val)
1974 return ret_val;
1975
1976 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1977 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1978 data);
1979 if (ret_val)
1980 return ret_val;
1981 }
1982 }
1983
1984 return 0;
1985 }
1986
1987 /**
1988 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
1989 * @hw: pointer to the HW structure
1990 * @active: true to enable LPLU, false to disable
1991 *
1992 * Sets the LPLU D3 state according to the active flag. When
1993 * activating LPLU this function also disables smart speed
1994 * and vice versa. LPLU will not be activated unless the
1995 * device autonegotiation advertisement meets standards of
1996 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1997 * This is a function pointer entry point only called by
1998 * PHY setup routines.
1999 **/
2000 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2001 {
2002 struct e1000_phy_info *phy = &hw->phy;
2003 u32 phy_ctrl;
2004 s32 ret_val;
2005 u16 data;
2006
2007 phy_ctrl = er32(PHY_CTRL);
2008
2009 if (!active) {
2010 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2011 ew32(PHY_CTRL, phy_ctrl);
2012
2013 if (phy->type != e1000_phy_igp_3)
2014 return 0;
2015
2016 /*
2017 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2018 * during Dx states where the power conservation is most
2019 * important. During driver activity we should enable
2020 * SmartSpeed, so performance is maintained.
2021 */
2022 if (phy->smart_speed == e1000_smart_speed_on) {
2023 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2024 &data);
2025 if (ret_val)
2026 return ret_val;
2027
2028 data |= IGP01E1000_PSCFR_SMART_SPEED;
2029 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2030 data);
2031 if (ret_val)
2032 return ret_val;
2033 } else if (phy->smart_speed == e1000_smart_speed_off) {
2034 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2035 &data);
2036 if (ret_val)
2037 return ret_val;
2038
2039 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2040 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2041 data);
2042 if (ret_val)
2043 return ret_val;
2044 }
2045 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2046 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2047 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2048 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2049 ew32(PHY_CTRL, phy_ctrl);
2050
2051 if (phy->type != e1000_phy_igp_3)
2052 return 0;
2053
2054 /*
2055 * Call gig speed drop workaround on LPLU before accessing
2056 * any PHY registers
2057 */
2058 if (hw->mac.type == e1000_ich8lan)
2059 e1000e_gig_downshift_workaround_ich8lan(hw);
2060
2061 /* When LPLU is enabled, we should disable SmartSpeed */
2062 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2063 if (ret_val)
2064 return ret_val;
2065
2066 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2067 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2068 }
2069
2070 return 0;
2071 }
2072
2073 /**
2074 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2075 * @hw: pointer to the HW structure
2076 * @bank: pointer to the variable that returns the active bank
2077 *
2078 * Reads signature byte from the NVM using the flash access registers.
2079 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2080 **/
2081 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2082 {
2083 u32 eecd;
2084 struct e1000_nvm_info *nvm = &hw->nvm;
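/* bank1_offset: byte distance to the start of bank 1;
 * act_offset: byte address of the high byte of the signature word (0x13)
 */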
2085 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2086 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2087 u8 sig_byte = 0;
2088 s32 ret_val = 0;
2089
2090 switch (hw->mac.type) {
2091 case e1000_ich8lan:
2092 case e1000_ich9lan:
2093 eecd = er32(EECD);
2094 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2095 E1000_EECD_SEC1VAL_VALID_MASK) {
2096 if (eecd & E1000_EECD_SEC1VAL)
2097 *bank = 1;
2098 else
2099 *bank = 0;
2100
2101 return 0;
2102 }
2103 e_dbg("Unable to determine valid NVM bank via EEC - "
2104 "reading flash signature\n");
2105 /* fall-thru */
2106 default:
2107 /* set bank to 0 in case flash read fails */
2108 *bank = 0;
2109
2110 /* Check bank 0 */
2111 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2112 &sig_byte);
2113 if (ret_val)
2114 return ret_val;
2115 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2116 E1000_ICH_NVM_SIG_VALUE) {
2117 *bank = 0;
2118 return 0;
2119 }
2120
2121 /* Check bank 1 */
2122 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2123 bank1_offset,
2124 &sig_byte);
2125 if (ret_val)
2126 return ret_val;
2127 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2128 E1000_ICH_NVM_SIG_VALUE) {
2129 *bank = 1;
2130 return 0;
2131 }
2132
2133 e_dbg("ERROR: No valid NVM bank present\n");
2134 return -E1000_ERR_NVM;
2135 }
2136
2137 return 0;
2138 }
2139
2140 /**
2141 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2142 * @hw: pointer to the HW structure
2143 * @offset: The offset (in bytes) of the word(s) to read.
2144 * @words: Size of data to read in words
2145 * @data: Pointer to the word(s) to read at offset.
2146 *
2147 * Reads a word(s) from the NVM using the flash access registers.
2148 **/
2149 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2150 u16 *data)
2151 {
2152 struct e1000_nvm_info *nvm = &hw->nvm;
2153 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2154 u32 act_offset;
2155 s32 ret_val = 0;
2156 u32 bank = 0;
2157 u16 i, word;
2158
2159 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2160 (words == 0)) {
2161 e_dbg("nvm parameter(s) out of bounds\n");
2162 ret_val = -E1000_ERR_NVM;
2163 goto out;
2164 }
2165
2166 nvm->ops.acquire(hw);
2167
2168 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2169 if (ret_val) {
2170 e_dbg("Could not detect valid bank, assuming bank 0\n");
2171 bank = 0;
2172 }
2173
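/* Start at the first word of the valid bank, then add the caller's word offset */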
2174 act_offset = (bank) ? nvm->flash_bank_size : 0;
2175 act_offset += offset;
2176
2177 ret_val = 0;
2178 for (i = 0; i < words; i++) {
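/* A modified (not yet committed) shadow RAM entry takes precedence over the flash contents */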
2179 if (dev_spec->shadow_ram[offset+i].modified) {
2180 data[i] = dev_spec->shadow_ram[offset+i].value;
2181 } else {
2182 ret_val = e1000_read_flash_word_ich8lan(hw,
2183 act_offset + i,
2184 &word);
2185 if (ret_val)
2186 break;
2187 data[i] = word;
2188 }
2189 }
2190
2191 nvm->ops.release(hw);
2192
2193 out:
2194 if (ret_val)
2195 e_dbg("NVM read error: %d\n", ret_val);
2196
2197 return ret_val;
2198 }
2199
2200 /**
2201 * e1000_flash_cycle_init_ich8lan - Initialize flash
2202 * @hw: pointer to the HW structure
2203 *
2204 * This function does initial flash setup so that a new read/write/erase cycle
2205 * can be started.
2206 **/
2207 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2208 {
2209 union ich8_hws_flash_status hsfsts;
2210 s32 ret_val = -E1000_ERR_NVM;
2211
2212 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2213
2214 /* Check if the flash descriptor is valid */
2215 if (hsfsts.hsf_status.fldesvalid == 0) {
2216 e_dbg("Flash descriptor invalid. "
2217 "SW Sequencing must be used.\n");
2218 return -E1000_ERR_NVM;
2219 }
2220
2221 /* Clear FCERR and DAEL in hw status by writing 1 */
2222 hsfsts.hsf_status.flcerr = 1;
2223 hsfsts.hsf_status.dael = 1;
2224
2225 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2226
2227 /*
2228 * Either we should have a hardware SPI cycle in progress
2229 * bit to check against, in order to start a new cycle or
2230 * FDONE bit should be changed in the hardware so that it
2231 * is 1 after hardware reset, which can then be used as an
2232 * indication whether a cycle is in progress or has been
2233 * completed.
2234 */
2235
2236 if (hsfsts.hsf_status.flcinprog == 0) {
2237 /*
2238 * There is no cycle running at present,
2239 * so we can start a cycle.
2240 * Begin by setting Flash Cycle Done.
2241 */
2242 hsfsts.hsf_status.flcdone = 1;
2243 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2244 ret_val = 0;
2245 } else {
2246 s32 i = 0;
2247
2248 /*
2249 * Otherwise poll for sometime so the current
2250 * cycle has a chance to end before giving up.
2251 */
2252 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2253 hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
2254 if (hsfsts.hsf_status.flcinprog == 0) {
2255 ret_val = 0;
2256 break;
2257 }
2258 udelay(1);
2259 }
2260 if (ret_val == 0) {
2261 /*
2262 * Successful in waiting for the previous cycle to complete,
2263 * now set the Flash Cycle Done.
2264 */
2265 hsfsts.hsf_status.flcdone = 1;
2266 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2267 } else {
2268 e_dbg("Flash controller busy, cannot get access\n");
2269 }
2270 }
2271
2272 return ret_val;
2273 }
2274
2275 /**
2276 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2277 * @hw: pointer to the HW structure
2278 * @timeout: maximum time to wait for completion
2279 *
2280 * This function starts a flash cycle and waits for its completion.
2281 **/
2282 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2283 {
2284 union ich8_hws_flash_ctrl hsflctl;
2285 union ich8_hws_flash_status hsfsts;
2286 s32 ret_val = -E1000_ERR_NVM;
2287 u32 i = 0;
2288
2289 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2290 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2291 hsflctl.hsf_ctrl.flcgo = 1;
2292 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2293
2294 /* wait till FDONE bit is set to 1 */
2295 do {
2296 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2297 if (hsfsts.hsf_status.flcdone == 1)
2298 break;
2299 udelay(1);
2300 } while (i++ < timeout);
2301
2302 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2303 return 0;
2304
2305 return ret_val;
2306 }
2307
2308 /**
2309 * e1000_read_flash_word_ich8lan - Read word from flash
2310 * @hw: pointer to the HW structure
2311 * @offset: offset to data location
2312 * @data: pointer to the location for storing the data
2313 *
2314 * Reads the flash word at offset into data. Offset is converted
2315 * to bytes before read.
2316 **/
2317 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2318 u16 *data)
2319 {
2320 /* Must convert offset into bytes. */
2321 offset <<= 1;
2322
2323 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2324 }
2325
2326 /**
2327 * e1000_read_flash_byte_ich8lan - Read byte from flash
2328 * @hw: pointer to the HW structure
2329 * @offset: The offset of the byte to read.
2330 * @data: Pointer to a byte to store the value read.
2331 *
2332 * Reads a single byte from the NVM using the flash access registers.
2333 **/
2334 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2335 u8 *data)
2336 {
2337 s32 ret_val;
2338 u16 word = 0;
2339
2340 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2341 if (ret_val)
2342 return ret_val;
2343
2344 *data = (u8)word;
2345
2346 return 0;
2347 }
2348
2349 /**
2350 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2351 * @hw: pointer to the HW structure
2352 * @offset: The offset (in bytes) of the byte or word to read.
2353 * @size: Size of data to read, 1=byte 2=word
2354 * @data: Pointer to the word to store the value read.
2355 *
2356 * Reads a byte or word from the NVM using the flash access registers.
2357 **/
2358 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2359 u8 size, u16 *data)
2360 {
2361 union ich8_hws_flash_status hsfsts;
2362 union ich8_hws_flash_ctrl hsflctl;
2363 u32 flash_linear_addr;
2364 u32 flash_data = 0;
2365 s32 ret_val = -E1000_ERR_NVM;
2366 u8 count = 0;
2367
2368 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2369 return -E1000_ERR_NVM;
2370
2371 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2372 hw->nvm.flash_base_addr;
2373
2374 do {
2375 udelay(1);
2376 /* Steps */
2377 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2378 if (ret_val != 0)
2379 break;
2380
2381 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2382 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2383 hsflctl.hsf_ctrl.fldbcount = size - 1;
2384 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2385 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2386
2387 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2388
2389 ret_val = e1000_flash_cycle_ich8lan(hw,
2390 ICH_FLASH_READ_COMMAND_TIMEOUT);
2391
2392 /*
2393 * Check if FCERR is set to 1, if set to 1, clear it
2394 * and try the whole sequence a few more times, else
2395 * read in (shift in) the Flash Data0, the order is
2396 * least significant byte first
2397 */
2398 if (ret_val == 0) {
2399 flash_data = er32flash(ICH_FLASH_FDATA0);
2400 if (size == 1)
2401 *data = (u8)(flash_data & 0x000000FF);
2402 else if (size == 2)
2403 *data = (u16)(flash_data & 0x0000FFFF);
2404 break;
2405 } else {
2406 /*
2407 * If we've gotten here, then things are probably
2408 * completely hosed, but if the error condition is
2409 * detected, it won't hurt to give it another try...
2410 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2411 */
2412 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2413 if (hsfsts.hsf_status.flcerr == 1) {
2414 /* Repeat for some time before giving up. */
2415 continue;
2416 } else if (hsfsts.hsf_status.flcdone == 0) {
2417 e_dbg("Timeout error - flash cycle "
2418 "did not complete.\n");
2419 break;
2420 }
2421 }
2422 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2423
2424 return ret_val;
2425 }
2426
2427 /**
2428 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2429 * @hw: pointer to the HW structure
2430 * @offset: The offset (in bytes) of the word(s) to write.
2431 * @words: Size of data to write in words
2432 * @data: Pointer to the word(s) to write at offset.
2433 *
2434 * Writes a byte or word to the NVM using the flash access registers.
2435 **/
2436 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2437 u16 *data)
2438 {
2439 struct e1000_nvm_info *nvm = &hw->nvm;
2440 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2441 u16 i;
2442
2443 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2444 (words == 0)) {
2445 e_dbg("nvm parameter(s) out of bounds\n");
2446 return -E1000_ERR_NVM;
2447 }
2448
2449 nvm->ops.acquire(hw);
2450
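/* Stage the new values in the shadow RAM; they are committed to the
 * flash by e1000_update_nvm_checksum_ich8lan().
 */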
2451 for (i = 0; i < words; i++) {
2452 dev_spec->shadow_ram[offset+i].modified = true;
2453 dev_spec->shadow_ram[offset+i].value = data[i];
2454 }
2455
2456 nvm->ops.release(hw);
2457
2458 return 0;
2459 }
2460
2461 /**
2462 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2463 * @hw: pointer to the HW structure
2464 *
2465 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2466 * which writes the checksum to the shadow ram. The changes in the shadow
2467 * ram are then committed to the EEPROM by processing each bank at a time
2468 * checking for the modified bit and writing only the pending changes.
2469 * After a successful commit, the shadow ram is cleared and is ready for
2470 * future writes.
2471 **/
2472 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2473 {
2474 struct e1000_nvm_info *nvm = &hw->nvm;
2475 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2476 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2477 s32 ret_val;
2478 u16 data;
2479
2480 ret_val = e1000e_update_nvm_checksum_generic(hw);
2481 if (ret_val)
2482 goto out;
2483
2484 if (nvm->type != e1000_nvm_flash_sw)
2485 goto out;
2486
2487 nvm->ops.acquire(hw);
2488
2489 /*
2490 * We're writing to the opposite bank so if we're on bank 1,
2491 * write to bank 0 etc. We also need to erase the segment that
2492 * is going to be written
2493 */
2494 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2495 if (ret_val) {
2496 e_dbg("Could not detect valid bank, assuming bank 0\n");
2497 bank = 0;
2498 }
2499
2500 if (bank == 0) {
2501 new_bank_offset = nvm->flash_bank_size;
2502 old_bank_offset = 0;
2503 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2504 if (ret_val)
2505 goto release;
2506 } else {
2507 old_bank_offset = nvm->flash_bank_size;
2508 new_bank_offset = 0;
2509 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2510 if (ret_val)
2511 goto release;
2512 }
2513
2514 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2515 /*
2516 * Determine whether to write the value stored
2517 * in the other NVM bank or a modified value stored
2518 * in the shadow RAM
2519 */
2520 if (dev_spec->shadow_ram[i].modified) {
2521 data = dev_spec->shadow_ram[i].value;
2522 } else {
2523 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2524 old_bank_offset,
2525 &data);
2526 if (ret_val)
2527 break;
2528 }
2529
2530 /*
2531 * If the word is 0x13, then make sure the signature bits
2532 * (15:14) are 11b until the commit has completed.
2533 * This will allow us to write 10b which indicates the
2534 * signature is valid. We want to do this after the write
2535 * has completed so that we don't mark the segment valid
2536 * while the write is still in progress
2537 */
2538 if (i == E1000_ICH_NVM_SIG_WORD)
2539 data |= E1000_ICH_NVM_SIG_MASK;
2540
2541 /* Convert offset to bytes. */
2542 act_offset = (i + new_bank_offset) << 1;
2543
2544 udelay(100);
2545 /* Write the bytes to the new bank. */
2546 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2547 act_offset,
2548 (u8)data);
2549 if (ret_val)
2550 break;
2551
2552 udelay(100);
2553 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2554 act_offset + 1,
2555 (u8)(data >> 8));
2556 if (ret_val)
2557 break;
2558 }
2559
2560 /*
2561 * Don't bother writing the segment valid bits if sector
2562 * programming failed.
2563 */
2564 if (ret_val) {
2565 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
2566 e_dbg("Flash commit failed.\n");
2567 goto release;
2568 }
2569
2570 /*
2571 * Finally validate the new segment by setting bits 15:14
2572 * to 10b in word 0x13; this can be done without an
2573 * erase as well since these bits are 11 to start with
2574 * and we need to change bit 14 to 0b
2575 */
2576 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2577 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2578 if (ret_val)
2579 goto release;
2580
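/* Clear bit 14 so the signature bits (15:14) read 10b (valid) */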
2581 data &= 0xBFFF;
2582 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2583 act_offset * 2 + 1,
2584 (u8)(data >> 8));
2585 if (ret_val)
2586 goto release;
2587
2588 /*
2589 * And invalidate the previously valid segment by setting
2590 * its signature word (0x13) high_byte to 0b. This can be
2591 * done without an erase because flash erase sets all bits
2592 * to 1's. We can write 1's to 0's without an erase
2593 */
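/* Byte address of the high byte of the old bank's signature word */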
2594 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2595 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2596 if (ret_val)
2597 goto release;
2598
2599 /* Great! Everything worked, we can now clear the cached entries. */
2600 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2601 dev_spec->shadow_ram[i].modified = false;
2602 dev_spec->shadow_ram[i].value = 0xFFFF;
2603 }
2604
2605 release:
2606 nvm->ops.release(hw);
2607
2608 /*
2609 * Reload the EEPROM, or else modifications will not appear
2610 * until after the next adapter reset.
2611 */
2612 if (!ret_val) {
2613 e1000e_reload_nvm(hw);
2614 usleep_range(10000, 20000);
2615 }
2616
2617 out:
2618 if (ret_val)
2619 e_dbg("NVM update error: %d\n", ret_val);
2620
2621 return ret_val;
2622 }
2623
2624 /**
2625 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2626 * @hw: pointer to the HW structure
2627 *
2628 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2629 * If the bit is 0, then the EEPROM has been modified, but the checksum was not
2630 * calculated, in which case we need to calculate the checksum and set bit 6.
2631 **/
2632 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2633 {
2634 s32 ret_val;
2635 u16 data;
2636
2637 /*
2638 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2639 * needs to be fixed. This bit is an indication that the NVM
2640 * was prepared by OEM software and did not calculate the
2641 * checksum...a likely scenario.
2642 */
2643 ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
2644 if (ret_val)
2645 return ret_val;
2646
2647 if ((data & 0x40) == 0) {
2648 data |= 0x40;
2649 ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
2650 if (ret_val)
2651 return ret_val;
2652 ret_val = e1000e_update_nvm_checksum(hw);
2653 if (ret_val)
2654 return ret_val;
2655 }
2656
2657 return e1000e_validate_nvm_checksum_generic(hw);
2658 }
2659
2660 /**
2661 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
2662 * @hw: pointer to the HW structure
2663 *
2664 * To prevent malicious write/erase of the NVM, set it to be read-only
2665 * so that the hardware ignores all write/erase cycles of the NVM via
2666 * the flash control registers. The shadow-ram copy of the NVM will
2667 * still be updated, however any updates to this copy will not stick
2668 * across driver reloads.
2669 **/
2670 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
2671 {
2672 struct e1000_nvm_info *nvm = &hw->nvm;
2673 union ich8_flash_protected_range pr0;
2674 union ich8_hws_flash_status hsfsts;
2675 u32 gfpreg;
2676
2677 nvm->ops.acquire(hw);
2678
2679 gfpreg = er32flash(ICH_FLASH_GFPREG);
2680
2681 /* Write-protect GbE Sector of NVM */
2682 pr0.regval = er32flash(ICH_FLASH_PR0);
2683 pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
2684 pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
2685 pr0.range.wpe = true;
2686 ew32flash(ICH_FLASH_PR0, pr0.regval);
2687
2688 /*
2689 * Lock down a subset of GbE Flash Control Registers, e.g.
2690 * PR0 to prevent the write-protection from being lifted.
2691 * Once FLOCKDN is set, the registers protected by it cannot
2692 * be written until FLOCKDN is cleared by a hardware reset.
2693 */
2694 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2695 hsfsts.hsf_status.flockdn = true;
2696 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2697
2698 nvm->ops.release(hw);
2699 }
2700
2701 /**
2702 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2703 * @hw: pointer to the HW structure
2704 * @offset: The offset (in bytes) of the byte/word to read.
2705 * @size: Size of data to read, 1=byte 2=word
2706 * @data: The byte(s) to write to the NVM.
2707 *
2708 * Writes one/two bytes to the NVM using the flash access registers.
2709 **/
2710 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2711 u8 size, u16 data)
2712 {
2713 union ich8_hws_flash_status hsfsts;
2714 union ich8_hws_flash_ctrl hsflctl;
2715 u32 flash_linear_addr;
2716 u32 flash_data = 0;
2717 s32 ret_val;
2718 u8 count = 0;
2719
2720 if (size < 1 || size > 2 || data > size * 0xff ||
2721 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2722 return -E1000_ERR_NVM;
2723
2724 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2725 hw->nvm.flash_base_addr;
2726
2727 do {
2728 udelay(1);
2729 /* Steps */
2730 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2731 if (ret_val)
2732 break;
2733
2734 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2735 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2736 hsflctl.hsf_ctrl.fldbcount = size - 1;
2737 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2738 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2739
2740 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2741
2742 if (size == 1)
2743 flash_data = (u32)data & 0x00FF;
2744 else
2745 flash_data = (u32)data;
2746
2747 ew32flash(ICH_FLASH_FDATA0, flash_data);
2748
2749 /*
2750 * check if FCERR is set to 1 , if set to 1, clear it
2751 * and try the whole sequence a few more times else done
2752 */
2753 ret_val = e1000_flash_cycle_ich8lan(hw,
2754 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2755 if (!ret_val)
2756 break;
2757
2758 /*
2759 * If we're here, then things are most likely
2760 * completely hosed, but if the error condition
2761 * is detected, it won't hurt to give it another
2762 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2763 */
2764 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2765 if (hsfsts.hsf_status.flcerr == 1)
2766 /* Repeat for some time before giving up. */
2767 continue;
2768 if (hsfsts.hsf_status.flcdone == 0) {
2769 e_dbg("Timeout error - flash cycle "
2770 "did not complete.");
2771 break;
2772 }
2773 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2774
2775 return ret_val;
2776 }
2777
2778 /**
2779 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2780 * @hw: pointer to the HW structure
2781 * @offset: The index of the byte to read.
2782 * @data: The byte to write to the NVM.
2783 *
2784 * Writes a single byte to the NVM using the flash access registers.
2785 **/
2786 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2787 u8 data)
2788 {
2789 u16 word = (u16)data;
2790
2791 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2792 }
2793
2794 /**
2795 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2796 * @hw: pointer to the HW structure
2797 * @offset: The offset of the byte to write.
2798 * @byte: The byte to write to the NVM.
2799 *
2800 * Writes a single byte to the NVM using the flash access registers.
2801 * Goes through a retry algorithm before giving up.
2802 **/
2803 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2804 u32 offset, u8 byte)
2805 {
2806 s32 ret_val;
2807 u16 program_retries;
2808
2809 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2810 if (!ret_val)
2811 return ret_val;
2812
2813 for (program_retries = 0; program_retries < 100; program_retries++) {
2814 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
2815 udelay(100);
2816 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2817 if (!ret_val)
2818 break;
2819 }
2820 if (program_retries == 100)
2821 return -E1000_ERR_NVM;
2822
2823 return 0;
2824 }
2825
2826 /**
2827 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2828 * @hw: pointer to the HW structure
2829 * @bank: 0 for first bank, 1 for second bank, etc.
2830 *
2831 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2832 * bank N is 4096 * N + flash_reg_addr.
2833 **/
2834 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2835 {
2836 struct e1000_nvm_info *nvm = &hw->nvm;
2837 union ich8_hws_flash_status hsfsts;
2838 union ich8_hws_flash_ctrl hsflctl;
2839 u32 flash_linear_addr;
2840 /* bank size is in 16bit words - adjust to bytes */
2841 u32 flash_bank_size = nvm->flash_bank_size * 2;
2842 s32 ret_val;
2843 s32 count = 0;
2844 s32 j, iteration, sector_size;
2845
2846 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2847
2848 /*
2849 * Determine HW Sector size: Read BERASE bits of hw flash status
2850 * register
2851 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2852 * consecutive sectors. The start index for the nth Hw sector
2853 * can be calculated as = bank * 4096 + n * 256
2854 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2855 * The start index for the nth Hw sector can be calculated
2856 * as = bank * 4096
2857 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2858 * (ich9 only, otherwise error condition)
2859 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2860 */
2861 switch (hsfsts.hsf_status.berasesz) {
2862 case 0:
2863 /* Hw sector size 256 */
2864 sector_size = ICH_FLASH_SEG_SIZE_256;
2865 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2866 break;
2867 case 1:
2868 sector_size = ICH_FLASH_SEG_SIZE_4K;
2869 iteration = 1;
2870 break;
2871 case 2:
2872 sector_size = ICH_FLASH_SEG_SIZE_8K;
2873 iteration = 1;
2874 break;
2875 case 3:
2876 sector_size = ICH_FLASH_SEG_SIZE_64K;
2877 iteration = 1;
2878 break;
2879 default:
2880 return -E1000_ERR_NVM;
2881 }
2882
2883 /* Start with the base address, then add the sector offset. */
2884 flash_linear_addr = hw->nvm.flash_base_addr;
2885 flash_linear_addr += (bank) ? flash_bank_size : 0;
2886
2887 for (j = 0; j < iteration ; j++) {
2888 do {
2889 /* Steps */
2890 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2891 if (ret_val)
2892 return ret_val;
2893
2894 /*
2895 * Write a value 11 (block Erase) in Flash
2896 * Cycle field in hw flash control
2897 */
2898 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2899 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
2900 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2901
2902 /*
2903 * Write the last 24 bits of an index within the
2904 * block into Flash Linear address field in Flash
2905 * Address.
2906 */
2907 flash_linear_addr += (j * sector_size);
2908 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2909
2910 ret_val = e1000_flash_cycle_ich8lan(hw,
2911 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
2912 if (ret_val == 0)
2913 break;
2914
2915 /*
2916 * Check if FCERR is set to 1. If 1,
2917 * clear it and try the whole sequence
2918 * a few more times else Done
2919 */
2920 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2921 if (hsfsts.hsf_status.flcerr == 1)
2922 /* repeat for some time before giving up */
2923 continue;
2924 else if (hsfsts.hsf_status.flcdone == 0)
2925 return ret_val;
2926 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
2927 }
2928
2929 return 0;
2930 }
2931
2932 /**
2933 * e1000_valid_led_default_ich8lan - Set the default LED settings
2934 * @hw: pointer to the HW structure
2935 * @data: Pointer to the LED settings
2936 *
2937 * Reads the LED default settings from the NVM to data. If the NVM LED
2938 * settings are all 0's or F's, set the LED default to a valid LED default
2939 * setting.
2940 **/
2941 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
2942 {
2943 s32 ret_val;
2944
2945 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
2946 if (ret_val) {
2947 e_dbg("NVM Read Error\n");
2948 return ret_val;
2949 }
2950
2951 if (*data == ID_LED_RESERVED_0000 ||
2952 *data == ID_LED_RESERVED_FFFF)
2953 *data = ID_LED_DEFAULT_ICH8LAN;
2954
2955 return 0;
2956 }
2957
2958 /**
2959 * e1000_id_led_init_pchlan - store LED configurations
2960 * @hw: pointer to the HW structure
2961 *
2962 * PCH does not control LEDs via the LEDCTL register, rather it uses
2963 * the PHY LED configuration register.
2964 *
2965 * PCH also does not have an "always on" or "always off" mode which
2966 * complicates the ID feature. Instead of using the "on" mode to indicate
2967 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
2968 * use "link_up" mode. The LEDs will still ID on request if there is no
2969 * link based on logic in e1000_led_[on|off]_pchlan().
2970 **/
2971 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
2972 {
2973 struct e1000_mac_info *mac = &hw->mac;
2974 s32 ret_val;
2975 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
2976 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
2977 u16 data, i, temp, shift;
2978
2979 /* Get default ID LED modes */
2980 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
2981 if (ret_val)
2982 goto out;
2983
2984 mac->ledctl_default = er32(LEDCTL);
2985 mac->ledctl_mode1 = mac->ledctl_default;
2986 mac->ledctl_mode2 = mac->ledctl_default;
2987
2988 for (i = 0; i < 4; i++) {
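/* Each LED's default mode is a 4-bit field in the NVM word; the PHY
 * LED configuration register uses 5 bits per LED, hence the different shifts.
 */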
2989 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
2990 shift = (i * 5);
2991 switch (temp) {
2992 case ID_LED_ON1_DEF2:
2993 case ID_LED_ON1_ON2:
2994 case ID_LED_ON1_OFF2:
2995 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2996 mac->ledctl_mode1 |= (ledctl_on << shift);
2997 break;
2998 case ID_LED_OFF1_DEF2:
2999 case ID_LED_OFF1_ON2:
3000 case ID_LED_OFF1_OFF2:
3001 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3002 mac->ledctl_mode1 |= (ledctl_off << shift);
3003 break;
3004 default:
3005 /* Do nothing */
3006 break;
3007 }
3008 switch (temp) {
3009 case ID_LED_DEF1_ON2:
3010 case ID_LED_ON1_ON2:
3011 case ID_LED_OFF1_ON2:
3012 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3013 mac->ledctl_mode2 |= (ledctl_on << shift);
3014 break;
3015 case ID_LED_DEF1_OFF2:
3016 case ID_LED_ON1_OFF2:
3017 case ID_LED_OFF1_OFF2:
3018 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3019 mac->ledctl_mode2 |= (ledctl_off << shift);
3020 break;
3021 default:
3022 /* Do nothing */
3023 break;
3024 }
3025 }
3026
3027 out:
3028 return ret_val;
3029 }
3030
3031 /**
3032 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3033 * @hw: pointer to the HW structure
3034 *
3035 * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3036 * register, so the bus width is hard coded.
3037 **/
3038 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3039 {
3040 struct e1000_bus_info *bus = &hw->bus;
3041 s32 ret_val;
3042
3043 ret_val = e1000e_get_bus_info_pcie(hw);
3044
3045 /*
3046 * ICH devices are "PCI Express"-ish. They have
3047 * a configuration space, but do not contain
3048 * PCI Express Capability registers, so bus width
3049 * must be hardcoded.
3050 */
3051 if (bus->width == e1000_bus_width_unknown)
3052 bus->width = e1000_bus_width_pcie_x1;
3053
3054 return ret_val;
3055 }
3056
3057 /**
3058 * e1000_reset_hw_ich8lan - Reset the hardware
3059 * @hw: pointer to the HW structure
3060 *
3061 * Does a full reset of the hardware which includes a reset of the PHY and
3062 * MAC.
3063 **/
3064 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3065 {
3066 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3067 u16 reg;
3068 u32 ctrl, kab;
3069 s32 ret_val;
3070
3071 /*
3072 * Prevent the PCI-E bus from sticking if there is no TLP connection
3073 * on the last TLP read/write transaction when MAC is reset.
3074 */
3075 ret_val = e1000e_disable_pcie_master(hw);
3076 if (ret_val)
3077 e_dbg("PCI-E Master disable polling has failed.\n");
3078
3079 e_dbg("Masking off all interrupts\n");
3080 ew32(IMC, 0xffffffff);
3081
3082 /*
3083 * Disable the Transmit and Receive units. Then delay to allow
3084 * any pending transactions to complete before we hit the MAC
3085 * with the global reset.
3086 */
3087 ew32(RCTL, 0);
3088 ew32(TCTL, E1000_TCTL_PSP);
3089 e1e_flush();
3090
3091 usleep_range(10000, 20000);
3092
3093 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3094 if (hw->mac.type == e1000_ich8lan) {
3095 /* Set Tx and Rx buffer allocation to 8k apiece. */
3096 ew32(PBA, E1000_PBA_8K);
3097 /* Set Packet Buffer Size to 16k. */
3098 ew32(PBS, E1000_PBS_16K);
3099 }
3100
3101 if (hw->mac.type == e1000_pchlan) {
3102 /* Save the NVM K1 bit setting*/
3103 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3104 if (ret_val)
3105 return ret_val;
3106
3107 if (reg & E1000_NVM_K1_ENABLE)
3108 dev_spec->nvm_k1_enabled = true;
3109 else
3110 dev_spec->nvm_k1_enabled = false;
3111 }
3112
3113 ctrl = er32(CTRL);
3114
3115 if (!e1000_check_reset_block(hw)) {
3116 /*
3117 * Full-chip reset requires MAC and PHY reset at the same
3118 * time to make sure the interface between MAC and the
3119 * external PHY is reset.
3120 */
3121 ctrl |= E1000_CTRL_PHY_RST;
3122
3123 /*
3124 * Gate automatic PHY configuration by hardware on
3125 * non-managed 82579
3126 */
3127 if ((hw->mac.type == e1000_pch2lan) &&
3128 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3129 e1000_gate_hw_phy_config_ich8lan(hw, true);
3130 }
3131 ret_val = e1000_acquire_swflag_ich8lan(hw);
3132 e_dbg("Issuing a global reset to ich8lan\n");
3133 ew32(CTRL, (ctrl | E1000_CTRL_RST));
3134 /* cannot issue a flush here because it hangs the hardware */
3135 msleep(20);
3136
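/* Release the software flag only if it was successfully acquired above */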
3137 if (!ret_val)
3138 mutex_unlock(&swflag_mutex);
3139
3140 if (ctrl & E1000_CTRL_PHY_RST) {
3141 ret_val = hw->phy.ops.get_cfg_done(hw);
3142 if (ret_val)
3143 goto out;
3144
3145 ret_val = e1000_post_phy_reset_ich8lan(hw);
3146 if (ret_val)
3147 goto out;
3148 }
3149
3150 /*
3151 * For PCH, this write will make sure that any noise
3152 * will be detected as a CRC error and be dropped rather than show up
3153 * as a bad packet to the DMA engine.
3154 */
3155 if (hw->mac.type == e1000_pchlan)
3156 ew32(CRC_OFFSET, 0x65656565);
3157
3158 ew32(IMC, 0xffffffff);
3159 er32(ICR);
3160
3161 kab = er32(KABGTXD);
3162 kab |= E1000_KABGTXD_BGSQLBIAS;
3163 ew32(KABGTXD, kab);
3164
3165 out:
3166 return ret_val;
3167 }
3168
3169 /**
3170 * e1000_init_hw_ich8lan - Initialize the hardware
3171 * @hw: pointer to the HW structure
3172 *
3173 * Prepares the hardware for transmit and receive by doing the following:
3174 * - initialize hardware bits
3175 * - initialize LED identification
3176 * - setup receive address registers
3177 * - setup flow control
3178 * - setup transmit descriptors
3179 * - clear statistics
3180 **/
3181 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3182 {
3183 struct e1000_mac_info *mac = &hw->mac;
3184 u32 ctrl_ext, txdctl, snoop;
3185 s32 ret_val;
3186 u16 i;
3187
3188 e1000_initialize_hw_bits_ich8lan(hw);
3189
3190 /* Initialize identification LED */
3191 ret_val = mac->ops.id_led_init(hw);
3192 if (ret_val)
3193 e_dbg("Error initializing identification LED\n");
3194 /* This is not fatal and we should not stop init due to this */
3195
3196 /* Setup the receive address. */
3197 e1000e_init_rx_addrs(hw, mac->rar_entry_count);
3198
3199 /* Zero out the Multicast HASH table */
3200 e_dbg("Zeroing the MTA\n");
3201 for (i = 0; i < mac->mta_reg_count; i++)
3202 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3203
3204 /*
3205 * The 82578 Rx buffer will stall if wakeup is enabled in the host and
3206 * the ME. Disable wakeup by clearing the host wakeup bit.
3207 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3208 */
3209 if (hw->phy.type == e1000_phy_82578) {
3210 e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
3211 i &= ~BM_WUC_HOST_WU_BIT;
3212 e1e_wphy(hw, BM_PORT_GEN_CFG, i);
3213 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3214 if (ret_val)
3215 return ret_val;
3216 }
3217
3218 /* Setup link and flow control */
3219 ret_val = e1000_setup_link_ich8lan(hw);
3220
3221 /* Set the transmit descriptor write-back policy for both queues */
3222 txdctl = er32(TXDCTL(0));
3223 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3224 E1000_TXDCTL_FULL_TX_DESC_WB;
3225 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3226 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3227 ew32(TXDCTL(0), txdctl);
3228 txdctl = er32(TXDCTL(1));
3229 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3230 E1000_TXDCTL_FULL_TX_DESC_WB;
3231 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3232 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3233 ew32(TXDCTL(1), txdctl);
3234
3235 /*
3236 * ICH8 has opposite polarity of no_snoop bits.
3237 * By default, we should use snoop behavior.
3238 */
3239 if (mac->type == e1000_ich8lan)
3240 snoop = PCIE_ICH8_SNOOP_ALL;
3241 else
3242 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3243 e1000e_set_pcie_no_snoop(hw, snoop);
3244
3245 ctrl_ext = er32(CTRL_EXT);
3246 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3247 ew32(CTRL_EXT, ctrl_ext);
3248
3249 /*
3250 * Clear all of the statistics registers (clear on read). It is
3251 * important that we do this after we have tried to establish link
3252 * because the symbol error count will increment wildly if there
3253 * is no link.
3254 */
3255 e1000_clear_hw_cntrs_ich8lan(hw);
3256
3257 return 0;
3258 }
3259 /**
3260 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3261 * @hw: pointer to the HW structure
3262 *
3263 * Sets/Clears required hardware bits necessary for correctly setting up the
3264 * hardware for transmit and receive.
3265 **/
3266 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3267 {
3268 u32 reg;
3269
3270 /* Extended Device Control */
3271 reg = er32(CTRL_EXT);
3272 reg |= (1 << 22);
3273 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3274 if (hw->mac.type >= e1000_pchlan)
3275 reg |= E1000_CTRL_EXT_PHYPDEN;
3276 ew32(CTRL_EXT, reg);
3277
3278 /* Transmit Descriptor Control 0 */
3279 reg = er32(TXDCTL(0));
3280 reg |= (1 << 22);
3281 ew32(TXDCTL(0), reg);
3282
3283 /* Transmit Descriptor Control 1 */
3284 reg = er32(TXDCTL(1));
3285 reg |= (1 << 22);
3286 ew32(TXDCTL(1), reg);
3287
3288 /* Transmit Arbitration Control 0 */
3289 reg = er32(TARC(0));
3290 if (hw->mac.type == e1000_ich8lan)
3291 reg |= (1 << 28) | (1 << 29);
3292 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3293 ew32(TARC(0), reg);
3294
3295 /* Transmit Arbitration Control 1 */
3296 reg = er32(TARC(1));
3297 if (er32(TCTL) & E1000_TCTL_MULR)
3298 reg &= ~(1 << 28);
3299 else
3300 reg |= (1 << 28);
3301 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3302 ew32(TARC(1), reg);
3303
3304 /* Device Status */
3305 if (hw->mac.type == e1000_ich8lan) {
3306 reg = er32(STATUS);
3307 reg &= ~(1 << 31);
3308 ew32(STATUS, reg);
3309 }
3310
3311 /*
3312 * Work around a descriptor data corruption issue during NFSv2 UDP
3313 * traffic by disabling the NFS filtering capability
3314 */
3315 reg = er32(RFCTL);
3316 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3317 ew32(RFCTL, reg);
3318 }
3319
3320 /**
3321 * e1000_setup_link_ich8lan - Setup flow control and link settings
3322 * @hw: pointer to the HW structure
3323 *
3324 * Determines which flow control settings to use, then configures flow
3325 * control. Calls the appropriate media-specific link configuration
3326 * function. Assuming the adapter has a valid link partner, a valid link
3327 * should be established. Assumes the hardware has previously been reset
3328 * and the transmitter and receiver are not enabled.
3329 **/
3330 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3331 {
3332 s32 ret_val;
3333
3334 if (e1000_check_reset_block(hw))
3335 return 0;
3336
3337 /*
3338 * ICH parts do not have a word in the NVM to determine
3339 * the default flow control setting, so we explicitly
3340 * set it to full.
3341 */
3342 if (hw->fc.requested_mode == e1000_fc_default) {
3343 /* Workaround h/w hang when Tx flow control enabled */
3344 if (hw->mac.type == e1000_pchlan)
3345 hw->fc.requested_mode = e1000_fc_rx_pause;
3346 else
3347 hw->fc.requested_mode = e1000_fc_full;
3348 }
3349
3350 /*
3351 * Save off the requested flow control mode for use later. Depending
3352 * on the link partner's capabilities, we may or may not use this mode.
3353 */
3354 hw->fc.current_mode = hw->fc.requested_mode;
3355
3356 e_dbg("After fix-ups FlowControl is now = %x\n",
3357 hw->fc.current_mode);
3358
3359 /* Continue to configure the copper link. */
3360 ret_val = e1000_setup_copper_link_ich8lan(hw);
3361 if (ret_val)
3362 return ret_val;
3363
3364 ew32(FCTTV, hw->fc.pause_time);
3365 if ((hw->phy.type == e1000_phy_82578) ||
3366 (hw->phy.type == e1000_phy_82579) ||
3367 (hw->phy.type == e1000_phy_82577)) {
3368 ew32(FCRTV_PCH, hw->fc.refresh_time);
3369
3370 ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
3371 hw->fc.pause_time);
3372 if (ret_val)
3373 return ret_val;
3374 }
3375
3376 return e1000e_set_fc_watermarks(hw);
3377 }
3378
3379 /**
3380 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3381 * @hw: pointer to the HW structure
3382 *
3383 * Configures the kumeran interface to the PHY to wait the appropriate time
3384 * when polling the PHY, then call the generic setup_copper_link to finish
3385 * configuring the copper link.
3386 **/
3387 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3388 {
3389 u32 ctrl;
3390 s32 ret_val;
3391 u16 reg_data;
3392
3393 ctrl = er32(CTRL);
3394 ctrl |= E1000_CTRL_SLU;
3395 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3396 ew32(CTRL, ctrl);
3397
3398 /*
3399 * Set the mac to wait the maximum time between each iteration
3400 * and increase the max iterations when polling the phy;
3401 * this fixes erroneous timeouts at 10Mbps.
3402 */
3403 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
3404 if (ret_val)
3405 return ret_val;
3406 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3407 &reg_data);
3408 if (ret_val)
3409 return ret_val;
3410 reg_data |= 0x3F;
3411 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3412 reg_data);
3413 if (ret_val)
3414 return ret_val;
3415
3416 switch (hw->phy.type) {
3417 case e1000_phy_igp_3:
3418 ret_val = e1000e_copper_link_setup_igp(hw);
3419 if (ret_val)
3420 return ret_val;
3421 break;
3422 case e1000_phy_bm:
3423 case e1000_phy_82578:
3424 ret_val = e1000e_copper_link_setup_m88(hw);
3425 if (ret_val)
3426 return ret_val;
3427 break;
3428 case e1000_phy_82577:
3429 case e1000_phy_82579:
3430 ret_val = e1000_copper_link_setup_82577(hw);
3431 if (ret_val)
3432 return ret_val;
3433 break;
3434 case e1000_phy_ife:
3435 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
3436 if (ret_val)
3437 return ret_val;
3438
3439 reg_data &= ~IFE_PMC_AUTO_MDIX;
3440
3441 switch (hw->phy.mdix) {
3442 case 1:
3443 reg_data &= ~IFE_PMC_FORCE_MDIX;
3444 break;
3445 case 2:
3446 reg_data |= IFE_PMC_FORCE_MDIX;
3447 break;
3448 case 0:
3449 default:
3450 reg_data |= IFE_PMC_AUTO_MDIX;
3451 break;
3452 }
3453 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
3454 if (ret_val)
3455 return ret_val;
3456 break;
3457 default:
3458 break;
3459 }
3460 return e1000e_setup_copper_link(hw);
3461 }
3462
3463 /**
3464 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3465 * @hw: pointer to the HW structure
3466 * @speed: pointer to store current link speed
3467 * @duplex: pointer to store the current link duplex
3468 *
3469 * Calls the generic get_speed_and_duplex to retrieve the current link
3470 * information and then calls the Kumeran lock loss workaround for links at
3471 * gigabit speeds.
3472 **/
3473 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3474 u16 *duplex)
3475 {
3476 s32 ret_val;
3477
3478 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
3479 if (ret_val)
3480 return ret_val;
3481
3482 if ((hw->mac.type == e1000_ich8lan) &&
3483 (hw->phy.type == e1000_phy_igp_3) &&
3484 (*speed == SPEED_1000)) {
3485 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3486 }
3487
3488 return ret_val;
3489 }
3490
3491 /**
3492 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3493 * @hw: pointer to the HW structure
3494 *
3495 * Work-around for 82566 Kumeran PCS lock loss:
3496 * On link status change (i.e. PCI reset, speed change) and link is up and
3497 * speed is gigabit-
3498 * 0) if workaround is optionally disabled do nothing
3499 * 1) wait 1ms for Kumeran link to come up
3500 * 2) check Kumeran Diagnostic register PCS lock loss bit
3501 * 3) if not set the link is locked (all is good), otherwise...
3502 * 4) reset the PHY
3503 * 5) repeat up to 10 times
3504 * Note: this is only called for IGP3 copper when speed is 1gb.
3505 **/
3506 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3507 {
3508 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3509 u32 phy_ctrl;
3510 s32 ret_val;
3511 u16 i, data;
3512 bool link;
3513
3514 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3515 return 0;
3516
3517 /*
3518 * Make sure link is up before proceeding. If not just return.
3519 * Attempting this while the link is negotiating fouls up link
3520 * stability.
3521 */
3522 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
3523 if (!link)
3524 return 0;
3525
3526 for (i = 0; i < 10; i++) {
3527 /* read once to clear */
3528 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3529 if (ret_val)
3530 return ret_val;
3531 /* and again to get new status */
3532 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3533 if (ret_val)
3534 return ret_val;
3535
3536 /* check for PCS lock */
3537 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3538 return 0;
3539
3540 /* Issue PHY reset */
3541 e1000_phy_hw_reset(hw);
3542 mdelay(5);
3543 }
3544 /* Disable GigE link negotiation */
3545 phy_ctrl = er32(PHY_CTRL);
3546 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3547 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3548 ew32(PHY_CTRL, phy_ctrl);
3549
3550 /*
3551 * Call gig speed drop workaround on Gig disable before accessing
3552 * any PHY registers
3553 */
3554 e1000e_gig_downshift_workaround_ich8lan(hw);
3555
3556 /* unable to acquire PCS lock */
3557 return -E1000_ERR_PHY;
3558 }
3559
3560 /**
3561 * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3562 * @hw: pointer to the HW structure
3563 * @state: boolean value used to set the current Kumeran workaround state
3564 *
3565 * If ICH8, set the current Kumeran workaround state (enabled - true
3566 * /disabled - false).
3567 **/
3568 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3569 bool state)
3570 {
3571 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3572
3573 if (hw->mac.type != e1000_ich8lan) {
3574 e_dbg("Workaround applies to ICH8 only.\n");
3575 return;
3576 }
3577
3578 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3579 }
3580
3581 /**
3582 * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3583 * @hw: pointer to the HW structure
3584 *
3585 * Workaround for 82566 power-down on D3 entry:
3586 * 1) disable gigabit link
3587 * 2) write VR power-down enable
3588 * 3) read it back
3589 * Continue if successful, else issue LCD reset and repeat
3590 **/
3591 void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3592 {
3593 u32 reg;
3594 u16 data;
3595 u8 retry = 0;
3596
3597 if (hw->phy.type != e1000_phy_igp_3)
3598 return;
3599
3600 /* Try the workaround twice (if needed) */
3601 do {
3602 /* Disable link */
3603 reg = er32(PHY_CTRL);
3604 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3605 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3606 ew32(PHY_CTRL, reg);
3607
3608 /*
3609 * Call gig speed drop workaround on Gig disable before
3610 * accessing any PHY registers
3611 */
3612 if (hw->mac.type == e1000_ich8lan)
3613 e1000e_gig_downshift_workaround_ich8lan(hw);
3614
3615 /* Write VR power-down enable */
3616 e1e_rphy(hw, IGP3_VR_CTRL, &data);
3617 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3618 e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3619
3620 /* Read it back and test */
3621 e1e_rphy(hw, IGP3_VR_CTRL, &data);
3622 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3623 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3624 break;
3625
3626 /* Issue PHY reset and repeat at most one more time */
3627 reg = er32(CTRL);
3628 ew32(CTRL, reg | E1000_CTRL_PHY_RST);
3629 retry++;
3630 } while (retry);
3631 }
3632
3633 /**
3634 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3635 * @hw: pointer to the HW structure
3636 *
3637 * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
3638 * LPLU, Gig disable, MDIC PHY reset):
3639 * 1) Set Kumeran Near-end loopback
3640 * 2) Clear Kumeran Near-end loopback
3641 * Should only be called for ICH8[m] devices with IGP_3 Phy.
3642 **/
3643 void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3644 {
3645 s32 ret_val;
3646 u16 reg_data;
3647
3648 if ((hw->mac.type != e1000_ich8lan) ||
3649 (hw->phy.type != e1000_phy_igp_3))
3650 return;
3651
3652 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3653 &reg_data);
3654 if (ret_val)
3655 return;
3656 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3657 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3658 reg_data);
3659 if (ret_val)
3660 return;
3661 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3662 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3663 reg_data);
3664 }
3665
3666 /**
3667 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3668 * @hw: pointer to the HW structure
3669 *
3670 * During S0 to Sx transition, it is possible the link remains at gig
3671 * instead of negotiating to a lower speed. Before going to Sx, set
3672 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3673 * to a lower speed. For PCH and newer parts, the OEM bits PHY register
3674 * (LED, GbE disable and LPLU configurations) also needs to be written.
3675 **/
3676 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3677 {
3678 u32 phy_ctrl;
3679 s32 ret_val;
3680
3681 phy_ctrl = er32(PHY_CTRL);
3682 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3683 ew32(PHY_CTRL, phy_ctrl);
3684
3685 if (hw->mac.type >= e1000_pchlan) {
3686 e1000_oem_bits_config_ich8lan(hw, false);
3687 ret_val = hw->phy.ops.acquire(hw);
3688 if (ret_val)
3689 return;
3690 e1000_write_smbus_addr(hw);
3691 hw->phy.ops.release(hw);
3692 }
3693 }
3694
3695 /**
3696 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
3697 * @hw: pointer to the HW structure
3698 *
3699 * During Sx to S0 transitions on non-managed devices or managed devices
3700 * on which PHY resets are not blocked, if the PHY registers cannot be
3701 * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
3702 * the PHY.
3703 **/
3704 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3705 {
3706 u32 fwsm;
3707
3708 if (hw->mac.type != e1000_pch2lan)
3709 return;
3710
3711 fwsm = er32(FWSM);
3712 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) {
3713 u16 phy_id1, phy_id2;
3714 s32 ret_val;
3715
3716 ret_val = hw->phy.ops.acquire(hw);
3717 if (ret_val) {
3718 e_dbg("Failed to acquire PHY semaphore in resume\n");
3719 return;
3720 }
3721
3722 /* Test access to the PHY registers by reading the ID regs */
3723 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
3724 if (ret_val)
3725 goto release;
3726 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
3727 if (ret_val)
3728 goto release;
3729
3730 if (hw->phy.id == ((u32)(phy_id1 << 16) |
3731 (u32)(phy_id2 & PHY_REVISION_MASK)))
3732 goto release;
3733
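/* The PHY did not return the expected ID - power cycle it by toggling LANPHYPC */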
3734 e1000_toggle_lanphypc_value_ich8lan(hw);
3735
3736 hw->phy.ops.release(hw);
3737 msleep(50);
3738 e1000_phy_hw_reset(hw);
3739 msleep(50);
3740 return;
3741 }
3742
3743 release:
3744 hw->phy.ops.release(hw);
3745
3746 return;
3747 }
3748
3749 /**
3750 * e1000_cleanup_led_ich8lan - Restore the default LED operation
3751 * @hw: pointer to the HW structure
3752 *
3753 * Return the LED back to the default configuration.
3754 **/
3755 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3756 {
3757 if (hw->phy.type == e1000_phy_ife)
3758 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
3759
3760 ew32(LEDCTL, hw->mac.ledctl_default);
3761 return 0;
3762 }
3763
3764 /**
3765 * e1000_led_on_ich8lan - Turn LEDs on
3766 * @hw: pointer to the HW structure
3767 *
3768 * Turn on the LEDs.
3769 **/
3770 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3771 {
3772 if (hw->phy.type == e1000_phy_ife)
3773 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3774 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3775
3776 ew32(LEDCTL, hw->mac.ledctl_mode2);
3777 return 0;
3778 }
3779
3780 /**
3781 * e1000_led_off_ich8lan - Turn LEDs off
3782 * @hw: pointer to the HW structure
3783 *
3784 * Turn off the LEDs.
3785 **/
3786 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3787 {
3788 if (hw->phy.type == e1000_phy_ife)
3789 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3790 (IFE_PSCL_PROBE_MODE |
3791 IFE_PSCL_PROBE_LEDS_OFF));
3792
3793 ew32(LEDCTL, hw->mac.ledctl_mode1);
3794 return 0;
3795 }
3796
3797 /**
3798 * e1000_setup_led_pchlan - Configures SW controllable LED
3799 * @hw: pointer to the HW structure
3800 *
3801 * This prepares the SW controllable LED for use.
3802 **/
3803 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3804 {
3805 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
3806 }
3807
3808 /**
3809 * e1000_cleanup_led_pchlan - Restore the default LED operation
3810 * @hw: pointer to the HW structure
3811 *
3812 * Return the LED back to the default configuration.
3813 **/
3814 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3815 {
3816 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
3817 }
3818
3819 /**
3820 * e1000_led_on_pchlan - Turn LEDs on
3821 * @hw: pointer to the HW structure
3822 *
3823 * Turn on the LEDs.
3824 **/
3825 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3826 {
3827 u16 data = (u16)hw->mac.ledctl_mode2;
3828 u32 i, led;
3829
3830 	/*
3831 	 * If there is no link, turn the LED on by setting the invert bit
3832 	 * for each LED whose mode is "link_up" in ledctl_mode2.
3833 	 */
3834 if (!(er32(STATUS) & E1000_STATUS_LU)) {
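		/*
		 * HV_LED_CONFIG holds one 5-bit field per LED (three LEDs),
		 * each containing a mode plus an invert (IVRT) bit, hence the
		 * shift by i * 5 below.
		 */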
3835 for (i = 0; i < 3; i++) {
3836 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3837 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3838 E1000_LEDCTL_MODE_LINK_UP)
3839 continue;
3840 if (led & E1000_PHY_LED0_IVRT)
3841 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3842 else
3843 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3844 }
3845 }
3846
3847 return e1e_wphy(hw, HV_LED_CONFIG, data);
3848 }
3849
3850 /**
3851 * e1000_led_off_pchlan - Turn LEDs off
3852 * @hw: pointer to the HW structure
3853 *
3854 * Turn off the LEDs.
3855 **/
3856 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3857 {
3858 u16 data = (u16)hw->mac.ledctl_mode1;
3859 u32 i, led;
3860
3861 	/*
3862 	 * If there is no link, turn the LED off by clearing the invert bit
3863 	 * for each LED whose mode is "link_up" in ledctl_mode1.
3864 	 */
3865 if (!(er32(STATUS) & E1000_STATUS_LU)) {
3866 for (i = 0; i < 3; i++) {
3867 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3868 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3869 E1000_LEDCTL_MODE_LINK_UP)
3870 continue;
3871 if (led & E1000_PHY_LED0_IVRT)
3872 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3873 else
3874 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3875 }
3876 }
3877
3878 return e1e_wphy(hw, HV_LED_CONFIG, data);
3879 }
3880
3881 /**
3882 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
3883 * @hw: pointer to the HW structure
3884 *
3885 * Read the appropriate register for the config done bit for completion status
3886 * and configure the PHY through s/w for EEPROM-less parts.
3887 *
3888 * NOTE: some EEPROM-less silicon will fail when trying to read the
3889 * config done bit, so we only log an error and continue. If we were
3890 * to return an error, EEPROM-less silicon could not be reset or
3891 * change link.
3892 **/
3893 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3894 {
3895 s32 ret_val = 0;
3896 u32 bank = 0;
3897 u32 status;
3898
3899 e1000e_get_cfg_done(hw);
3900
3901 /* Wait for indication from h/w that it has completed basic config */
3902 if (hw->mac.type >= e1000_ich10lan) {
3903 e1000_lan_init_done_ich8lan(hw);
3904 } else {
3905 ret_val = e1000e_get_auto_rd_done(hw);
3906 if (ret_val) {
3907 			/*
3908 			 * When the auto config read does not complete, do not
3909 			 * return an error. This can happen when there is no
3910 			 * EEPROM, and returning an error would prevent getting link.
3911 			 */
3912 e_dbg("Auto Read Done did not complete\n");
3913 ret_val = 0;
3914 }
3915 }
3916
3917 /* Clear PHY Reset Asserted bit */
3918 status = er32(STATUS);
3919 if (status & E1000_STATUS_PHYRA)
3920 ew32(STATUS, status & ~E1000_STATUS_PHYRA);
3921 else
3922 e_dbg("PHY Reset Asserted not set - needs delay\n");
3923
3924 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
3925 if (hw->mac.type <= e1000_ich9lan) {
3926 if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
3927 (hw->phy.type == e1000_phy_igp_3)) {
3928 e1000e_phy_init_script_igp3(hw);
3929 }
3930 } else {
3931 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
3932 /* Maybe we should do a basic PHY config */
3933 e_dbg("EEPROM not present\n");
3934 ret_val = -E1000_ERR_CONFIG;
3935 }
3936 }
3937
3938 return ret_val;
3939 }
3940
3941 /**
3942 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
3943 * @hw: pointer to the HW structure
3944 *
3945 * In the case of a PHY power down to save power, to turn off the link during
3946 * a driver unload, or when Wake-on-LAN is not enabled, remove the link.
3947 **/
3948 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
3949 {
3950 	/* If neither management mode nor a PHY reset block is active, power down the PHY */
3951 if (!(hw->mac.ops.check_mng_mode(hw) ||
3952 hw->phy.ops.check_reset_block(hw)))
3953 e1000_power_down_phy_copper(hw);
3954 }
3955
3956 /**
3957 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
3958 * @hw: pointer to the HW structure
3959 *
3960 * Clears hardware counters specific to the silicon family and calls
3961 * e1000e_clear_hw_cntrs_base to clear all general purpose counters.
3962 **/
3963 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3964 {
3965 u16 phy_data;
3966 s32 ret_val;
3967
3968 e1000e_clear_hw_cntrs_base(hw);
3969
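	/* These MAC statistics registers are clear-on-read; reading them here zeroes them. */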
3970 er32(ALGNERRC);
3971 er32(RXERRC);
3972 er32(TNCRS);
3973 er32(CEXTERR);
3974 er32(TSCTC);
3975 er32(TSCTFC);
3976
3977 er32(MGTPRC);
3978 er32(MGTPDC);
3979 er32(MGTPTC);
3980
3981 er32(IAC);
3982 er32(ICRXOC);
3983
3984 /* Clear PHY statistics registers */
3985 if ((hw->phy.type == e1000_phy_82578) ||
3986 (hw->phy.type == e1000_phy_82579) ||
3987 (hw->phy.type == e1000_phy_82577)) {
3988 ret_val = hw->phy.ops.acquire(hw);
3989 if (ret_val)
3990 return;
3991 ret_val = hw->phy.ops.set_page(hw,
3992 HV_STATS_PAGE << IGP_PAGE_SHIFT);
3993 if (ret_val)
3994 goto release;
3995 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
3996 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
3997 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
3998 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
3999 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4000 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4001 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4002 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4003 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4004 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4005 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4006 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4007 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4008 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4009 release:
4010 hw->phy.ops.release(hw);
4011 }
4012 }
4013
4014 static struct e1000_mac_operations ich8_mac_ops = {
4015 .id_led_init = e1000e_id_led_init,
4016 /* check_mng_mode dependent on mac type */
4017 .check_for_link = e1000_check_for_copper_link_ich8lan,
4018 /* cleanup_led dependent on mac type */
4019 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
4020 .get_bus_info = e1000_get_bus_info_ich8lan,
4021 .set_lan_id = e1000_set_lan_id_single_port,
4022 .get_link_up_info = e1000_get_link_up_info_ich8lan,
4023 /* led_on dependent on mac type */
4024 /* led_off dependent on mac type */
4025 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
4026 .reset_hw = e1000_reset_hw_ich8lan,
4027 .init_hw = e1000_init_hw_ich8lan,
4028 .setup_link = e1000_setup_link_ich8lan,
4029 	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
4030 /* id_led_init dependent on mac type */
4031 };
4032
4033 static struct e1000_phy_operations ich8_phy_ops = {
4034 .acquire = e1000_acquire_swflag_ich8lan,
4035 .check_reset_block = e1000_check_reset_block_ich8lan,
4036 .commit = NULL,
4037 .get_cfg_done = e1000_get_cfg_done_ich8lan,
4038 .get_cable_length = e1000e_get_cable_length_igp_2,
4039 .read_reg = e1000e_read_phy_reg_igp,
4040 .release = e1000_release_swflag_ich8lan,
4041 .reset = e1000_phy_hw_reset_ich8lan,
4042 .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan,
4043 .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan,
4044 .write_reg = e1000e_write_phy_reg_igp,
4045 };
4046
4047 static struct e1000_nvm_operations ich8_nvm_ops = {
4048 .acquire = e1000_acquire_nvm_ich8lan,
4049 .read = e1000_read_nvm_ich8lan,
4050 .release = e1000_release_nvm_ich8lan,
4051 .update = e1000_update_nvm_checksum_ich8lan,
4052 .valid_led_default = e1000_valid_led_default_ich8lan,
4053 .validate = e1000_validate_nvm_checksum_ich8lan,
4054 .write = e1000_write_nvm_ich8lan,
4055 };
4056
4057 struct e1000_info e1000_ich8_info = {
4058 .mac = e1000_ich8lan,
4059 .flags = FLAG_HAS_WOL
4060 | FLAG_IS_ICH
4061 | FLAG_HAS_CTRLEXT_ON_LOAD
4062 | FLAG_HAS_AMT
4063 | FLAG_HAS_FLASH
4064 | FLAG_APME_IN_WUC,
4065 .pba = 8,
4066 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
4067 .get_variants = e1000_get_variants_ich8lan,
4068 .mac_ops = &ich8_mac_ops,
4069 .phy_ops = &ich8_phy_ops,
4070 .nvm_ops = &ich8_nvm_ops,
4071 };
4072
4073 struct e1000_info e1000_ich9_info = {
4074 .mac = e1000_ich9lan,
4075 .flags = FLAG_HAS_JUMBO_FRAMES
4076 | FLAG_IS_ICH
4077 | FLAG_HAS_WOL
4078 | FLAG_HAS_CTRLEXT_ON_LOAD
4079 | FLAG_HAS_AMT
4080 | FLAG_HAS_ERT
4081 | FLAG_HAS_FLASH
4082 | FLAG_APME_IN_WUC,
4083 .pba = 10,
4084 .max_hw_frame_size = DEFAULT_JUMBO,
4085 .get_variants = e1000_get_variants_ich8lan,
4086 .mac_ops = &ich8_mac_ops,
4087 .phy_ops = &ich8_phy_ops,
4088 .nvm_ops = &ich8_nvm_ops,
4089 };
4090
4091 struct e1000_info e1000_ich10_info = {
4092 .mac = e1000_ich10lan,
4093 .flags = FLAG_HAS_JUMBO_FRAMES
4094 | FLAG_IS_ICH
4095 | FLAG_HAS_WOL
4096 | FLAG_HAS_CTRLEXT_ON_LOAD
4097 | FLAG_HAS_AMT
4098 | FLAG_HAS_ERT
4099 | FLAG_HAS_FLASH
4100 | FLAG_APME_IN_WUC,
4101 .pba = 10,
4102 .max_hw_frame_size = DEFAULT_JUMBO,
4103 .get_variants = e1000_get_variants_ich8lan,
4104 .mac_ops = &ich8_mac_ops,
4105 .phy_ops = &ich8_phy_ops,
4106 .nvm_ops = &ich8_nvm_ops,
4107 };
4108
4109 struct e1000_info e1000_pch_info = {
4110 .mac = e1000_pchlan,
4111 .flags = FLAG_IS_ICH
4112 | FLAG_HAS_WOL
4113 | FLAG_HAS_CTRLEXT_ON_LOAD
4114 | FLAG_HAS_AMT
4115 | FLAG_HAS_FLASH
4116 | FLAG_HAS_JUMBO_FRAMES
4117 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
4118 | FLAG_APME_IN_WUC,
4119 .flags2 = FLAG2_HAS_PHY_STATS,
4120 .pba = 26,
4121 .max_hw_frame_size = 4096,
4122 .get_variants = e1000_get_variants_ich8lan,
4123 .mac_ops = &ich8_mac_ops,
4124 .phy_ops = &ich8_phy_ops,
4125 .nvm_ops = &ich8_nvm_ops,
4126 };
4127
4128 struct e1000_info e1000_pch2_info = {
4129 .mac = e1000_pch2lan,
4130 .flags = FLAG_IS_ICH
4131 | FLAG_HAS_WOL
4132 | FLAG_HAS_CTRLEXT_ON_LOAD
4133 | FLAG_HAS_AMT
4134 | FLAG_HAS_FLASH
4135 | FLAG_HAS_JUMBO_FRAMES
4136 | FLAG_APME_IN_WUC,
4137 .flags2 = FLAG2_HAS_PHY_STATS
4138 | FLAG2_HAS_EEE,
4139 .pba = 26,
4140 .max_hw_frame_size = DEFAULT_JUMBO,
4141 .get_variants = e1000_get_variants_ich8lan,
4142 .mac_ops = &ich8_mac_ops,
4143 .phy_ops = &ich8_phy_ops,
4144 .nvm_ops = &ich8_nvm_ops,
4145 };