e1000e: fix flow control denial of service possibility
[mirror_ubuntu-kernels.git] / drivers / net / e1000e / lib.c
1 /*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include <linux/netdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/delay.h>
32 #include <linux/pci.h>
33
34 #include "e1000.h"
35
36 enum e1000_mng_mode {
37 e1000_mng_mode_none = 0,
38 e1000_mng_mode_asf,
39 e1000_mng_mode_pt,
40 e1000_mng_mode_ipmi,
41 e1000_mng_mode_host_if_only
42 };
43
44 #define E1000_FACTPS_MNGCG 0x20000000
45
46 #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management
47 * Technology signature */
48
49 /**
50 * e1000e_get_bus_info_pcie - Get PCIe bus information
51 * @hw: pointer to the HW structure
52 *
53 * Determines and stores the system bus information for a particular
54 * network interface. The following bus information is determined and stored:
55 * bus speed, bus width, type (PCIe), and PCIe function.
56 **/
57 s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
58 {
59 struct e1000_bus_info *bus = &hw->bus;
60 struct e1000_adapter *adapter = hw->adapter;
61 u32 status;
62 u16 pcie_link_status, pci_header_type, cap_offset;
63
64 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
65 if (!cap_offset) {
66 bus->width = e1000_bus_width_unknown;
67 } else {
68 pci_read_config_word(adapter->pdev,
69 cap_offset + PCIE_LINK_STATUS,
70 &pcie_link_status);
71 bus->width = (enum e1000_bus_width)((pcie_link_status &
72 PCIE_LINK_WIDTH_MASK) >>
73 PCIE_LINK_WIDTH_SHIFT);
74 }
75
76 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
77 &pci_header_type);
78 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
79 status = er32(STATUS);
80 bus->func = (status & E1000_STATUS_FUNC_MASK)
81 >> E1000_STATUS_FUNC_SHIFT;
82 } else {
83 bus->func = 0;
84 }
85
86 return 0;
87 }
88
89 /**
90 * e1000e_write_vfta - Write value to VLAN filter table
91 * @hw: pointer to the HW structure
92 * @offset: register offset in VLAN filter table
93 * @value: register value written to VLAN filter table
94 *
95 * Writes value at the given offset in the register array which stores
96 * the VLAN filter table.
97 **/
98 void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
99 {
100 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
101 e1e_flush();
102 }
103
104 /**
105  * e1000e_init_rx_addrs - Initialize receive addresses
106 * @hw: pointer to the HW structure
107 * @rar_count: receive address registers
108 *
109  * Sets up the receive address registers by setting the base receive address
110  * register to the device's MAC address and clearing all the other receive
111 * address registers to 0.
112 **/
113 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
114 {
115 u32 i;
116
117 /* Setup the receive address */
118 hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
119
120 e1000e_rar_set(hw, hw->mac.addr, 0);
121
122 /* Zero out the other (rar_entry_count - 1) receive addresses */
123 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
124 for (i = 1; i < rar_count; i++) {
125 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
126 e1e_flush();
127 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
128 e1e_flush();
129 }
130 }
131
132 /**
133 * e1000e_rar_set - Set receive address register
134 * @hw: pointer to the HW structure
135 * @addr: pointer to the receive address
136 * @index: receive address array register
137 *
138 * Sets the receive address array register at index to the address passed
139 * in by addr.
140 **/
141 void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
142 {
143 u32 rar_low, rar_high;
144
145 /* HW expects these in little endian so we reverse the byte order
146 * from network order (big endian) to little endian
147 */
148 rar_low = ((u32) addr[0] |
149 ((u32) addr[1] << 8) |
150 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
151
152 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
153
154 rar_high |= E1000_RAH_AV;
155
156 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
157 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
158 }
159
160 /**
161 * e1000_mta_set - Set multicast filter table address
162 * @hw: pointer to the HW structure
163 * @hash_value: determines the MTA register and bit to set
164 *
165 * The multicast table address is a register array of 32-bit registers.
166 * The hash_value is used to determine what register the bit is in, the
167 * current value is read, the new bit is OR'd in and the new value is
168 * written back into the register.
169 **/
170 static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
171 {
172 u32 hash_bit, hash_reg, mta;
173
174 /* The MTA is a register array of 32-bit registers. It is
175 * treated like an array of (32*mta_reg_count) bits. We want to
176 * set bit BitArray[hash_value]. So we figure out what register
177 * the bit is in, read it, OR in the new bit, then write
178 * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
179 * mask to bits 31:5 of the hash value which gives us the
180 * register we're modifying. The hash bit within that register
181 * is determined by the lower 5 bits of the hash value.
182 */
183 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
184 hash_bit = hash_value & 0x1F;
185
186 mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
187
188 mta |= (1 << hash_bit);
189
190 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
191 e1e_flush();
192 }
193
194 /**
195 * e1000_hash_mc_addr - Generate a multicast hash value
196 * @hw: pointer to the HW structure
197 * @mc_addr: pointer to a multicast address
198 *
199 * Generates a multicast address hash value which is used to determine
200 * the multicast filter table array address and new table value. See
201  * e1000_mta_set()
202 **/
203 static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
204 {
205 u32 hash_value, hash_mask;
206 u8 bit_shift = 0;
207
208 /* Register count multiplied by bits per register */
209 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
210
211 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
212 * where 0xFF would still fall within the hash mask. */
213 while (hash_mask >> bit_shift != 0xFF)
214 bit_shift++;
215
216 /* The portion of the address that is used for the hash table
217 * is determined by the mc_filter_type setting.
218 * The algorithm is such that there is a total of 8 bits of shifting.
219 * The bit_shift for a mc_filter_type of 0 represents the number of
220 * left-shifts where the MSB of mc_addr[5] would still fall within
221 * the hash_mask. Case 0 does this exactly. Since there are a total
222 * of 8 bits of shifting, then mc_addr[4] will shift right the
223 * remaining number of bits. Thus 8 - bit_shift. The rest of the
224 * cases are a variation of this algorithm...essentially raising the
225 * number of bits to shift mc_addr[5] left, while still keeping the
226 * 8-bit shifting total.
227 */
228 /* For example, given the following Destination MAC Address and an
229 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
230 * we can see that the bit_shift for case 0 is 4. These are the hash
231 * values resulting from each mc_filter_type...
232 * [0] [1] [2] [3] [4] [5]
233 * 01 AA 00 12 34 56
234 * LSB MSB
235 *
236 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
237 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
238 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
239 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
240 */
241 switch (hw->mac.mc_filter_type) {
242 default:
243 case 0:
244 break;
245 case 1:
246 bit_shift += 1;
247 break;
248 case 2:
249 bit_shift += 2;
250 break;
251 case 3:
252 bit_shift += 4;
253 break;
254 }
255
256 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
257 (((u16) mc_addr[5]) << bit_shift)));
258
259 return hash_value;
260 }
261
262 /**
263 * e1000e_mc_addr_list_update_generic - Update Multicast addresses
264 * @hw: pointer to the HW structure
265 * @mc_addr_list: array of multicast addresses to program
266 * @mc_addr_count: number of multicast addresses to program
267 * @rar_used_count: the first RAR register free to program
268 * @rar_count: total number of supported Receive Address Registers
269 *
270 * Updates the Receive Address Registers and Multicast Table Array.
271 * The caller must have a packed mc_addr_list of multicast addresses.
272 * The parameter rar_count will usually be hw->mac.rar_entry_count
273 * unless there are workarounds that change this.
274 **/
275 void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
276 u8 *mc_addr_list, u32 mc_addr_count,
277 u32 rar_used_count, u32 rar_count)
278 {
279 u32 hash_value;
280 u32 i;
281
282 /* Load the first set of multicast addresses into the exact
283 * filters (RAR). If there are not enough to fill the RAR
284 * array, clear the filters.
285 */
286 for (i = rar_used_count; i < rar_count; i++) {
287 if (mc_addr_count) {
288 e1000e_rar_set(hw, mc_addr_list, i);
289 mc_addr_count--;
290 mc_addr_list += ETH_ALEN;
291 } else {
292 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
293 e1e_flush();
294 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
295 e1e_flush();
296 }
297 }
298
299 /* Clear the old settings from the MTA */
300 hw_dbg(hw, "Clearing MTA\n");
301 for (i = 0; i < hw->mac.mta_reg_count; i++) {
302 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
303 e1e_flush();
304 }
305
306 /* Load any remaining multicast addresses into the hash table. */
307 for (; mc_addr_count > 0; mc_addr_count--) {
308 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
309 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
310 e1000_mta_set(hw, hash_value);
311 mc_addr_list += ETH_ALEN;
312 }
313 }
314
315 /**
316 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
317 * @hw: pointer to the HW structure
318 *
319 * Clears the base hardware counters by reading the counter registers.
320 **/
321 void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
322 {
323 u32 temp;
324
325 temp = er32(CRCERRS);
326 temp = er32(SYMERRS);
327 temp = er32(MPC);
328 temp = er32(SCC);
329 temp = er32(ECOL);
330 temp = er32(MCC);
331 temp = er32(LATECOL);
332 temp = er32(COLC);
333 temp = er32(DC);
334 temp = er32(SEC);
335 temp = er32(RLEC);
336 temp = er32(XONRXC);
337 temp = er32(XONTXC);
338 temp = er32(XOFFRXC);
339 temp = er32(XOFFTXC);
340 temp = er32(FCRUC);
341 temp = er32(GPRC);
342 temp = er32(BPRC);
343 temp = er32(MPRC);
344 temp = er32(GPTC);
345 temp = er32(GORCL);
346 temp = er32(GORCH);
347 temp = er32(GOTCL);
348 temp = er32(GOTCH);
349 temp = er32(RNBC);
350 temp = er32(RUC);
351 temp = er32(RFC);
352 temp = er32(ROC);
353 temp = er32(RJC);
354 temp = er32(TORL);
355 temp = er32(TORH);
356 temp = er32(TOTL);
357 temp = er32(TOTH);
358 temp = er32(TPR);
359 temp = er32(TPT);
360 temp = er32(MPTC);
361 temp = er32(BPTC);
362 }
363
364 /**
365 * e1000e_check_for_copper_link - Check for link (Copper)
366 * @hw: pointer to the HW structure
367 *
368  * Checks to see if the link status of the hardware has changed. If a
369 * change in link status has been detected, then we read the PHY registers
370 * to get the current speed/duplex if link exists.
371 **/
372 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
373 {
374 struct e1000_mac_info *mac = &hw->mac;
375 s32 ret_val;
376 bool link;
377
378 /* We only want to go out to the PHY registers to see if Auto-Neg
379 * has completed and/or if our link status has changed. The
380 * get_link_status flag is set upon receiving a Link Status
381 * Change or Rx Sequence Error interrupt.
382 */
383 if (!mac->get_link_status)
384 return 0;
385
386 /* First we want to see if the MII Status Register reports
387 * link. If so, then we want to get the current speed/duplex
388 * of the PHY.
389 */
390 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
391 if (ret_val)
392 return ret_val;
393
394 if (!link)
395 return ret_val; /* No link detected */
396
397 mac->get_link_status = 0;
398
399 /* Check if there was DownShift, must be checked
400 * immediately after link-up */
401 e1000e_check_downshift(hw);
402
403 /* If we are forcing speed/duplex, then we simply return since
404 * we have already determined whether we have link or not.
405 */
406 if (!mac->autoneg) {
407 ret_val = -E1000_ERR_CONFIG;
408 return ret_val;
409 }
410
411 /* Auto-Neg is enabled. Auto Speed Detection takes care
412 * of MAC speed/duplex configuration. So we only need to
413 * configure Collision Distance in the MAC.
414 */
415 e1000e_config_collision_dist(hw);
416
417 /* Configure Flow Control now that Auto-Neg has completed.
418 * First, we need to restore the desired flow control
419 * settings because we may have had to re-autoneg with a
420 * different link partner.
421 */
422 ret_val = e1000e_config_fc_after_link_up(hw);
423 if (ret_val) {
424 hw_dbg(hw, "Error configuring flow control\n");
425 }
426
427 return ret_val;
428 }
429
430 /**
431 * e1000e_check_for_fiber_link - Check for link (Fiber)
432 * @hw: pointer to the HW structure
433 *
434 * Checks for link up on the hardware. If link is not up and we have
435 * a signal, then we need to force link up.
436 **/
437 s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
438 {
439 struct e1000_mac_info *mac = &hw->mac;
440 u32 rxcw;
441 u32 ctrl;
442 u32 status;
443 s32 ret_val;
444
445 ctrl = er32(CTRL);
446 status = er32(STATUS);
447 rxcw = er32(RXCW);
448
449 /* If we don't have link (auto-negotiation failed or link partner
450 * cannot auto-negotiate), the cable is plugged in (we have signal),
451 * and our link partner is not trying to auto-negotiate with us (we
452 * are receiving idles or data), we need to force link up. We also
453 * need to give auto-negotiation time to complete, in case the cable
454 * was just plugged in. The autoneg_failed flag does this.
455 */
456 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
457 if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
458 (!(rxcw & E1000_RXCW_C))) {
459 if (mac->autoneg_failed == 0) {
460 mac->autoneg_failed = 1;
461 return 0;
462 }
463 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
464
465 /* Disable auto-negotiation in the TXCW register */
466 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
467
468 /* Force link-up and also force full-duplex. */
469 ctrl = er32(CTRL);
470 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
471 ew32(CTRL, ctrl);
472
473 /* Configure Flow Control after forcing link up. */
474 ret_val = e1000e_config_fc_after_link_up(hw);
475 if (ret_val) {
476 hw_dbg(hw, "Error configuring flow control\n");
477 return ret_val;
478 }
479 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
480 /* If we are forcing link and we are receiving /C/ ordered
481 * sets, re-enable auto-negotiation in the TXCW register
482 * and disable forced link in the Device Control register
483 * in an attempt to auto-negotiate with our link partner.
484 */
485 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
486 ew32(TXCW, mac->txcw);
487 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
488
489 mac->serdes_has_link = 1;
490 }
491
492 return 0;
493 }
494
495 /**
496 * e1000e_check_for_serdes_link - Check for link (Serdes)
497 * @hw: pointer to the HW structure
498 *
499 * Checks for link up on the hardware. If link is not up and we have
500 * a signal, then we need to force link up.
501 **/
502 s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
503 {
504 struct e1000_mac_info *mac = &hw->mac;
505 u32 rxcw;
506 u32 ctrl;
507 u32 status;
508 s32 ret_val;
509
510 ctrl = er32(CTRL);
511 status = er32(STATUS);
512 rxcw = er32(RXCW);
513
514 /* If we don't have link (auto-negotiation failed or link partner
515 * cannot auto-negotiate), and our link partner is not trying to
516 * auto-negotiate with us (we are receiving idles or data),
517 * we need to force link up. We also need to give auto-negotiation
518 * time to complete.
519 */
520 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
521 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
522 if (mac->autoneg_failed == 0) {
523 mac->autoneg_failed = 1;
524 return 0;
525 }
526 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
527
528 /* Disable auto-negotiation in the TXCW register */
529 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
530
531 /* Force link-up and also force full-duplex. */
532 ctrl = er32(CTRL);
533 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
534 ew32(CTRL, ctrl);
535
536 /* Configure Flow Control after forcing link up. */
537 ret_val = e1000e_config_fc_after_link_up(hw);
538 if (ret_val) {
539 hw_dbg(hw, "Error configuring flow control\n");
540 return ret_val;
541 }
542 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
543 /* If we are forcing link and we are receiving /C/ ordered
544 * sets, re-enable auto-negotiation in the TXCW register
545 * and disable forced link in the Device Control register
546 * in an attempt to auto-negotiate with our link partner.
547 */
548 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
549 ew32(TXCW, mac->txcw);
550 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
551
552 mac->serdes_has_link = 1;
553 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
554 /* If we force link for non-auto-negotiation switch, check
555 * link status based on MAC synchronization for internal
556 * serdes media type.
557 */
558 /* SYNCH bit and IV bit are sticky. */
559 udelay(10);
560 if (E1000_RXCW_SYNCH & er32(RXCW)) {
561 if (!(rxcw & E1000_RXCW_IV)) {
562 mac->serdes_has_link = 1;
563 hw_dbg(hw, "SERDES: Link is up.\n");
564 }
565 } else {
566 mac->serdes_has_link = 0;
567 hw_dbg(hw, "SERDES: Link is down.\n");
568 }
569 }
570
571 if (E1000_TXCW_ANE & er32(TXCW)) {
572 status = er32(STATUS);
573 mac->serdes_has_link = (status & E1000_STATUS_LU);
574 }
575
576 return 0;
577 }
578
579 /**
580 * e1000_set_default_fc_generic - Set flow control default values
581 * @hw: pointer to the HW structure
582 *
583 * Read the EEPROM for the default values for flow control and store the
584 * values.
585 **/
586 static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
587 {
588 struct e1000_mac_info *mac = &hw->mac;
589 s32 ret_val;
590 u16 nvm_data;
591
592 /* Read and store word 0x0F of the EEPROM. This word contains bits
593 * that determine the hardware's default PAUSE (flow control) mode,
594 * a bit that determines whether the HW defaults to enabling or
595 * disabling auto-negotiation, and the direction of the
596 * SW defined pins. If there is no SW over-ride of the flow
597 * control setting, then the variable hw->fc will
598 * be initialized based on a value in the EEPROM.
599 */
600 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
601
602 if (ret_val) {
603 hw_dbg(hw, "NVM Read Error\n");
604 return ret_val;
605 }
606
607 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
608 mac->fc = e1000_fc_none;
609 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
610 NVM_WORD0F_ASM_DIR)
611 mac->fc = e1000_fc_tx_pause;
612 else
613 mac->fc = e1000_fc_full;
614
615 return 0;
616 }
617
618 /**
619 * e1000e_setup_link - Setup flow control and link settings
620 * @hw: pointer to the HW structure
621 *
622 * Determines which flow control settings to use, then configures flow
623 * control. Calls the appropriate media-specific link configuration
624 * function. Assuming the adapter has a valid link partner, a valid link
625 * should be established. Assumes the hardware has previously been reset
626 * and the transmitter and receiver are not enabled.
627 **/
628 s32 e1000e_setup_link(struct e1000_hw *hw)
629 {
630 struct e1000_mac_info *mac = &hw->mac;
631 s32 ret_val;
632
633 /* In the case of the phy reset being blocked, we already have a link.
634 * We do not need to set it up again.
635 */
636 if (e1000_check_reset_block(hw))
637 return 0;
638
639 /*
640 * If flow control is set to default, set flow control based on
641 * the EEPROM flow control settings.
642 */
643 if (mac->fc == e1000_fc_default) {
644 ret_val = e1000_set_default_fc_generic(hw);
645 if (ret_val)
646 return ret_val;
647 }
648
649 /* We want to save off the original Flow Control configuration just
650 * in case we get disconnected and then reconnected into a different
651 * hub or switch with different Flow Control capabilities.
652 */
653 mac->original_fc = mac->fc;
654
655 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
656
657 /* Call the necessary media_type subroutine to configure the link. */
658 ret_val = mac->ops.setup_physical_interface(hw);
659 if (ret_val)
660 return ret_val;
661
662 /* Initialize the flow control address, type, and PAUSE timer
663 * registers to their default values. This is done even if flow
664 * control is disabled, because it does not hurt anything to
665 * initialize these registers.
666 */
667 hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
668 ew32(FCT, FLOW_CONTROL_TYPE);
669 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
670 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
671
672 ew32(FCTTV, mac->fc_pause_time);
673
674 return e1000e_set_fc_watermarks(hw);
675 }
676
677 /**
678 * e1000_commit_fc_settings_generic - Configure flow control
679 * @hw: pointer to the HW structure
680 *
681 * Write the flow control settings to the Transmit Config Word Register (TXCW)
682  * based on the flow control settings in e1000_mac_info.
683 **/
684 static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
685 {
686 struct e1000_mac_info *mac = &hw->mac;
687 u32 txcw;
688
689 /* Check for a software override of the flow control settings, and
690 * setup the device accordingly. If auto-negotiation is enabled, then
691 * software will have to set the "PAUSE" bits to the correct value in
692 * the Transmit Config Word Register (TXCW) and re-start auto-
693 * negotiation. However, if auto-negotiation is disabled, then
694 * software will have to manually configure the two flow control enable
695 * bits in the CTRL register.
696 *
697 * The possible values of the "fc" parameter are:
698 * 0: Flow control is completely disabled
699 * 1: Rx flow control is enabled (we can receive pause frames,
700 * but not send pause frames).
701 * 2: Tx flow control is enabled (we can send pause frames but we
702 * do not support receiving pause frames).
703 * 3: Both Rx and TX flow control (symmetric) are enabled.
704 */
705 switch (mac->fc) {
706 case e1000_fc_none:
707 /* Flow control completely disabled by a software over-ride. */
708 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
709 break;
710 case e1000_fc_rx_pause:
711 /* RX Flow control is enabled and TX Flow control is disabled
712 * by a software over-ride. Since there really isn't a way to
713 * advertise that we are capable of RX Pause ONLY, we will
714 * advertise that we support both symmetric and asymmetric RX
715 * PAUSE. Later, we will disable the adapter's ability to send
716 * PAUSE frames.
717 */
718 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
719 break;
720 case e1000_fc_tx_pause:
721 /* TX Flow control is enabled, and RX Flow control is disabled,
722 * by a software over-ride.
723 */
724 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
725 break;
726 case e1000_fc_full:
727 /* Flow control (both RX and TX) is enabled by a software
728 * over-ride.
729 */
730 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
731 break;
732 default:
733 hw_dbg(hw, "Flow control param set incorrectly\n");
734 return -E1000_ERR_CONFIG;
735 break;
736 }
737
738 ew32(TXCW, txcw);
739 mac->txcw = txcw;
740
741 return 0;
742 }
743
744 /**
745 * e1000_poll_fiber_serdes_link_generic - Poll for link up
746 * @hw: pointer to the HW structure
747 *
748  * Polls for link up by reading the status register; if link fails to come
749 * up with auto-negotiation, then the link is forced if a signal is detected.
750 **/
751 static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
752 {
753 struct e1000_mac_info *mac = &hw->mac;
754 u32 i, status;
755 s32 ret_val;
756
757 /* If we have a signal (the cable is plugged in, or assumed true for
758 * serdes media) then poll for a "Link-Up" indication in the Device
759 * Status Register. Time-out if a link isn't seen in 500 milliseconds
760  * (Auto-negotiation should complete in less than 500
761 * milliseconds even if the other end is doing it in SW).
762 */
763 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
764 msleep(10);
765 status = er32(STATUS);
766 if (status & E1000_STATUS_LU)
767 break;
768 }
769 if (i == FIBER_LINK_UP_LIMIT) {
770 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
771 mac->autoneg_failed = 1;
772 /* AutoNeg failed to achieve a link, so we'll call
773 * mac->check_for_link. This routine will force the
774 * link up if we detect a signal. This will allow us to
775 * communicate with non-autonegotiating link partners.
776 */
777 ret_val = mac->ops.check_for_link(hw);
778 if (ret_val) {
779 hw_dbg(hw, "Error while checking for link\n");
780 return ret_val;
781 }
782 mac->autoneg_failed = 0;
783 } else {
784 mac->autoneg_failed = 0;
785 hw_dbg(hw, "Valid Link Found\n");
786 }
787
788 return 0;
789 }
790
791 /**
792 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
793 * @hw: pointer to the HW structure
794 *
795 * Configures collision distance and flow control for fiber and serdes
796 * links. Upon successful setup, poll for link.
797 **/
798 s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
799 {
800 u32 ctrl;
801 s32 ret_val;
802
803 ctrl = er32(CTRL);
804
805 /* Take the link out of reset */
806 ctrl &= ~E1000_CTRL_LRST;
807
808 e1000e_config_collision_dist(hw);
809
810 ret_val = e1000_commit_fc_settings_generic(hw);
811 if (ret_val)
812 return ret_val;
813
814 /* Since auto-negotiation is enabled, take the link out of reset (the
815 * link will be in reset, because we previously reset the chip). This
816 * will restart auto-negotiation. If auto-negotiation is successful
817 * then the link-up status bit will be set and the flow control enable
818 * bits (RFCE and TFCE) will be set according to their negotiated value.
819 */
820 hw_dbg(hw, "Auto-negotiation enabled\n");
821
822 ew32(CTRL, ctrl);
823 e1e_flush();
824 msleep(1);
825
826         /* For these adapters, the SW definable pin 1 is set when the optics
827 * detect a signal. If we have a signal, then poll for a "Link-Up"
828 * indication.
829 */
830 if (hw->media_type == e1000_media_type_internal_serdes ||
831 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
832 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
833 } else {
834 hw_dbg(hw, "No signal detected\n");
835 }
836
837 return 0;
838 }
839
840 /**
841 * e1000e_config_collision_dist - Configure collision distance
842 * @hw: pointer to the HW structure
843 *
844 * Configures the collision distance to the default value and is used
845 * during link setup. Currently no func pointer exists and all
846 * implementations are handled in the generic version of this function.
847 **/
848 void e1000e_config_collision_dist(struct e1000_hw *hw)
849 {
850 u32 tctl;
851
852 tctl = er32(TCTL);
853
854 tctl &= ~E1000_TCTL_COLD;
855 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
856
857 ew32(TCTL, tctl);
858 e1e_flush();
859 }
860
861 /**
862 * e1000e_set_fc_watermarks - Set flow control high/low watermarks
863 * @hw: pointer to the HW structure
864 *
865 * Sets the flow control high/low threshold (watermark) registers. If
866 * flow control XON frame transmission is enabled, then set XON frame
867  * transmission as well.
868 **/
869 s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
870 {
871 struct e1000_mac_info *mac = &hw->mac;
872 u32 fcrtl = 0, fcrth = 0;
873
874 /* Set the flow control receive threshold registers. Normally,
875 * these registers will be set to a default threshold that may be
876 * adjusted later by the driver's runtime code. However, if the
877 * ability to transmit pause frames is not enabled, then these
878 * registers will be set to 0.
879 */
880 if (mac->fc & e1000_fc_tx_pause) {
881 /* We need to set up the Receive Threshold high and low water
882 * marks as well as (optionally) enabling the transmission of
883 * XON frames.
884 */
885 fcrtl = mac->fc_low_water;
886 fcrtl |= E1000_FCRTL_XONE;
887 fcrth = mac->fc_high_water;
888 }
889 ew32(FCRTL, fcrtl);
890 ew32(FCRTH, fcrth);
891
892 return 0;
893 }
894
895 /**
896 * e1000e_force_mac_fc - Force the MAC's flow control settings
897 * @hw: pointer to the HW structure
898 *
899 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
900 * device control register to reflect the adapter settings. TFCE and RFCE
901 * need to be explicitly set by software when a copper PHY is used because
902 * autonegotiation is managed by the PHY rather than the MAC. Software must
903 * also configure these bits when link is forced on a fiber connection.
904 **/
905 s32 e1000e_force_mac_fc(struct e1000_hw *hw)
906 {
907 struct e1000_mac_info *mac = &hw->mac;
908 u32 ctrl;
909
910 ctrl = er32(CTRL);
911
912 /* Because we didn't get link via the internal auto-negotiation
913 * mechanism (we either forced link or we got link via PHY
914          * auto-neg), we have to manually enable/disable transmit and
915          * receive flow control.
916          *
917          * The "Case" statement below enables/disables flow control
918 * according to the "mac->fc" parameter.
919 *
920 * The possible values of the "fc" parameter are:
921 * 0: Flow control is completely disabled
922 * 1: Rx flow control is enabled (we can receive pause
923 * frames but not send pause frames).
924 * 2: Tx flow control is enabled (we can send pause frames
925          *          but we do not receive pause frames).
926 * 3: Both Rx and TX flow control (symmetric) is enabled.
927 * other: No other values should be possible at this point.
928 */
929 hw_dbg(hw, "mac->fc = %u\n", mac->fc);
930
931 switch (mac->fc) {
932 case e1000_fc_none:
933 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
934 break;
935 case e1000_fc_rx_pause:
936 ctrl &= (~E1000_CTRL_TFCE);
937 ctrl |= E1000_CTRL_RFCE;
938 break;
939 case e1000_fc_tx_pause:
940 ctrl &= (~E1000_CTRL_RFCE);
941 ctrl |= E1000_CTRL_TFCE;
942 break;
943 case e1000_fc_full:
944 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
945 break;
946 default:
947 hw_dbg(hw, "Flow control param set incorrectly\n");
948 return -E1000_ERR_CONFIG;
949 }
950
951 ew32(CTRL, ctrl);
952
953 return 0;
954 }
955
956 /**
957 * e1000e_config_fc_after_link_up - Configures flow control after link
958 * @hw: pointer to the HW structure
959 *
960 * Checks the status of auto-negotiation after link up to ensure that the
961 * speed and duplex were not forced. If the link needed to be forced, then
962 * flow control needs to be forced also. If auto-negotiation is enabled
963 * and did not fail, then we configure flow control based on our link
964 * partner.
965 **/
966 s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
967 {
968 struct e1000_mac_info *mac = &hw->mac;
969 s32 ret_val = 0;
970 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
971 u16 speed, duplex;
972
973 /* Check for the case where we have fiber media and auto-neg failed
974 * so we had to force link. In this case, we need to force the
975 * configuration of the MAC to match the "fc" parameter.
976 */
977 if (mac->autoneg_failed) {
978 if (hw->media_type == e1000_media_type_fiber ||
979 hw->media_type == e1000_media_type_internal_serdes)
980 ret_val = e1000e_force_mac_fc(hw);
981 } else {
982 if (hw->media_type == e1000_media_type_copper)
983 ret_val = e1000e_force_mac_fc(hw);
984 }
985
986 if (ret_val) {
987 hw_dbg(hw, "Error forcing flow control settings\n");
988 return ret_val;
989 }
990
991 /* Check for the case where we have copper media and auto-neg is
992 * enabled. In this case, we need to check and see if Auto-Neg
993          * has completed, and if so, how the PHY and link partner have
994 * flow control configured.
995 */
996 if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
997 /* Read the MII Status Register and check to see if AutoNeg
998 * has completed. We read this twice because this reg has
999 * some "sticky" (latched) bits.
1000 */
1001 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1002 if (ret_val)
1003 return ret_val;
1004 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1005 if (ret_val)
1006 return ret_val;
1007
1008 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1009 hw_dbg(hw, "Copper PHY and Auto Neg "
1010 "has not completed.\n");
1011 return ret_val;
1012 }
1013
1014 /* The AutoNeg process has completed, so we now need to
1015 * read both the Auto Negotiation Advertisement
1016                  * Register (Address 4) and the Auto-Negotiation Base
1017 * Page Ability Register (Address 5) to determine how
1018 * flow control was negotiated.
1019 */
1020 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1021 if (ret_val)
1022 return ret_val;
1023 ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1024 if (ret_val)
1025 return ret_val;
1026
1027 /* Two bits in the Auto Negotiation Advertisement Register
1028 * (Address 4) and two bits in the Auto Negotiation Base
1029 * Page Ability Register (Address 5) determine flow control
1030 * for both the PHY and the link partner. The following
1031 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1032 * 1999, describes these PAUSE resolution bits and how flow
1033 * control is determined based upon these settings.
1034 * NOTE: DC = Don't Care
1035 *
1036 * LOCAL DEVICE | LINK PARTNER
1037 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1038 *-------|---------|-------|---------|--------------------
1039 * 0 | 0 | DC | DC | e1000_fc_none
1040 * 0 | 1 | 0 | DC | e1000_fc_none
1041 * 0 | 1 | 1 | 0 | e1000_fc_none
1042 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1043 * 1 | 0 | 0 | DC | e1000_fc_none
1044 * 1 | DC | 1 | DC | e1000_fc_full
1045 * 1 | 1 | 0 | 0 | e1000_fc_none
1046 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1047 *
1048 */
1049 /* Are both PAUSE bits set to 1? If so, this implies
1050 * Symmetric Flow Control is enabled at both ends. The
1051 * ASM_DIR bits are irrelevant per the spec.
1052 *
1053 * For Symmetric Flow Control:
1054 *
1055 * LOCAL DEVICE | LINK PARTNER
1056 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1057 *-------|---------|-------|---------|--------------------
1058 * 1 | DC | 1 | DC | E1000_fc_full
1059 *
1060 */
1061 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1062 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1063 /* Now we need to check if the user selected RX ONLY
1064 * of pause frames. In this case, we had to advertise
1065 * FULL flow control because we could not advertise RX
1066 * ONLY. Hence, we must now check to see if we need to
1067 * turn OFF the TRANSMISSION of PAUSE frames.
1068 */
1069 if (mac->original_fc == e1000_fc_full) {
1070 mac->fc = e1000_fc_full;
1071 hw_dbg(hw, "Flow Control = FULL.\r\n");
1072 } else {
1073 mac->fc = e1000_fc_rx_pause;
1074 hw_dbg(hw, "Flow Control = "
1075 "RX PAUSE frames only.\r\n");
1076 }
1077 }
1078 /* For receiving PAUSE frames ONLY.
1079 *
1080 * LOCAL DEVICE | LINK PARTNER
1081 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1082 *-------|---------|-------|---------|--------------------
1083 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1084 *
1085 */
1086 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1087 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1088 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1089 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1090 mac->fc = e1000_fc_tx_pause;
1091 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
1092 }
1093 /* For transmitting PAUSE frames ONLY.
1094 *
1095 * LOCAL DEVICE | LINK PARTNER
1096 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1097 *-------|---------|-------|---------|--------------------
1098 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1099 *
1100 */
1101 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1102 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1103 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1104 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1105 mac->fc = e1000_fc_rx_pause;
1106 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
1107 } else {
1108 /*
1109 * Per the IEEE spec, at this point flow control
1110 * should be disabled.
1111 */
1112 mac->fc = e1000_fc_none;
1113 hw_dbg(hw, "Flow Control = NONE.\r\n");
1114 }
1115
1116 /* Now we need to do one last check... If we auto-
1117 * negotiated to HALF DUPLEX, flow control should not be
1118 * enabled per IEEE 802.3 spec.
1119 */
1120 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1121 if (ret_val) {
1122 hw_dbg(hw, "Error getting link speed and duplex\n");
1123 return ret_val;
1124 }
1125
1126 if (duplex == HALF_DUPLEX)
1127 mac->fc = e1000_fc_none;
1128
1129 /* Now we call a subroutine to actually force the MAC
1130 * controller to use the correct flow control settings.
1131 */
1132 ret_val = e1000e_force_mac_fc(hw);
1133 if (ret_val) {
1134 hw_dbg(hw, "Error forcing flow control settings\n");
1135 return ret_val;
1136 }
1137 }
1138
1139 return 0;
1140 }
1141
1142 /**
1143  * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
1144 * @hw: pointer to the HW structure
1145 * @speed: stores the current speed
1146 * @duplex: stores the current duplex
1147 *
1148 * Read the status register for the current speed/duplex and store the current
1149 * speed and duplex for copper connections.
1150 **/
1151 s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1152 {
1153 u32 status;
1154
1155 status = er32(STATUS);
1156 if (status & E1000_STATUS_SPEED_1000) {
1157 *speed = SPEED_1000;
1158 hw_dbg(hw, "1000 Mbs, ");
1159 } else if (status & E1000_STATUS_SPEED_100) {
1160 *speed = SPEED_100;
1161 hw_dbg(hw, "100 Mbs, ");
1162 } else {
1163 *speed = SPEED_10;
1164 hw_dbg(hw, "10 Mbs, ");
1165 }
1166
1167 if (status & E1000_STATUS_FD) {
1168 *duplex = FULL_DUPLEX;
1169 hw_dbg(hw, "Full Duplex\n");
1170 } else {
1171 *duplex = HALF_DUPLEX;
1172 hw_dbg(hw, "Half Duplex\n");
1173 }
1174
1175 return 0;
1176 }
1177
1178 /**
1179  * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
1180 * @hw: pointer to the HW structure
1181 * @speed: stores the current speed
1182 * @duplex: stores the current duplex
1183 *
1184 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1185 * for fiber/serdes links.
1186 **/
1187 s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1188 {
1189 *speed = SPEED_1000;
1190 *duplex = FULL_DUPLEX;
1191
1192 return 0;
1193 }
1194
1195 /**
1196 * e1000e_get_hw_semaphore - Acquire hardware semaphore
1197 * @hw: pointer to the HW structure
1198 *
1199 * Acquire the HW semaphore to access the PHY or NVM
1200 **/
1201 s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1202 {
1203 u32 swsm;
1204 s32 timeout = hw->nvm.word_size + 1;
1205 s32 i = 0;
1206
1207 /* Get the SW semaphore */
1208 while (i < timeout) {
1209 swsm = er32(SWSM);
1210 if (!(swsm & E1000_SWSM_SMBI))
1211 break;
1212
1213 udelay(50);
1214 i++;
1215 }
1216
1217 if (i == timeout) {
1218 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
1219 return -E1000_ERR_NVM;
1220 }
1221
1222 /* Get the FW semaphore. */
1223 for (i = 0; i < timeout; i++) {
1224 swsm = er32(SWSM);
1225 ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
1226
1227 /* Semaphore acquired if bit latched */
1228 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1229 break;
1230
1231 udelay(50);
1232 }
1233
1234 if (i == timeout) {
1235 /* Release semaphores */
1236 e1000e_put_hw_semaphore(hw);
1237 hw_dbg(hw, "Driver can't access the NVM\n");
1238 return -E1000_ERR_NVM;
1239 }
1240
1241 return 0;
1242 }
1243
1244 /**
1245 * e1000e_put_hw_semaphore - Release hardware semaphore
1246 * @hw: pointer to the HW structure
1247 *
1248 * Release hardware semaphore used to access the PHY or NVM
1249 **/
1250 void e1000e_put_hw_semaphore(struct e1000_hw *hw)
1251 {
1252 u32 swsm;
1253
1254 swsm = er32(SWSM);
1255 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1256 ew32(SWSM, swsm);
1257 }
1258
1259 /**
1260 * e1000e_get_auto_rd_done - Check for auto read completion
1261 * @hw: pointer to the HW structure
1262 *
1263 * Check EEPROM for Auto Read done bit.
1264 **/
1265 s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1266 {
1267 s32 i = 0;
1268
1269 while (i < AUTO_READ_DONE_TIMEOUT) {
1270 if (er32(EECD) & E1000_EECD_AUTO_RD)
1271 break;
1272 msleep(1);
1273 i++;
1274 }
1275
1276 if (i == AUTO_READ_DONE_TIMEOUT) {
1277 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
1278 return -E1000_ERR_RESET;
1279 }
1280
1281 return 0;
1282 }
1283
1284 /**
1285 * e1000e_valid_led_default - Verify a valid default LED config
1286 * @hw: pointer to the HW structure
1287  * @data: pointer to storage for the default LED configuration read from the NVM (EEPROM)
1288 *
1289 * Read the EEPROM for the current default LED configuration. If the
1290 * LED configuration is not valid, set to a valid LED configuration.
1291 **/
1292 s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1293 {
1294 s32 ret_val;
1295
1296 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1297 if (ret_val) {
1298 hw_dbg(hw, "NVM Read Error\n");
1299 return ret_val;
1300 }
1301
1302 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1303 *data = ID_LED_DEFAULT;
1304
1305 return 0;
1306 }
1307
1308 /**
1309  * e1000e_id_led_init - Initialize ID LED settings
1310  * @hw: pointer to the HW structure
1311  * Reads the ID LED default from the NVM and uses it to set LEDCTL mode1/mode2.
1312 **/
1313 s32 e1000e_id_led_init(struct e1000_hw *hw)
1314 {
1315 struct e1000_mac_info *mac = &hw->mac;
1316 s32 ret_val;
1317 const u32 ledctl_mask = 0x000000FF;
1318 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1319 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1320 u16 data, i, temp;
1321 const u16 led_mask = 0x0F;
1322
1323 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1324 if (ret_val)
1325 return ret_val;
1326
1327 mac->ledctl_default = er32(LEDCTL);
1328 mac->ledctl_mode1 = mac->ledctl_default;
1329 mac->ledctl_mode2 = mac->ledctl_default;
1330
1331 for (i = 0; i < 4; i++) {
1332 temp = (data >> (i << 2)) & led_mask;
1333 switch (temp) {
1334 case ID_LED_ON1_DEF2:
1335 case ID_LED_ON1_ON2:
1336 case ID_LED_ON1_OFF2:
1337 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1338 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1339 break;
1340 case ID_LED_OFF1_DEF2:
1341 case ID_LED_OFF1_ON2:
1342 case ID_LED_OFF1_OFF2:
1343 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1344 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1345 break;
1346 default:
1347 /* Do nothing */
1348 break;
1349 }
1350 switch (temp) {
1351 case ID_LED_DEF1_ON2:
1352 case ID_LED_ON1_ON2:
1353 case ID_LED_OFF1_ON2:
1354 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1355 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1356 break;
1357 case ID_LED_DEF1_OFF2:
1358 case ID_LED_ON1_OFF2:
1359 case ID_LED_OFF1_OFF2:
1360 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1361 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1362 break;
1363 default:
1364 /* Do nothing */
1365 break;
1366 }
1367 }
1368
1369 return 0;
1370 }
1371
1372 /**
1373 * e1000e_cleanup_led_generic - Set LED config to default operation
1374 * @hw: pointer to the HW structure
1375 *
1376 * Remove the current LED configuration and set the LED configuration
1377 * to the default value, saved from the EEPROM.
1378 **/
1379 s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1380 {
1381 ew32(LEDCTL, hw->mac.ledctl_default);
1382 return 0;
1383 }
1384
1385 /**
1386 * e1000e_blink_led - Blink LED
1387 * @hw: pointer to the HW structure
1388 *
1389  * Blink the LEDs that are set to be on.
1390 **/
1391 s32 e1000e_blink_led(struct e1000_hw *hw)
1392 {
1393 u32 ledctl_blink = 0;
1394 u32 i;
1395
1396 if (hw->media_type == e1000_media_type_fiber) {
1397 /* always blink LED0 for PCI-E fiber */
1398 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1399 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1400 } else {
1401 /* set the blink bit for each LED that's "on" (0x0E)
1402 * in ledctl_mode2 */
1403 ledctl_blink = hw->mac.ledctl_mode2;
1404 for (i = 0; i < 4; i++)
1405 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1406 E1000_LEDCTL_MODE_LED_ON)
1407 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1408 (i * 8));
1409 }
1410
1411 ew32(LEDCTL, ledctl_blink);
1412
1413 return 0;
1414 }
1415
1416 /**
1417 * e1000e_led_on_generic - Turn LED on
1418 * @hw: pointer to the HW structure
1419 *
1420 * Turn LED on.
1421 **/
1422 s32 e1000e_led_on_generic(struct e1000_hw *hw)
1423 {
1424 u32 ctrl;
1425
1426 switch (hw->media_type) {
1427 case e1000_media_type_fiber:
1428 ctrl = er32(CTRL);
1429 ctrl &= ~E1000_CTRL_SWDPIN0;
1430 ctrl |= E1000_CTRL_SWDPIO0;
1431 ew32(CTRL, ctrl);
1432 break;
1433 case e1000_media_type_copper:
1434 ew32(LEDCTL, hw->mac.ledctl_mode2);
1435 break;
1436 default:
1437 break;
1438 }
1439
1440 return 0;
1441 }
1442
1443 /**
1444 * e1000e_led_off_generic - Turn LED off
1445 * @hw: pointer to the HW structure
1446 *
1447 * Turn LED off.
1448 **/
1449 s32 e1000e_led_off_generic(struct e1000_hw *hw)
1450 {
1451 u32 ctrl;
1452
1453 switch (hw->media_type) {
1454 case e1000_media_type_fiber:
1455 ctrl = er32(CTRL);
1456 ctrl |= E1000_CTRL_SWDPIN0;
1457 ctrl |= E1000_CTRL_SWDPIO0;
1458 ew32(CTRL, ctrl);
1459 break;
1460 case e1000_media_type_copper:
1461 ew32(LEDCTL, hw->mac.ledctl_mode1);
1462 break;
1463 default:
1464 break;
1465 }
1466
1467 return 0;
1468 }
1469
1470 /**
1471 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
1472 * @hw: pointer to the HW structure
1473 * @no_snoop: bitmap of snoop events
1474 *
1475  * Set the PCI-Express GCR no-snoop bits for the events enabled in 'no_snoop'.
1476 **/
1477 void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
1478 {
1479 u32 gcr;
1480
1481 if (no_snoop) {
1482 gcr = er32(GCR);
1483 gcr &= ~(PCIE_NO_SNOOP_ALL);
1484 gcr |= no_snoop;
1485 ew32(GCR, gcr);
1486 }
1487 }
1488
1489 /**
1490 * e1000e_disable_pcie_master - Disables PCI-express master access
1491 * @hw: pointer to the HW structure
1492 *
1493 * Returns 0 if successful, else returns -10
1494  * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1495 * the master requests to be disabled.
1496 *
1497 * Disables PCI-Express master access and verifies there are no pending
1498 * requests.
1499 **/
1500 s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1501 {
1502 u32 ctrl;
1503 s32 timeout = MASTER_DISABLE_TIMEOUT;
1504
1505 ctrl = er32(CTRL);
1506 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1507 ew32(CTRL, ctrl);
1508
1509 while (timeout) {
1510 if (!(er32(STATUS) &
1511 E1000_STATUS_GIO_MASTER_ENABLE))
1512 break;
1513 udelay(100);
1514 timeout--;
1515 }
1516
1517 if (!timeout) {
1518 hw_dbg(hw, "Master requests are pending.\n");
1519 return -E1000_ERR_MASTER_REQUESTS_PENDING;
1520 }
1521
1522 return 0;
1523 }
1524
1525 /**
1526 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
1527 * @hw: pointer to the HW structure
1528 *
1529 * Reset the Adaptive Interframe Spacing throttle to default values.
1530 **/
1531 void e1000e_reset_adaptive(struct e1000_hw *hw)
1532 {
1533 struct e1000_mac_info *mac = &hw->mac;
1534
1535 mac->current_ifs_val = 0;
1536 mac->ifs_min_val = IFS_MIN;
1537 mac->ifs_max_val = IFS_MAX;
1538 mac->ifs_step_size = IFS_STEP;
1539 mac->ifs_ratio = IFS_RATIO;
1540
1541 mac->in_ifs_mode = 0;
1542 ew32(AIT, 0);
1543 }
1544
1545 /**
1546 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
1547 * @hw: pointer to the HW structure
1548 *
1549 * Update the Adaptive Interframe Spacing Throttle value based on the
1550 * time between transmitted packets and time between collisions.
1551 **/
1552 void e1000e_update_adaptive(struct e1000_hw *hw)
1553 {
1554 struct e1000_mac_info *mac = &hw->mac;
1555
1556 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1557 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1558 mac->in_ifs_mode = 1;
1559 if (mac->current_ifs_val < mac->ifs_max_val) {
1560 if (!mac->current_ifs_val)
1561 mac->current_ifs_val = mac->ifs_min_val;
1562 else
1563 mac->current_ifs_val +=
1564 mac->ifs_step_size;
1565 ew32(AIT,
1566 mac->current_ifs_val);
1567 }
1568 }
1569 } else {
1570 if (mac->in_ifs_mode &&
1571 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1572 mac->current_ifs_val = 0;
1573 mac->in_ifs_mode = 0;
1574 ew32(AIT, 0);
1575 }
1576 }
1577 }
1578
1579 /**
1580 * e1000_raise_eec_clk - Raise EEPROM clock
1581 * @hw: pointer to the HW structure
1582  * @eecd: pointer to the EECD register value
1583 *
1584 * Enable/Raise the EEPROM clock bit.
1585 **/
1586 static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
1587 {
1588 *eecd = *eecd | E1000_EECD_SK;
1589 ew32(EECD, *eecd);
1590 e1e_flush();
1591 udelay(hw->nvm.delay_usec);
1592 }
1593
1594 /**
1595 * e1000_lower_eec_clk - Lower EEPROM clock
1596 * @hw: pointer to the HW structure
1597  * @eecd: pointer to the EECD register value
1598 *
1599 * Clear/Lower the EEPROM clock bit.
1600 **/
1601 static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
1602 {
1603 *eecd = *eecd & ~E1000_EECD_SK;
1604 ew32(EECD, *eecd);
1605 e1e_flush();
1606 udelay(hw->nvm.delay_usec);
1607 }
1608
1609 /**
1610  * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
1611 * @hw: pointer to the HW structure
1612 * @data: data to send to the EEPROM
1613 * @count: number of bits to shift out
1614 *
1615 * We need to shift 'count' bits out to the EEPROM. So, the value in the
1616 * "data" parameter will be shifted out to the EEPROM one bit at a time.
1617 * In order to do this, "data" must be broken down into bits.
1618 **/
1619 static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
1620 {
1621 struct e1000_nvm_info *nvm = &hw->nvm;
1622 u32 eecd = er32(EECD);
1623 u32 mask;
1624
1625 mask = 0x01 << (count - 1);
1626 if (nvm->type == e1000_nvm_eeprom_spi)
1627 eecd |= E1000_EECD_DO;
1628
1629 do {
1630 eecd &= ~E1000_EECD_DI;
1631
1632 if (data & mask)
1633 eecd |= E1000_EECD_DI;
1634
1635 ew32(EECD, eecd);
1636 e1e_flush();
1637
1638 udelay(nvm->delay_usec);
1639
1640 e1000_raise_eec_clk(hw, &eecd);
1641 e1000_lower_eec_clk(hw, &eecd);
1642
1643 mask >>= 1;
1644 } while (mask);
1645
1646 eecd &= ~E1000_EECD_DI;
1647 ew32(EECD, eecd);
1648 }
1649
1650 /**
1651 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
1652 * @hw: pointer to the HW structure
1653 * @count: number of bits to shift in
1654 *
1655 * In order to read a register from the EEPROM, we need to shift 'count' bits
1656 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
1657 * the EEPROM (setting the SK bit), and then reading the value of the data out
1658 * "DO" bit. During this "shifting in" process the data in "DI" bit should
1659 * always be clear.
1660 **/
1661 static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
1662 {
1663 u32 eecd;
1664 u32 i;
1665 u16 data;
1666
1667 eecd = er32(EECD);
1668
1669 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
1670 data = 0;
1671
1672 for (i = 0; i < count; i++) {
1673 data <<= 1;
1674 e1000_raise_eec_clk(hw, &eecd);
1675
1676 eecd = er32(EECD);
1677
1678 eecd &= ~E1000_EECD_DI;
1679 if (eecd & E1000_EECD_DO)
1680 data |= 1;
1681
1682 e1000_lower_eec_clk(hw, &eecd);
1683 }
1684
1685 return data;
1686 }
1687
1688 /**
1689 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
1690 * @hw: pointer to the HW structure
1691 * @ee_reg: EEPROM flag for polling
1692 *
1693 * Polls the EEPROM status bit for either read or write completion based
1694 * upon the value of 'ee_reg'.
1695 **/
1696 s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
1697 {
1698 u32 attempts = 100000;
1699 u32 i, reg = 0;
1700
1701 for (i = 0; i < attempts; i++) {
1702 if (ee_reg == E1000_NVM_POLL_READ)
1703 reg = er32(EERD);
1704 else
1705 reg = er32(EEWR);
1706
1707 if (reg & E1000_NVM_RW_REG_DONE)
1708 return 0;
1709
1710 udelay(5);
1711 }
1712
1713 return -E1000_ERR_NVM;
1714 }
1715
1716 /**
1717 * e1000e_acquire_nvm - Generic request for access to EEPROM
1718 * @hw: pointer to the HW structure
1719 *
1720 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1721 * Return successful if access grant bit set, else clear the request for
1722 * EEPROM access and return -E1000_ERR_NVM (-1).
1723 **/
1724 s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1725 {
1726 u32 eecd = er32(EECD);
1727 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1728
1729 ew32(EECD, eecd | E1000_EECD_REQ);
1730 eecd = er32(EECD);
1731
1732 while (timeout) {
1733 if (eecd & E1000_EECD_GNT)
1734 break;
1735 udelay(5);
1736 eecd = er32(EECD);
1737 timeout--;
1738 }
1739
1740 if (!timeout) {
1741 eecd &= ~E1000_EECD_REQ;
1742 ew32(EECD, eecd);
1743 hw_dbg(hw, "Could not acquire NVM grant\n");
1744 return -E1000_ERR_NVM;
1745 }
1746
1747 return 0;
1748 }
1749
1750 /**
1751 * e1000_standby_nvm - Return EEPROM to standby state
1752 * @hw: pointer to the HW structure
1753 *
1754 * Return the EEPROM to a standby state.
1755 **/
1756 static void e1000_standby_nvm(struct e1000_hw *hw)
1757 {
1758 struct e1000_nvm_info *nvm = &hw->nvm;
1759 u32 eecd = er32(EECD);
1760
1761 if (nvm->type == e1000_nvm_eeprom_spi) {
1762 /* Toggle CS to flush commands */
1763 eecd |= E1000_EECD_CS;
1764 ew32(EECD, eecd);
1765 e1e_flush();
1766 udelay(nvm->delay_usec);
1767 eecd &= ~E1000_EECD_CS;
1768 ew32(EECD, eecd);
1769 e1e_flush();
1770 udelay(nvm->delay_usec);
1771 }
1772 }
1773
1774 /**
1775 * e1000_stop_nvm - Terminate EEPROM command
1776 * @hw: pointer to the HW structure
1777 *
1778 * Terminates the current command by inverting the EEPROM's chip select pin.
1779 **/
1780 static void e1000_stop_nvm(struct e1000_hw *hw)
1781 {
1782 u32 eecd;
1783
1784 eecd = er32(EECD);
1785 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1786 /* Pull CS high */
1787 eecd |= E1000_EECD_CS;
1788 e1000_lower_eec_clk(hw, &eecd);
1789 }
1790 }
1791
1792 /**
1793 * e1000e_release_nvm - Release exclusive access to EEPROM
1794 * @hw: pointer to the HW structure
1795 *
1796 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
1797 **/
1798 void e1000e_release_nvm(struct e1000_hw *hw)
1799 {
1800 u32 eecd;
1801
1802 e1000_stop_nvm(hw);
1803
1804 eecd = er32(EECD);
1805 eecd &= ~E1000_EECD_REQ;
1806 ew32(EECD, eecd);
1807 }
1808
1809 /**
1810 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
1811 * @hw: pointer to the HW structure
1812 *
1813  * Sets up the EEPROM for reading and writing.
1814 **/
1815 static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1816 {
1817 struct e1000_nvm_info *nvm = &hw->nvm;
1818 u32 eecd = er32(EECD);
1819 u16 timeout = 0;
1820 u8 spi_stat_reg;
1821
1822 if (nvm->type == e1000_nvm_eeprom_spi) {
1823 /* Clear SK and CS */
1824 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1825 ew32(EECD, eecd);
1826 udelay(1);
1827 timeout = NVM_MAX_RETRY_SPI;
1828
1829 /* Read "Status Register" repeatedly until the LSB is cleared.
1830 * The EEPROM will signal that the command has been completed
1831 * by clearing bit 0 of the internal status register. If it's
1832 * not cleared within 'timeout', then error out. */
1833 while (timeout) {
1834 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1835 hw->nvm.opcode_bits);
1836 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
1837 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
1838 break;
1839
1840 udelay(5);
1841 e1000_standby_nvm(hw);
1842 timeout--;
1843 }
1844
1845 if (!timeout) {
1846 hw_dbg(hw, "SPI NVM Status error\n");
1847 return -E1000_ERR_NVM;
1848 }
1849 }
1850
1851 return 0;
1852 }
1853
1854 /**
1855  * e1000e_read_nvm_spi - Read EEPROM word(s) using SPI
1856 * @hw: pointer to the HW structure
1857 * @offset: offset of word in the EEPROM to read
1858 * @words: number of words to read
1859 * @data: word(s) read from the EEPROM
1860 *
1861 * Reads 16-bit words from the EEPROM over the SPI interface.
1862 **/
1863 s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1864 {
1865 struct e1000_nvm_info *nvm = &hw->nvm;
1866 u32 i = 0;
1867 s32 ret_val;
1868 u16 word_in;
1869 u8 read_opcode = NVM_READ_OPCODE_SPI;
1870
1871 /* A check for invalid values: offset too large, too many words,
1872 * and not enough words. */
1873 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1874 (words == 0)) {
1875 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1876 return -E1000_ERR_NVM;
1877 }
1878
1879 ret_val = nvm->ops.acquire_nvm(hw);
1880 if (ret_val)
1881 return ret_val;
1882
1883 ret_val = e1000_ready_nvm_eeprom(hw);
1884 if (ret_val) {
1885 nvm->ops.release_nvm(hw);
1886 return ret_val;
1887 }
1888
1889 e1000_standby_nvm(hw);
1890
1891 if ((nvm->address_bits == 8) && (offset >= 128))
1892 read_opcode |= NVM_A8_OPCODE_SPI;
1893
1894 /* Send the READ command (opcode + addr) */
1895 e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
1896 e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
1897
1898 /* Read the data. SPI NVMs increment the address with each byte
1899 * read and will roll over if reading beyond the end. This allows
1900 * us to read the whole NVM from any offset */
1901 for (i = 0; i < words; i++) {
1902 word_in = e1000_shift_in_eec_bits(hw, 16);
1903 data[i] = (word_in >> 8) | (word_in << 8);
1904 }
1905
1906 nvm->ops.release_nvm(hw);
1907 return 0;
1908 }
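
/*
 * Illustrative sketch, not part of the driver (guarded out).  It shows the
 * byte swap performed above: the first byte clocked in over SPI lands in
 * the high byte of the 16-bit shift value, so the swap restores the
 * little-endian NVM word (e.g. bytes 0x34, 0x12 arrive as 0x3412 and are
 * stored as 0x1234).  The helper name is hypothetical.
 */
#if 0
static u16 e1000_example_swap_nvm_word(u16 word_in)
{
	return (word_in >> 8) | (word_in << 8);
}
#endif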
1909
1910 /**
1911 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
1912 * @hw: pointer to the HW structure
1913 * @offset: offset of word in the EEPROM to read
1914 * @words: number of words to read
1915 * @data: word(s) read from the EEPROM
1916 *
1917 * Reads 16-bit words from the EEPROM using the EERD register.
1918 **/
1919 s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1920 {
1921 struct e1000_nvm_info *nvm = &hw->nvm;
1922 u32 i, eerd = 0;
1923 s32 ret_val = 0;
1924
1925 /* A check for invalid values: offset too large, too many words,
1926 * and not enough words. */
1927 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1928 (words == 0)) {
1929 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1930 return -E1000_ERR_NVM;
1931 }
1932
1933 for (i = 0; i < words; i++) {
1934 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
1935 E1000_NVM_RW_REG_START;
1936
1937 ew32(EERD, eerd);
1938 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
1939 if (ret_val)
1940 break;
1941
1942 data[i] = (er32(EERD) >>
1943 E1000_NVM_RW_REG_DATA);
1944 }
1945
1946 return ret_val;
1947 }
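
/*
 * Illustrative sketch, not part of the driver (guarded out).  A minimal
 * caller reading a single word (here the checksum word) through the EERD
 * path above; the function name is hypothetical and the error handling
 * mirrors the driver's own hw_dbg pattern.
 */
#if 0
static s32 e1000_example_read_checksum_word(struct e1000_hw *hw, u16 *word)
{
	s32 ret_val;

	ret_val = e1000e_read_nvm_eerd(hw, NVM_CHECKSUM_REG, 1, word);
	if (ret_val)
		hw_dbg(hw, "NVM Read Error\n");

	return ret_val;
}
#endif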
1948
1949 /**
1950 * e1000e_write_nvm_spi - Write to EEPROM using SPI
1951 * @hw: pointer to the HW structure
1952 * @offset: offset within the EEPROM to be written to
1953 * @words: number of words to write
1954 * @data: 16 bit word(s) to be written to the EEPROM
1955 *
1956 * Writes data to EEPROM at offset using SPI interface.
1957 *
1958 * If e1000e_update_nvm_checksum is not called after this function, the
1959 * EEPROM will most likely contain an invalid checksum.
1960 **/
1961 s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1962 {
1963 struct e1000_nvm_info *nvm = &hw->nvm;
1964 s32 ret_val;
1965 u16 widx = 0;
1966
1967 /* A check for invalid values: offset too large, too many words,
1968 * and not enough words. */
1969 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1970 (words == 0)) {
1971 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1972 return -E1000_ERR_NVM;
1973 }
1974
1975 ret_val = nvm->ops.acquire_nvm(hw);
1976 if (ret_val)
1977 return ret_val;
1978
1979 msleep(10);
1980
1981 while (widx < words) {
1982 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
1983
1984 ret_val = e1000_ready_nvm_eeprom(hw);
1985 if (ret_val) {
1986 nvm->ops.release_nvm(hw);
1987 return ret_val;
1988 }
1989
1990 e1000_standby_nvm(hw);
1991
1992 /* Send the WRITE ENABLE command (8 bit opcode) */
1993 e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
1994 nvm->opcode_bits);
1995
1996 e1000_standby_nvm(hw);
1997
1998 /* Some SPI eeproms use the 8th address bit embedded in the
1999 * opcode */
2000 if ((nvm->address_bits == 8) && (offset >= 128))
2001 write_opcode |= NVM_A8_OPCODE_SPI;
2002
2003 /* Send the Write command (8-bit opcode + addr) */
2004 e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
2005 e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
2006 nvm->address_bits);
2007
2008 /* Loop to allow for up to whole page write of eeprom */
2009 while (widx < words) {
2010 u16 word_out = data[widx];
2011 word_out = (word_out >> 8) | (word_out << 8);
2012 e1000_shift_out_eec_bits(hw, word_out, 16);
2013 widx++;
2014
2015 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
2016 e1000_standby_nvm(hw);
2017 break;
2018 }
2019 }
2020 }
2021
2022 msleep(10);
2023 return 0;
2024 }
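
/*
 * Illustrative sketch, not part of the driver (guarded out).  The inner
 * loop above ends a write burst at every EEPROM page boundary; for example,
 * with a 32 byte page and a starting word offset of 6 (byte address 12),
 * words 6..15 go out in the first burst and the next burst re-issues the
 * WRITE opcode at byte address 32.  The helper name is hypothetical.
 */
#if 0
static bool e1000_example_starts_new_page(struct e1000_nvm_info *nvm,
					  u16 next_word)
{
	/* true when the next word to be written begins a new page */
	return ((next_word * 2) % nvm->page_size) == 0;
}
#endif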
2025
2026 /**
2027 * e1000e_read_mac_addr - Read device MAC address
2028 * @hw: pointer to the HW structure
2029 *
2030 * Reads the device MAC address from the EEPROM and stores the value.
2031 * Since devices with two ports use the same EEPROM, we flip the last
2032 * bit in the MAC address for the second port.
2033 **/
2034 s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2035 {
2036 s32 ret_val;
2037 u16 offset, nvm_data, i;
2038 u16 mac_addr_offset = 0;
2039
2040 if (hw->mac.type == e1000_82571) {
2041 /* Check for an alternate MAC address. An alternate MAC
2042 * address can be setup by pre-boot software and must be
2043 * treated like a permanent address and must override the
2044 * actual permanent MAC address. */
2045 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2046 &mac_addr_offset);
2047 if (ret_val) {
2048 hw_dbg(hw, "NVM Read Error\n");
2049 return ret_val;
2050 }
2051 if (mac_addr_offset == 0xFFFF)
2052 mac_addr_offset = 0;
2053
2054 if (mac_addr_offset) {
2055 if (hw->bus.func == E1000_FUNC_1)
2056 mac_addr_offset += ETH_ALEN/sizeof(u16);
2057
2058 /* make sure we have a valid mac address here
2059 * before using it */
2060 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2061 &nvm_data);
2062 if (ret_val) {
2063 hw_dbg(hw, "NVM Read Error\n");
2064 return ret_val;
2065 }
2066 if (nvm_data & 0x0001)
2067 mac_addr_offset = 0;
2068 }
2069
2070 if (mac_addr_offset)
2071 hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
2072 }
2073
2074 for (i = 0; i < ETH_ALEN; i += 2) {
2075 offset = mac_addr_offset + (i >> 1);
2076 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2077 if (ret_val) {
2078 hw_dbg(hw, "NVM Read Error\n");
2079 return ret_val;
2080 }
2081 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2082 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2083 }
2084
2085 /* Flip last bit of mac address if we're on second port */
2086 if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
2087 hw->mac.perm_addr[5] ^= 1;
2088
2089 for (i = 0; i < ETH_ALEN; i++)
2090 hw->mac.addr[i] = hw->mac.perm_addr[i];
2091
2092 return 0;
2093 }
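
/*
 * Illustrative sketch, not part of the driver (guarded out).  Each NVM word
 * holds two MAC address bytes, low byte first, so a word value of 0xAA00
 * yields perm_addr[i] = 0x00 and perm_addr[i + 1] = 0xAA.  On the second
 * port of a dual-port device (with no alternate address) the low bit of the
 * last byte is then flipped, e.g. ...:05 becomes ...:04.  The helper name
 * is hypothetical.
 */
#if 0
static void e1000_example_unpack_mac_word(u16 nvm_data, u8 *lo, u8 *hi)
{
	*lo = (u8)(nvm_data & 0xFF);	/* stored at perm_addr[i] */
	*hi = (u8)(nvm_data >> 8);	/* stored at perm_addr[i + 1] */
}
#endif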
2094
2095 /**
2096 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
2097 * @hw: pointer to the HW structure
2098 *
2099 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2100 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2101 **/
2102 s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2103 {
2104 s32 ret_val;
2105 u16 checksum = 0;
2106 u16 i, nvm_data;
2107
2108 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2109 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2110 if (ret_val) {
2111 hw_dbg(hw, "NVM Read Error\n");
2112 return ret_val;
2113 }
2114 checksum += nvm_data;
2115 }
2116
2117 if (checksum != (u16) NVM_SUM) {
2118 hw_dbg(hw, "NVM Checksum Invalid\n");
2119 return -E1000_ERR_NVM;
2120 }
2121
2122 return 0;
2123 }
2124
2125 /**
2126 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
2127 * @hw: pointer to the HW structure
2128 *
2129 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2130 * up to the checksum. Then calculates the EEPROM checksum and writes the
2131 * value to the EEPROM.
2132 **/
2133 s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2134 {
2135 s32 ret_val;
2136 u16 checksum = 0;
2137 u16 i, nvm_data;
2138
2139 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2140 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2141 if (ret_val) {
2142 hw_dbg(hw, "NVM Read Error while updating checksum.\n");
2143 return ret_val;
2144 }
2145 checksum += nvm_data;
2146 }
2147 checksum = (u16) NVM_SUM - checksum;
2148 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2149 if (ret_val)
2150 hw_dbg(hw, "NVM Write Error while updating checksum.\n");
2151
2152 return ret_val;
2153 }
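
/*
 * Illustrative sketch, not part of the driver (guarded out).  The update
 * routine stores checksum = NVM_SUM - sum(words 0 .. NVM_CHECKSUM_REG - 1),
 * so the sum computed by the validate routine over words 0 .. NVM_CHECKSUM_REG
 * (which includes the checksum word) comes out to exactly NVM_SUM.  A
 * hypothetical caller combining the two:
 */
#if 0
static s32 e1000_example_fix_checksum(struct e1000_hw *hw)
{
	if (!e1000e_validate_nvm_checksum_generic(hw))
		return 0;	/* checksum already valid */

	return e1000e_update_nvm_checksum_generic(hw);
}
#endif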
2154
2155 /**
2156 * e1000e_reload_nvm - Reloads EEPROM
2157 * @hw: pointer to the HW structure
2158 *
2159 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
2160 * extended control register.
2161 **/
2162 void e1000e_reload_nvm(struct e1000_hw *hw)
2163 {
2164 u32 ctrl_ext;
2165
2166 udelay(10);
2167 ctrl_ext = er32(CTRL_EXT);
2168 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
2169 ew32(CTRL_EXT, ctrl_ext);
2170 e1e_flush();
2171 }
2172
2173 /**
2174 * e1000_calculate_checksum - Calculate checksum for buffer
2175 * @buffer: pointer to the buffer to checksum
2176 * @length: number of bytes to include in the checksum
2177 *
2178 * Calculates the checksum for a buffer of the specified length.  The value
2179 * returned is the two's complement of the byte sum.
2180 **/
2181 static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
2182 {
2183 u32 i;
2184 u8 sum = 0;
2185
2186 if (!buffer)
2187 return 0;
2188
2189 for (i = 0; i < length; i++)
2190 sum += buffer[i];
2191
2192 return (u8) (0 - sum);
2193 }
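
/*
 * Illustrative sketch, not part of the driver (guarded out).  The value
 * returned above is the two's complement of the byte sum, so a buffer plus
 * its checksum always sums to zero modulo 256: for bytes { 0x10, 0x20, 0x30 }
 * the sum is 0x60, the checksum is 0xA0, and 0x60 + 0xA0 wraps to 0x00.
 * The helper name is hypothetical.
 */
#if 0
static bool e1000_example_checksum_ok(u8 *buffer, u32 length, u8 checksum)
{
	u32 i;
	u8 sum = checksum;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return sum == 0;
}
#endif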
2194
2195 /**
2196 * e1000_mng_enable_host_if - Checks that the host interface is enabled
2197 * @hw: pointer to the HW structure
2198 *
2199 * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
2200 *
2201 * This function checks whether the HOST IF is enabled for command operation
2202 * and also checks whether the previous command has completed.  It busy waits
2203 * if the previous command has not yet completed.
2204 **/
2205 static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2206 {
2207 u32 hicr;
2208 u8 i;
2209
2210 /* Check that the host interface is enabled. */
2211 hicr = er32(HICR);
2212 if ((hicr & E1000_HICR_EN) == 0) {
2213 hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
2214 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2215 }
2216 /* check that the previous command has completed */
2217 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
2218 hicr = er32(HICR);
2219 if (!(hicr & E1000_HICR_C))
2220 break;
2221 mdelay(1);
2222 }
2223
2224 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
2225 hw_dbg(hw, "Previous command timeout failed.\n");
2226 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2227 }
2228
2229 return 0;
2230 }
2231
2232 /**
2233 * e1000e_check_mng_mode - check management mode
2234 * @hw: pointer to the HW structure
2235 *
2236 * Reads the firmware semaphore register and returns true (>0) if
2237 * manageability is enabled, else false (0).
2238 **/
2239 bool e1000e_check_mng_mode(struct e1000_hw *hw)
2240 {
2241 u32 fwsm = er32(FWSM);
2242
2243 return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab;
2244 }
2245
2246 /**
2247 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX
2248 * @hw: pointer to the HW structure
2249 *
2250 * Enables packet filtering on transmit packets if manageability is enabled
2251 * and host interface is enabled.
2252 **/
2253 bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2254 {
2255 struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
2256 u32 *buffer = (u32 *)&hw->mng_cookie;
2257 u32 offset;
2258 s32 ret_val, hdr_csum, csum;
2259 u8 i, len;
2260
2261 /* No manageability, no filtering */
2262 if (!e1000e_check_mng_mode(hw)) {
2263 hw->mac.tx_pkt_filtering = 0;
2264 return 0;
2265 }
2266
2267 /* If we can't read from the host interface for whatever
2268 * reason, disable filtering.
2269 */
2270 ret_val = e1000_mng_enable_host_if(hw);
2271 if (ret_val != 0) {
2272 hw->mac.tx_pkt_filtering = 0;
2273 return ret_val;
2274 }
2275
2276 /* Read in the header. Length and offset are in dwords. */
2277 len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
2278 offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
2279 for (i = 0; i < len; i++)
2280 *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
2281 hdr_csum = hdr->checksum;
2282 hdr->checksum = 0;
2283 csum = e1000_calculate_checksum((u8 *)hdr,
2284 E1000_MNG_DHCP_COOKIE_LENGTH);
2285 /* If either the checksums or signature don't match, then
2286 * the cookie area isn't considered valid, in which case we
2287 * take the safe route of assuming Tx filtering is enabled.
2288 */
2289 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
2290 hw->mac.tx_pkt_filtering = 1;
2291 return 1;
2292 }
2293
2294 /* Cookie area is valid, make the final check for filtering. */
2295 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
2296 hw->mac.tx_pkt_filtering = 0;
2297 return 0;
2298 }
2299
2300 hw->mac.tx_pkt_filtering = 1;
2301 return 1;
2302 }
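
/*
 * Illustrative sketch, not part of the driver (guarded out).  It restates
 * the verification pattern used above: save the stored checksum, zero the
 * field, recompute over the whole structure and compare.  The structure and
 * helper below are hypothetical; the layout is sized to avoid padding bytes.
 */
#if 0
struct e1000_example_blob {
	u32 signature;
	u8 data[11];
	u8 checksum;
};

static bool e1000_example_blob_valid(struct e1000_example_blob *blob)
{
	u8 saved = blob->checksum;
	u8 csum;

	blob->checksum = 0;
	csum = e1000_calculate_checksum((u8 *)blob, sizeof(*blob));
	blob->checksum = saved;

	return csum == saved;
}
#endif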
2303
2304 /**
2305 * e1000_mng_write_cmd_header - Writes manageability command header
2306 * @hw: pointer to the HW structure
2307 * @hdr: pointer to the host interface command header
2308 *
2309 * Writes the command header after performing the checksum calculation.
2310 **/
2311 static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2312 struct e1000_host_mng_command_header *hdr)
2313 {
2314 u16 i, length = sizeof(struct e1000_host_mng_command_header);
2315
2316 /* Write the whole command header structure with new checksum. */
2317
2318 hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
2319
2320 length >>= 2;
2321 /* Write the relevant command block into the ram area. */
2322 for (i = 0; i < length; i++) {
2323 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
2324 *((u32 *) hdr + i));
2325 e1e_flush();
2326 }
2327
2328 return 0;
2329 }
2330
2331 /**
2332 * e1000_mng_host_if_write - Writes to the manageability host interface
2333 * @hw: pointer to the HW structure
2334 * @buffer: pointer to the host interface buffer
2335 * @length: size of the buffer
2336 * @offset: location in the buffer to write to
2337 * @sum: sum of the data (not checksum)
2338 *
2339 * This function writes the buffer contents to the host interface at the given
2340 * offset.  It handles alignment so the writes are done in the most efficient
2341 * way, and it accumulates the byte sum of the buffer in the *sum parameter.
2342 **/
2343 static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
2344 u16 length, u16 offset, u8 *sum)
2345 {
2346 u8 *tmp;
2347 u8 *bufptr = buffer;
2348 u32 data = 0;
2349 u16 remaining, i, j, prev_bytes;
2350
2351 /* sum is only the sum of the data; it is not a checksum */
2352
2353 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
2354 return -E1000_ERR_PARAM;
2355
2356 tmp = (u8 *)&data;
2357 prev_bytes = offset & 0x3;
2358 offset >>= 2;
2359
2360 if (prev_bytes) {
2361 data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
2362 for (j = prev_bytes; j < sizeof(u32); j++) {
2363 *(tmp + j) = *bufptr++;
2364 *sum += *(tmp + j);
2365 }
2366 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
2367 length -= j - prev_bytes;
2368 offset++;
2369 }
2370
2371 remaining = length & 0x3;
2372 length -= remaining;
2373
2374 /* Calculate length in DWORDs */
2375 length >>= 2;
2376
2377 /* The device driver writes the relevant command block into the
2378 * ram area. */
2379 for (i = 0; i < length; i++) {
2380 for (j = 0; j < sizeof(u32); j++) {
2381 *(tmp + j) = *bufptr++;
2382 *sum += *(tmp + j);
2383 }
2384
2385 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2386 }
2387 if (remaining) {
2388 for (j = 0; j < sizeof(u32); j++) {
2389 if (j < remaining)
2390 *(tmp + j) = *bufptr++;
2391 else
2392 *(tmp + j) = 0;
2393
2394 *sum += *(tmp + j);
2395 }
2396 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2397 }
2398
2399 return 0;
2400 }
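
/*
 * Illustrative sketch, not part of the driver (guarded out).  Worked example
 * of the alignment handling above: writing 7 bytes at offset 0 produces one
 * full dword write (bytes 0..3) and one trailing dword in which bytes 4..6
 * are data and the last byte is zero padding; writing 10 bytes at offset 6
 * first read-modify-writes the upper two bytes of dword 1, then writes
 * dwords 2 and 3 in full.  The caller below is hypothetical.
 */
#if 0
static s32 e1000_example_host_if_write(struct e1000_hw *hw, u8 *buf,
				       u16 len, u16 offset)
{
	u8 sum = 0;
	s32 ret_val;

	ret_val = e1000_mng_host_if_write(hw, buf, len, offset, &sum);
	/* on success, sum holds the byte sum of buf[0 .. len - 1] */
	return ret_val;
}
#endif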
2401
2402 /**
2403 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
2404 * @hw: pointer to the HW structure
2405 * @buffer: pointer to the DHCP information buffer
2406 * @length: size of the buffer
2407 *
2408 * Writes the DHCP information to the host interface.
2409 **/
2410 s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2411 {
2412 struct e1000_host_mng_command_header hdr;
2413 s32 ret_val;
2414 u32 hicr;
2415
2416 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
2417 hdr.command_length = length;
2418 hdr.reserved1 = 0;
2419 hdr.reserved2 = 0;
2420 hdr.checksum = 0;
2421
2422 /* Enable the host interface */
2423 ret_val = e1000_mng_enable_host_if(hw);
2424 if (ret_val)
2425 return ret_val;
2426
2427 /* Populate the host interface with the contents of "buffer". */
2428 ret_val = e1000_mng_host_if_write(hw, buffer, length,
2429 sizeof(hdr), &(hdr.checksum));
2430 if (ret_val)
2431 return ret_val;
2432
2433 /* Write the manageability command header */
2434 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
2435 if (ret_val)
2436 return ret_val;
2437
2438 /* Tell the ARC a new command is pending. */
2439 hicr = er32(HICR);
2440 ew32(HICR, hicr | E1000_HICR_C);
2441
2442 return 0;
2443 }
2444
2445 /**
2446 * e1000e_enable_mng_pass_thru - Enable processing of ARPs
2447 * @hw: pointer to the HW structure
2448 *
2449 * Verifies whether the hardware needs to allow ARPs to be processed by the host.
2450 **/
2451 bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2452 {
2453 u32 manc;
2454 u32 fwsm, factps;
2455 bool ret_val = 0;
2456
2457 manc = er32(MANC);
2458
2459 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
2460 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
2461 return ret_val;
2462
2463 if (hw->mac.arc_subsystem_valid) {
2464 fwsm = er32(FWSM);
2465 factps = er32(FACTPS);
2466
2467 if (!(factps & E1000_FACTPS_MNGCG) &&
2468 ((fwsm & E1000_FWSM_MODE_MASK) ==
2469 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2470 ret_val = 1;
2471 return ret_val;
2472 }
2473 } else {
2474 if ((manc & E1000_MANC_SMBUS_EN) &&
2475 !(manc & E1000_MANC_ASF_EN)) {
2476 ret_val = 1;
2477 return ret_val;
2478 }
2479 }
2480
2481 return ret_val;
2482 }
2483
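/**
 * e1000e_read_part_num - Read device part number
 * @hw: pointer to the HW structure
 * @part_num: pointer to device part number
 *
 * Reads the product board assembly (PBA) number from the EEPROM and stores
 * the value in part_num.
 **/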
2484 s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
2485 {
2486 s32 ret_val;
2487 u16 nvm_data;
2488
2489 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2490 if (ret_val) {
2491 hw_dbg(hw, "NVM Read Error\n");
2492 return ret_val;
2493 }
2494 *part_num = (u32)(nvm_data << 16);
2495
2496 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2497 if (ret_val) {
2498 hw_dbg(hw, "NVM Read Error\n");
2499 return ret_val;
2500 }
2501 *part_num |= nvm_data;
2502
2503 return 0;
2504 }
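
/*
 * Illustrative sketch, not part of the driver (guarded out).  The two PBA
 * words are packed high word first, so NVM words 0x1234 and 0x5678 produce
 * a part number of 0x12345678.  The helper below, which undoes that packing,
 * is hypothetical.
 */
#if 0
static void e1000_example_split_part_num(u32 part_num, u16 *word0, u16 *word1)
{
	*word0 = (u16)(part_num >> 16);		/* word read from NVM_PBA_OFFSET_0 */
	*word1 = (u16)(part_num & 0xFFFF);	/* word read from NVM_PBA_OFFSET_1 */
}
#endif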