]> git.proxmox.com Git - ceph.git/blob - ceph/src/seastar/dpdk/drivers/net/ixgbe/base/ixgbe_common.c
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / seastar / dpdk / drivers / net / ixgbe / base / ixgbe_common.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #include "ixgbe_api.h"
39
40 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
41 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
42 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
43 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
44 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
45 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
46 u16 count);
47 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
48 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
51
52 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
54 u16 *san_mac_offset);
55 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
60 u16 offset);
61
/**
 * ixgbe_init_ops_generic - Inits function ptrs
 * @hw: pointer to the hardware structure
 *
 * Initialize the generic (MAC-family-independent) function pointers for
 * EEPROM, MAC, LED, address filtering, flow control and link operations.
 * Pointers set to NULL have no generic implementation and must be filled
 * in by the MAC-family-specific init before use.
 *
 * Always returns IXGBE_SUCCESS.
 **/
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	DEBUGFUNC("ixgbe_init_ops_generic");

	/* EEPROM */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
	if (eec & IXGBE_EEC_PRES) {
		eeprom->ops.read = ixgbe_read_eerd_generic;
		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
	} else {
		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
		eeprom->ops.read_buffer =
				 ixgbe_read_eeprom_buffer_bit_bang_generic;
	}
	/* Writes always go through the bit-bang path */
	eeprom->ops.write = ixgbe_write_eeprom_generic;
	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
	eeprom->ops.validate_checksum =
				      ixgbe_validate_eeprom_checksum_generic;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;

	/* MAC */
	mac->ops.init_hw = ixgbe_init_hw_generic;
	mac->ops.reset_hw = NULL;			/* no generic impl */
	mac->ops.start_hw = ixgbe_start_hw_generic;
	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
	mac->ops.get_media_type = NULL;			/* no generic impl */
	mac->ops.get_supported_physical_layer = NULL;	/* no generic impl */
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
	mac->ops.prot_autoc_read = prot_autoc_read_generic;
	mac->ops.prot_autoc_write = prot_autoc_write_generic;

	/* LEDs */
	mac->ops.led_on = ixgbe_led_on_generic;
	mac->ops.led_off = ixgbe_led_off_generic;
	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
	mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_rar = ixgbe_set_rar_generic;
	mac->ops.clear_rar = ixgbe_clear_rar_generic;
	mac->ops.insert_mac_addr = NULL;		/* no generic impl */
	mac->ops.set_vmdq = NULL;			/* no generic impl */
	mac->ops.clear_vmdq = NULL;			/* no generic impl */
	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
	mac->ops.enable_mc = ixgbe_enable_mc_generic;
	mac->ops.disable_mc = ixgbe_disable_mc_generic;
	mac->ops.clear_vfta = NULL;			/* no generic impl */
	mac->ops.set_vfta = NULL;			/* no generic impl */
	mac->ops.set_vlvf = NULL;			/* no generic impl */
	mac->ops.init_uta_tables = NULL;		/* no generic impl */
	mac->ops.enable_rx = ixgbe_enable_rx_generic;
	mac->ops.disable_rx = ixgbe_disable_rx_generic;

	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_generic;
	mac->ops.setup_fc = ixgbe_setup_fc_generic;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg;

	/* Link - no generic implementations */
	mac->ops.get_link_capabilities = NULL;
	mac->ops.setup_link = NULL;
	mac->ops.check_link = NULL;
	mac->ops.dmac_config = NULL;
	mac->ops.dmac_update_tcs = NULL;
	mac->ops.dmac_config_tcs = NULL;

	return IXGBE_SUCCESS;
}
151
152 /**
153 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
154 * of flow control
155 * @hw: pointer to hardware structure
156 *
157 * This function returns true if the device supports flow control
158 * autonegotiation, and false if it does not.
159 *
160 **/
161 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
162 {
163 bool supported = false;
164 ixgbe_link_speed speed;
165 bool link_up;
166
167 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
168
169 switch (hw->phy.media_type) {
170 case ixgbe_media_type_fiber_qsfp:
171 case ixgbe_media_type_fiber:
172 /* flow control autoneg black list */
173 switch (hw->device_id) {
174 case IXGBE_DEV_ID_X550EM_A_SFP:
175 case IXGBE_DEV_ID_X550EM_A_SFP_N:
176 case IXGBE_DEV_ID_X550EM_A_QSFP:
177 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
178 supported = false;
179 break;
180 default:
181 hw->mac.ops.check_link(hw, &speed, &link_up, false);
182 /* if link is down, assume supported */
183 if (link_up)
184 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
185 true : false;
186 else
187 supported = true;
188 }
189
190 break;
191 case ixgbe_media_type_backplane:
192 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
193 supported = false;
194 else
195 supported = true;
196 break;
197 case ixgbe_media_type_copper:
198 /* only some copper devices support flow control autoneg */
199 switch (hw->device_id) {
200 case IXGBE_DEV_ID_82599_T3_LOM:
201 case IXGBE_DEV_ID_X540T:
202 case IXGBE_DEV_ID_X540T1:
203 case IXGBE_DEV_ID_X550T:
204 case IXGBE_DEV_ID_X550T1:
205 case IXGBE_DEV_ID_X550EM_X_10G_T:
206 case IXGBE_DEV_ID_X550EM_A_10G_T:
207 case IXGBE_DEV_ID_X550EM_A_1G_T:
208 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
209 supported = true;
210 break;
211 default:
212 supported = false;
213 }
214 default:
215 break;
216 }
217
218 if (!supported)
219 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
220 "Device %x does not support flow control autoneg",
221 hw->device_id);
222 return supported;
223 }
224
225 /**
226 * ixgbe_setup_fc_generic - Set up flow control
227 * @hw: pointer to hardware structure
228 *
229 * Called at init time to set up flow control.
230 **/
231 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
232 {
233 s32 ret_val = IXGBE_SUCCESS;
234 u32 reg = 0, reg_bp = 0;
235 u16 reg_cu = 0;
236 bool locked = false;
237
238 DEBUGFUNC("ixgbe_setup_fc_generic");
239
240 /* Validate the requested mode */
241 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
242 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
243 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
244 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
245 goto out;
246 }
247
248 /*
249 * 10gig parts do not have a word in the EEPROM to determine the
250 * default flow control setting, so we explicitly set it to full.
251 */
252 if (hw->fc.requested_mode == ixgbe_fc_default)
253 hw->fc.requested_mode = ixgbe_fc_full;
254
255 /*
256 * Set up the 1G and 10G flow control advertisement registers so the
257 * HW will be able to do fc autoneg once the cable is plugged in. If
258 * we link at 10G, the 1G advertisement is harmless and vice versa.
259 */
260 switch (hw->phy.media_type) {
261 case ixgbe_media_type_backplane:
262 /* some MAC's need RMW protection on AUTOC */
263 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
264 if (ret_val != IXGBE_SUCCESS)
265 goto out;
266
267 /* only backplane uses autoc so fall though */
268 case ixgbe_media_type_fiber_qsfp:
269 case ixgbe_media_type_fiber:
270 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
271
272 break;
273 case ixgbe_media_type_copper:
274 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
275 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
276 break;
277 default:
278 break;
279 }
280
281 /*
282 * The possible values of fc.requested_mode are:
283 * 0: Flow control is completely disabled
284 * 1: Rx flow control is enabled (we can receive pause frames,
285 * but not send pause frames).
286 * 2: Tx flow control is enabled (we can send pause frames but
287 * we do not support receiving pause frames).
288 * 3: Both Rx and Tx flow control (symmetric) are enabled.
289 * other: Invalid.
290 */
291 switch (hw->fc.requested_mode) {
292 case ixgbe_fc_none:
293 /* Flow control completely disabled by software override. */
294 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
295 if (hw->phy.media_type == ixgbe_media_type_backplane)
296 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
297 IXGBE_AUTOC_ASM_PAUSE);
298 else if (hw->phy.media_type == ixgbe_media_type_copper)
299 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
300 break;
301 case ixgbe_fc_tx_pause:
302 /*
303 * Tx Flow control is enabled, and Rx Flow control is
304 * disabled by software override.
305 */
306 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
307 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
308 if (hw->phy.media_type == ixgbe_media_type_backplane) {
309 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
310 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
311 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
312 reg_cu |= IXGBE_TAF_ASM_PAUSE;
313 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
314 }
315 break;
316 case ixgbe_fc_rx_pause:
317 /*
318 * Rx Flow control is enabled and Tx Flow control is
319 * disabled by software override. Since there really
320 * isn't a way to advertise that we are capable of RX
321 * Pause ONLY, we will advertise that we support both
322 * symmetric and asymmetric Rx PAUSE, as such we fall
323 * through to the fc_full statement. Later, we will
324 * disable the adapter's ability to send PAUSE frames.
325 */
326 case ixgbe_fc_full:
327 /* Flow control (both Rx and Tx) is enabled by SW override. */
328 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
329 if (hw->phy.media_type == ixgbe_media_type_backplane)
330 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
331 IXGBE_AUTOC_ASM_PAUSE;
332 else if (hw->phy.media_type == ixgbe_media_type_copper)
333 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
334 break;
335 default:
336 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
337 "Flow control param set incorrectly\n");
338 ret_val = IXGBE_ERR_CONFIG;
339 goto out;
340 break;
341 }
342
343 if (hw->mac.type < ixgbe_mac_X540) {
344 /*
345 * Enable auto-negotiation between the MAC & PHY;
346 * the MAC will advertise clause 37 flow control.
347 */
348 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
349 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
350
351 /* Disable AN timeout */
352 if (hw->fc.strict_ieee)
353 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
354
355 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
356 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
357 }
358
359 /*
360 * AUTOC restart handles negotiation of 1G and 10G on backplane
361 * and copper. There is no need to set the PCS1GCTL register.
362 *
363 */
364 if (hw->phy.media_type == ixgbe_media_type_backplane) {
365 reg_bp |= IXGBE_AUTOC_AN_RESTART;
366 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
367 if (ret_val)
368 goto out;
369 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
370 (ixgbe_device_supports_autoneg_fc(hw))) {
371 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
372 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
373 }
374
375 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
376 out:
377 return ret_val;
378 }
379
/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 *
 * Returns IXGBE_SUCCESS, or the error from flow control setup.
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable in CTRL_EXT, then flush the write */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control; a missing FC implementation is not fatal */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* fix needed unless the device caps explicitly opt out */
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = false;
		else
			hw->need_crosstalk_fix = true;
		break;
	default:
		hw->need_crosstalk_fix = false;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return IXGBE_SUCCESS;
}
442
443 /**
444 * ixgbe_start_hw_gen2 - Init sequence for common device family
445 * @hw: pointer to hw structure
446 *
447 * Performs the init sequence common to the second generation
448 * of 10 GbE devices.
449 * Devices in the second generation:
450 * 82599
451 * X540
452 **/
453 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
454 {
455 u32 i;
456 u32 regval;
457
458 /* Clear the rate limiters */
459 for (i = 0; i < hw->mac.max_tx_queues; i++) {
460 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
461 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
462 }
463 IXGBE_WRITE_FLUSH(hw);
464
465 /* Disable relaxed ordering */
466 for (i = 0; i < hw->mac.max_tx_queues; i++) {
467 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
468 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
469 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
470 }
471
472 for (i = 0; i < hw->mac.max_rx_queues; i++) {
473 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
474 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
475 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
476 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
477 }
478
479 return IXGBE_SUCCESS;
480 }
481
482 /**
483 * ixgbe_init_hw_generic - Generic hardware initialization
484 * @hw: pointer to hardware structure
485 *
486 * Initialize the hardware by resetting the hardware, filling the bus info
487 * structure and media type, clears all on chip counters, initializes receive
488 * address registers, multicast table, VLAN filter table, calls routine to set
489 * up link and flow control settings, and leaves transmit and receive units
490 * disabled and uninitialized
491 **/
492 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
493 {
494 s32 status;
495
496 DEBUGFUNC("ixgbe_init_hw_generic");
497
498 /* Reset the hardware */
499 status = hw->mac.ops.reset_hw(hw);
500
501 if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
502 /* Start the HW */
503 status = hw->mac.ops.start_hw(hw);
504 }
505
506 /* Initialize the LED link active for LED blink support */
507 hw->mac.ops.init_led_link_act(hw);
508
509 if (status != IXGBE_SUCCESS)
510 DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
511
512 return status;
513 }
514
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.  Read results are intentionally
 * discarded - the read itself performs the clear.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Rx error counters */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Link-level XON/XOFF counters; 82599+ use different Rx offsets */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority XON/XOFF counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));

	/* Rx/Tx size-bucket, good packet/octet and management counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Per-queue counters; 82599+ split byte counts into L/H halves */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550: also read (and thereby clear) the PHY-side PCRC8/LDPC
	 * error registers.  Note "i" is reused here as a scratch u16 for the
	 * read result, which is discarded.
	 */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
628
/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.  Two on-NVM layouts are
 * handled: the new layout, where word IXGBE_PBANUM0_PTR holds a guard
 * value and IXGBE_PBANUM1_PTR points to a length-prefixed string section,
 * and the legacy layout, where the two words are decoded directly into a
 * 10-character "xxxxxx-0xx" hex string (11 bytes including the NUL).
 *
 * Returns IXGBE_SUCCESS, an NVM read error, IXGBE_ERR_INVALID_ARGUMENT
 * for a NULL buffer, IXGBE_ERR_NO_SPACE when the buffer is too small, or
 * IXGBE_ERR_PBA_SECTION for an invalid string section length.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr; each slot holds
		 * a nibble value (0x0-0xF) until the conversion loop below
		 */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;		/* becomes '0' in the loop below */
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char; '-' (0x2D)
		 * matches neither range test and is left untouched
		 */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* new format: first word of the section is its length in words */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word yields two string bytes, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
739
740 /**
741 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
742 * @hw: pointer to hardware structure
743 * @pba_num: stores the part number from the EEPROM
744 *
745 * Reads the part number from the EEPROM.
746 **/
747 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
748 {
749 s32 ret_val;
750 u16 data;
751
752 DEBUGFUNC("ixgbe_read_pba_num_generic");
753
754 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
755 if (ret_val) {
756 DEBUGOUT("NVM Read Error\n");
757 return ret_val;
758 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
759 DEBUGOUT("NVM Not supported\n");
760 return IXGBE_NOT_IMPLEMENTED;
761 }
762 *pba_num = (u32)(data << 16);
763
764 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
765 if (ret_val) {
766 DEBUGOUT("NVM Read Error\n");
767 return ret_val;
768 }
769 *pba_num |= data;
770
771 return IXGBE_SUCCESS;
772 }
773
/**
 * ixgbe_read_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @max_pba_block_size: PBA block size limit
 * @pba: pointer to output PBA structure
 *
 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
 *
 * Fills pba->word[0..1]; when they indicate the pointer-guarded format,
 * also copies the PBA block into pba->pba_block (caller-provided, must
 * hold at least max_pba_block_size words).
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for NULL/undersized arguments,
 * or the EEPROM read error.
 **/
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
		       u32 eeprom_buf_size, u16 max_pba_block_size,
		       struct ixgbe_pba *pba)
{
	s32 ret_val;
	u16 pba_block_size;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Fetch the two PBA words from the device or the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard value => word[1] points at a PBA block; read it too */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
						   eeprom_buf_size,
						   &pba_block_size);
		if (ret_val)
			return ret_val;

		if (pba_block_size > max_pba_block_size)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
							     pba_block_size,
							     pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* block must lie entirely inside the image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba_block_size)) {
				memcpy(pba->pba_block,
				       &eeprom_buf[pba->word[1]],
				       pba_block_size * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
843
/**
 * ixgbe_write_pba_raw
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba: pointer to PBA structure
 *
 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
 *
 * Writes pba->word[0..1]; when they indicate the pointer-guarded format,
 * also writes the PBA block (whose first word, pba->pba_block[0], is its
 * own length in words).
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for NULL/undersized arguments,
 * or the EEPROM write error.
 **/
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
			u32 eeprom_buf_size, struct ixgbe_pba *pba)
{
	s32 ret_val;

	if (pba == NULL)
		return IXGBE_ERR_PARAM;

	/* Store the two PBA words to the device or the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						      &pba->word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard value => also write the PBA block at word[1] */
	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (pba->pba_block == NULL)
			return IXGBE_ERR_PARAM;

		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
							      pba->pba_block[0],
							      pba->pba_block);
			if (ret_val)
				return ret_val;
		} else {
			/* block must fit entirely inside the image */
			if (eeprom_buf_size > (u32)(pba->word[1] +
					      pba->pba_block[0])) {
				memcpy(&eeprom_buf[pba->word[1]],
				       pba->pba_block,
				       pba->pba_block[0] * sizeof(u16));
			} else {
				return IXGBE_ERR_PARAM;
			}
		}
	}

	return IXGBE_SUCCESS;
}
901
/**
 * ixgbe_get_pba_block_size
 * @hw: pointer to the HW structure
 * @eeprom_buf: optional pointer to EEPROM image
 * @eeprom_buf_size: size of EEPROM image in words
 * @pba_block_size: pointer to output variable (may be NULL)
 *
 * Returns the size of the PBA block in words. Function operates on EEPROM
 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
 * EEPROM device.
 *
 * The size is the block's first word (at the offset held in PBA word 1);
 * a legacy-format PBA has no block, so size 0 is reported.  Returns
 * IXGBE_SUCCESS, IXGBE_ERR_PARAM, IXGBE_ERR_PBA_SECTION for an invalid
 * stored length (0 or 0xFFFF), or the EEPROM read error.
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	/* Fetch the two PBA words from the device or the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		/* first word of the pointed-to block is its length */
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}
962
963 /**
964 * ixgbe_get_mac_addr_generic - Generic get MAC address
965 * @hw: pointer to hardware structure
966 * @mac_addr: Adapter MAC address
967 *
968 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
969 * A reset of the adapter must be performed prior to calling this function
970 * in order for the MAC address to have been loaded from the EEPROM into RAR0
971 **/
972 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
973 {
974 u32 rar_high;
975 u32 rar_low;
976 u16 i;
977
978 DEBUGFUNC("ixgbe_get_mac_addr_generic");
979
980 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
981 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
982
983 for (i = 0; i < 4; i++)
984 mac_addr[i] = (u8)(rar_low >> (i*8));
985
986 for (i = 0; i < 2; i++)
987 mac_addr[i+4] = (u8)(rar_high >> (i*8));
988
989 return IXGBE_SUCCESS;
990 }
991
/**
 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
 * @hw: pointer to hardware structure
 * @link_status: the link status returned by the PCI config space
 *
 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
 * and then invokes the MAC's set_lan_id op to derive the LAN function id.
 **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Default to PCIe unless the bus type was already determined */
	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	/* Decode negotiated link width field */
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	/* Decode negotiated link speed field */
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);
}
1041
1042 /**
1043 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1044 * @hw: pointer to hardware structure
1045 *
1046 * Gets the PCI bus info (speed, width, type) then calls helper function to
1047 * store this data within the ixgbe_hw structure.
1048 **/
1049 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1050 {
1051 u16 link_status;
1052
1053 DEBUGFUNC("ixgbe_get_bus_info_generic");
1054
1055 /* Get the negotiated link width and speed from PCI config space */
1056 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1057
1058 ixgbe_set_pci_config_data_generic(hw, link_status);
1059
1060 return IXGBE_SUCCESS;
1061 }
1062
1063 /**
1064 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1065 * @hw: pointer to the HW structure
1066 *
1067 * Determines the LAN function id by reading memory-mapped registers and swaps
1068 * the port value if requested, and set MAC instance for devices that share
1069 * CS4227.
1070 **/
1071 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1072 {
1073 struct ixgbe_bus_info *bus = &hw->bus;
1074 u32 reg;
1075 u16 ee_ctrl_4;
1076
1077 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1078
1079 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1080 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1081 bus->lan_id = (u8)bus->func;
1082
1083 /* check for a port swap */
1084 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1085 if (reg & IXGBE_FACTPS_LFS)
1086 bus->func ^= 0x1;
1087
1088 /* Get MAC instance from EEPROM for configuring CS4227 */
1089 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1090 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1091 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1092 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1093 }
1094 }
1095
1096 /**
1097 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1098 * @hw: pointer to hardware structure
1099 *
1100 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1101 * disables transmit and receive units. The adapter_stopped flag is used by
1102 * the shared code and drivers to determine if the adapter is in a stopped
1103 * state and should not touch the hardware.
1104 **/
1105 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1106 {
1107 u32 reg_val;
1108 u16 i;
1109
1110 DEBUGFUNC("ixgbe_stop_adapter_generic");
1111
1112 /*
1113 * Set the adapter_stopped flag so other driver functions stop touching
1114 * the hardware
1115 */
1116 hw->adapter_stopped = true;
1117
1118 /* Disable the receive unit */
1119 ixgbe_disable_rx(hw);
1120
1121 /* Clear interrupt mask to stop interrupts from being generated */
1122 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1123
1124 /* Clear any pending interrupts, flush previous writes */
1125 IXGBE_READ_REG(hw, IXGBE_EICR);
1126
1127 /* Disable the transmit unit. Each queue must be disabled. */
1128 for (i = 0; i < hw->mac.max_tx_queues; i++)
1129 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1130
1131 /* Disable the receive unit by stopping each queue */
1132 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1133 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1134 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1135 reg_val |= IXGBE_RXDCTL_SWFLSH;
1136 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1137 }
1138
1139 /* flush all queues disables */
1140 IXGBE_WRITE_FLUSH(hw);
1141 msec_delay(2);
1142
1143 /*
1144 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1145 * access and verify no pending requests
1146 */
1147 return ixgbe_disable_pcie_master(hw);
1148 }
1149
1150 /**
1151 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1152 * @hw: pointer to hardware structure
1153 *
1154 * Store the index for the link active LED. This will be used to support
1155 * blinking the LED.
1156 **/
1157 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1158 {
1159 struct ixgbe_mac_info *mac = &hw->mac;
1160 u32 led_reg, led_mode;
1161 u8 i;
1162
1163 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1164
1165 /* Get LED link active from the LEDCTL register */
1166 for (i = 0; i < 4; i++) {
1167 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1168
1169 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1170 IXGBE_LED_LINK_ACTIVE) {
1171 mac->led_link_act = i;
1172 return IXGBE_SUCCESS;
1173 }
1174 }
1175
1176 /*
1177 * If LEDCTL register does not have the LED link active set, then use
1178 * known MAC defaults.
1179 */
1180 switch (hw->mac.type) {
1181 case ixgbe_mac_X550EM_a:
1182 case ixgbe_mac_X550EM_x:
1183 mac->led_link_act = 1;
1184 break;
1185 default:
1186 mac->led_link_act = 2;
1187 }
1188 return IXGBE_SUCCESS;
1189 }
1190
1191 /**
1192 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1193 * @hw: pointer to hardware structure
1194 * @index: led number to turn on
1195 **/
1196 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1197 {
1198 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1199
1200 DEBUGFUNC("ixgbe_led_on_generic");
1201
1202 if (index > 3)
1203 return IXGBE_ERR_PARAM;
1204
1205 /* To turn on the LED, set mode to ON. */
1206 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1207 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1208 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1209 IXGBE_WRITE_FLUSH(hw);
1210
1211 return IXGBE_SUCCESS;
1212 }
1213
1214 /**
1215 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1216 * @hw: pointer to hardware structure
1217 * @index: led number to turn off
1218 **/
1219 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1220 {
1221 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1222
1223 DEBUGFUNC("ixgbe_led_off_generic");
1224
1225 if (index > 3)
1226 return IXGBE_ERR_PARAM;
1227
1228 /* To turn off the LED, set mode to OFF. */
1229 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1230 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1231 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1232 IXGBE_WRITE_FLUSH(hw);
1233
1234 return IXGBE_SUCCESS;
1235 }
1236
1237 /**
1238 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1239 * @hw: pointer to hardware structure
1240 *
1241 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1242 * ixgbe_hw struct in order to set up EEPROM access.
1243 **/
1244 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1245 {
1246 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1247 u32 eec;
1248 u16 eeprom_size;
1249
1250 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1251
1252 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1253 eeprom->type = ixgbe_eeprom_none;
1254 /* Set default semaphore delay to 10ms which is a well
1255 * tested value */
1256 eeprom->semaphore_delay = 10;
1257 /* Clear EEPROM page size, it will be initialized as needed */
1258 eeprom->word_page_size = 0;
1259
1260 /*
1261 * Check for EEPROM present first.
1262 * If not present leave as none
1263 */
1264 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1265 if (eec & IXGBE_EEC_PRES) {
1266 eeprom->type = ixgbe_eeprom_spi;
1267
1268 /*
1269 * SPI EEPROM is assumed here. This code would need to
1270 * change if a future EEPROM is not SPI.
1271 */
1272 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1273 IXGBE_EEC_SIZE_SHIFT);
1274 eeprom->word_size = 1 << (eeprom_size +
1275 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1276 }
1277
1278 if (eec & IXGBE_EEC_ADDR_SIZE)
1279 eeprom->address_bits = 16;
1280 else
1281 eeprom->address_bits = 8;
1282 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1283 "%d\n", eeprom->type, eeprom->word_size,
1284 eeprom->address_bits);
1285 }
1286
1287 return IXGBE_SUCCESS;
1288 }
1289
1290 /**
1291 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1292 * @hw: pointer to hardware structure
1293 * @offset: offset within the EEPROM to write
1294 * @words: number of word(s)
1295 * @data: 16 bit word(s) to write to EEPROM
1296 *
1297 * Reads 16 bit word(s) from EEPROM through bit-bang method
1298 **/
1299 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1300 u16 words, u16 *data)
1301 {
1302 s32 status = IXGBE_SUCCESS;
1303 u16 i, count;
1304
1305 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1306
1307 hw->eeprom.ops.init_params(hw);
1308
1309 if (words == 0) {
1310 status = IXGBE_ERR_INVALID_ARGUMENT;
1311 goto out;
1312 }
1313
1314 if (offset + words > hw->eeprom.word_size) {
1315 status = IXGBE_ERR_EEPROM;
1316 goto out;
1317 }
1318
1319 /*
1320 * The EEPROM page size cannot be queried from the chip. We do lazy
1321 * initialization. It is worth to do that when we write large buffer.
1322 */
1323 if ((hw->eeprom.word_page_size == 0) &&
1324 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1325 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1326
1327 /*
1328 * We cannot hold synchronization semaphores for too long
1329 * to avoid other entity starvation. However it is more efficient
1330 * to read in bursts than synchronizing access for each word.
1331 */
1332 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1333 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1334 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1335 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1336 count, &data[i]);
1337
1338 if (status != IXGBE_SUCCESS)
1339 break;
1340 }
1341
1342 out:
1343 return status;
1344 }
1345
/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		/* NOTE: 'i' is also advanced inside the do/while burst loop
		 * below, so each outer iteration begins a fresh page burst. */
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address on the wire = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI */
			do {
				word = data[i];
				/* swap to the big-endian order SPI expects */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* page size unknown: one word per command */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* wait out the device's internal write cycle */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1428
1429 /**
1430 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1431 * @hw: pointer to hardware structure
1432 * @offset: offset within the EEPROM to be written to
1433 * @data: 16 bit word to be written to the EEPROM
1434 *
1435 * If ixgbe_eeprom_update_checksum is not called after this function, the
1436 * EEPROM will most likely contain an invalid checksum.
1437 **/
1438 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1439 {
1440 s32 status;
1441
1442 DEBUGFUNC("ixgbe_write_eeprom_generic");
1443
1444 hw->eeprom.ops.init_params(hw);
1445
1446 if (offset >= hw->eeprom.word_size) {
1447 status = IXGBE_ERR_EEPROM;
1448 goto out;
1449 }
1450
1451 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1452
1453 out:
1454 return status;
1455 }
1456
1457 /**
1458 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1459 * @hw: pointer to hardware structure
1460 * @offset: offset within the EEPROM to be read
1461 * @data: read 16 bit words(s) from EEPROM
1462 * @words: number of word(s)
1463 *
1464 * Reads 16 bit word(s) from EEPROM through bit-bang method
1465 **/
1466 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1467 u16 words, u16 *data)
1468 {
1469 s32 status = IXGBE_SUCCESS;
1470 u16 i, count;
1471
1472 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1473
1474 hw->eeprom.ops.init_params(hw);
1475
1476 if (words == 0) {
1477 status = IXGBE_ERR_INVALID_ARGUMENT;
1478 goto out;
1479 }
1480
1481 if (offset + words > hw->eeprom.word_size) {
1482 status = IXGBE_ERR_EEPROM;
1483 goto out;
1484 }
1485
1486 /*
1487 * We cannot hold synchronization semaphores for too long
1488 * to avoid other entity starvation. However it is more efficient
1489 * to read in bursts than synchronizing access for each word.
1490 */
1491 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1492 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1493 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1494
1495 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1496 count, &data[i]);
1497
1498 if (status != IXGBE_SUCCESS)
1499 break;
1500 }
1501
1502 out:
1503 return status;
1504 }
1505
/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		/* The part must be idle before a command is issued */
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* Byte address on the wire = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data; SPI delivers big-endian, so swap */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1563
1564 /**
1565 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1566 * @hw: pointer to hardware structure
1567 * @offset: offset within the EEPROM to be read
1568 * @data: read 16 bit value from EEPROM
1569 *
1570 * Reads 16 bit value from EEPROM through bit-bang method
1571 **/
1572 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1573 u16 *data)
1574 {
1575 s32 status;
1576
1577 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1578
1579 hw->eeprom.ops.init_params(hw);
1580
1581 if (offset >= hw->eeprom.word_size) {
1582 status = IXGBE_ERR_EEPROM;
1583 goto out;
1584 }
1585
1586 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1587
1588 out:
1589 return status;
1590 }
1591
1592 /**
1593 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1594 * @hw: pointer to hardware structure
1595 * @offset: offset of word in the EEPROM to read
1596 * @words: number of word(s)
1597 * @data: 16 bit word(s) from the EEPROM
1598 *
1599 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1600 **/
1601 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1602 u16 words, u16 *data)
1603 {
1604 u32 eerd;
1605 s32 status = IXGBE_SUCCESS;
1606 u32 i;
1607
1608 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1609
1610 hw->eeprom.ops.init_params(hw);
1611
1612 if (words == 0) {
1613 status = IXGBE_ERR_INVALID_ARGUMENT;
1614 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1615 goto out;
1616 }
1617
1618 if (offset >= hw->eeprom.word_size) {
1619 status = IXGBE_ERR_EEPROM;
1620 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1621 goto out;
1622 }
1623
1624 for (i = 0; i < words; i++) {
1625 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1626 IXGBE_EEPROM_RW_REG_START;
1627
1628 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1629 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1630
1631 if (status == IXGBE_SUCCESS) {
1632 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1633 IXGBE_EEPROM_RW_REG_DATA);
1634 } else {
1635 DEBUGOUT("Eeprom read timed out\n");
1636 goto out;
1637 }
1638 }
1639 out:
1640 return status;
1641 }
1642
1643 /**
1644 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1645 * @hw: pointer to hardware structure
1646 * @offset: offset within the EEPROM to be used as a scratch pad
1647 *
1648 * Discover EEPROM page size by writing marching data at given offset.
1649 * This function is called only when we are writing a new large buffer
1650 * at given offset so the data would be overwritten anyway.
1651 **/
1652 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1653 u16 offset)
1654 {
1655 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1656 s32 status = IXGBE_SUCCESS;
1657 u16 i;
1658
1659 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1660
1661 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1662 data[i] = i;
1663
1664 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1665 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1666 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1667 hw->eeprom.word_page_size = 0;
1668 if (status != IXGBE_SUCCESS)
1669 goto out;
1670
1671 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1672 if (status != IXGBE_SUCCESS)
1673 goto out;
1674
1675 /*
1676 * When writing in burst more than the actual page size
1677 * EEPROM address wraps around current page.
1678 */
1679 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1680
1681 DEBUGOUT1("Detected EEPROM page size = %d words.",
1682 hw->eeprom.word_page_size);
1683 out:
1684 return status;
1685 }
1686
1687 /**
1688 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1689 * @hw: pointer to hardware structure
1690 * @offset: offset of word in the EEPROM to read
1691 * @data: word read from the EEPROM
1692 *
1693 * Reads a 16 bit word from the EEPROM using the EERD register.
1694 **/
1695 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1696 {
1697 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1698 }
1699
1700 /**
1701 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1702 * @hw: pointer to hardware structure
1703 * @offset: offset of word in the EEPROM to write
1704 * @words: number of word(s)
1705 * @data: word(s) write to the EEPROM
1706 *
1707 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1708 **/
1709 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1710 u16 words, u16 *data)
1711 {
1712 u32 eewr;
1713 s32 status = IXGBE_SUCCESS;
1714 u16 i;
1715
1716 DEBUGFUNC("ixgbe_write_eewr_generic");
1717
1718 hw->eeprom.ops.init_params(hw);
1719
1720 if (words == 0) {
1721 status = IXGBE_ERR_INVALID_ARGUMENT;
1722 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1723 goto out;
1724 }
1725
1726 if (offset >= hw->eeprom.word_size) {
1727 status = IXGBE_ERR_EEPROM;
1728 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1729 goto out;
1730 }
1731
1732 for (i = 0; i < words; i++) {
1733 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1734 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1735 IXGBE_EEPROM_RW_REG_START;
1736
1737 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1738 if (status != IXGBE_SUCCESS) {
1739 DEBUGOUT("Eeprom write EEWR timed out\n");
1740 goto out;
1741 }
1742
1743 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1744
1745 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1746 if (status != IXGBE_SUCCESS) {
1747 DEBUGOUT("Eeprom write EEWR timed out\n");
1748 goto out;
1749 }
1750 }
1751
1752 out:
1753 return status;
1754 }
1755
1756 /**
1757 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1758 * @hw: pointer to hardware structure
1759 * @offset: offset of word in the EEPROM to write
1760 * @data: word write to the EEPROM
1761 *
1762 * Write a 16 bit word to the EEPROM using the EEWR register.
1763 **/
1764 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1765 {
1766 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1767 }
1768
1769 /**
1770 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1771 * @hw: pointer to hardware structure
1772 * @ee_reg: EEPROM flag for polling
1773 *
1774 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1775 * read or write is done respectively.
1776 **/
1777 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1778 {
1779 u32 i;
1780 u32 reg;
1781 s32 status = IXGBE_ERR_EEPROM;
1782
1783 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1784
1785 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1786 if (ee_reg == IXGBE_NVM_POLL_READ)
1787 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1788 else
1789 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1790
1791 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1792 status = IXGBE_SUCCESS;
1793 break;
1794 }
1795 usec_delay(5);
1796 }
1797
1798 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1799 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1800 "EEPROM read/write done polling timed out");
1801
1802 return status;
1803 }
1804
/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 **/
STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* First take the SW/FW synchronization semaphore for the EEPROM */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		/* Poll for the hardware to grant the request */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired: drop REQ and give back the
		 * SW/FW semaphore so others can make progress */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK so the SPI bus starts idle */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}
1859
/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}
1950
1951 /**
1952 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1953 * @hw: pointer to hardware structure
1954 *
1955 * This function clears hardware semaphore bits.
1956 **/
1957 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1958 {
1959 u32 swsm;
1960
1961 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1962
1963 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1964
1965 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1966 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1967 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1968 IXGBE_WRITE_FLUSH(hw);
1969 }
1970
1971 /**
1972 * ixgbe_ready_eeprom - Polls for EEPROM ready
1973 * @hw: pointer to hardware structure
1974 **/
1975 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1976 {
1977 s32 status = IXGBE_SUCCESS;
1978 u16 i;
1979 u8 spi_stat_reg;
1980
1981 DEBUGFUNC("ixgbe_ready_eeprom");
1982
1983 /*
1984 * Read "Status Register" repeatedly until the LSB is cleared. The
1985 * EEPROM will signal that the command has been completed by clearing
1986 * bit 0 of the internal status register. If it's not cleared within
1987 * 5 milliseconds, then error out.
1988 */
1989 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1990 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1991 IXGBE_EEPROM_OPCODE_BITS);
1992 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1993 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1994 break;
1995
1996 usec_delay(5);
1997 ixgbe_standby_eeprom(hw);
1998 };
1999
2000 /*
2001 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2002 * devices (and only 0-5mSec on 5V devices)
2003 */
2004 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2005 DEBUGOUT("SPI EEPROM Status error\n");
2006 status = IXGBE_ERR_EEPROM;
2007 }
2008
2009 return status;
2010 }
2011
2012 /**
2013 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2014 * @hw: pointer to hardware structure
2015 **/
2016 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2017 {
2018 u32 eec;
2019
2020 DEBUGFUNC("ixgbe_standby_eeprom");
2021
2022 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2023
2024 /* Toggle CS to flush commands */
2025 eec |= IXGBE_EEC_CS;
2026 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2027 IXGBE_WRITE_FLUSH(hw);
2028 usec_delay(1);
2029 eec &= ~IXGBE_EEC_CS;
2030 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2031 IXGBE_WRITE_FLUSH(hw);
2032 usec_delay(1);
2033 }
2034
2035 /**
2036 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2037 * @hw: pointer to hardware structure
2038 * @data: data to send to the EEPROM
2039 * @count: number of bits to shift out
2040 **/
2041 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2042 u16 count)
2043 {
2044 u32 eec;
2045 u32 mask;
2046 u32 i;
2047
2048 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2049
2050 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2051
2052 /*
2053 * Mask is used to shift "count" bits of "data" out to the EEPROM
2054 * one bit at a time. Determine the starting bit based on count
2055 */
2056 mask = 0x01 << (count - 1);
2057
2058 for (i = 0; i < count; i++) {
2059 /*
2060 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2061 * "1", and then raising and then lowering the clock (the SK
2062 * bit controls the clock input to the EEPROM). A "0" is
2063 * shifted out to the EEPROM by setting "DI" to "0" and then
2064 * raising and then lowering the clock.
2065 */
2066 if (data & mask)
2067 eec |= IXGBE_EEC_DI;
2068 else
2069 eec &= ~IXGBE_EEC_DI;
2070
2071 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2072 IXGBE_WRITE_FLUSH(hw);
2073
2074 usec_delay(1);
2075
2076 ixgbe_raise_eeprom_clk(hw, &eec);
2077 ixgbe_lower_eeprom_clk(hw, &eec);
2078
2079 /*
2080 * Shift mask to signify next bit of data to shift in to the
2081 * EEPROM
2082 */
2083 mask = mask >> 1;
2084 };
2085
2086 /* We leave the "DI" bit set to "0" when we leave this routine. */
2087 eec &= ~IXGBE_EEC_DI;
2088 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2089 IXGBE_WRITE_FLUSH(hw);
2090 }
2091
/**
 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 * @hw: pointer to hardware structure
 * @count: number of bits to shift in
 *
 * Clocks @count bits out of the EEPROM's DO line, MSB first, and
 * returns them packed into the low bits of a u16.
 **/
STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit. During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		/* make room for the next bit in the LSB position */
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		/* re-read EEC to sample the DO bit driven by the EEPROM */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* keep DI low for the whole read transaction */
		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
2130
2131 /**
2132 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2133 * @hw: pointer to hardware structure
2134 * @eec: EEC register's current value
2135 **/
2136 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2137 {
2138 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2139
2140 /*
2141 * Raise the clock input to the EEPROM
2142 * (setting the SK bit), then delay
2143 */
2144 *eec = *eec | IXGBE_EEC_SK;
2145 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2146 IXGBE_WRITE_FLUSH(hw);
2147 usec_delay(1);
2148 }
2149
/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 *
 * Drives the SK (clock) bit low, flushes the write, and waits 1us so
 * the EEPROM sees a clean falling edge.  *eec is updated to mirror the
 * value written to the register.
 **/
STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}
2168
/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deselects the EEPROM (CS high, SK low), drops the hardware request
 * bit, releases the EEPROM software/firmware semaphore, and then delays
 * so firmware has a chance to grab the EEPROM before we request it
 * again.
 **/
STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	DEBUGFUNC("ixgbe_release_eeprom");

	eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

	eec |= IXGBE_EEC_CS; /* Pull CS high */
	eec &= ~IXGBE_EEC_SK; /* Lower SCK */

	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
	IXGBE_WRITE_FLUSH(hw);

	usec_delay(1);

	/* Stop requesting EEPROM access */
	eec &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/* Delay before attempt to obtain semaphore again to allow FW access */
	msec_delay(hw->eeprom.semaphore_delay);
}
2198
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums every EEPROM word below the checksum word, plus the contents of
 * every section reachable through the pointer words (except the FW
 * pointer), and returns the value that would make the grand total equal
 * IXGBE_EEPROM_SUM.
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &word)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (hw->eeprom.ops.read(hw, i, &pointer)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* If the pointer seems invalid */
		if (pointer == 0xFFFF || pointer == 0)
			continue;

		/* The first word of a pointed-to section is its length */
		if (hw->eeprom.ops.read(hw, pointer, &length)) {
			DEBUGOUT("EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		if (length == 0xFFFF || length == 0)
			continue;

		/* NOTE(review): pointer + length is not range-checked; a
		 * corrupt EEPROM whose pointer + length exceeds 0xFFFF would
		 * make this loop never terminate (j is u16) — confirm the
		 * 0xFFFF/0 guards above are considered sufficient upstream.
		 */
		for (j = pointer + 1; j <= pointer + length; j++) {
			if (hw->eeprom.ops.read(hw, j, &word)) {
				DEBUGOUT("EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}
2257
2258 /**
2259 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2260 * @hw: pointer to hardware structure
2261 * @checksum_val: calculated checksum
2262 *
2263 * Performs checksum calculation and validates the EEPROM checksum. If the
2264 * caller does not need checksum_val, the value can be NULL.
2265 **/
2266 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2267 u16 *checksum_val)
2268 {
2269 s32 status;
2270 u16 checksum;
2271 u16 read_checksum = 0;
2272
2273 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2274
2275 /* Read the first word from the EEPROM. If this times out or fails, do
2276 * not continue or we could be in for a very long wait while every
2277 * EEPROM read fails
2278 */
2279 status = hw->eeprom.ops.read(hw, 0, &checksum);
2280 if (status) {
2281 DEBUGOUT("EEPROM read failed\n");
2282 return status;
2283 }
2284
2285 status = hw->eeprom.ops.calc_checksum(hw);
2286 if (status < 0)
2287 return status;
2288
2289 checksum = (u16)(status & 0xffff);
2290
2291 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2292 if (status) {
2293 DEBUGOUT("EEPROM read failed\n");
2294 return status;
2295 }
2296
2297 /* Verify read checksum from EEPROM is the same as
2298 * calculated checksum
2299 */
2300 if (read_checksum != checksum)
2301 status = IXGBE_ERR_EEPROM_CHECKSUM;
2302
2303 /* If the user cares, return the calculated checksum */
2304 if (checksum_val)
2305 *checksum_val = checksum;
2306
2307 return status;
2308 }
2309
2310 /**
2311 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2312 * @hw: pointer to hardware structure
2313 **/
2314 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2315 {
2316 s32 status;
2317 u16 checksum;
2318
2319 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2320
2321 /* Read the first word from the EEPROM. If this times out or fails, do
2322 * not continue or we could be in for a very long wait while every
2323 * EEPROM read fails
2324 */
2325 status = hw->eeprom.ops.read(hw, 0, &checksum);
2326 if (status) {
2327 DEBUGOUT("EEPROM read failed\n");
2328 return status;
2329 }
2330
2331 status = hw->eeprom.ops.calc_checksum(hw);
2332 if (status < 0)
2333 return status;
2334
2335 checksum = (u16)(status & 0xffff);
2336
2337 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2338
2339 return status;
2340 }
2341
2342 /**
2343 * ixgbe_validate_mac_addr - Validate MAC address
2344 * @mac_addr: pointer to MAC address.
2345 *
2346 * Tests a MAC address to ensure it is a valid Individual Address.
2347 **/
2348 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2349 {
2350 s32 status = IXGBE_SUCCESS;
2351
2352 DEBUGFUNC("ixgbe_validate_mac_addr");
2353
2354 /* Make sure it is not a multicast address */
2355 if (IXGBE_IS_MULTICAST(mac_addr)) {
2356 status = IXGBE_ERR_INVALID_MAC_ADDR;
2357 /* Not a broadcast address */
2358 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2359 status = IXGBE_ERR_INVALID_MAC_ADDR;
2360 /* Reject the zero address */
2361 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2362 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2363 status = IXGBE_ERR_INVALID_MAC_ADDR;
2364 }
2365 return status;
2366 }
2367
2368 /**
2369 * ixgbe_set_rar_generic - Set Rx address register
2370 * @hw: pointer to hardware structure
2371 * @index: Receive address register to write
2372 * @addr: Address to put into receive address register
2373 * @vmdq: VMDq "set" or "pool" index
2374 * @enable_addr: set flag that address is active
2375 *
2376 * Puts an ethernet address into a receive address register.
2377 **/
2378 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2379 u32 enable_addr)
2380 {
2381 u32 rar_low, rar_high;
2382 u32 rar_entries = hw->mac.num_rar_entries;
2383
2384 DEBUGFUNC("ixgbe_set_rar_generic");
2385
2386 /* Make sure we are using a valid rar index range */
2387 if (index >= rar_entries) {
2388 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2389 "RAR index %d is out of range.\n", index);
2390 return IXGBE_ERR_INVALID_ARGUMENT;
2391 }
2392
2393 /* setup VMDq pool selection before this RAR gets enabled */
2394 hw->mac.ops.set_vmdq(hw, index, vmdq);
2395
2396 /*
2397 * HW expects these in little endian so we reverse the byte
2398 * order from network order (big endian) to little endian
2399 */
2400 rar_low = ((u32)addr[0] |
2401 ((u32)addr[1] << 8) |
2402 ((u32)addr[2] << 16) |
2403 ((u32)addr[3] << 24));
2404 /*
2405 * Some parts put the VMDq setting in the extra RAH bits,
2406 * so save everything except the lower 16 bits that hold part
2407 * of the address and the address valid bit.
2408 */
2409 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2410 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2411 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2412
2413 if (enable_addr != 0)
2414 rar_high |= IXGBE_RAH_AV;
2415
2416 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2417 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2418
2419 return IXGBE_SUCCESS;
2420 }
2421
2422 /**
2423 * ixgbe_clear_rar_generic - Remove Rx address register
2424 * @hw: pointer to hardware structure
2425 * @index: Receive address register to write
2426 *
2427 * Clears an ethernet address from a receive address register.
2428 **/
2429 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2430 {
2431 u32 rar_high;
2432 u32 rar_entries = hw->mac.num_rar_entries;
2433
2434 DEBUGFUNC("ixgbe_clear_rar_generic");
2435
2436 /* Make sure we are using a valid rar index range */
2437 if (index >= rar_entries) {
2438 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2439 "RAR index %d is out of range.\n", index);
2440 return IXGBE_ERR_INVALID_ARGUMENT;
2441 }
2442
2443 /*
2444 * Some parts put the VMDq setting in the extra RAH bits,
2445 * so save everything except the lower 16 bits that hold part
2446 * of the address and the address valid bit.
2447 */
2448 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2449 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2450
2451 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2452 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2453
2454 /* clear VMDq pool/queue selection for this RAR */
2455 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2456
2457 return IXGBE_SUCCESS;
2458 }
2459
/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	}

	/* clear VMDq pool/queue selection for RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR0 now holds the primary address, so one entry is in use */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	ixgbe_init_uta_tables(hw);

	return IXGBE_SUCCESS;
}
2528
/**
 * ixgbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq "set" or "pool" index for this address
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
 **/
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar;

	DEBUGFUNC("ixgbe_add_uc_addr");

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Place this address in the RAR if there is room,
	 * else put the controller into promiscuous mode
	 */
	if (hw->addr_ctrl.rar_used_count < rar_entries) {
		rar = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
		hw->addr_ctrl.rar_used_count++;
	} else {
		/* Out of RAR entries; the caller is expected to react to
		 * the overflow count by enabling unicast promiscuous mode.
		 */
		hw->addr_ctrl.overflow_promisc++;
	}

	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
}
2561
/**
 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 * @hw: pointer to hardware structure
 * @addr_list: the list of new addresses
 * @addr_count: number of addresses
 * @next: iterator function to walk the address list
 *
 * The given list replaces any existing list. Clears the secondary addrs from
 * receive address registers. Uses unused receive address registers for the
 * first secondary addresses, and falls back to promiscuous mode as needed.
 *
 * Drivers using secondary unicast addresses must set user_set_promisc when
 * manually putting the device into promiscuous mode.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses (RAR[0] holds the primary
	 * MAC address and is preserved)
	 */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	/* ixgbe_add_uc_addr bumped overflow_promisc if it ran out of RARs;
	 * toggle unicast promiscuous mode accordingly, but never override
	 * a promisc setting the user asked for explicitly.
	 */
	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2631
2632 /**
2633 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2634 * @hw: pointer to hardware structure
2635 * @mc_addr: the multicast address
2636 *
2637 * Extracts the 12 bits, from a multicast address, to determine which
2638 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2639 * incoming rx multicast addresses, to determine the bit-vector to check in
2640 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2641 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2642 * to mc_filter_type.
2643 **/
2644 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2645 {
2646 u32 vector = 0;
2647
2648 DEBUGFUNC("ixgbe_mta_vector");
2649
2650 switch (hw->mac.mc_filter_type) {
2651 case 0: /* use bits [47:36] of the address */
2652 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2653 break;
2654 case 1: /* use bits [46:35] of the address */
2655 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2656 break;
2657 case 2: /* use bits [45:34] of the address */
2658 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2659 break;
2660 case 3: /* use bits [43:32] of the address */
2661 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2662 break;
2663 default: /* Invalid mc_filter_type */
2664 DEBUGOUT("MC filter type param set incorrectly\n");
2665 ASSERT(0);
2666 break;
2667 }
2668
2669 /* vector can only be 12-bits or boundary will be exceeded */
2670 vector &= 0xFFF;
2671 return vector;
2672 }
2673
/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: multicast address to hash into the table
 *
 * Sets the bit-vector in the multicast table shadow; the caller is
 * responsible for flushing mta_shadow out to the MTA registers.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
2707
/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
				      bool clear)
{
	u32 i;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

	/*
	 * Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
	hw->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	if (clear) {
		DEBUGOUT(" Clearing MTA\n");
		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
	}

	/* Update mta_shadow (ixgbe_set_mta only touches the shadow copy) */
	for (i = 0; i < mc_addr_count; i++) {
		DEBUGOUT(" Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
	}

	/* Enable mta: flush the whole shadow table to the hardware MTA */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2759
2760 /**
2761 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2762 * @hw: pointer to hardware structure
2763 *
2764 * Enables multicast address in RAR and the use of the multicast hash table.
2765 **/
2766 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2767 {
2768 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2769
2770 DEBUGFUNC("ixgbe_enable_mc_generic");
2771
2772 if (a->mta_in_use > 0)
2773 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2774 hw->mac.mc_filter_type);
2775
2776 return IXGBE_SUCCESS;
2777 }
2778
2779 /**
2780 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2781 * @hw: pointer to hardware structure
2782 *
2783 * Disables multicast address in RAR and the use of the multicast hash table.
2784 **/
2785 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2786 {
2787 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2788
2789 DEBUGFUNC("ixgbe_disable_mc_generic");
2790
2791 if (a->mta_in_use > 0)
2792 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2793
2794 return IXGBE_SUCCESS;
2795 }
2796
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings: validates the
 * water marks, negotiates the mode, programs MFLCN/FCCFG for the
 * resolved Rx/Tx pause mode, then sets per-TC water marks, pause time
 * and the refresh threshold.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break; /* not reached; kept for defensive switch style */
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* water marks are stored in KB units; shift into the
			 * byte-granular register fields
			 */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2929
2930 /**
2931 * ixgbe_negotiate_fc - Negotiate flow control
2932 * @hw: pointer to hardware structure
2933 * @adv_reg: flow control advertised settings
2934 * @lp_reg: link partner's flow control settings
2935 * @adv_sym: symmetric pause bit in advertisement
2936 * @adv_asm: asymmetric pause bit in advertisement
2937 * @lp_sym: symmetric pause bit in link partner advertisement
2938 * @lp_asm: asymmetric pause bit in link partner advertisement
2939 *
2940 * Find the intersection between advertised settings and link partner's
2941 * advertised settings
2942 **/
2943 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2944 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2945 {
2946 if ((!(adv_reg)) || (!(lp_reg))) {
2947 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2948 "Local or link partner's advertised flow control "
2949 "settings are NULL. Local: %x, link partner: %x\n",
2950 adv_reg, lp_reg);
2951 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2952 }
2953
2954 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2955 /*
2956 * Now we need to check if the user selected Rx ONLY
2957 * of pause frames. In this case, we had to advertise
2958 * FULL flow control because we could not advertise RX
2959 * ONLY. Hence, we must now check to see if we need to
2960 * turn OFF the TRANSMISSION of PAUSE frames.
2961 */
2962 if (hw->fc.requested_mode == ixgbe_fc_full) {
2963 hw->fc.current_mode = ixgbe_fc_full;
2964 DEBUGOUT("Flow Control = FULL.\n");
2965 } else {
2966 hw->fc.current_mode = ixgbe_fc_rx_pause;
2967 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2968 }
2969 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2970 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2971 hw->fc.current_mode = ixgbe_fc_tx_pause;
2972 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2973 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2974 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2975 hw->fc.current_mode = ixgbe_fc_rx_pause;
2976 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2977 } else {
2978 hw->fc.current_mode = ixgbe_fc_none;
2979 DEBUGOUT("Flow Control = NONE.\n");
2980 }
2981 return IXGBE_SUCCESS;
2982 }
2983
2984 /**
2985 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2986 * @hw: pointer to hardware structure
2987 *
2988 * Enable flow control according on 1 gig fiber.
2989 **/
2990 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2991 {
2992 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2993 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2994
2995 /*
2996 * On multispeed fiber at 1g, bail out if
2997 * - link is up but AN did not complete, or if
2998 * - link is up and AN completed but timed out
2999 */
3000
3001 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3002 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3003 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3004 DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
3005 goto out;
3006 }
3007
3008 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3009 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3010
3011 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3012 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3013 IXGBE_PCS1GANA_ASM_PAUSE,
3014 IXGBE_PCS1GANA_SYM_PAUSE,
3015 IXGBE_PCS1GANA_ASM_PAUSE);
3016
3017 out:
3018 return ret_val;
3019 }
3020
3021 /**
3022 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3023 * @hw: pointer to hardware structure
3024 *
3025 * Enable flow control according to IEEE clause 37.
3026 **/
3027 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3028 {
3029 u32 links2, anlp1_reg, autoc_reg, links;
3030 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3031
3032 /*
3033 * On backplane, bail out if
3034 * - backplane autoneg was not completed, or if
3035 * - we are 82599 and link partner is not AN enabled
3036 */
3037 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3038 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3039 DEBUGOUT("Auto-Negotiation did not complete\n");
3040 goto out;
3041 }
3042
3043 if (hw->mac.type == ixgbe_mac_82599EB) {
3044 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3045 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3046 DEBUGOUT("Link partner is not AN enabled\n");
3047 goto out;
3048 }
3049 }
3050 /*
3051 * Read the 10g AN autoc and LP ability registers and resolve
3052 * local flow control settings accordingly
3053 */
3054 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3055 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3056
3057 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3058 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3059 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3060
3061 out:
3062 return ret_val;
3063 }
3064
/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37: reads the local and
 * link partner auto-negotiation advertisement registers over MDIO and
 * resolves the pause mode from them.
 *
 * NOTE(review): the read_reg return codes are ignored; the ability
 * words are pre-initialized to 0, and a zero word makes
 * ixgbe_negotiate_fc return IXGBE_ERR_FC_NOT_NEGOTIATED — presumably
 * intentional fail-safe behavior, confirm.
 **/
STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
	u16 technology_ability_reg = 0;
	u16 lp_technology_ability_reg = 0;

	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &technology_ability_reg);
	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
			     &lp_technology_ability_reg);

	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
				  (u32)lp_technology_ability_reg,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}
3088
/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 * If negotiation fails or does not apply, falls back to the caller's
 * requested mode and clears hw->fc.fc_was_autonegged.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	/* Only a successful media-specific negotiation below turns this
	 * into IXGBE_SUCCESS; any bail-out path keeps the failure code.
	 */
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_autoneg");

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out. Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			      "Flow control autoneg is disabled");
		goto out;
	}

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up) {
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
		goto out;
	}

	/* Dispatch on media type to the matching clause-37 resolver */
	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		/* Fiber FC autoneg only applies at 1G link speed */
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw))
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = true;
	} else {
		/* Negotiation failed or not applicable: honor the mode
		 * the caller originally requested.
		 */
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
3153
3154 /*
3155 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3156 * @hw: pointer to hardware structure
3157 *
3158 * System-wide timeout range is encoded in PCIe Device Control2 register.
3159 *
3160 * Add 10% to specified maximum and return the number of times to poll for
3161 * completion timeout, in units of 100 microsec. Never return less than
3162 * 800 = 80 millisec.
3163 */
3164 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3165 {
3166 s16 devctl2;
3167 u32 pollcnt;
3168
3169 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3170 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3171
3172 switch (devctl2) {
3173 case IXGBE_PCIDEVCTRL2_65_130ms:
3174 pollcnt = 1300; /* 130 millisec */
3175 break;
3176 case IXGBE_PCIDEVCTRL2_260_520ms:
3177 pollcnt = 5200; /* 520 millisec */
3178 break;
3179 case IXGBE_PCIDEVCTRL2_1_2s:
3180 pollcnt = 20000; /* 2 sec */
3181 break;
3182 case IXGBE_PCIDEVCTRL2_4_8s:
3183 pollcnt = 80000; /* 8 sec */
3184 break;
3185 case IXGBE_PCIDEVCTRL2_17_34s:
3186 pollcnt = 34000; /* 34 sec */
3187 break;
3188 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3189 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3190 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3191 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3192 default:
3193 pollcnt = 800; /* 80 millisec minimum */
3194 break;
3195 }
3196
3197 /* add 10% to spec maximum */
3198 return (pollcnt * 11) / 10;
3199 }
3200
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked (or the device was
	 * surprise-removed, in which case register reads are meaningless)
	 */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear, 100 us between reads */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new master requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer need only the double-reset flag set above */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Give up immediately if the device has been removed */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		      "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
3268
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash).  Retries for up to ~1 second
 * (200 attempts, 5 ms apart); on timeout the stale lock bits are forcibly
 * released and IXGBE_ERR_SWFW_SYNC is returned.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	/* FW's ownership bits for the same resources sit 5 bits above SW's */
	u32 fwmask = mask << 5;
	u32 timeout = 200;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Resource free: claim it, then drop the protecting
			 * EEPROM semaphore.
			 */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
3315
3316 /**
3317 * ixgbe_release_swfw_sync - Release SWFW semaphore
3318 * @hw: pointer to hardware structure
3319 * @mask: Mask to specify which semaphore to release
3320 *
3321 * Releases the SWFW semaphore through the GSSR register for the specified
3322 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3323 **/
3324 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3325 {
3326 u32 gssr;
3327 u32 swmask = mask;
3328
3329 DEBUGFUNC("ixgbe_release_swfw_sync");
3330
3331 ixgbe_get_eeprom_semaphore(hw);
3332
3333 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3334 gssr &= ~swmask;
3335 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3336
3337 ixgbe_release_eeprom_semaphore(hw);
3338 }
3339
3340 /**
3341 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3342 * @hw: pointer to hardware structure
3343 *
3344 * Stops the receive data path and waits for the HW to internally empty
3345 * the Rx security block
3346 **/
3347 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3348 {
3349 #define IXGBE_MAX_SECRX_POLL 40
3350
3351 int i;
3352 int secrxreg;
3353
3354 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3355
3356
3357 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3358 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3359 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3360 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3361 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3362 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3363 break;
3364 else
3365 /* Use interrupt-safe sleep just in case */
3366 usec_delay(1000);
3367 }
3368
3369 /* For informational purposes only */
3370 if (i >= IXGBE_MAX_SECRX_POLL)
3371 DEBUGOUT("Rx unit being enabled before security "
3372 "path fully disabled. Continuing with init.\n");
3373
3374 return IXGBE_SUCCESS;
3375 }
3376
/**
 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: set to false; generic MACs need no SW/FW lock around AUTOC
 * @reg_val: Value we read from AUTOC
 *
 * The default case requires no protection so just do the register read.
 */
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	*locked = false;
	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}
3390
/**
 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @reg_val: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    previous read.  Unused here: generic MACs need no lock.
 *
 * The default case requires no protection so just do the register write.
 */
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
	UNREFERENCED_1PARAMETER(locked);

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
	return IXGBE_SUCCESS;
}
3407
3408 /**
3409 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3410 * @hw: pointer to hardware structure
3411 *
3412 * Enables the receive data path.
3413 **/
3414 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3415 {
3416 u32 secrxreg;
3417
3418 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3419
3420 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3421 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3422 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3423 IXGBE_WRITE_FLUSH(hw);
3424
3425 return IXGBE_SUCCESS;
3426 }
3427
3428 /**
3429 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3430 * @hw: pointer to hardware structure
3431 * @regval: register value to write to RXCTRL
3432 *
3433 * Enables the Rx DMA unit
3434 **/
3435 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3436 {
3437 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3438
3439 if (regval & IXGBE_RXCTRL_RXEN)
3440 ixgbe_enable_rx(hw);
3441 else
3442 ixgbe_disable_rx(hw);
3443
3444 return IXGBE_SUCCESS;
3445 }
3446
/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink (0-3)
 *
 * If the link is down, forces link up via AUTOC (through the MAC's
 * prot_autoc hooks, which handle any SW/FW locking) because LED
 * auto-blink requires an active link, then programs the selected LED
 * field in LEDCTL for blink mode.
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = false;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/* LEDCTL has exactly four LED control fields */
	if (index > 3)
		return IXGBE_ERR_PARAM;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (!link_up) {
		/* prot_autoc_read/write hide MAC-specific AUTOC locking */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* Force link up and restart autoneg */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		IXGBE_WRITE_FLUSH(hw);
		msec_delay(10);
	}

	/* Clear the LED's mode field, then select blink mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3496
/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking (0-3)
 *
 * Undoes the forced-link-up setting applied by the blink-start path and
 * restores the selected LED to link/activity mode.
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = false;

	DEBUGFUNC("ixgbe_blink_led_stop_generic");

	/* LEDCTL has exactly four LED control fields */
	if (index > 3)
		return IXGBE_ERR_PARAM;


	/* prot_autoc_read/write hide MAC-specific AUTOC locking */
	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Drop the forced-link-up bit and restart autoneg */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* Clear blink mode and return the LED to link/activity mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3535
3536 /**
3537 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3538 * @hw: pointer to hardware structure
3539 * @san_mac_offset: SAN MAC address offset
3540 *
3541 * This function will read the EEPROM location for the SAN MAC address
3542 * pointer, and returns the value at that location. This is used in both
3543 * get and set mac_addr routines.
3544 **/
3545 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3546 u16 *san_mac_offset)
3547 {
3548 s32 ret_val;
3549
3550 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3551
3552 /*
3553 * First read the EEPROM pointer to see if the MAC addresses are
3554 * available.
3555 */
3556 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3557 san_mac_offset);
3558 if (ret_val) {
3559 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3560 "eeprom at offset %d failed",
3561 IXGBE_SAN_MAC_ADDR_PTR);
3562 }
3563
3564 return ret_val;
3565 }
3566
/**
 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address (6 bytes, filled with 0xFF when no
 *		  address is available)
 *
 * Reads the SAN MAC address from the EEPROM, if it's available. This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.
 **/
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	u16 san_mac_data, san_mac_offset;
	u8 i;
	s32 ret_val;

	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available. If they're not, no point in calling set_lan_id() here.
	 */
	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
		goto san_mac_addr_out;

	/* make sure we know which port we need to program */
	hw->mac.ops.set_lan_id(hw);
	/* apply the port offset to the address offset */
	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
	/* The address is stored as three little-endian 16-bit words */
	for (i = 0; i < 3; i++) {
		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
					      &san_mac_data);
		if (ret_val) {
			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
				      "eeprom read at offset %d failed",
				      san_mac_offset);
			goto san_mac_addr_out;
		}
		san_mac_addr[i * 2] = (u8)(san_mac_data);
		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
		san_mac_offset++;
	}
	return IXGBE_SUCCESS;

san_mac_addr_out:
	/*
	 * No addresses available in this EEPROM. It's not an
	 * error though, so just wipe the local address and return.
	 */
	for (i = 0; i < 6; i++)
		san_mac_addr[i] = 0xFF;
	return IXGBE_SUCCESS;
}
3622
3623 /**
3624 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3625 * @hw: pointer to hardware structure
3626 * @san_mac_addr: SAN MAC address
3627 *
3628 * Write a SAN MAC address to the EEPROM.
3629 **/
3630 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3631 {
3632 s32 ret_val;
3633 u16 san_mac_data, san_mac_offset;
3634 u8 i;
3635
3636 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3637
3638 /* Look for SAN mac address pointer. If not defined, return */
3639 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3640 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3641 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3642
3643 /* Make sure we know which port we need to write */
3644 hw->mac.ops.set_lan_id(hw);
3645 /* Apply the port offset to the address offset */
3646 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3647 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3648
3649 for (i = 0; i < 3; i++) {
3650 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3651 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3652 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3653 san_mac_offset++;
3654 }
3655
3656 return IXGBE_SUCCESS;
3657 }
3658
/**
 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
 * @hw: pointer to hardware structure
 *
 * Read PCIe configuration space, and get the MSI-X vector count from
 * the capabilities table.  Returns 1 for unknown MAC types and 0 when
 * the device has been surprise-removed.
 **/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
	u16 msix_count = 1;
	u16 max_msix_count;
	u16 pcie_offset;

	/* Pick the per-MAC MSI-X capability offset and HW vector ceiling */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
		break;
	default:
		return msix_count;
	}

	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
	/* A removed device reads as all-ones; force the count to 0 */
	if (IXGBE_REMOVED(hw->hw_addr))
		msix_count = 0;
	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

	/* MSI-X count is zero-based in HW */
	msix_count++;

	if (msix_count > max_msix_count)
		msix_count = max_msix_count;

	return msix_count;
}
3703
/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is aleady in; adds to the pool list.
 * Returns the RAR index used, or IXGBE_ERR_INVALID_MAC_ADDR when the
 * RAR table is full.
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low = addr[0] | (addr[1] << 8)
			   | (addr[2] << 16)
			   | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search. It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* An entry with the Address-Valid bit clear is a free slot */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* High halves match; confirm with the low word */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;    /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}
3772
/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar (or IXGBE_CLEAR_VMDQ_ALL
 *	  to drop every pool association)
 *
 * Clears the requested pool bit(s) in the RAR's MPSAR pair and, when the
 * last pool using the RAR is removed, clears the RAR itself (except for
 * RAR 0 and the SAN MAC RAR, which are preserved).
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* Reads from a removed device are garbage; don't act on them */
	if (IXGBE_REMOVED(hw->hw_addr))
		goto done;

	/* Nothing associated with this RAR: nothing to clear */
	if (!mpsar_lo && !mpsar_hi)
		goto done;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		/* Pools 0-31 live in MPSAR_LO */
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		/* Pools 32-63 live in MPSAR_HI */
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 &&
	    rar != 0 && rar != hw->mac.san_mac_rar_index)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return IXGBE_SUCCESS;
}
3826
3827 /**
3828 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3829 * @hw: pointer to hardware struct
3830 * @rar: receive address register index to associate with a VMDq index
3831 * @vmdq: VMDq pool index
3832 **/
3833 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3834 {
3835 u32 mpsar;
3836 u32 rar_entries = hw->mac.num_rar_entries;
3837
3838 DEBUGFUNC("ixgbe_set_vmdq_generic");
3839
3840 /* Make sure we are using a valid rar index range */
3841 if (rar >= rar_entries) {
3842 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3843 "RAR index %d is out of range.\n", rar);
3844 return IXGBE_ERR_INVALID_ARGUMENT;
3845 }
3846
3847 if (vmdq < 32) {
3848 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3849 mpsar |= 1 << vmdq;
3850 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3851 } else {
3852 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3853 mpsar |= 1 << (vmdq - 32);
3854 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3855 }
3856 return IXGBE_SUCCESS;
3857 }
3858
3859 /**
3860 * This function should only be involved in the IOV mode.
3861 * In IOV mode, Default pool is next pool after the number of
3862 * VFs advertized and not 0.
3863 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3864 *
3865 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3866 * @hw: pointer to hardware struct
3867 * @vmdq: VMDq pool index
3868 **/
3869 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3870 {
3871 u32 rar = hw->mac.san_mac_rar_index;
3872
3873 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3874
3875 if (vmdq < 32) {
3876 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3877 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3878 } else {
3879 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3880 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3881 }
3882
3883 return IXGBE_SUCCESS;
3884 }
3885
3886 /**
3887 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3888 * @hw: pointer to hardware structure
3889 **/
3890 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3891 {
3892 int i;
3893
3894 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3895 DEBUGOUT(" Clearing UTA\n");
3896
3897 for (i = 0; i < 128; i++)
3898 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3899
3900 return IXGBE_SUCCESS;
3901 }
3902
/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vlvf_bypass: true to find factory default VLAN only; no empty slot is
 *		 claimed when the VLAN is absent
 *
 * return the VLVF index where this VLAN id should be placed, or a
 * negative IXGBE_ERR_NO_SPACE when no slot is available
 *
 **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 *
	 * NOTE: IXGBE_ERR_NO_SPACE is negative, so the "!first_empty_slot"
	 * check below never overwrites it; 0 means "no empty slot seen yet".
	 */
	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
	 */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN. Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");

	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}
3950
/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
 * @vlan_on: boolean flag to turn on/off VLAN
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool vlvf_bypass)
{
	u32 regidx, vfta_delta, vfta;
	s32 ret_val;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	/* 12-bit VLAN id, 64 pools maximum */
	if (vlan > 4095 || vind > 63)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = 1 << (vlan % 32);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

	/*
	 * vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register. Since the diff
	 * is an XOR mask we can just update the vfta using an XOR.
	 * (The mask collapses to 0 when the bit already has the desired
	 * state, making the later write-back a no-op.)
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
					 vfta, vlvf_bypass);
	if (ret_val != IXGBE_SUCCESS) {
		/* In bypass mode a VLVF failure is tolerated; the VFTA is
		 * still updated below.
		 */
		if (vlvf_bypass)
			goto vfta_update;
		return ret_val;
	}

vfta_update:
	/* Update VFTA now that we are ready for traffic */
	if (vfta_delta)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);

	return IXGBE_SUCCESS;
}
4014
/**
 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
 * @vfta_delta: pointer to the difference between the current value of VFTA
 *		 and the desired value
 * @vfta: the desired value of the VFTA
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified bit in VLVF table.
 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, u32 *vfta_delta, u32 vfta,
			   bool vlvf_bypass)
{
	u32 bits;
	s32 vlvf_index;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	/* 12-bit VLAN id, 64 pools maximum */
	if (vlan > 4095 || vind > 63)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
		return IXGBE_SUCCESS;

	/* Negative return means no slot (propagated to the caller) */
	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0)
		return vlvf_index;

	/* Each VLVF entry owns two VLVFB registers: pools 0-31 and 32-63 */
	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));

	/* set the pool bit */
	bits |= 1 << (vind % 32);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit (the set above is undone by this XOR) */
	bits ^= 1 << (vind % 32);

	/* Check the sibling VLVFB half too: if both are empty the whole
	 * VLVF entry can be retired.
	 */
	if (!bits &&
	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (*vfta_delta)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);

		/* disable VLVF and clear remaining bit from pool */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);

		return IXGBE_SUCCESS;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	*vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);

	return IXGBE_SUCCESS;
}
4103
4104 /**
4105 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4106 * @hw: pointer to hardware structure
4107 *
4108 * Clears the VLAN filer table, and the VMDq index associated with the filter
4109 **/
4110 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4111 {
4112 u32 offset;
4113
4114 DEBUGFUNC("ixgbe_clear_vfta_generic");
4115
4116 for (offset = 0; offset < hw->mac.vft_size; offset++)
4117 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4118
4119 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4120 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4121 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4122 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4123 }
4124
4125 return IXGBE_SUCCESS;
4126 }
4127
4128 /**
4129 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4130 * @hw: pointer to hardware structure
4131 *
4132 * Contains the logic to identify if we need to verify link for the
4133 * crosstalk fix
4134 **/
4135 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4136 {
4137
4138 /* Does FW say we need the fix */
4139 if (!hw->need_crosstalk_fix)
4140 return false;
4141
4142 /* Only consider SFP+ PHYs i.e. media type fiber */
4143 switch (hw->mac.ops.get_media_type(hw)) {
4144 case ixgbe_media_type_fiber:
4145 case ixgbe_media_type_fiber_qsfp:
4146 break;
4147 default:
4148 return false;
4149 }
4150
4151 return true;
4152 }
4153
/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		u32 sfp_cage_full;

		/* The module-present signal is on a different SDP pin
		 * depending on the MAC generation.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP0;
			break;
		default:
			/* sanity check - No SFP+ devices here */
			sfp_cage_full = false;
			break;
		}

		/* Empty cage: report link down without consulting LINKS */
		if (!sfp_cage_full) {
			*link_up = false;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* Poll at 100 ms intervals for up to max_link_up_time
		 * iterations waiting for the link to come up.
		 */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* Decode the speed field; the NON_STD bit remaps 10G->2.5G and
	 * 100M->5G on X550-class parts.
	 */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* Only the X550EM_A 1G-T devices report a real 10 Mb rate */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
			*speed = IXGBE_LINK_SPEED_10_FULL;
		}
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}
4259
/**
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
 * @wwnn_prefix: the alternative WWNN prefix
 * @wwpn_prefix: the alternative WWPN prefix
 *
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check the support for the alternative WWNN/WWPN prefix support.
 *
 * Always returns IXGBE_SUCCESS: EEPROM read failures are only logged via
 * ERROR_REPORT2 and leave the affected prefix at 0xFFFF ("unavailable").
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	/* a pointer of 0 or 0xFFFF means the block is not programmed */
	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		/* log the WWNN failure but still attempt the WWPN read */
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}
4317
/**
 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
 * @hw: pointer to hardware structure
 * @bs: the fcoe boot status
 *
 * This function will read the FCOE boot status from the iSCSI FCOE block
 *
 * Returns the status of the last EEPROM read performed; *bs remains
 * "unavailable" unless the FCOE flags word was read successfully.
 **/
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
{
	u16 offset, caps, flags;
	s32 status;

	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");

	/* clear output first */
	*bs = ixgbe_fcoe_bootstatus_unavailable;

	/* check if FCOE IBA block is present */
	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
	status = hw->eeprom.ops.read(hw, offset, &caps);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
		goto out;

	/* check if iSCSI FCOE block is populated */
	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
	if (status != IXGBE_SUCCESS)
		goto out;

	/* a pointer of 0 or 0xFFFF means the block is not programmed */
	if ((offset == 0) || (offset == 0xFFFF))
		goto out;

	/* read fcoe flags in iSCSI FCOE block */
	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
	status = hw->eeprom.ops.read(hw, offset, &flags);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
		*bs = ixgbe_fcoe_bootstatus_enabled;
	else
		*bs = ixgbe_fcoe_bootstatus_disabled;

out:
	return status;
}
4366
4367 /**
4368 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4369 * @hw: pointer to hardware structure
4370 * @enable: enable or disable switch for MAC anti-spoofing
4371 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4372 *
4373 **/
4374 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4375 {
4376 int vf_target_reg = vf >> 3;
4377 int vf_target_shift = vf % 8;
4378 u32 pfvfspoof;
4379
4380 if (hw->mac.type == ixgbe_mac_82598EB)
4381 return;
4382
4383 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4384 if (enable)
4385 pfvfspoof |= (1 << vf_target_shift);
4386 else
4387 pfvfspoof &= ~(1 << vf_target_shift);
4388 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4389 }
4390
4391 /**
4392 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4393 * @hw: pointer to hardware structure
4394 * @enable: enable or disable switch for VLAN anti-spoofing
4395 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4396 *
4397 **/
4398 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4399 {
4400 int vf_target_reg = vf >> 3;
4401 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4402 u32 pfvfspoof;
4403
4404 if (hw->mac.type == ixgbe_mac_82598EB)
4405 return;
4406
4407 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4408 if (enable)
4409 pfvfspoof |= (1 << vf_target_shift);
4410 else
4411 pfvfspoof &= ~(1 << vf_target_shift);
4412 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4413 }
4414
4415 /**
4416 * ixgbe_get_device_caps_generic - Get additional device capabilities
4417 * @hw: pointer to hardware structure
4418 * @device_caps: the EEPROM word with the extra device capabilities
4419 *
4420 * This function will read the EEPROM location for the device capabilities,
4421 * and return the word through device_caps.
4422 **/
4423 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4424 {
4425 DEBUGFUNC("ixgbe_get_device_caps_generic");
4426
4427 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4428
4429 return IXGBE_SUCCESS;
4430 }
4431
4432 /**
4433 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4434 * @hw: pointer to hardware structure
4435 *
4436 **/
4437 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4438 {
4439 u32 regval;
4440 u32 i;
4441
4442 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4443
4444 /* Enable relaxed ordering */
4445 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4446 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4447 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4448 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4449 }
4450
4451 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4452 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4453 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4454 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4455 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4456 }
4457
4458 }
4459
4460 /**
4461 * ixgbe_calculate_checksum - Calculate checksum for buffer
4462 * @buffer: pointer to EEPROM
4463 * @length: size of EEPROM to calculate a checksum for
4464 * Calculates the checksum for some buffer on a specified length. The
4465 * checksum calculated is returned.
4466 **/
4467 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4468 {
4469 u32 i;
4470 u8 sum = 0;
4471
4472 DEBUGFUNC("ixgbe_calculate_checksum");
4473
4474 if (!buffer)
4475 return 0;
4476
4477 for (i = 0; i < length; i++)
4478 sum += buffer[i];
4479
4480 return (u8) (0 - sum);
4481 }
4482
/**
 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
 * @hw: pointer to the HW structure
 * @buffer: command to write and where the return status will be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 * by the caller.
 **/
s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	/* reject empty or oversized commands up front */
	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll (1 ms per iteration) until the ARC clears the command bit.
	 * A timeout of 0 skips the wait entirely and goes straight to the
	 * status-valid check below.
	 */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command completion: fail if we ran out the full timeout,
	 * or if the firmware did not set the status-valid bit.
	 */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}
4556
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (true) or not (false)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u16 dword_len;
	u16 buf_len;
	s32 status;
	u32 bi;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	/* issue the command with the semaphore held */
	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (!buf_len)
		goto rel_out;

	/* make sure the caller's buffer can hold header + reply payload */
	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): the inclusive <= bound reads dwords 1..dword_len,
	 * i.e. exactly dword_len payload dwords after the one-dword header;
	 * this relies on sizeof(struct ixgbe_hic_hdr) being 4 bytes.
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
4637
4638 /**
4639 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4640 * @hw: pointer to the HW structure
4641 * @maj: driver version major number
4642 * @min: driver version minor number
4643 * @build: driver version build number
4644 * @sub: driver version sub build number
4645 *
4646 * Sends driver version number to firmware through the manageability
4647 * block. On success return IXGBE_SUCCESS
4648 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4649 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4650 **/
4651 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4652 u8 build, u8 sub, u16 len,
4653 const char *driver_ver)
4654 {
4655 struct ixgbe_hic_drv_info fw_cmd;
4656 int i;
4657 s32 ret_val = IXGBE_SUCCESS;
4658
4659 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4660 UNREFERENCED_2PARAMETER(len, driver_ver);
4661
4662 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4663 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4664 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4665 fw_cmd.port_num = (u8)hw->bus.func;
4666 fw_cmd.ver_maj = maj;
4667 fw_cmd.ver_min = min;
4668 fw_cmd.ver_build = build;
4669 fw_cmd.ver_sub = sub;
4670 fw_cmd.hdr.checksum = 0;
4671 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4672 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4673 fw_cmd.pad = 0;
4674 fw_cmd.pad2 = 0;
4675
4676 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4677 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4678 sizeof(fw_cmd),
4679 IXGBE_HI_COMMAND_TIMEOUT,
4680 true);
4681 if (ret_val != IXGBE_SUCCESS)
4682 continue;
4683
4684 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4685 FW_CEM_RESP_STATUS_SUCCESS)
4686 ret_val = IXGBE_SUCCESS;
4687 else
4688 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4689
4690 break;
4691 }
4692
4693 return ret_val;
4694 }
4695
/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	/* guard the divisions below against num_pb == 0 */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through to configure remaining packet buffers;
		 * i is left at num_pb / 2 so EQUAL splits only the rest.
		 */
	case PBA_STRATEGY_EQUAL:
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
4754
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs. This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* bail out if the device has been surprise-removed */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
4815
/* EMC thermal sensor I2C register map: temperature data registers,
 * indexed by the sensor number decoded from the NVM ETS words.
 */
STATIC const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
/* Matching thermal-limit registers for the same four sensors */
STATIC const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};
4828
/**
 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Reads each NVM-described EMC sensor over I2C and stores the temperature
 * into hw->mac.thermal_sensor_data. Returns IXGBE_NOT_IMPLEMENTED on
 * unsupported hardware, otherwise the first EEPROM/I2C error encountered.
 **/
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");

	/* Only support thermal sensors attached to 82599 physical port 0 */
	if ((hw->mac.type != ixgbe_mac_82599EB) ||
	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	/* locate the ETS block in the NVM */
	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
	if (status)
		goto out;

	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
	if (status)
		goto out;

	/* only the EMC sensor type is supported here */
	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
	    != IXGBE_ETS_TYPE_EMC) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		/* one sensor-descriptor word per sensor follows the config */
		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			goto out;

		/* NOTE(review): sensor_index comes from the NVM and indexes
		 * the 4-entry ixgbe_emc_temp_data table; presumably the
		 * DATA_INDEX_MASK limits it to 0-3 — verify mask width.
		 */
		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);

		/* location 0 means "sensor not present"; skip the I2C read */
		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				goto out;
		}
	}
out:
	return status;
}
4903
/**
 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Inits the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 **/
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 offset;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 low_thresh_delta;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 therm_limit;
	u8 i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");

	/* start from a clean sensor-data cache */
	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to 82599 physical port 0 */
	if ((hw->mac.type != ixgbe_mac_82599EB) ||
	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return IXGBE_NOT_IMPLEMENTED;

	/* locate the ETS block in the NVM */
	offset = IXGBE_ETS_CFG;
	if (hw->eeprom.ops.read(hw, offset, &ets_offset))
		goto eeprom_err;
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return IXGBE_NOT_IMPLEMENTED;

	offset = ets_offset;
	if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
		goto eeprom_err;
	/* only the EMC sensor type is supported here */
	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
	    != IXGBE_ETS_TYPE_EMC)
		return IXGBE_NOT_IMPLEMENTED;

	/* delta between caution and max-operating thresholds */
	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
			    IXGBE_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);

	for (i = 0; i < num_sensors; i++) {
		offset = ets_offset + 1 + i;
		/* a bad sensor word is reported but does not abort the rest */
		if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
				      "eeprom read at offset %d failed",
				      offset);
			continue;
		}
		/* NOTE(review): sensor_index comes from the NVM and indexes
		 * the 4-entry ixgbe_emc_therm_limit table; presumably the
		 * DATA_INDEX_MASK limits it to 0-3 — verify mask width.
		 */
		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		/* program the limit into the EMC over I2C */
		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		/* cache thresholds for sensors that are actually present */
		if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
			data->sensor[i].location = sensor_location;
			data->sensor[i].caution_thresh = therm_limit;
			data->sensor[i].max_op_thresh = therm_limit -
							low_thresh_delta;
		}
	}
	return status;

eeprom_err:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_NOT_IMPLEMENTED;
}
4984
4985
4986 /**
4987 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4988 * @hw: pointer to hardware structure
4989 * @map: pointer to u8 arr for returning map
4990 *
4991 * Read the rtrup2tc HW register and resolve its content into map
4992 **/
4993 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4994 {
4995 u32 reg, i;
4996
4997 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4998 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4999 map[i] = IXGBE_RTRUP2TC_UP_MASK &
5000 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5001 return;
5002 }
5003
5004 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5005 {
5006 u32 pfdtxgswc;
5007 u32 rxctrl;
5008
5009 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5010 if (rxctrl & IXGBE_RXCTRL_RXEN) {
5011 if (hw->mac.type != ixgbe_mac_82598EB) {
5012 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5013 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5014 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5015 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5016 hw->mac.set_lben = true;
5017 } else {
5018 hw->mac.set_lben = false;
5019 }
5020 }
5021 rxctrl &= ~IXGBE_RXCTRL_RXEN;
5022 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5023 }
5024 }
5025
5026 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5027 {
5028 u32 pfdtxgswc;
5029 u32 rxctrl;
5030
5031 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5032 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5033
5034 if (hw->mac.type != ixgbe_mac_82598EB) {
5035 if (hw->mac.set_lben) {
5036 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5037 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5038 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5039 hw->mac.set_lben = false;
5040 }
5041 }
5042 }
5043
5044 /**
5045 * ixgbe_mng_present - returns true when management capability is present
5046 * @hw: pointer to hardware structure
5047 */
5048 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5049 {
5050 u32 fwsm;
5051
5052 if (hw->mac.type < ixgbe_mac_82599EB)
5053 return false;
5054
5055 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5056 fwsm &= IXGBE_FWSM_MODE_MASK;
5057 return fwsm == IXGBE_FWSM_FW_MODE_PT;
5058 }
5059
5060 /**
5061 * ixgbe_mng_enabled - Is the manageability engine enabled?
5062 * @hw: pointer to hardware structure
5063 *
5064 * Returns true if the manageability engine is enabled.
5065 **/
5066 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5067 {
5068 u32 fwsm, manc, factps;
5069
5070 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5071 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5072 return false;
5073
5074 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5075 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5076 return false;
5077
5078 if (hw->mac.type <= ixgbe_mac_X540) {
5079 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5080 if (factps & IXGBE_FACTPS_MNGCG)
5081 return false;
5082 }
5083
5084 return true;
5085 }
5086
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: bitmask of requested link speeds (IXGBE_LINK_SPEED_*)
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 *
 * Tries each requested-and-supported speed in priority order (10G, then
 * 1G), reconfiguring the optics and polling for link at each step.  If no
 * speed links up and more than one was tried, recurses once with only the
 * highest speed attempted.  On exit (success or not), updates
 * hw->phy.autoneg_advertised from the originally requested mask.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = false;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	/* link_speed now holds the device's supported-speeds mask */
	speed &= link_speed;

	/* Try each speed one by one, highest priority first. We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			/* Not fatal: continue and let setup_mac_link try */
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, false);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* Only record 1G as "highest" if 10G was not attempted */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed
		 * (single 100ms poll here, unlike the 5x loop for 10G)
		 */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link. Configure back to the highest speed we tried,
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	/* status is the outcome of the last setup attempt (possibly the
	 * recursive fallback); IXGBE_SUCCESS here does not imply link_up.
	 */
	return status;
}
5231
5232 /**
5233 * ixgbe_set_soft_rate_select_speed - Set module link speed
5234 * @hw: pointer to hardware structure
5235 * @speed: link speed to set
5236 *
5237 * Set module link speed via the soft rate select.
5238 */
5239 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5240 ixgbe_link_speed speed)
5241 {
5242 s32 status;
5243 u8 rs, eeprom_data;
5244
5245 switch (speed) {
5246 case IXGBE_LINK_SPEED_10GB_FULL:
5247 /* one bit mask same as setting on */
5248 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5249 break;
5250 case IXGBE_LINK_SPEED_1GB_FULL:
5251 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5252 break;
5253 default:
5254 DEBUGOUT("Invalid fixed module speed\n");
5255 return;
5256 }
5257
5258 /* Set RS0 */
5259 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5260 IXGBE_I2C_EEPROM_DEV_ADDR2,
5261 &eeprom_data);
5262 if (status) {
5263 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5264 goto out;
5265 }
5266
5267 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5268
5269 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5270 IXGBE_I2C_EEPROM_DEV_ADDR2,
5271 eeprom_data);
5272 if (status) {
5273 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5274 goto out;
5275 }
5276
5277 /* Set RS1 */
5278 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5279 IXGBE_I2C_EEPROM_DEV_ADDR2,
5280 &eeprom_data);
5281 if (status) {
5282 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5283 goto out;
5284 }
5285
5286 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5287
5288 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5289 IXGBE_I2C_EEPROM_DEV_ADDR2,
5290 eeprom_data);
5291 if (status) {
5292 DEBUGOUT("Failed to write Rx Rate Select RS1\n");
5293 goto out;
5294 }
5295 out:
5296 return;
5297 }