]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/drivers/net/ixgbe/base/ixgbe_82598.c
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / dpdk / drivers / net / ixgbe / base / ixgbe_82598.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
3 */
4
5 #include "ixgbe_type.h"
6 #include "ixgbe_82598.h"
7 #include "ixgbe_api.h"
8 #include "ixgbe_common.h"
9 #include "ixgbe_phy.h"
10
/* 82598-specific device limits used to size the shared MAC info struct. */
#define IXGBE_82598_MAX_TX_QUEUES 32
#define IXGBE_82598_MAX_RX_QUEUES 64
#define IXGBE_82598_RAR_ENTRIES   16
#define IXGBE_82598_MC_TBL_SIZE  128
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE	 512

/* Forward declarations for the file-local (STATIC) 82598 routines that are
 * installed as function pointers in ixgbe_init_ops_82598() below.
 */
STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg);
STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete);
STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete);
STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete);
STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy);
STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data);
40 /**
41 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
42 * @hw: pointer to the HW structure
43 *
44 * The defaults for 82598 should be in the range of 50us to 50ms,
45 * however the hardware default for these parts is 500us to 1ms which is less
46 * than the 10ms recommended by the pci-e spec. To address this we need to
47 * increase the value to either 10ms to 250ms for capability version 1 config,
48 * or 16ms to 55ms for version 2.
49 **/
50 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
51 {
52 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
53 u16 pcie_devctl2;
54
55 /* only take action if timeout value is defaulted to 0 */
56 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
57 goto out;
58
59 /*
60 * if capababilities version is type 1 we can write the
61 * timeout of 10ms to 250ms through the GCR register
62 */
63 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
64 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
65 goto out;
66 }
67
68 /*
69 * for version 2 capabilities we need to write the config space
70 * directly in order to set the completion timeout value for
71 * 16ms to 55ms
72 */
73 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
74 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
75 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
76 out:
77 /* disable completion timeout resend */
78 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
79 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
80 }
81
82 /**
83 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
84 * @hw: pointer to hardware structure
85 *
86 * Initialize the function pointers and assign the MAC type for 82598.
87 * Does not touch the hardware.
88 **/
89 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
90 {
91 struct ixgbe_mac_info *mac = &hw->mac;
92 struct ixgbe_phy_info *phy = &hw->phy;
93 s32 ret_val;
94
95 DEBUGFUNC("ixgbe_init_ops_82598");
96
97 ret_val = ixgbe_init_phy_ops_generic(hw);
98 ret_val = ixgbe_init_ops_generic(hw);
99
100 /* PHY */
101 phy->ops.init = ixgbe_init_phy_ops_82598;
102
103 /* MAC */
104 mac->ops.start_hw = ixgbe_start_hw_82598;
105 mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
106 mac->ops.reset_hw = ixgbe_reset_hw_82598;
107 mac->ops.get_media_type = ixgbe_get_media_type_82598;
108 mac->ops.get_supported_physical_layer =
109 ixgbe_get_supported_physical_layer_82598;
110 mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
111 mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
112 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
113 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
114
115 /* RAR, Multicast, VLAN */
116 mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
117 mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
118 mac->ops.set_vfta = ixgbe_set_vfta_82598;
119 mac->ops.set_vlvf = NULL;
120 mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
121
122 /* Flow Control */
123 mac->ops.fc_enable = ixgbe_fc_enable_82598;
124
125 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
126 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
127 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
128 mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
129 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
130 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
131 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
132
133 /* SFP+ Module */
134 phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
135 phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
136
137 /* Link */
138 mac->ops.check_link = ixgbe_check_mac_link_82598;
139 mac->ops.setup_link = ixgbe_setup_mac_link_82598;
140 mac->ops.flap_tx_laser = NULL;
141 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
142 mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
143
144 /* Manageability interface */
145 mac->ops.set_fw_drv_ver = NULL;
146
147 mac->ops.get_rtrup2tc = NULL;
148
149 return ret_val;
150 }
151
152 /**
153 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
154 * @hw: pointer to hardware structure
155 *
156 * Initialize any function pointers that were not able to be
157 * set during init_shared_code because the PHY/SFP type was
158 * not known. Perform the SFP init if necessary.
159 *
160 **/
161 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
162 {
163 struct ixgbe_mac_info *mac = &hw->mac;
164 struct ixgbe_phy_info *phy = &hw->phy;
165 s32 ret_val = IXGBE_SUCCESS;
166 u16 list_offset, data_offset;
167
168 DEBUGFUNC("ixgbe_init_phy_ops_82598");
169
170 /* Identify the PHY */
171 phy->ops.identify(hw);
172
173 /* Overwrite the link function pointers if copper PHY */
174 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
175 mac->ops.setup_link = ixgbe_setup_copper_link_82598;
176 mac->ops.get_link_capabilities =
177 ixgbe_get_copper_link_capabilities_generic;
178 }
179
180 switch (hw->phy.type) {
181 case ixgbe_phy_tn:
182 phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
183 phy->ops.check_link = ixgbe_check_phy_link_tnx;
184 phy->ops.get_firmware_version =
185 ixgbe_get_phy_firmware_version_tnx;
186 break;
187 case ixgbe_phy_nl:
188 phy->ops.reset = ixgbe_reset_phy_nl;
189
190 /* Call SFP+ identify routine to get the SFP+ module type */
191 ret_val = phy->ops.identify_sfp(hw);
192 if (ret_val != IXGBE_SUCCESS)
193 goto out;
194 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
195 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
196 goto out;
197 }
198
199 /* Check to see if SFP+ module is supported */
200 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
201 &list_offset,
202 &data_offset);
203 if (ret_val != IXGBE_SUCCESS) {
204 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
205 goto out;
206 }
207 break;
208 default:
209 break;
210 }
211
212 out:
213 return ret_val;
214 }
215
216 /**
217 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
218 * @hw: pointer to hardware structure
219 *
220 * Starts the hardware using the generic start_hw function.
221 * Disables relaxed ordering Then set pcie completion timeout
222 *
223 **/
224 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
225 {
226 u32 regval;
227 u32 i;
228 s32 ret_val = IXGBE_SUCCESS;
229
230 DEBUGFUNC("ixgbe_start_hw_82598");
231
232 ret_val = ixgbe_start_hw_generic(hw);
233 if (ret_val)
234 return ret_val;
235
236 /* Disable relaxed ordering */
237 for (i = 0; ((i < hw->mac.max_tx_queues) &&
238 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
239 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
240 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
241 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
242 }
243
244 for (i = 0; ((i < hw->mac.max_rx_queues) &&
245 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
246 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
247 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
248 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
249 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
250 }
251
252 /* set the completion timeout for interface */
253 ixgbe_set_pcie_completion_timeout(hw);
254
255 return ret_val;
256 }
257
258 /**
259 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
260 * @hw: pointer to hardware structure
261 * @speed: pointer to link speed
262 * @autoneg: boolean auto-negotiation value
263 *
264 * Determines the link capabilities by reading the AUTOC register.
265 **/
266 STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
267 ixgbe_link_speed *speed,
268 bool *autoneg)
269 {
270 s32 status = IXGBE_SUCCESS;
271 u32 autoc = 0;
272
273 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
274
275 /*
276 * Determine link capabilities based on the stored value of AUTOC,
277 * which represents EEPROM defaults. If AUTOC value has not been
278 * stored, use the current register value.
279 */
280 if (hw->mac.orig_link_settings_stored)
281 autoc = hw->mac.orig_autoc;
282 else
283 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
284
285 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
286 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
287 *speed = IXGBE_LINK_SPEED_1GB_FULL;
288 *autoneg = false;
289 break;
290
291 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
292 *speed = IXGBE_LINK_SPEED_10GB_FULL;
293 *autoneg = false;
294 break;
295
296 case IXGBE_AUTOC_LMS_1G_AN:
297 *speed = IXGBE_LINK_SPEED_1GB_FULL;
298 *autoneg = true;
299 break;
300
301 case IXGBE_AUTOC_LMS_KX4_AN:
302 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
303 *speed = IXGBE_LINK_SPEED_UNKNOWN;
304 if (autoc & IXGBE_AUTOC_KX4_SUPP)
305 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
306 if (autoc & IXGBE_AUTOC_KX_SUPP)
307 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
308 *autoneg = true;
309 break;
310
311 default:
312 status = IXGBE_ERR_LINK_SETUP;
313 break;
314 }
315
316 return status;
317 }
318
319 /**
320 * ixgbe_get_media_type_82598 - Determines media type
321 * @hw: pointer to hardware structure
322 *
323 * Returns the media type (fiber, copper, backplane)
324 **/
325 STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
326 {
327 enum ixgbe_media_type media_type;
328
329 DEBUGFUNC("ixgbe_get_media_type_82598");
330
331 /* Detect if there is a copper PHY attached. */
332 switch (hw->phy.type) {
333 case ixgbe_phy_cu_unknown:
334 case ixgbe_phy_tn:
335 media_type = ixgbe_media_type_copper;
336 goto out;
337 default:
338 break;
339 }
340
341 /* Media type for I82598 is based on device ID */
342 switch (hw->device_id) {
343 case IXGBE_DEV_ID_82598:
344 case IXGBE_DEV_ID_82598_BX:
345 /* Default device ID is mezzanine card KX/KX4 */
346 media_type = ixgbe_media_type_backplane;
347 break;
348 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
349 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
350 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
351 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
352 case IXGBE_DEV_ID_82598EB_XF_LR:
353 case IXGBE_DEV_ID_82598EB_SFP_LOM:
354 media_type = ixgbe_media_type_fiber;
355 break;
356 case IXGBE_DEV_ID_82598EB_CX4:
357 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
358 media_type = ixgbe_media_type_cx4;
359 break;
360 case IXGBE_DEV_ID_82598AT:
361 case IXGBE_DEV_ID_82598AT2:
362 media_type = ixgbe_media_type_copper;
363 break;
364 default:
365 media_type = ixgbe_media_type_unknown;
366 break;
367 }
368 out:
369 return media_type;
370 }
371
372 /**
373 * ixgbe_fc_enable_82598 - Enable flow control
374 * @hw: pointer to hardware structure
375 *
376 * Enable flow control according to the current settings.
377 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	DEBUGFUNC("ixgbe_fc_enable_82598");

	/* Validate the water mark configuration: a zero pause time is
	 * rejected before any register is touched.
	 */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods; it must also be
	 * strictly below the high water mark for every traffic class
	 * that has Tx pause configured.
	 */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		/* Strip the Rx-pause component from the requested mode. */
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON.
	 * The water mark value occupies the upper bits of FCRTL/FCRTH
	 * (shifted by 10); classes without Tx pause get zeroed thresholds.
	 */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register); the 16-bit pause time
	 * is replicated into both halves of each FCTTV word.
	 */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
518
519 /**
520 * ixgbe_start_mac_link_82598 - Configures MAC link settings
521 * @hw: pointer to hardware structure
522 * @autoneg_wait_to_complete: true when waiting for completion is needed
523 *
524 * Configures link settings based on values in the ixgbe_hw struct.
525 * Restarts the link. Performs autonegotiation if needed.
526 **/
527 STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
528 bool autoneg_wait_to_complete)
529 {
530 u32 autoc_reg;
531 u32 links_reg;
532 u32 i;
533 s32 status = IXGBE_SUCCESS;
534
535 DEBUGFUNC("ixgbe_start_mac_link_82598");
536
537 /* Restart link */
538 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
539 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
540 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
541
542 /* Only poll for autoneg to complete if specified to do so */
543 if (autoneg_wait_to_complete) {
544 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
545 IXGBE_AUTOC_LMS_KX4_AN ||
546 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
547 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
548 links_reg = 0; /* Just in case Autoneg time = 0 */
549 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
550 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
551 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
552 break;
553 msec_delay(100);
554 }
555 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
556 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
557 DEBUGOUT("Autonegotiation did not complete.\n");
558 }
559 }
560 }
561
562 /* Add delay to filter out noises during initial link setup */
563 msec_delay(50);
564
565 return status;
566 }
567
568 /**
569 * ixgbe_validate_link_ready - Function looks for phy link
570 * @hw: pointer to hardware structure
571 *
572 * Function indicates success when phy link is available. If phy is not ready
573 * within 5 seconds of MAC indicating link, the function returns error.
574 **/
575 STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
576 {
577 u32 timeout;
578 u16 an_reg;
579
580 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
581 return IXGBE_SUCCESS;
582
583 for (timeout = 0;
584 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
585 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
586 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
587
588 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
589 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
590 break;
591
592 msec_delay(100);
593 }
594
595 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
596 DEBUGOUT("Link was indicated but link is down\n");
597 return IXGBE_ERR_LINK_SETUP;
598 }
599
600 return IXGBE_SUCCESS;
601 }
602
603 /**
604 * ixgbe_check_mac_link_82598 - Get link/speed status
605 * @hw: pointer to hardware structure
606 * @speed: pointer to link speed
607 * @link_up: true is link is up, false otherwise
608 * @link_up_wait_to_complete: bool used to wait for link up or not
609 *
610 * Reads the links register to determine if link is up and the current speed
611 **/
STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	DEBUGFUNC("ixgbe_check_mac_link_82598");

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
	 * indicates link down. OxC00C is read to check that the XAUI lanes
	 * are active. Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/* 0xC79F is read twice back-to-back and only the second
		 * value is kept. NOTE(review): presumably the first read
		 * clears a latched/stale status — confirm against the PHY
		 * documentation.
		 */
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			/* Poll in 100 ms steps up to max_link_up_time. */
			for (i = 0; i < hw->mac.max_link_up_time; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		/* If the PHY link is down there is no point checking the
		 * MAC LINKS register; *speed is left unwritten in this case.
		 */
		if (*link_up == false)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* LINKS speed bit: set = 10G full, clear = 1G full. */
	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* On 82598AT2 the copper PHY must also confirm link before the
	 * MAC's up indication is trusted.
	 */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = false;

out:
	return IXGBE_SUCCESS;
}
692
693 /**
694 * ixgbe_setup_mac_link_82598 - Set MAC link speed
695 * @hw: pointer to hardware structure
696 * @speed: new link speed
697 * @autoneg_wait_to_complete: true when waiting for completion is needed
698 *
699 * Set the link speed in the AUTOC register and restarts link.
700 **/
701 STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
702 ixgbe_link_speed speed,
703 bool autoneg_wait_to_complete)
704 {
705 bool autoneg = false;
706 s32 status = IXGBE_SUCCESS;
707 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
708 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
709 u32 autoc = curr_autoc;
710 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
711
712 DEBUGFUNC("ixgbe_setup_mac_link_82598");
713
714 /* Check to see if speed passed in is supported. */
715 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
716 speed &= link_capabilities;
717
718 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
719 status = IXGBE_ERR_LINK_SETUP;
720
721 /* Set KX4/KX support according to speed requested */
722 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
723 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
724 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
725 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
726 autoc |= IXGBE_AUTOC_KX4_SUPP;
727 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
728 autoc |= IXGBE_AUTOC_KX_SUPP;
729 if (autoc != curr_autoc)
730 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
731 }
732
733 if (status == IXGBE_SUCCESS) {
734 /*
735 * Setup and restart the link based on the new values in
736 * ixgbe_hw This will write the AUTOC register based on the new
737 * stored values
738 */
739 status = ixgbe_start_mac_link_82598(hw,
740 autoneg_wait_to_complete);
741 }
742
743 return status;
744 }
745
746
747 /**
748 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
749 * @hw: pointer to hardware structure
750 * @speed: new link speed
751 * @autoneg_wait_to_complete: true if waiting is needed to complete
752 *
753 * Sets the link speed in the AUTOC register in the MAC and restarts link.
754 **/
755 STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
756 ixgbe_link_speed speed,
757 bool autoneg_wait_to_complete)
758 {
759 s32 status;
760
761 DEBUGFUNC("ixgbe_setup_copper_link_82598");
762
763 /* Setup the PHY according to input speed */
764 status = hw->phy.ops.setup_link_speed(hw, speed,
765 autoneg_wait_to_complete);
766 /* Set up MAC */
767 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
768
769 return status;
770 }
771
772 /**
773 * ixgbe_reset_hw_82598 - Performs hardware reset
774 * @hw: pointer to hardware structure
775 *
776 * Resets the hardware by resetting the transmit and receive units, masks and
777 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
778 * reset.
779 **/
STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	s32 phy_status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	DEBUGFUNC("ixgbe_reset_hw_82598");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again:
		 * clear the power-down bits in the loopback, 10G, 1G and
		 * AN analog registers, one read-modify-write each.
		 */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup.
		 * Unsupported SFP aborts the reset; a merely absent SFP
		 * still allows the MAC reset below to proceed.
		 */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC. This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete. The flag is cleared before
	 * re-running the MAC reset so this loops at most once.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet. Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to deaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	/* A PHY failure takes precedence over the MAC reset status. */
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}
909
910 /**
911 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
912 * @hw: pointer to hardware struct
913 * @rar: receive address register index to associate with a VMDq index
914 * @vmdq: VMDq set index
915 **/
916 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
917 {
918 u32 rar_high;
919 u32 rar_entries = hw->mac.num_rar_entries;
920
921 DEBUGFUNC("ixgbe_set_vmdq_82598");
922
923 /* Make sure we are using a valid rar index range */
924 if (rar >= rar_entries) {
925 DEBUGOUT1("RAR index %d is out of range.\n", rar);
926 return IXGBE_ERR_INVALID_ARGUMENT;
927 }
928
929 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
930 rar_high &= ~IXGBE_RAH_VIND_MASK;
931 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
932 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
933 return IXGBE_SUCCESS;
934 }
935
936 /**
937 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
938 * @hw: pointer to hardware struct
939 * @rar: receive address register index to associate with a VMDq index
940 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
941 **/
942 STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
943 {
944 u32 rar_high;
945 u32 rar_entries = hw->mac.num_rar_entries;
946
947 UNREFERENCED_1PARAMETER(vmdq);
948
949 /* Make sure we are using a valid rar index range */
950 if (rar >= rar_entries) {
951 DEBUGOUT1("RAR index %d is out of range.\n", rar);
952 return IXGBE_ERR_INVALID_ARGUMENT;
953 }
954
955 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
956 if (rar_high & IXGBE_RAH_VIND_MASK) {
957 rar_high &= ~IXGBE_RAH_VIND_MASK;
958 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
959 }
960
961 return IXGBE_SUCCESS;
962 }
963
964 /**
965 * ixgbe_set_vfta_82598 - Set VLAN filter table
966 * @hw: pointer to hardware structure
967 * @vlan: VLAN id to write to VLAN filter
968 * @vind: VMDq output index that maps queue to VLAN id in VFTA
969 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
970 * @vlvf_bypass: boolean flag - unused
971 *
972 * Turn on/off specified VLAN in the VLAN filter table.
973 **/
974 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
975 bool vlan_on, bool vlvf_bypass)
976 {
977 u32 regindex;
978 u32 bitindex;
979 u32 bits;
980 u32 vftabyte;
981
982 UNREFERENCED_1PARAMETER(vlvf_bypass);
983
984 DEBUGFUNC("ixgbe_set_vfta_82598");
985
986 if (vlan > 4095)
987 return IXGBE_ERR_PARAM;
988
989 /* Determine 32-bit word position in array */
990 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
991
992 /* Determine the location of the (VMD) queue index */
993 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
994 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
995
996 /* Set the nibble for VMD queue index */
997 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
998 bits &= (~(0x0F << bitindex));
999 bits |= (vind << bitindex);
1000 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1001
1002 /* Determine the location of the bit for this VLAN id */
1003 bitindex = vlan & 0x1F; /* lower five bits */
1004
1005 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1006 if (vlan_on)
1007 /* Turn on this VLAN id */
1008 bits |= (1 << bitindex);
1009 else
1010 /* Turn off this VLAN id */
1011 bits &= ~(1 << bitindex);
1012 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1013
1014 return IXGBE_SUCCESS;
1015 }
1016
1017 /**
1018 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1019 * @hw: pointer to hardware structure
1020 *
1021 * Clears the VLAN filer table, and the VMDq index associated with the filter
1022 **/
1023 STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1024 {
1025 u32 offset;
1026 u32 vlanbyte;
1027
1028 DEBUGFUNC("ixgbe_clear_vfta_82598");
1029
1030 for (offset = 0; offset < hw->mac.vft_size; offset++)
1031 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1032
1033 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1034 for (offset = 0; offset < hw->mac.vft_size; offset++)
1035 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1036 0);
1037
1038 return IXGBE_SUCCESS;
1039 }
1040
1041 /**
1042 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1043 * @hw: pointer to hardware structure
1044 * @reg: analog register to read
1045 * @val: read value
1046 *
1047 * Performs read operation to Atlas analog register specified.
1048 **/
1049 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1050 {
1051 u32 atlas_ctl;
1052
1053 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1054
1055 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1056 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1057 IXGBE_WRITE_FLUSH(hw);
1058 usec_delay(10);
1059 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1060 *val = (u8)atlas_ctl;
1061
1062 return IXGBE_SUCCESS;
1063 }
1064
1065 /**
1066 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1067 * @hw: pointer to hardware structure
1068 * @reg: atlas register to write
1069 * @val: value to write
1070 *
1071 * Performs write operation to Atlas analog register specified.
1072 **/
1073 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1074 {
1075 u32 atlas_ctl;
1076
1077 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1078
1079 atlas_ctl = (reg << 8) | val;
1080 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1081 IXGBE_WRITE_FLUSH(hw);
1082 usec_delay(10);
1083
1084 return IXGBE_SUCCESS;
1085 }
1086
/**
 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @dev_addr: address to read from
 * @byte_offset: byte offset to read from dev_addr
 * @eeprom_data: value read
 *
 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
 * Only supported when the PHY is a NetLogic (ixgbe_phy_nl) device; any
 * other PHY type returns IXGBE_ERR_PHY.  The per-port PHY software/firmware
 * semaphore is held for the duration of the access.
 **/
STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_phy_82598");

	/* Pick the semaphore matching this port (LAN0 vs LAN1) */
	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		/* Writing the (device address | offset | read flag) to the
		 * address register kicks off the I2C transaction.
		 */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
					  sfp_addr);

		/* Poll status until the read is no longer in progress
		 * (up to 100 x 10ms = ~1 second).
		 */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
						&sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		/* EEPROM byte is returned in the high byte of the data reg */
		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	/* Always release the semaphore acquired above */
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}
1160
1161 /**
1162 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1163 * @hw: pointer to hardware structure
1164 * @byte_offset: EEPROM byte offset to read
1165 * @eeprom_data: value read
1166 *
1167 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1168 **/
1169 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1170 u8 *eeprom_data)
1171 {
1172 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1173 byte_offset, eeprom_data);
1174 }
1175
1176 /**
1177 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1178 * @hw: pointer to hardware structure
1179 * @byte_offset: byte offset at address 0xA2
1180 * @sff8472_data: value read
1181 *
1182 * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
1183 **/
1184 STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1185 u8 *sff8472_data)
1186 {
1187 return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1188 byte_offset, sff8472_data);
1189 }
1190
/**
 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 *
 * Evaluation order matters: copper PHYs short-circuit to 'out'; otherwise
 * a value derived from AUTOC may be overridden first by the SFP type (for
 * NetLogic PHYs) and finally by the device ID switch at the end.
 **/
u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		/* Query the PHY's extended-ability register for the set of
		 * BASE-T speeds it supports, then skip the AUTOC-based logic.
		 */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* Derive the physical layer from the link mode select (LMS) field
	 * plus the PMA/PMD selections in AUTOC.
	 */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		/* Autonegotiation modes can advertise both KX and KX4 */
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	/* A NetLogic PHY with an SFP module overrides the AUTOC result */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	/* Certain device IDs have a fixed physical layer which takes
	 * precedence over everything computed above.
	 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}
1292
1293 /**
1294 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1295 * port devices.
1296 * @hw: pointer to the HW structure
1297 *
1298 * Calls common function and corrects issue with some single port devices
1299 * that enable LAN1 but not LAN0.
1300 **/
1301 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1302 {
1303 struct ixgbe_bus_info *bus = &hw->bus;
1304 u16 pci_gen = 0;
1305 u16 pci_ctrl2 = 0;
1306
1307 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1308
1309 ixgbe_set_lan_id_multi_port_pcie(hw);
1310
1311 /* check if LAN0 is disabled */
1312 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1313 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1314
1315 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1316
1317 /* if LAN0 is completely disabled force function to 0 */
1318 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1319 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1320 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1321
1322 bus->func = 0;
1323 }
1324 }
1325 }
1326
1327 /**
1328 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1329 * @hw: pointer to hardware structure
1330 *
1331 **/
1332 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1333 {
1334 u32 regval;
1335 u32 i;
1336
1337 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1338
1339 /* Enable relaxed ordering */
1340 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1341 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1342 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1343 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1344 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1345 }
1346
1347 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1348 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1349 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1350 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1351 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1352 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1353 }
1354
1355 }
1356
/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom (unused on 82598)
 * @strategy: packet buffer allocation strategy
 **/
STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;
	UNREFERENCED_1PARAMETER(headroom);

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs.
		 * When falling through from WEIGHTED, i is already 4, so only
		 * buffers 4..7 get the 48KB size; otherwise all buffers get
		 * the default 64KB size.
		 */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes (fixed 40KB each) */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}
1396
/**
 * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit by writing the caller-supplied value directly
 * to the RXCTRL register; no additional sequencing is required on 82598.
 **/
s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
{
	DEBUGFUNC("ixgbe_enable_rx_dma_82598");

	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	return IXGBE_SUCCESS;
}