]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/dpdk/kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.c
bump version to 15.2.11-pve1
[ceph.git] / ceph / src / spdk / dpdk / kernel / linux / kni / ethtool / ixgbe / ixgbe_82598.c
CommitLineData
// SPDX-License-Identifier: GPL-2.0
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
12
13#include "ixgbe_type.h"
14#include "ixgbe_82598.h"
15#include "ixgbe_api.h"
16#include "ixgbe_common.h"
17#include "ixgbe_phy.h"
18
19static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
20 ixgbe_link_speed *speed,
21 bool *autoneg);
22static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
23static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
24 bool autoneg_wait_to_complete);
25static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
26 ixgbe_link_speed *speed, bool *link_up,
27 bool link_up_wait_to_complete);
28static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
29 ixgbe_link_speed speed,
30 bool autoneg,
31 bool autoneg_wait_to_complete);
32static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
33 ixgbe_link_speed speed,
34 bool autoneg,
35 bool autoneg_wait_to_complete);
36static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
37static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
38static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
39static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
40 u32 headroom, int strategy);
41
42/**
43 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
44 * @hw: pointer to the HW structure
45 *
46 * The defaults for 82598 should be in the range of 50us to 50ms,
47 * however the hardware default for these parts is 500us to 1ms which is less
48 * than the 10ms recommended by the pci-e spec. To address this we need to
49 * increase the value to either 10ms to 250ms for capability version 1 config,
50 * or 16ms to 55ms for version 2.
51 **/
52void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
53{
54 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
55 u16 pcie_devctl2;
56
57 /* only take action if timeout value is defaulted to 0 */
58 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
59 goto out;
60
61 /*
62 * if capababilities version is type 1 we can write the
63 * timeout of 10ms to 250ms through the GCR register
64 */
65 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
66 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
67 goto out;
68 }
69
70 /*
71 * for version 2 capabilities we need to write the config space
72 * directly in order to set the completion timeout value for
73 * 16ms to 55ms
74 */
75 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
76 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
77 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
78out:
79 /* disable completion timeout resend */
80 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
81 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
82}
83
84/**
85 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
86 * @hw: pointer to hardware structure
87 *
88 * Initialize the function pointers and assign the MAC type for 82598.
89 * Does not touch the hardware.
90 **/
91s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
92{
93 struct ixgbe_mac_info *mac = &hw->mac;
94 struct ixgbe_phy_info *phy = &hw->phy;
95 s32 ret_val;
96
97 ret_val = ixgbe_init_phy_ops_generic(hw);
98 ret_val = ixgbe_init_ops_generic(hw);
99
100 /* PHY */
101 phy->ops.init = &ixgbe_init_phy_ops_82598;
102
103 /* MAC */
104 mac->ops.start_hw = &ixgbe_start_hw_82598;
105 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
106 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
107 mac->ops.get_supported_physical_layer =
108 &ixgbe_get_supported_physical_layer_82598;
109 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
110 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
111 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
112
113 /* RAR, Multicast, VLAN */
114 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
115 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
116 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
117 mac->ops.set_vlvf = NULL;
118 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
119
120 /* Flow Control */
121 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
122
123 mac->mcft_size = 128;
124 mac->vft_size = 128;
125 mac->num_rar_entries = 16;
126 mac->rx_pb_size = 512;
127 mac->max_tx_queues = 32;
128 mac->max_rx_queues = 64;
129 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
130
131 /* SFP+ Module */
132 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
133
134 /* Link */
135 mac->ops.check_link = &ixgbe_check_mac_link_82598;
136 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
137 mac->ops.flap_tx_laser = NULL;
138 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
139 mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
140
141 /* Manageability interface */
142 mac->ops.set_fw_drv_ver = NULL;
143
144 return ret_val;
145}
146
/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
 *
 * Returns 0 on success, or IXGBE_ERR_SFP_NOT_SUPPORTED when an NL PHY
 * carries an unknown/unsupported SFP+ module.
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 list_offset, data_offset;

	/* Identify the PHY; fills in hw->phy.type used below */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX copper PHY gets its own link/firmware helpers */
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
					&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != 0)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/*
		 * Check to see if SFP+ module is supported: a failure to
		 * locate init-sequence offsets in the EEPROM means the
		 * module type is not recognized by this hardware.
		 */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != 0) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		/* Other PHY types need no extra setup here */
		break;
	}

out:
	return ret_val;
}
208
209/**
210 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
211 * @hw: pointer to hardware structure
212 *
213 * Starts the hardware using the generic start_hw function.
214 * Disables relaxed ordering Then set pcie completion timeout
215 *
216 **/
217s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
218{
219 u32 regval;
220 u32 i;
221 s32 ret_val = 0;
222
223 ret_val = ixgbe_start_hw_generic(hw);
224
225 /* Disable relaxed ordering */
226 for (i = 0; ((i < hw->mac.max_tx_queues) &&
227 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
228 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
229 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
230 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
231 }
232
233 for (i = 0; ((i < hw->mac.max_rx_queues) &&
234 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
235 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
236 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
237 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
238 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
239 }
240
241 /* set the completion timeout for interface */
242 if (ret_val == 0)
243 ixgbe_set_pcie_completion_timeout(hw);
244
245 return ret_val;
246}
247
248/**
249 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
250 * @hw: pointer to hardware structure
251 * @speed: pointer to link speed
252 * @autoneg: boolean auto-negotiation value
253 *
254 * Determines the link capabilities by reading the AUTOC register.
255 **/
256static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
257 ixgbe_link_speed *speed,
258 bool *autoneg)
259{
260 s32 status = 0;
261 u32 autoc = 0;
262
263 /*
264 * Determine link capabilities based on the stored value of AUTOC,
265 * which represents EEPROM defaults. If AUTOC value has not been
266 * stored, use the current register value.
267 */
268 if (hw->mac.orig_link_settings_stored)
269 autoc = hw->mac.orig_autoc;
270 else
271 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
272
273 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
274 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
275 *speed = IXGBE_LINK_SPEED_1GB_FULL;
276 *autoneg = false;
277 break;
278
279 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
280 *speed = IXGBE_LINK_SPEED_10GB_FULL;
281 *autoneg = false;
282 break;
283
284 case IXGBE_AUTOC_LMS_1G_AN:
285 *speed = IXGBE_LINK_SPEED_1GB_FULL;
286 *autoneg = true;
287 break;
288
289 case IXGBE_AUTOC_LMS_KX4_AN:
290 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
291 *speed = IXGBE_LINK_SPEED_UNKNOWN;
292 if (autoc & IXGBE_AUTOC_KX4_SUPP)
293 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
294 if (autoc & IXGBE_AUTOC_KX_SUPP)
295 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
296 *autoneg = true;
297 break;
298
299 default:
300 status = IXGBE_ERR_LINK_SETUP;
301 break;
302 }
303
304 return status;
305}
306
307/**
308 * ixgbe_get_media_type_82598 - Determines media type
309 * @hw: pointer to hardware structure
310 *
311 * Returns the media type (fiber, copper, backplane)
312 **/
313static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
314{
315 enum ixgbe_media_type media_type;
316
317 /* Detect if there is a copper PHY attached. */
318 switch (hw->phy.type) {
319 case ixgbe_phy_cu_unknown:
320 case ixgbe_phy_tn:
321 media_type = ixgbe_media_type_copper;
322 goto out;
323 default:
324 break;
325 }
326
327 /* Media type for I82598 is based on device ID */
328 switch (hw->device_id) {
329 case IXGBE_DEV_ID_82598:
330 case IXGBE_DEV_ID_82598_BX:
331 /* Default device ID is mezzanine card KX/KX4 */
332 media_type = ixgbe_media_type_backplane;
333 break;
334 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
335 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
336 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
337 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
338 case IXGBE_DEV_ID_82598EB_XF_LR:
339 case IXGBE_DEV_ID_82598EB_SFP_LOM:
340 media_type = ixgbe_media_type_fiber;
341 break;
342 case IXGBE_DEV_ID_82598EB_CX4:
343 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
344 media_type = ixgbe_media_type_cx4;
345 break;
346 case IXGBE_DEV_ID_82598AT:
347 case IXGBE_DEV_ID_82598AT2:
348 media_type = ixgbe_media_type_copper;
349 break;
350 default:
351 media_type = ixgbe_media_type_unknown;
352 break;
353 }
354out:
355 return media_type;
356}
357
/**
 * ixgbe_fc_enable_82598 - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 *
 * Returns 0 on success, IXGBE_ERR_INVALID_LINK_SETTINGS for bad water
 * marks / pause time, or IXGBE_ERR_CONFIG for an unknown fc mode.
 **/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* low water must be non-zero and below high water */
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				hw_dbg(hw, "Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			/* drop the Rx half, keep Tx pause */
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *     we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* water marks are in KB units; shift into register
			 * position and set the enable/XON bits */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
502
503/**
504 * ixgbe_start_mac_link_82598 - Configures MAC link settings
505 * @hw: pointer to hardware structure
506 *
507 * Configures link settings based on values in the ixgbe_hw struct.
508 * Restarts the link. Performs autonegotiation if needed.
509 **/
510static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
511 bool autoneg_wait_to_complete)
512{
513 u32 autoc_reg;
514 u32 links_reg;
515 u32 i;
516 s32 status = 0;
517
518 /* Restart link */
519 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
520 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
521 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
522
523 /* Only poll for autoneg to complete if specified to do so */
524 if (autoneg_wait_to_complete) {
525 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
526 IXGBE_AUTOC_LMS_KX4_AN ||
527 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
528 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
529 links_reg = 0; /* Just in case Autoneg time = 0 */
530 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
531 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
532 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
533 break;
534 msleep(100);
535 }
536 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
537 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
538 hw_dbg(hw, "Autonegotiation did not complete.\n");
539 }
540 }
541 }
542
543 /* Add delay to filter out noises during initial link setup */
544 msleep(50);
545
546 return status;
547}
548
549/**
550 * ixgbe_validate_link_ready - Function looks for phy link
551 * @hw: pointer to hardware structure
552 *
553 * Function indicates success when phy link is available. If phy is not ready
554 * within 5 seconds of MAC indicating link, the function returns error.
555 **/
556static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
557{
558 u32 timeout;
559 u16 an_reg;
560
561 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
562 return 0;
563
564 for (timeout = 0;
565 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
566 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
567 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
568
569 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
570 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
571 break;
572
573 msleep(100);
574 }
575
576 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
577 hw_dbg(hw, "Link was indicated but link is down\n");
578 return IXGBE_ERR_LINK_SETUP;
579 }
580
581 return 0;
582}
583
/**
 * ixgbe_check_mac_link_82598 - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 *
 * Always returns 0; results are reported through @speed and @link_up.
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
	 * indicates link down. OxC00C is read to check that the XAUI lanes
	 * are active. Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/* 0xC79F is read twice back-to-back before being trusted —
		 * NOTE(review): presumably the first read can be stale or
		 * latched; confirm against the NetLogic PHY docs. */
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			/* Poll (up to IXGBE_LINK_UP_TIME * 100ms) for the
			 * PHY to report link up with active XAUI lanes */
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msleep(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		/* PHY says down: no point checking the MAC LINKS register */
		if (*link_up == false)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		/* Same polling scheme against the MAC LINKS register */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* 82598 links are either 10G or 1G */
	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* 82598AT2: MAC link-up must also be confirmed by the PHY */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
	    (ixgbe_validate_link_ready(hw) != 0))
		*link_up = false;

out:
	return 0;
}
671
672/**
673 * ixgbe_setup_mac_link_82598 - Set MAC link speed
674 * @hw: pointer to hardware structure
675 * @speed: new link speed
676 * @autoneg: true if autonegotiation enabled
677 * @autoneg_wait_to_complete: true when waiting for completion is needed
678 *
679 * Set the link speed in the AUTOC register and restarts link.
680 **/
681static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
682 ixgbe_link_speed speed, bool autoneg,
683 bool autoneg_wait_to_complete)
684{
685 s32 status = 0;
686 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
687 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
688 u32 autoc = curr_autoc;
689 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
690
691 /* Check to see if speed passed in is supported. */
692 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
693 speed &= link_capabilities;
694
695 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
696 status = IXGBE_ERR_LINK_SETUP;
697
698 /* Set KX4/KX support according to speed requested */
699 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
700 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
701 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
702 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
703 autoc |= IXGBE_AUTOC_KX4_SUPP;
704 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
705 autoc |= IXGBE_AUTOC_KX_SUPP;
706 if (autoc != curr_autoc)
707 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
708 }
709
710 if (status == 0) {
711 /*
712 * Setup and restart the link based on the new values in
713 * ixgbe_hw This will write the AUTOC register based on the new
714 * stored values
715 */
716 status = ixgbe_start_mac_link_82598(hw,
717 autoneg_wait_to_complete);
718 }
719
720 return status;
721}
722
723
724/**
725 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
726 * @hw: pointer to hardware structure
727 * @speed: new link speed
728 * @autoneg: true if autonegotiation enabled
729 * @autoneg_wait_to_complete: true if waiting is needed to complete
730 *
731 * Sets the link speed in the AUTOC register in the MAC and restarts link.
732 **/
733static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
734 ixgbe_link_speed speed,
735 bool autoneg,
736 bool autoneg_wait_to_complete)
737{
738 s32 status;
739
740 /* Setup the PHY according to input speed */
741 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
742 autoneg_wait_to_complete);
743 /* Set up MAC */
744 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
745
746 return status;
747}
748
/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 *
 * Returns 0 on success; a PHY init failure takes precedence over the
 * MAC reset status in the return value (see reset_hw_out below).
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = 0;
	s32 phy_status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != 0)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again:
		 * clear the power-down bit in each of the four analog
		 * registers (loopback, 10G, 1G, AN lanes) */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		/* No SFP present: still reset the MAC, skip the PHY reset */
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  The flag is cleared first
	 * so this loops back to mac_reset_top at most once.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Clear selected GHECCR bits — NOTE(review): the magic bit
	 * positions (21, 18, 9, 6) are undocumented here; confirm against
	 * the 82598 datasheet before changing. */
	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to deaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	/* A PHY init error overrides the MAC reset status */
	if (phy_status != 0)
		status = phy_status;

	return status;
}
884
885/**
886 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
887 * @hw: pointer to hardware struct
888 * @rar: receive address register index to associate with a VMDq index
889 * @vmdq: VMDq set index
890 **/
891s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
892{
893 u32 rar_high;
894 u32 rar_entries = hw->mac.num_rar_entries;
895
896 /* Make sure we are using a valid rar index range */
897 if (rar >= rar_entries) {
898 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
899 return IXGBE_ERR_INVALID_ARGUMENT;
900 }
901
902 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
903 rar_high &= ~IXGBE_RAH_VIND_MASK;
904 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
905 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
906 return 0;
907}
908
909/**
910 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
911 * @hw: pointer to hardware struct
912 * @rar: receive address register index to associate with a VMDq index
913 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
914 **/
915static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
916{
917 u32 rar_high;
918 u32 rar_entries = hw->mac.num_rar_entries;
919
920
921 /* Make sure we are using a valid rar index range */
922 if (rar >= rar_entries) {
923 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
924 return IXGBE_ERR_INVALID_ARGUMENT;
925 }
926
927 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
928 if (rar_high & IXGBE_RAH_VIND_MASK) {
929 rar_high &= ~IXGBE_RAH_VIND_MASK;
930 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
931 }
932
933 return 0;
934}
935
936/**
937 * ixgbe_set_vfta_82598 - Set VLAN filter table
938 * @hw: pointer to hardware structure
939 * @vlan: VLAN id to write to VLAN filter
940 * @vind: VMDq output index that maps queue to VLAN id in VFTA
941 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
942 *
943 * Turn on/off specified VLAN in the VLAN filter table.
944 **/
945s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
946 bool vlan_on)
947{
948 u32 regindex;
949 u32 bitindex;
950 u32 bits;
951 u32 vftabyte;
952
953 if (vlan > 4095)
954 return IXGBE_ERR_PARAM;
955
956 /* Determine 32-bit word position in array */
957 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
958
959 /* Determine the location of the (VMD) queue index */
960 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
961 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
962
963 /* Set the nibble for VMD queue index */
964 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
965 bits &= (~(0x0F << bitindex));
966 bits |= (vind << bitindex);
967 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
968
969 /* Determine the location of the bit for this VLAN id */
970 bitindex = vlan & 0x1F; /* lower five bits */
971
972 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
973 if (vlan_on)
974 /* Turn on this VLAN id */
975 bits |= (1 << bitindex);
976 else
977 /* Turn off this VLAN id */
978 bits &= ~(1 << bitindex);
979 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
980
981 return 0;
982}
983
984/**
985 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
986 * @hw: pointer to hardware structure
987 *
988 * Clears the VLAN filer table, and the VMDq index associated with the filter
989 **/
990static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
991{
992 u32 offset;
993 u32 vlanbyte;
994
995 for (offset = 0; offset < hw->mac.vft_size; offset++)
996 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
997
998 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
999 for (offset = 0; offset < hw->mac.vft_size; offset++)
1000 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1001 0);
1002
1003 return 0;
1004}
1005
1006/**
1007 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1008 * @hw: pointer to hardware structure
1009 * @reg: analog register to read
1010 * @val: read value
1011 *
1012 * Performs read operation to Atlas analog register specified.
1013 **/
1014s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1015{
1016 u32 atlas_ctl;
1017
1018 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1019 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1020 IXGBE_WRITE_FLUSH(hw);
1021 udelay(10);
1022 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1023 *val = (u8)atlas_ctl;
1024
1025 return 0;
1026}
1027
1028/**
1029 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1030 * @hw: pointer to hardware structure
1031 * @reg: atlas register to write
1032 * @val: value to write
1033 *
1034 * Performs write operation to Atlas analog register specified.
1035 **/
1036s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1037{
1038 u32 atlas_ctl;
1039
1040 atlas_ctl = (reg << 8) | val;
1041 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1042 IXGBE_WRITE_FLUSH(hw);
1043 udelay(10);
1044
1045 return 0;
1046}
1047
1048/**
1049 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1050 * @hw: pointer to hardware structure
1051 * @byte_offset: EEPROM byte offset to read
1052 * @eeprom_data: value read
1053 *
1054 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1055 **/
1056s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1057 u8 *eeprom_data)
1058{
1059 s32 status = 0;
1060 u16 sfp_addr = 0;
1061 u16 sfp_data = 0;
1062 u16 sfp_stat = 0;
1063 u32 i;
1064
1065 if (hw->phy.type == ixgbe_phy_nl) {
1066 /*
1067 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1068 * 0xC30D. These registers are used to talk to the SFP+
1069 * module's EEPROM through the SDA/SCL (I2C) interface.
1070 */
1071 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1072 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1073 hw->phy.ops.write_reg(hw,
1074 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1075 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1076 sfp_addr);
1077
1078 /* Poll status */
1079 for (i = 0; i < 100; i++) {
1080 hw->phy.ops.read_reg(hw,
1081 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1082 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1083 &sfp_stat);
1084 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1085 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1086 break;
1087 msleep(10);
1088 }
1089
1090 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1091 hw_dbg(hw, "EEPROM read did not pass.\n");
1092 status = IXGBE_ERR_SFP_NOT_PRESENT;
1093 goto out;
1094 }
1095
1096 /* Read data */
1097 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1098 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1099
1100 *eeprom_data = (u8)(sfp_data >> 8);
1101 } else {
1102 status = IXGBE_ERR_PHY;
1103 goto out;
1104 }
1105
1106out:
1107 return status;
1108}
1109
/**
 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 *
 * Evaluation order matters: the copper PHY check short-circuits via goto;
 * otherwise the AUTOC link-mode result may be overridden first by an
 * identified NetLogic-attached SFP module and then by fixed-media
 * device IDs, each later stage taking precedence.
 **/
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		/* Translate each BASE-T speed the PHY advertises in its
		 * extended-ability register into a physical-layer flag,
		 * then skip the AUTOC-based detection entirely */
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* Derive a baseline from the configured link mode (AUTOC LMS)
	 * and the 1G/10G PMA/PMD selections */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		/* Backplane auto-negotiation: KX and KX4 support can be
		 * advertised simultaneously, so OR the flags in */
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	/* A NetLogic PHY fronts an SFP+ cage: override with the layer
	 * implied by the identified module type */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	/* Fixed-media SKUs: the device ID pins down the physical layer
	 * regardless of what was derived above */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}
1209
1210/**
1211 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1212 * port devices.
1213 * @hw: pointer to the HW structure
1214 *
1215 * Calls common function and corrects issue with some single port devices
1216 * that enable LAN1 but not LAN0.
1217 **/
1218void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1219{
1220 struct ixgbe_bus_info *bus = &hw->bus;
1221 u16 pci_gen = 0;
1222 u16 pci_ctrl2 = 0;
1223
1224 ixgbe_set_lan_id_multi_port_pcie(hw);
1225
1226 /* check if LAN0 is disabled */
1227 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1228 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1229
1230 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1231
1232 /* if LAN0 is completely disabled force function to 0 */
1233 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1234 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1235 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1236
1237 bus->func = 0;
1238 }
1239 }
1240}
1241
1242/**
1243 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1244 * @hw: pointer to hardware structure
1245 * @num_pb: number of packet buffers to allocate
1246 * @headroom: reserve n KB of headroom
1247 * @strategy: packet buffer allocation strategy
1248 **/
1249static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1250 u32 headroom, int strategy)
1251{
1252 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1253 u8 i = 0;
1254
1255 if (!num_pb)
1256 return;
1257
1258 /* Setup Rx packet buffer sizes */
1259 switch (strategy) {
1260 case PBA_STRATEGY_WEIGHTED:
1261 /* Setup the first four at 80KB */
1262 rxpktsize = IXGBE_RXPBSIZE_80KB;
1263 for (; i < 4; i++)
1264 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1265 /* Setup the last four at 48KB...don't re-init i */
1266 rxpktsize = IXGBE_RXPBSIZE_48KB;
1267 /* Fall Through */
1268 case PBA_STRATEGY_EQUAL:
1269 default:
1270 /* Divide the remaining Rx packet buffer evenly among the TCs */
1271 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1272 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1273 break;
1274 }
1275
1276 /* Setup Tx packet buffer sizes */
1277 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1278 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1279
1280 return;
1281}