]> git.proxmox.com Git - ceph.git/blame - ceph/src/seastar/dpdk/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c
update download target update for octopus release
[ceph.git] / ceph / src / seastar / dpdk / lib / librte_eal / linuxapp / kni / ethtool / ixgbe / ixgbe_82598.c
CommitLineData
7c673cae
FG
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "LICENSE.GPL".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "ixgbe_type.h"
29#include "ixgbe_82598.h"
30#include "ixgbe_api.h"
31#include "ixgbe_common.h"
32#include "ixgbe_phy.h"
33
34static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
35 ixgbe_link_speed *speed,
36 bool *autoneg);
37static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
38static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
39 bool autoneg_wait_to_complete);
40static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
41 ixgbe_link_speed *speed, bool *link_up,
42 bool link_up_wait_to_complete);
43static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
44 ixgbe_link_speed speed,
45 bool autoneg,
46 bool autoneg_wait_to_complete);
47static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
48 ixgbe_link_speed speed,
49 bool autoneg,
50 bool autoneg_wait_to_complete);
51static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
52static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
53static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
54static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
55 u32 headroom, int strategy);
56
57/**
58 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
59 * @hw: pointer to the HW structure
60 *
61 * The defaults for 82598 should be in the range of 50us to 50ms,
62 * however the hardware default for these parts is 500us to 1ms which is less
63 * than the 10ms recommended by the pci-e spec. To address this we need to
64 * increase the value to either 10ms to 250ms for capability version 1 config,
65 * or 16ms to 55ms for version 2.
66 **/
67void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
68{
69 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
70 u16 pcie_devctl2;
71
72 /* only take action if timeout value is defaulted to 0 */
73 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
74 goto out;
75
76 /*
77 * if capababilities version is type 1 we can write the
78 * timeout of 10ms to 250ms through the GCR register
79 */
80 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
81 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
82 goto out;
83 }
84
85 /*
86 * for version 2 capabilities we need to write the config space
87 * directly in order to set the completion timeout value for
88 * 16ms to 55ms
89 */
90 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
91 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
92 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
93out:
94 /* disable completion timeout resend */
95 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
96 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
97}
98
99/**
100 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
101 * @hw: pointer to hardware structure
102 *
103 * Initialize the function pointers and assign the MAC type for 82598.
104 * Does not touch the hardware.
105 **/
/**
 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82598.
 * Does not touch the hardware.
 *
 * Returns the status of ixgbe_init_ops_generic().
 **/
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	/* NOTE(review): the result of the first call is overwritten by the
	 * second, so a failure from ixgbe_init_phy_ops_generic() is silently
	 * discarded -- confirm this is intentional. */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.init = &ixgbe_init_phy_ops_82598;

	/* MAC */
	mac->ops.start_hw = &ixgbe_start_hw_82598;
	mac->ops.reset_hw = &ixgbe_reset_hw_82598;
	mac->ops.get_media_type = &ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				&ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = &ixgbe_set_vfta_82598;
	mac->ops.set_vlvf = NULL;	/* 82598 has no VLVF registers */
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;

	/* Flow Control */
	mac->ops.fc_enable = &ixgbe_fc_enable_82598;

	/* 82598 hardware capacities (fixed by the silicon) */
	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 16;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 32;
	mac->max_rx_queues = 64;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;

	/* Link */
	mac->ops.check_link = &ixgbe_check_mac_link_82598;
	mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;	/* not applicable to 82598 */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = NULL;	/* no FW driver-version handshake */

	return ret_val;
}
161
162/**
163 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
164 * @hw: pointer to hardware structure
165 *
166 * Initialize any function pointers that were not able to be
167 * set during init_shared_code because the PHY/SFP type was
168 * not known. Perform the SFP init if necessary.
169 *
170 **/
/**
 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known.  Perform the SFP init if necessary.
 *
 * Returns 0, or IXGBE_ERR_SFP_NOT_SUPPORTED when an NL PHY carries an
 * unknown or unsupported SFP+ module.
 **/
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 list_offset, data_offset;

	/* Identify the PHY; fills in hw->phy.type used below */
	phy->ops.identify(hw);

	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				&ixgbe_get_copper_link_capabilities_generic;
	}

	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		/* TNX PHY: use its dedicated link/firmware helpers */
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
					&ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != 0)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}

		/* Check to see if SFP+ module is supported: a module with no
		 * EEPROM init-sequence offsets is treated as unsupported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != 0) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}

out:
	return ret_val;
}
223
224/**
225 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
226 * @hw: pointer to hardware structure
227 *
228 * Starts the hardware using the generic start_hw function.
229 * Disables relaxed ordering Then set pcie completion timeout
230 *
231 **/
/**
 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function.
 * Disables relaxed ordering, then sets the pcie completion timeout.
 *
 * Returns the status of ixgbe_start_hw_generic().
 **/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;
	s32 ret_val = 0;

	ret_val = ixgbe_start_hw_generic(hw);

	/* Disable relaxed ordering on every Tx DCA control register;
	 * capped at IXGBE_DCA_MAX_QUEUES_82598 registers */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	/* Same for the Rx side: clear both data and header write
	 * relaxed-ordering enables */
	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	/* set the completion timeout for interface (only on success) */
	if (ret_val == 0)
		ixgbe_set_pcie_completion_timeout(hw);

	return ret_val;
}
262
263/**
264 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
265 * @hw: pointer to hardware structure
266 * @speed: pointer to link speed
267 * @autoneg: boolean auto-negotiation value
268 *
269 * Determines the link capabilities by reading the AUTOC register.
270 **/
/**
 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @autoneg: boolean auto-negotiation value
 *
 * Determines the link capabilities by reading the AUTOC register.
 * Returns 0, or IXGBE_ERR_LINK_SETUP for an unrecognized link-mode-select
 * field.
 **/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
{
	s32 status = 0;
	u32 autoc = 0;

	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	/* Decode the link-mode-select (LMS) field of AUTOC */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		/* KX4/KX autoneg: advertise each speed whose support bit
		 * is set in AUTOC; result may be a bitmask of both */
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}

	return status;
}
321
322/**
323 * ixgbe_get_media_type_82598 - Determines media type
324 * @hw: pointer to hardware structure
325 *
326 * Returns the media type (fiber, copper, backplane)
327 **/
328static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
329{
330 enum ixgbe_media_type media_type;
331
332 /* Detect if there is a copper PHY attached. */
333 switch (hw->phy.type) {
334 case ixgbe_phy_cu_unknown:
335 case ixgbe_phy_tn:
336 media_type = ixgbe_media_type_copper;
337 goto out;
338 default:
339 break;
340 }
341
342 /* Media type for I82598 is based on device ID */
343 switch (hw->device_id) {
344 case IXGBE_DEV_ID_82598:
345 case IXGBE_DEV_ID_82598_BX:
346 /* Default device ID is mezzanine card KX/KX4 */
347 media_type = ixgbe_media_type_backplane;
348 break;
349 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
350 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
351 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
352 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
353 case IXGBE_DEV_ID_82598EB_XF_LR:
354 case IXGBE_DEV_ID_82598EB_SFP_LOM:
355 media_type = ixgbe_media_type_fiber;
356 break;
357 case IXGBE_DEV_ID_82598EB_CX4:
358 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
359 media_type = ixgbe_media_type_cx4;
360 break;
361 case IXGBE_DEV_ID_82598AT:
362 case IXGBE_DEV_ID_82598AT2:
363 media_type = ixgbe_media_type_copper;
364 break;
365 default:
366 media_type = ixgbe_media_type_unknown;
367 break;
368 }
369out:
370 return media_type;
371}
372
373/**
374 * ixgbe_fc_enable_82598 - Enable flow control
375 * @hw: pointer to hardware structure
376 *
377 * Enable flow control according to the current settings.
378 **/
379s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
380{
381 s32 ret_val = 0;
382 u32 fctrl_reg;
383 u32 rmcs_reg;
384 u32 reg;
385 u32 fcrtl, fcrth;
386 u32 link_speed = 0;
387 int i;
388 bool link_up;
389
390 /* Validate the water mark configuration */
391 if (!hw->fc.pause_time) {
392 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
393 goto out;
394 }
395
396 /* Low water mark of zero causes XOFF floods */
397 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
398 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
399 hw->fc.high_water[i]) {
400 if (!hw->fc.low_water[i] ||
401 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
402 hw_dbg(hw, "Invalid water mark configuration\n");
403 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
404 goto out;
405 }
406 }
407 }
408
409 /*
410 * On 82598 having Rx FC on causes resets while doing 1G
411 * so if it's on turn it off once we know link_speed. For
412 * more details see 82598 Specification update.
413 */
414 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
415 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
416 switch (hw->fc.requested_mode) {
417 case ixgbe_fc_full:
418 hw->fc.requested_mode = ixgbe_fc_tx_pause;
419 break;
420 case ixgbe_fc_rx_pause:
421 hw->fc.requested_mode = ixgbe_fc_none;
422 break;
423 default:
424 /* no change */
425 break;
426 }
427 }
428
429 /* Negotiate the fc mode to use */
430 ixgbe_fc_autoneg(hw);
431
432 /* Disable any previous flow control settings */
433 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
434 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
435
436 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
437 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
438
439 /*
440 * The possible values of fc.current_mode are:
441 * 0: Flow control is completely disabled
442 * 1: Rx flow control is enabled (we can receive pause frames,
443 * but not send pause frames).
444 * 2: Tx flow control is enabled (we can send pause frames but
445 * we do not support receiving pause frames).
446 * 3: Both Rx and Tx flow control (symmetric) are enabled.
447 * other: Invalid.
448 */
449 switch (hw->fc.current_mode) {
450 case ixgbe_fc_none:
451 /*
452 * Flow control is disabled by software override or autoneg.
453 * The code below will actually disable it in the HW.
454 */
455 break;
456 case ixgbe_fc_rx_pause:
457 /*
458 * Rx Flow control is enabled and Tx Flow control is
459 * disabled by software override. Since there really
460 * isn't a way to advertise that we are capable of RX
461 * Pause ONLY, we will advertise that we support both
462 * symmetric and asymmetric Rx PAUSE. Later, we will
463 * disable the adapter's ability to send PAUSE frames.
464 */
465 fctrl_reg |= IXGBE_FCTRL_RFCE;
466 break;
467 case ixgbe_fc_tx_pause:
468 /*
469 * Tx Flow control is enabled, and Rx Flow control is
470 * disabled by software override.
471 */
472 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
473 break;
474 case ixgbe_fc_full:
475 /* Flow control (both Rx and Tx) is enabled by SW override. */
476 fctrl_reg |= IXGBE_FCTRL_RFCE;
477 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
478 break;
479 default:
480 hw_dbg(hw, "Flow control param set incorrectly\n");
481 ret_val = IXGBE_ERR_CONFIG;
482 goto out;
483 break;
484 }
485
486 /* Set 802.3x based flow control settings. */
487 fctrl_reg |= IXGBE_FCTRL_DPF;
488 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
489 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
490
491 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
492 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
493 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
494 hw->fc.high_water[i]) {
495 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
496 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
497 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
498 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
499 } else {
500 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
501 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
502 }
503
504 }
505
506 /* Configure pause time (2 TCs per register) */
507 reg = hw->fc.pause_time * 0x00010001;
508 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
509 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
510
511 /* Configure flow control refresh threshold value */
512 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
513
514out:
515 return ret_val;
516}
517
518/**
519 * ixgbe_start_mac_link_82598 - Configures MAC link settings
520 * @hw: pointer to hardware structure
521 *
522 * Configures link settings based on values in the ixgbe_hw struct.
523 * Restarts the link. Performs autonegotiation if needed.
524 **/
/**
 * ixgbe_start_mac_link_82598 - Configures MAC link settings
 * @hw: pointer to hardware structure
 * @autoneg_wait_to_complete: true to poll for autoneg completion
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link.  Performs autonegotiation if needed.
 *
 * Returns 0, or IXGBE_ERR_AUTONEG_NOT_COMPLETE when polling was requested
 * and KX4 autonegotiation did not finish in time.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = 0;

	/* Restart link: setting AN_RESTART kicks off (re)negotiation */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so,
	 * and only for the KX4 autoneg link modes */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll roughly IXGBE_AUTO_NEG_TIME * 100ms total */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msleep(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				hw_dbg(hw, "Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msleep(50);

	return status;
}
563
564/**
565 * ixgbe_validate_link_ready - Function looks for phy link
566 * @hw: pointer to hardware structure
567 *
568 * Function indicates success when phy link is available. If phy is not ready
569 * within 5 seconds of MAC indicating link, the function returns error.
570 **/
/**
 * ixgbe_validate_link_ready - Function looks for phy link
 * @hw: pointer to hardware structure
 *
 * Function indicates success when phy link is available. If phy is not ready
 * within 5 seconds of MAC indicating link, the function returns error.
 *
 * Only applies to the 82598AT2 device; all others return 0 immediately.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return 0;

	/* Poll the PHY autoneg status in 100ms steps until both the
	 * autoneg-complete and link-up bits are set */
	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);

		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;

		msleep(100);
	}

	/* timeout reaching the limit means the loop never hit 'break' */
	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		hw_dbg(hw, "Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return 0;
}
598
599/**
600 * ixgbe_check_mac_link_82598 - Get link/speed status
601 * @hw: pointer to hardware structure
602 * @speed: pointer to link speed
603 * @link_up: true is link is up, false otherwise
604 * @link_up_wait_to_complete: bool used to wait for link up or not
605 *
606 * Reads the links register to determine if link is up and the current speed
607 **/
/**
 * ixgbe_check_mac_link_82598 - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed.
 * Always returns 0.
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
	 * indicates link down.  OxC00C is read to check that the XAUI lanes
	 * are active.  Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/* Register is read twice; presumably the first read clears
		 * stale latched state -- TODO confirm against PHY errata */
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			/* Poll in 100ms steps up to IXGBE_LINK_UP_TIME */
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msleep(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		/* If the SERDES PHY says down, skip the MAC LINKS check */
		if (*link_up == false)
			goto out;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	/* LINKS speed bit distinguishes 10G from 1G */
	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* 82598AT2: MAC link alone is not enough -- the PHY must also
	 * confirm readiness, otherwise report link down */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
	    (ixgbe_validate_link_ready(hw) != 0))
		*link_up = false;

out:
	return 0;
}
686
687/**
688 * ixgbe_setup_mac_link_82598 - Set MAC link speed
689 * @hw: pointer to hardware structure
690 * @speed: new link speed
691 * @autoneg: true if autonegotiation enabled
692 * @autoneg_wait_to_complete: true when waiting for completion is needed
693 *
694 * Set the link speed in the AUTOC register and restarts link.
695 **/
/**
 * ixgbe_setup_mac_link_82598 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 * Returns 0, IXGBE_ERR_LINK_SETUP if the requested speed is not within
 * the device's capabilities, or the status of the link restart.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed, bool autoneg,
				      bool autoneg_wait_to_complete)
{
	s32 status = 0;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc = curr_autoc;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;

	/* Check to see if speed passed in is supported: mask the request
	 * down to the capabilities reported for this device */
	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		status = IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		/* Avoid a redundant register write when nothing changed */
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	if (status == 0) {
		/*
		 * Setup and restart the link based on the new values in
		 * ixgbe_hw This will write the AUTOC register based on the new
		 * stored values
		 */
		status = ixgbe_start_mac_link_82598(hw,
						    autoneg_wait_to_complete);
	}

	return status;
}
737
738
739/**
740 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
741 * @hw: pointer to hardware structure
742 * @speed: new link speed
743 * @autoneg: true if autonegotiation enabled
744 * @autoneg_wait_to_complete: true if waiting is needed to complete
745 *
746 * Sets the link speed in the AUTOC register in the MAC and restarts link.
747 **/
748static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
749 ixgbe_link_speed speed,
750 bool autoneg,
751 bool autoneg_wait_to_complete)
752{
753 s32 status;
754
755 /* Setup the PHY according to input speed */
756 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
757 autoneg_wait_to_complete);
758 /* Set up MAC */
759 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
760
761 return status;
762}
763
764/**
765 * ixgbe_reset_hw_82598 - Performs hardware reset
766 * @hw: pointer to hardware structure
767 *
768 * Resets the hardware by resetting the transmit and receive units, masks and
769 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
770 * reset.
771 **/
/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 *
 * Returns 0 on success, or the first failing status from adapter stop,
 * PHY init, or reset polling.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = 0;
	s32 phy_status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != 0)
		goto reset_hw_out;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		/* Clear power-down on the 10G lanes */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		/* Clear power-down on the 1G lanes */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		/* Clear power-down on the autoneg lanes */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			goto reset_hw_out;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  The flag is cleared first,
	 * so the second pass through mac_reset_top runs exactly once.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	/* A PHY error (e.g. SFP not supported) takes precedence */
	if (phy_status != 0)
		status = phy_status;

	return status;
}
899
900/**
901 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
902 * @hw: pointer to hardware struct
903 * @rar: receive address register index to associate with a VMDq index
904 * @vmdq: VMDq set index
905 **/
906s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
907{
908 u32 rar_high;
909 u32 rar_entries = hw->mac.num_rar_entries;
910
911 /* Make sure we are using a valid rar index range */
912 if (rar >= rar_entries) {
913 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
914 return IXGBE_ERR_INVALID_ARGUMENT;
915 }
916
917 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
918 rar_high &= ~IXGBE_RAH_VIND_MASK;
919 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
920 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
921 return 0;
922}
923
924/**
925 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
926 * @hw: pointer to hardware struct
927 * @rar: receive address register index to associate with a VMDq index
928 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
929 **/
930static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
931{
932 u32 rar_high;
933 u32 rar_entries = hw->mac.num_rar_entries;
934
935
936 /* Make sure we are using a valid rar index range */
937 if (rar >= rar_entries) {
938 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
939 return IXGBE_ERR_INVALID_ARGUMENT;
940 }
941
942 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
943 if (rar_high & IXGBE_RAH_VIND_MASK) {
944 rar_high &= ~IXGBE_RAH_VIND_MASK;
945 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
946 }
947
948 return 0;
949}
950
951/**
952 * ixgbe_set_vfta_82598 - Set VLAN filter table
953 * @hw: pointer to hardware structure
954 * @vlan: VLAN id to write to VLAN filter
955 * @vind: VMDq output index that maps queue to VLAN id in VFTA
956 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
957 *
958 * Turn on/off specified VLAN in the VLAN filter table.
959 **/
/**
 * ixgbe_set_vfta_82598 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFTA
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 * Returns 0, or IXGBE_ERR_PARAM for a VLAN id above 4095.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			 bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */

	/* Set the nibble for VMD queue index: clear the 4-bit field,
	 * then insert the new pool index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return 0;
}
998
999/**
1000 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1001 * @hw: pointer to hardware structure
1002 *
1003 * Clears the VLAN filer table, and the VMDq index associated with the filter
1004 **/
1005static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1006{
1007 u32 offset;
1008 u32 vlanbyte;
1009
1010 for (offset = 0; offset < hw->mac.vft_size; offset++)
1011 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1012
1013 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1014 for (offset = 0; offset < hw->mac.vft_size; offset++)
1015 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1016 0);
1017
1018 return 0;
1019}
1020
1021/**
1022 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1023 * @hw: pointer to hardware structure
1024 * @reg: analog register to read
1025 * @val: read value
1026 *
1027 * Performs read operation to Atlas analog register specified.
1028 **/
1029s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1030{
1031 u32 atlas_ctl;
1032
1033 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1034 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1035 IXGBE_WRITE_FLUSH(hw);
1036 udelay(10);
1037 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1038 *val = (u8)atlas_ctl;
1039
1040 return 0;
1041}
1042
1043/**
1044 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1045 * @hw: pointer to hardware structure
1046 * @reg: atlas register to write
1047 * @val: value to write
1048 *
1049 * Performs write operation to Atlas analog register specified.
1050 **/
1051s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1052{
1053 u32 atlas_ctl;
1054
1055 atlas_ctl = (reg << 8) | val;
1056 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1057 IXGBE_WRITE_FLUSH(hw);
1058 udelay(10);
1059
1060 return 0;
1061}
1062
1063/**
1064 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1065 * @hw: pointer to hardware structure
1066 * @byte_offset: EEPROM byte offset to read
1067 * @eeprom_data: value read
1068 *
1069 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1070 **/
1071s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1072 u8 *eeprom_data)
1073{
1074 s32 status = 0;
1075 u16 sfp_addr = 0;
1076 u16 sfp_data = 0;
1077 u16 sfp_stat = 0;
1078 u32 i;
1079
1080 if (hw->phy.type == ixgbe_phy_nl) {
1081 /*
1082 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1083 * 0xC30D. These registers are used to talk to the SFP+
1084 * module's EEPROM through the SDA/SCL (I2C) interface.
1085 */
1086 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1087 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1088 hw->phy.ops.write_reg(hw,
1089 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1090 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1091 sfp_addr);
1092
1093 /* Poll status */
1094 for (i = 0; i < 100; i++) {
1095 hw->phy.ops.read_reg(hw,
1096 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1097 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1098 &sfp_stat);
1099 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1100 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1101 break;
1102 msleep(10);
1103 }
1104
1105 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1106 hw_dbg(hw, "EEPROM read did not pass.\n");
1107 status = IXGBE_ERR_SFP_NOT_PRESENT;
1108 goto out;
1109 }
1110
1111 /* Read data */
1112 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1113 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1114
1115 *eeprom_data = (u8)(sfp_data >> 8);
1116 } else {
1117 status = IXGBE_ERR_PHY;
1118 goto out;
1119 }
1120
1121out:
1122 return status;
1123}
1124
1125/**
1126 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1127 * @hw: pointer to hardware structure
1128 *
1129 * Determines physical layer capabilities of the current configuration.
1130 **/
1131u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1132{
1133 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1134 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1135 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1136 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1137 u16 ext_ability = 0;
1138
1139 hw->phy.ops.identify(hw);
1140
1141 /* Copper PHY must be checked before AUTOC LMS to determine correct
1142 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1143 switch (hw->phy.type) {
1144 case ixgbe_phy_tn:
1145 case ixgbe_phy_cu_unknown:
1146 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1147 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1148 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1149 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1150 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1151 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1152 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1153 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1154 goto out;
1155 default:
1156 break;
1157 }
1158
1159 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1160 case IXGBE_AUTOC_LMS_1G_AN:
1161 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1162 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1163 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1164 else
1165 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1166 break;
1167 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1168 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1169 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1170 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1171 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1172 else /* XAUI */
1173 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1174 break;
1175 case IXGBE_AUTOC_LMS_KX4_AN:
1176 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1177 if (autoc & IXGBE_AUTOC_KX_SUPP)
1178 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1179 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1180 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1181 break;
1182 default:
1183 break;
1184 }
1185
1186 if (hw->phy.type == ixgbe_phy_nl) {
1187 hw->phy.ops.identify_sfp(hw);
1188
1189 switch (hw->phy.sfp_type) {
1190 case ixgbe_sfp_type_da_cu:
1191 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1192 break;
1193 case ixgbe_sfp_type_sr:
1194 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1195 break;
1196 case ixgbe_sfp_type_lr:
1197 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1198 break;
1199 default:
1200 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1201 break;
1202 }
1203 }
1204
1205 switch (hw->device_id) {
1206 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1207 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1208 break;
1209 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1210 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1211 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1212 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1213 break;
1214 case IXGBE_DEV_ID_82598EB_XF_LR:
1215 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1216 break;
1217 default:
1218 break;
1219 }
1220
1221out:
1222 return physical_layer;
1223}
1224
1225/**
1226 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1227 * port devices.
1228 * @hw: pointer to the HW structure
1229 *
1230 * Calls common function and corrects issue with some single port devices
1231 * that enable LAN1 but not LAN0.
1232 **/
1233void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1234{
1235 struct ixgbe_bus_info *bus = &hw->bus;
1236 u16 pci_gen = 0;
1237 u16 pci_ctrl2 = 0;
1238
1239 ixgbe_set_lan_id_multi_port_pcie(hw);
1240
1241 /* check if LAN0 is disabled */
1242 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1243 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1244
1245 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1246
1247 /* if LAN0 is completely disabled force function to 0 */
1248 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1249 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1250 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1251
1252 bus->func = 0;
1253 }
1254 }
1255}
1256
1257/**
1258 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1259 * @hw: pointer to hardware structure
1260 * @num_pb: number of packet buffers to allocate
1261 * @headroom: reserve n KB of headroom
1262 * @strategy: packet buffer allocation strategy
1263 **/
1264static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1265 u32 headroom, int strategy)
1266{
1267 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1268 u8 i = 0;
1269
1270 if (!num_pb)
1271 return;
1272
1273 /* Setup Rx packet buffer sizes */
1274 switch (strategy) {
1275 case PBA_STRATEGY_WEIGHTED:
1276 /* Setup the first four at 80KB */
1277 rxpktsize = IXGBE_RXPBSIZE_80KB;
1278 for (; i < 4; i++)
1279 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1280 /* Setup the last four at 48KB...don't re-init i */
1281 rxpktsize = IXGBE_RXPBSIZE_48KB;
1282 /* Fall Through */
1283 case PBA_STRATEGY_EQUAL:
1284 default:
1285 /* Divide the remaining Rx packet buffer evenly among the TCs */
1286 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1287 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1288 break;
1289 }
1290
1291 /* Setup Tx packet buffer sizes */
1292 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1293 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1294
1295 return;
1296}