/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "LICENSE.GPL".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for igb */

#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#ifdef CONFIG_PM_RUNTIME
#include <linux/pm_runtime.h>
#endif /* CONFIG_PM_RUNTIME */
#include <linux/highmem.h>

#include "igb.h"
#include "igb_regtest.h"
#include <linux/if_vlan.h>
#ifdef ETHTOOL_GEEE
#include <linux/mdio.h>
#endif

#ifdef ETHTOOL_OPS_COMPAT
#include "kcompat_ethtool.c"
#endif
#ifdef ETHTOOL_GSTATS
struct igb_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define IGB_STAT(_name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
	.stat_offset = offsetof(struct igb_adapter, _stat) \
}
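
/*
 * Each IGB_STAT entry pairs an ethtool string with the size and byte
 * offset of the backing counter inside struct igb_adapter, so the stats
 * handler can copy every value with one generic loop instead of one line
 * of code per counter.  A minimal sketch of how such a table is consumed
 * (illustrative only, not the driver's exact handler):
 *
 *	char *base = (char *)adapter;
 *	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
 *		char *p = base + igb_gstrings_stats[i].stat_offset;
 *		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
 *			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 *	}
 */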
static const struct igb_stats igb_gstrings_stats[] = {
	IGB_STAT("rx_packets", stats.gprc),
	IGB_STAT("tx_packets", stats.gptc),
	IGB_STAT("rx_bytes", stats.gorc),
	IGB_STAT("tx_bytes", stats.gotc),
	IGB_STAT("rx_broadcast", stats.bprc),
	IGB_STAT("tx_broadcast", stats.bptc),
	IGB_STAT("rx_multicast", stats.mprc),
	IGB_STAT("tx_multicast", stats.mptc),
	IGB_STAT("multicast", stats.mprc),
	IGB_STAT("collisions", stats.colc),
	IGB_STAT("rx_crc_errors", stats.crcerrs),
	IGB_STAT("rx_no_buffer_count", stats.rnbc),
	IGB_STAT("rx_missed_errors", stats.mpc),
	IGB_STAT("tx_aborted_errors", stats.ecol),
	IGB_STAT("tx_carrier_errors", stats.tncrs),
	IGB_STAT("tx_window_errors", stats.latecol),
	IGB_STAT("tx_abort_late_coll", stats.latecol),
	IGB_STAT("tx_deferred_ok", stats.dc),
	IGB_STAT("tx_single_coll_ok", stats.scc),
	IGB_STAT("tx_multi_coll_ok", stats.mcc),
	IGB_STAT("tx_timeout_count", tx_timeout_count),
	IGB_STAT("rx_long_length_errors", stats.roc),
	IGB_STAT("rx_short_length_errors", stats.ruc),
	IGB_STAT("rx_align_errors", stats.algnerrc),
	IGB_STAT("tx_tcp_seg_good", stats.tsctc),
	IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
	IGB_STAT("rx_flow_control_xon", stats.xonrxc),
	IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
	IGB_STAT("tx_flow_control_xon", stats.xontxc),
	IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
	IGB_STAT("rx_long_byte_count", stats.gorc),
	IGB_STAT("tx_dma_out_of_sync", stats.doosync),
#ifndef IGB_NO_LRO
	IGB_STAT("lro_aggregated", lro_stats.coal),
	IGB_STAT("lro_flushed", lro_stats.flushed),
#endif /* IGB_NO_LRO */
	IGB_STAT("tx_smbus", stats.mgptc),
	IGB_STAT("rx_smbus", stats.mgprc),
	IGB_STAT("dropped_smbus", stats.mgpdc),
	IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
	IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
#ifdef HAVE_PTP_1588_CLOCK
	IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
#endif /* HAVE_PTP_1588_CLOCK */
};

#define IGB_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
static const struct igb_stats igb_gstrings_net_stats[] = {
	IGB_NETDEV_STAT(rx_errors),
	IGB_NETDEV_STAT(tx_errors),
	IGB_NETDEV_STAT(tx_dropped),
	IGB_NETDEV_STAT(rx_length_errors),
	IGB_NETDEV_STAT(rx_over_errors),
	IGB_NETDEV_STAT(rx_frame_errors),
	IGB_NETDEV_STAT(rx_fifo_errors),
	IGB_NETDEV_STAT(tx_fifo_errors),
	IGB_NETDEV_STAT(tx_heartbeat_errors)
};

#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
#define IGB_RX_QUEUE_STATS_LEN \
	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
#define IGB_TX_QUEUE_STATS_LEN \
	(sizeof(struct igb_tx_queue_stats) / sizeof(u64))
#define IGB_QUEUE_STATS_LEN \
	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
	  IGB_RX_QUEUE_STATS_LEN) + \
	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
	  IGB_TX_QUEUE_STATS_LEN))
#define IGB_STATS_LEN \
	(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
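
/*
 * Note: IGB_QUEUE_STATS_LEN expands to the adapter's runtime Rx/Tx queue
 * counts, so IGB_STATS_LEN is not a compile-time constant and can only be
 * used where a 'netdev' pointer is in scope.
 */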

#endif /* ETHTOOL_GSTATS */
#ifdef ETHTOOL_TEST
static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */

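/*
 * Legacy ETHTOOL_GSET handler: reports supported/advertised link modes,
 * port type, and the current speed/duplex and MDI-X state.  Speed and
 * duplex are only meaningful while the STATUS register has the link-up
 * bit (E1000_STATUS_LU) set; otherwise both are reported as unknown (-1).
 */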
static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 status;

	if (hw->phy.media_type == e1000_media_type_copper) {

		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Full |
				   SUPPORTED_Autoneg |
				   SUPPORTED_TP |
				   SUPPORTED_Pause);
		ecmd->advertising = ADVERTISED_TP;

		if (hw->mac.autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			/* the e1000 autoneg seems to match ethtool nicely */
			ecmd->advertising |= hw->phy.autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy.addr;
		ecmd->transceiver = XCVR_INTERNAL;

	} else {
		ecmd->supported = (SUPPORTED_1000baseT_Full |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_FIBRE |
				   SUPPORTED_Autoneg |
				   SUPPORTED_Pause);
		if (hw->mac.type == e1000_i354)
			ecmd->supported |= (SUPPORTED_2500baseX_Full);

		ecmd->advertising = ADVERTISED_FIBRE;

		switch (adapter->link_speed) {
		case SPEED_2500:
			ecmd->advertising = ADVERTISED_2500baseX_Full;
			break;
		case SPEED_1000:
			ecmd->advertising = ADVERTISED_1000baseT_Full;
			break;
		case SPEED_100:
			ecmd->advertising = ADVERTISED_100baseT_Full;
			break;
		default:
			break;
		}

		if (hw->mac.autoneg == 1)
			ecmd->advertising |= ADVERTISED_Autoneg;

		ecmd->port = PORT_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
	}

	if (hw->mac.autoneg != 1)
		ecmd->advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);

	if (hw->fc.requested_mode == e1000_fc_full)
		ecmd->advertising |= ADVERTISED_Pause;
	else if (hw->fc.requested_mode == e1000_fc_rx_pause)
		ecmd->advertising |= (ADVERTISED_Pause |
				      ADVERTISED_Asym_Pause);
	else if (hw->fc.requested_mode == e1000_fc_tx_pause)
		ecmd->advertising |= ADVERTISED_Asym_Pause;
	else
		ecmd->advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);

	status = E1000_READ_REG(hw, E1000_STATUS);

	if (status & E1000_STATUS_LU) {
		if ((hw->mac.type == e1000_i354) &&
		    (status & E1000_STATUS_2P5_SKU) &&
		    !(status & E1000_STATUS_2P5_SKU_OVER))
			ecmd->speed = SPEED_2500;
		else if (status & E1000_STATUS_SPEED_1000)
			ecmd->speed = SPEED_1000;
		else if (status & E1000_STATUS_SPEED_100)
			ecmd->speed = SPEED_100;
		else
			ecmd->speed = SPEED_10;

		if ((status & E1000_STATUS_FD) ||
		    hw->phy.media_type != e1000_media_type_copper)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;

	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	if ((hw->phy.media_type == e1000_media_type_fiber) ||
	    hw->mac.autoneg)
		ecmd->autoneg = AUTONEG_ENABLE;
	else
		ecmd->autoneg = AUTONEG_DISABLE;
#ifdef ETH_TP_MDI_X

	/* MDI-X => 2; MDI => 1; Invalid => 0 */
	if (hw->phy.media_type == e1000_media_type_copper)
		ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
						      ETH_TP_MDI;
	else
		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;

#ifdef ETH_TP_MDI_AUTO
	if (hw->phy.mdix == AUTO_ALL_MODES)
		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	else
		ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;

#endif
#endif /* ETH_TP_MDI_X */
	return 0;
}

static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (ecmd->duplex == DUPLEX_HALF) {
		if (!hw->dev_spec._82575.eee_disable)
			dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
		hw->dev_spec._82575.eee_disable = true;
	} else {
		if (hw->dev_spec._82575.eee_disable)
			dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
		hw->dev_spec._82575.eee_disable = false;
	}

	/* When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed */
	if (e1000_check_reset_block(hw)) {
		dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link "
			"characteristics when SoL/IDER is active.\n");
		return -EINVAL;
	}

#ifdef ETH_TP_MDI_AUTO
	/*
	 * MDI setting is only allowed when autoneg is enabled because
	 * some hardware doesn't allow MDI setting when speed or
	 * duplex is forced.
	 */
	if (ecmd->eth_tp_mdix_ctrl) {
		if (hw->phy.media_type != e1000_media_type_copper)
			return -EOPNOTSUPP;

		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
		    (ecmd->autoneg != AUTONEG_ENABLE)) {
			dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
			return -EINVAL;
		}
	}

#endif /* ETH_TP_MDI_AUTO */
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		if (hw->phy.media_type == e1000_media_type_fiber) {
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_FIBRE |
						     ADVERTISED_Autoneg;
			switch (adapter->link_speed) {
			case SPEED_2500:
				hw->phy.autoneg_advertised =
					ADVERTISED_2500baseX_Full;
				break;
			case SPEED_1000:
				hw->phy.autoneg_advertised =
					ADVERTISED_1000baseT_Full;
				break;
			case SPEED_100:
				hw->phy.autoneg_advertised =
					ADVERTISED_100baseT_Full;
				break;
			default:
				break;
			}
		} else {
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_TP |
						     ADVERTISED_Autoneg;
		}
		ecmd->advertising = hw->phy.autoneg_advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = e1000_fc_default;
	} else {
		if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
			clear_bit(__IGB_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

#ifdef ETH_TP_MDI_AUTO
	/* MDI-X => 2; MDI => 1; Auto => 3 */
	if (ecmd->eth_tp_mdix_ctrl) {
		/* fix up the value for auto (3 => 0) as zero is mapped
		 * internally to auto
		 */
		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
			hw->phy.mdix = AUTO_ALL_MODES;
		else
			hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
	}

#endif /* ETH_TP_MDI_AUTO */
	/* reset the link */
	if (netif_running(adapter->netdev)) {
		igb_down(adapter);
		igb_up(adapter);
	} else {
		igb_reset(adapter);
	}

	clear_bit(__IGB_RESETTING, &adapter->state);
	return 0;
}

static u32 igb_get_link(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_mac_info *mac = &adapter->hw.mac;

	/*
	 * If the link is not reported up to netdev, interrupts are disabled,
	 * and so the physical link state may have changed since we last
	 * looked.  Set get_link_status to make sure that the true link
	 * state is interrogated, rather than pulling a cached and possibly
	 * stale link state from the driver.
	 */
	if (!netif_carrier_ok(netdev))
		mac->get_link_status = 1;

	return igb_has_link(adapter);
}

static void igb_get_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pause->autoneg =
		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (hw->fc.current_mode == e1000_fc_rx_pause)
		pause->rx_pause = 1;
	else if (hw->fc.current_mode == e1000_fc_tx_pause)
		pause->tx_pause = 1;
	else if (hw->fc.current_mode == e1000_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

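/*
 * Writing pause parameters either re-runs link negotiation (when
 * pause->autoneg is set) or forces one of the four flow-control modes
 * (none/rx/tx/full) directly into the MAC.  From userspace this is
 * typically driven by something like:
 *
 *	ethtool -A eth0 autoneg off rx on tx on
 *
 * (interface name illustrative).
 */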
static int igb_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		hw->fc.requested_mode = e1000_fc_default;
		if (netif_running(adapter->netdev)) {
			igb_down(adapter);
			igb_up(adapter);
		} else {
			igb_reset(adapter);
		}
	} else {
		if (pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_full;
		else if (pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_rx_pause;
		else if (!pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_tx_pause;
		else if (!pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_none;

		hw->fc.current_mode = hw->fc.requested_mode;

		if (hw->phy.media_type == e1000_media_type_fiber) {
			retval = hw->mac.ops.setup_link(hw);
			/* implicit goto out */
		} else {
			retval = e1000_force_mac_fc(hw);
			if (retval)
				goto out;
			e1000_set_fc_watermarks_generic(hw);
		}
	}

out:
	clear_bit(__IGB_RESETTING, &adapter->state);
	return retval;
}

static u32 igb_get_msglevel(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void igb_set_msglevel(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int igb_get_regs_len(struct net_device *netdev)
{
#define IGB_REGS_LEN 555
	return IGB_REGS_LEN * sizeof(u32);
}

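/*
 * The register dump below must stay in sync with IGB_REGS_LEN: each
 * regs_buff[] index is a fixed slot in the 555-word snapshot that
 * userspace tools such as "ethtool -d" decode positionally.
 */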
static void igb_get_regs(struct net_device *netdev,
			 struct ethtool_regs *regs, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IGB_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
	regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
	regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
	regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
	regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
	regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
	regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
	regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
	regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
	regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
	regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
	regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);

	/* NVM Register */
	regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);

	/* Interrupt */
	/* Reading EICS for EICR because they read the
	 * same but EICS does not clear on read */
	regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
	regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
	regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
	regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
	regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
	regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
	/* Reading ICS for ICR because they read the
	 * same but ICS does not clear on read */
	regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
	regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
	regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
	regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
	regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
	regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
	regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);

	/* Flow Control */
	regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
	regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
	regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
	regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
	regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
	regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);

	/* Receive */
	regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
	regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
	regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
	regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
	regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
	regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);

	/* Transmit */
	regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
	regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
	regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
	regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);

	/* Wake Up */
	regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
	regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
	regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
	regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
	regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);

	/* MAC */
	regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
	regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
	regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
	regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
	regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
	regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
	regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);

	/* Statistics */
	regs_buff[54] = adapter->stats.crcerrs;
	regs_buff[55] = adapter->stats.algnerrc;
	regs_buff[56] = adapter->stats.symerrs;
	regs_buff[57] = adapter->stats.rxerrc;
	regs_buff[58] = adapter->stats.mpc;
	regs_buff[59] = adapter->stats.scc;
	regs_buff[60] = adapter->stats.ecol;
	regs_buff[61] = adapter->stats.mcc;
	regs_buff[62] = adapter->stats.latecol;
	regs_buff[63] = adapter->stats.colc;
	regs_buff[64] = adapter->stats.dc;
	regs_buff[65] = adapter->stats.tncrs;
	regs_buff[66] = adapter->stats.sec;
	regs_buff[67] = adapter->stats.htdpmc;
	regs_buff[68] = adapter->stats.rlec;
	regs_buff[69] = adapter->stats.xonrxc;
	regs_buff[70] = adapter->stats.xontxc;
	regs_buff[71] = adapter->stats.xoffrxc;
	regs_buff[72] = adapter->stats.xofftxc;
	regs_buff[73] = adapter->stats.fcruc;
	regs_buff[74] = adapter->stats.prc64;
	regs_buff[75] = adapter->stats.prc127;
	regs_buff[76] = adapter->stats.prc255;
	regs_buff[77] = adapter->stats.prc511;
	regs_buff[78] = adapter->stats.prc1023;
	regs_buff[79] = adapter->stats.prc1522;
	regs_buff[80] = adapter->stats.gprc;
	regs_buff[81] = adapter->stats.bprc;
	regs_buff[82] = adapter->stats.mprc;
	regs_buff[83] = adapter->stats.gptc;
	regs_buff[84] = adapter->stats.gorc;
	regs_buff[86] = adapter->stats.gotc;
	regs_buff[88] = adapter->stats.rnbc;
	regs_buff[89] = adapter->stats.ruc;
	regs_buff[90] = adapter->stats.rfc;
	regs_buff[91] = adapter->stats.roc;
	regs_buff[92] = adapter->stats.rjc;
	regs_buff[93] = adapter->stats.mgprc;
	regs_buff[94] = adapter->stats.mgpdc;
	regs_buff[95] = adapter->stats.mgptc;
	regs_buff[96] = adapter->stats.tor;
	regs_buff[98] = adapter->stats.tot;
	regs_buff[100] = adapter->stats.tpr;
	regs_buff[101] = adapter->stats.tpt;
	regs_buff[102] = adapter->stats.ptc64;
	regs_buff[103] = adapter->stats.ptc127;
	regs_buff[104] = adapter->stats.ptc255;
	regs_buff[105] = adapter->stats.ptc511;
	regs_buff[106] = adapter->stats.ptc1023;
	regs_buff[107] = adapter->stats.ptc1522;
	regs_buff[108] = adapter->stats.mptc;
	regs_buff[109] = adapter->stats.bptc;
	regs_buff[110] = adapter->stats.tsctc;
	regs_buff[111] = adapter->stats.iac;
	regs_buff[112] = adapter->stats.rpthc;
	regs_buff[113] = adapter->stats.hgptc;
	regs_buff[114] = adapter->stats.hgorc;
	regs_buff[116] = adapter->stats.hgotc;
	regs_buff[118] = adapter->stats.lenerrs;
	regs_buff[119] = adapter->stats.scvpc;
	regs_buff[120] = adapter->stats.hrmpc;

	for (i = 0; i < 4; i++)
		regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
	for (i = 0; i < 4; i++)
		regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));

	for (i = 0; i < 10; i++)
		regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
	for (i = 0; i < 8; i++)
		regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
	for (i = 0; i < 16; i++)
		regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));

	for (i = 0; i < 4; i++)
		regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));

	for (i = 0; i < 4; i++)
		regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
	for (i = 0; i < 32; i++)
		regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
	for (i = 0; i < 128; i++)
		regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
	for (i = 0; i < 128; i++)
		regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));

	regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
	regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
	regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
	regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
	if (hw->mac.type > e1000_82580) {
		regs_buff[551] = adapter->stats.o2bgptc;
		regs_buff[552] = adapter->stats.b2ospc;
		regs_buff[553] = adapter->stats.o2bspc;
		regs_buff[554] = adapter->stats.b2ogprc;
	}
}

static int igb_get_eeprom_len(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.nvm.word_size * 2;
}

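/*
 * The NVM is word (16-bit) addressable, so the byte range requested via
 * ethtool is widened to whole words: first_word/last_word cover the
 * request, and any odd leading byte is skipped again when copying the
 * result back to the caller.
 */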
static int igb_get_eeprom(struct net_device *netdev,
			  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc(sizeof(u16) *
			      (last_word - first_word + 1), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		ret_val = e1000_read_nvm(hw, first_word,
					 last_word - first_word + 1,
					 eeprom_buff);
	} else {
		for (i = 0; i < last_word - first_word + 1; i++) {
			ret_val = e1000_read_nvm(hw, first_word + i, 1,
						 &eeprom_buff[i]);
			if (ret_val)
				break;
		}
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
	       eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int igb_set_eeprom(struct net_device *netdev,
			  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->nvm.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word */
		/* only the second byte of the word is being modified */
		ret_val = e1000_read_nvm(hw, first_word, 1,
					 &eeprom_buff[0]);
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
		/* need read/modify/write of last changed EEPROM word */
		/* only the first byte of the word is being modified */
		ret_val = e1000_read_nvm(hw, last_word, 1,
					 &eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = e1000_write_nvm(hw, first_word,
				  last_word - first_word + 1, eeprom_buff);

	/* Update the checksum if the write succeeded, and
	 * flush shadow RAM for 82573 controllers */
	if (ret_val == 0)
		e1000_update_nvm_checksum(hw);

	kfree(eeprom_buff);
	return ret_val;
}

static void igb_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *drvinfo)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
	strncpy(drvinfo->version, igb_driver_version,
		sizeof(drvinfo->version) - 1);

	strncpy(drvinfo->fw_version, adapter->fw_version,
		sizeof(drvinfo->fw_version) - 1);
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info) - 1);
	drvinfo->n_stats = IGB_STATS_LEN;
	drvinfo->testinfo_len = IGB_TEST_LEN;
	drvinfo->regdump_len = igb_get_regs_len(netdev);
	drvinfo->eedump_len = igb_get_eeprom_len(netdev);
}

static void igb_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IGB_MAX_RXD;
	ring->tx_max_pending = IGB_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

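/*
 * Requested descriptor counts are clamped to [IGB_MIN_*, IGB_MAX_*] and
 * then rounded up to the required descriptor multiple; for example, with
 * a multiple of 8 a request for 1023 Rx descriptors would become 1024
 * (numbers illustrative; the real bounds come from igb.h).
 */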
static int igb_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *temp_ring;
	int i, err = 0;
	u16 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
	new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
	new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (adapter->num_tx_queues > adapter->num_rx_queues)
		temp_ring = vmalloc(adapter->num_tx_queues *
				    sizeof(struct igb_ring));
	else
		temp_ring = vmalloc(adapter->num_rx_queues *
				    sizeof(struct igb_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igb_down(adapter);

	/*
	 * We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the tx and rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct igb_ring));

			temp_ring[i].count = new_tx_count;
			err = igb_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					igb_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			igb_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct igb_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct igb_ring));

			temp_ring[i].count = new_rx_count;
			err = igb_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					igb_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			igb_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct igb_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}
err_setup:
	igb_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGB_RESETTING, &adapter->state);
	return err;
}
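
/*
 * Register self-test helpers: reg_pattern_test() walks a fixed set of
 * write/read-back patterns through a register while masking off
 * read-only bits, and reg_set_and_check() does a single masked
 * write/verify.  Both return 1 on mismatch and record the failing
 * register offset in *data.
 */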
static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pat, val;
	static const u32 _test[] =
		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
		E1000_WRITE_REG(hw, reg, (_test[pat] & write));
		val = E1000_READ_REG(hw, reg) & mask;
		if (val != (_test[pat] & write & mask)) {
			dev_err(pci_dev_to_dev(adapter->pdev),
				"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
				E1000_REGISTER(hw, reg), val,
				(_test[pat] & write & mask));
			*data = E1000_REGISTER(hw, reg);
			return 1;
		}
	}

	return 0;
}

static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 val;

	E1000_WRITE_REG(hw, reg, write & mask);
	val = E1000_READ_REG(hw, reg);
	if ((write & mask) != (val & mask)) {
		dev_err(pci_dev_to_dev(adapter->pdev),
			"set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
			reg, (val & mask), (write & mask));
		*data = E1000_REGISTER(hw, reg);
		return 1;
	}

	return 0;
}

#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	switch (adapter->hw.mac.type) {
	case e1000_i350:
	case e1000_i354:
		test = reg_test_i350;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_i210:
	case e1000_i211:
		test = reg_test_i210;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_82580:
		test = reg_test_82580;
		toggle = 0x7FEFF3FF;
		break;
	case e1000_82576:
		test = reg_test_82576;
		toggle = 0x7FFFF3FF;
		break;
	default:
		test = reg_test_82575;
		toggle = 0x7FFFF3FF;
		break;
	}

	/* Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writable on newer MACs.
	 */
	before = E1000_READ_REG(hw, E1000_STATUS);
	value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
	E1000_WRITE_REG(hw, E1000_STATUS, toggle);
	after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
	if (value != after) {
		dev_err(pci_dev_to_dev(adapter->pdev),
			"failed STATUS register test got: 0x%08X expected: 0x%08X\n",
			after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	E1000_WRITE_REG(hw, E1000_STATUS, before);

	/* Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg +
						 (i * test->reg_offset),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg +
						  (i * test->reg_offset),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg) +
				       (i * test->reg_offset));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}

static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
	*data = 0;

	/* Validate NVM checksum */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
		*data = 2;

	return *data;
}

static irqreturn_t igb_test_intr(int irq, void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	struct e1000_hw *hw = &adapter->hw;

	adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);

	return IRQ_HANDLED;
}

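/*
 * The interrupt self-test fires each maskable cause through the ICS
 * (interrupt cause set) register and uses the throwaway igb_test_intr()
 * handler above to check that causes arrive only when unmasked (IMS) and
 * never when masked (IMC); a stray or missing interrupt stores a distinct
 * error code (3, 4 or 5) in *data.
 */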
static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 mask, ics_mask, i = 0, shared_int = TRUE;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		if (request_irq(adapter->msix_entries[0].vector,
				&igb_test_intr, 0, netdev->name, adapter)) {
			*data = 1;
			return -1;
		}
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		shared_int = FALSE;
		if (request_irq(irq,
				igb_test_intr, 0, netdev->name, adapter)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
				netdev->name, adapter)) {
		shared_int = FALSE;
	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
			       netdev->name, adapter)) {
		*data = 1;
		return -1;
	}
	dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
		 (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Define all writable bits for ICS */
	switch (hw->mac.type) {
	case e1000_82575:
		ics_mask = 0x37F47EDD;
		break;
	case e1000_82576:
		ics_mask = 0x77D4FBFD;
		break;
	case e1000_82580:
		ics_mask = 0x77DCFED5;
		break;
	case e1000_i350:
	case e1000_i354:
		ics_mask = 0x77DCFED5;
		break;
	case e1000_i210:
	case e1000_i211:
		ics_mask = 0x774CFED5;
		break;
	default:
		ics_mask = 0x7FFFFFFF;
		break;
	}

	/* Test each interrupt */
	for (; i < 31; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!(mask & ics_mask))
			continue;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			E1000_WRITE_REG(hw, E1000_ICR, ~0);

			E1000_WRITE_REG(hw, E1000_IMC, mask);
			E1000_WRITE_REG(hw, E1000_ICS, mask);
			E1000_WRITE_FLUSH(hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;

		/* Flush any pending interrupts */
		E1000_WRITE_REG(hw, E1000_ICR, ~0);

		E1000_WRITE_REG(hw, E1000_IMS, mask);
		E1000_WRITE_REG(hw, E1000_ICS, mask);
		E1000_WRITE_FLUSH(hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			E1000_WRITE_REG(hw, E1000_ICR, ~0);

			E1000_WRITE_REG(hw, E1000_IMC, ~mask);
			E1000_WRITE_REG(hw, E1000_ICS, ~mask);
			E1000_WRITE_FLUSH(hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	if (adapter->msix_entries)
		free_irq(adapter->msix_entries[0].vector, adapter);
	else
		free_irq(irq, adapter);

	return *data;
}

static void igb_free_desc_rings(struct igb_adapter *adapter)
{
	igb_free_tx_resources(&adapter->test_tx_ring);
	igb_free_rx_resources(&adapter->test_rx_ring);
}

static int igb_setup_desc_rings(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IGB_DEFAULT_TXD;
	tx_ring->dev = pci_dev_to_dev(adapter->pdev);
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->vfs_allocated_count;

	if (igb_setup_tx_resources(tx_ring)) {
		ret_val = 1;
		goto err_nomem;
	}

	igb_setup_tctl(adapter);
	igb_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx descriptor ring and Rx buffers */
	rx_ring->count = IGB_DEFAULT_RXD;
	rx_ring->dev = pci_dev_to_dev(adapter->pdev);
	rx_ring->netdev = adapter->netdev;
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
	rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
#endif
	rx_ring->reg_idx = adapter->vfs_allocated_count;

	if (igb_setup_rx_resources(rx_ring)) {
		ret_val = 2;
		goto err_nomem;
	}

	/* set the default queue to queue 0 of PF */
	E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);

	/* enable receive ring */
	igb_setup_rctl(adapter);
	igb_configure_rx_ring(adapter, rx_ring);

	igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));

	return 0;

err_nomem:
	igb_free_desc_rings(adapter);
	return ret_val;
}

static void igb_phy_disable_receiver(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_write_phy_reg(hw, 29, 0x001F);
	e1000_write_phy_reg(hw, 30, 0x8FFC);
	e1000_write_phy_reg(hw, 29, 0x001A);
	e1000_write_phy_reg(hw, 30, 0x8FF0);
}

static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;

	hw->mac.autoneg = FALSE;

	if (hw->phy.type == e1000_phy_m88) {
		if (hw->phy.id != I210_I_PHY_ID) {
			/* Auto-MDI/MDIX Off */
			e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
			/* reset to update Auto-MDI/MDIX */
			e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
			/* autoneg off */
			e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
		} else {
			/* force 1000, set loopback */
			e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
			e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
		}
	} else {
		/* enable MII loopback */
		if (hw->phy.type == e1000_phy_82580)
			e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
	}

	/* force 1000, set loopback */
	e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
		     E1000_CTRL_FD | /* Force Duplex to FULL */
		     E1000_CTRL_SLU); /* Set link up enable bit */

	if (hw->phy.type == e1000_phy_m88)
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (hw->phy.type == e1000_phy_m88)
		igb_phy_disable_receiver(adapter);

	mdelay(500);
	return 0;
}

static int igb_set_phy_loopback(struct igb_adapter *adapter)
{
	return igb_integrated_phy_loopback(adapter);
}

static int igb_setup_loopback_test(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);

	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
		if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
		    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {

			/* Enable DH89xxCC MPHY for near end loopback */
			reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
			reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
			      E1000_MPHY_PCS_CLK_REG_OFFSET;
			E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);

			reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
			reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
			E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
		}

		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg |= E1000_RCTL_LBM_TCVR;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);

		E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);

		reg = E1000_READ_REG(hw, E1000_CTRL);
		reg &= ~(E1000_CTRL_RFCE |
			 E1000_CTRL_TFCE |
			 E1000_CTRL_LRST);
		reg |= E1000_CTRL_SLU |
		       E1000_CTRL_FD;
		E1000_WRITE_REG(hw, E1000_CTRL, reg);

		/* Unset switch control to serdes energy detect */
		reg = E1000_READ_REG(hw, E1000_CONNSW);
		reg &= ~E1000_CONNSW_ENRGSRC;
		E1000_WRITE_REG(hw, E1000_CONNSW, reg);

		/* Unset sigdetect for SERDES loopback on
		 * 82580 and newer devices
		 */
		if (hw->mac.type >= e1000_82580) {
			reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
			reg |= E1000_PCS_CFG_IGN_SD;
			E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
		}

		/* Set PCS register for forced speed */
		reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
		reg &= ~E1000_PCS_LCTL_AN_ENABLE;   /* Disable Autoneg */
		reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
		       E1000_PCS_LCTL_FSV_1000 |    /* Force 1000 */
		       E1000_PCS_LCTL_FDV_FULL |    /* SerDes Full duplex */
		       E1000_PCS_LCTL_FSD |         /* Force Speed */
		       E1000_PCS_LCTL_FORCE_LINK;   /* Force Link */
		E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);

		return 0;
	}

	return igb_set_phy_loopback(adapter);
}

static void igb_loopback_cleanup(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
		u32 reg;

		/* Disable near end loopback on DH89xxCC */
		reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
		reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
		      E1000_MPHY_PCS_CLK_REG_OFFSET;
		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);

		reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
		reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
		E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
	}

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	hw->mac.autoneg = TRUE;
	e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
	if (phy_reg & MII_CR_LOOPBACK) {
		phy_reg &= ~MII_CR_LOOPBACK;
		if (hw->phy.id == I210_I_PHY_ID)
			e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
		e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
		e1000_phy_commit(hw);
	}
}
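
/*
 * Loopback test frames carry a recognizable pattern: 0xFF in the first
 * half, 0xAA in the second, plus 0xBE/0xAF marker bytes that
 * igb_check_lbtest_frame() verifies on the receive side.
 */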
static void igb_create_lbtest_frame(struct sk_buff *skb,
				    unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size /= 2;
	memset(&skb->data[frame_size], 0xAA, frame_size - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}

static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
				  unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
	data = rx_buffer->skb->data;
#else
	data = kmap(rx_buffer->page);
#endif

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
	kunmap(rx_buffer->page);
#endif
	return match;
}

static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
				struct igb_ring *tx_ring,
				unsigned int size)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *rx_buffer_info;
	struct igb_tx_buffer *tx_buffer_info;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		/* check rx buffer */
		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer_info->dma,
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
					IGB_RX_HDR_LEN,
#else
					IGB_RX_BUFSZ,
#endif
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (igb_check_lbtest_frame(rx_buffer_info, size))
			count++;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer_info->dma,
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
					   IGB_RX_HDR_LEN,
#else
					   IGB_RX_BUFSZ,
#endif
					   DMA_FROM_DEVICE);

		/* unmap buffer on tx side */
		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);

		/* increment rx/tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
	}

	/* re-map buffers to ring, store next to clean values */
	igb_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}

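/*
 * Each pass transmits a burst of 64 clones of the test skb and expects
 * the same 64 frames back on the receive ring; the loop count is scaled
 * so the larger of the two rings wraps roughly twice, exercising
 * descriptor reuse as well as the data path.
 */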
static int igb_run_loopback_test(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	u16 i, j, lc, good_cnt;
	int ret_val = 0;
	unsigned int size = IGB_RX_HDR_LEN;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	igb_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring.
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) { /* loop count loop */
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue */
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from tx to rx */
		msleep(200);

		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	} /* end loop count loop */

	/* free the original skb */
	kfree_skb(skb);

	return ret_val;
}

static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
{
	/* PHY loopback cannot be performed if SoL/IDER
	 * sessions are active */
	if (e1000_check_reset_block(&adapter->hw)) {
		dev_err(pci_dev_to_dev(adapter->pdev),
			"Cannot do PHY loopback test when SoL/IDER is active.\n");
		*data = 0;
		goto out;
	}
	if (adapter->hw.mac.type == e1000_i354) {
		dev_info(&adapter->pdev->dev,
			 "Loopback test not supported on i354.\n");
		*data = 0;
		goto out;
	}
	*data = igb_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = igb_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = igb_run_loopback_test(adapter);

	igb_loopback_cleanup(adapter);

err_loopback:
	igb_free_desc_rings(adapter);
out:
	return *data;
}

static int igb_link_test(struct igb_adapter *adapter, u64 *data)
{
	u32 link;
	int i, time;

	*data = 0;
	time = 0;
	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		int i = 0;

		adapter->hw.mac.serdes_has_link = FALSE;

		/* On some blade server designs, link establishment
		 * could take as long as 2-3 minutes */
		do {
			e1000_check_for_link(&adapter->hw);
			if (adapter->hw.mac.serdes_has_link)
				goto out;
			msleep(20);
		} while (i++ < 3750);

		*data = 1;
	} else {
		for (i = 0; i < IGB_MAX_LINK_TRIES; i++) {
			link = igb_has_link(adapter);
			if (link)
				goto out;
			else {
				time++;
				msleep(1000);
			}
		}
		if (!link)
			*data = 1;
	}
out:
	return *data;
}

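/*
 * ethtool self-test entry point.  Offline mode saves the link
 * configuration, takes the interface down and runs the link, register,
 * EEPROM, interrupt and loopback tests (results land in data[4] and
 * data[0..3] respectively) before restoring the configuration; online
 * mode only performs the link test.  Triggered from userspace with,
 * e.g., "ethtool -t eth0 offline" (interface name illustrative).
 */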
1758static void igb_diag_test(struct net_device *netdev,
1759 struct ethtool_test *eth_test, u64 *data)
1760{
1761 struct igb_adapter *adapter = netdev_priv(netdev);
1762 u16 autoneg_advertised;
1763 u8 forced_speed_duplex, autoneg;
1764 bool if_running = netif_running(netdev);
1765
1766 set_bit(__IGB_TESTING, &adapter->state);
1767 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1768 /* Offline tests */
1769
1770 /* save speed, duplex, autoneg settings */
1771 autoneg_advertised = adapter->hw.phy.autoneg_advertised;
1772 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1773 autoneg = adapter->hw.mac.autoneg;
1774
1775 dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
1776
1777 /* power up link for link test */
1778 igb_power_up_link(adapter);
1779
1780 /* Link test performed before hardware reset so autoneg doesn't
1781 * interfere with test result */
1782 if (igb_link_test(adapter, &data[4]))
1783 eth_test->flags |= ETH_TEST_FL_FAILED;
1784
1785 if (if_running)
1786 /* indicate we're in test mode */
1787 dev_close(netdev);
1788 else
1789 igb_reset(adapter);
1790
1791 if (igb_reg_test(adapter, &data[0]))
1792 eth_test->flags |= ETH_TEST_FL_FAILED;
1793
1794 igb_reset(adapter);
1795 if (igb_eeprom_test(adapter, &data[1]))
1796 eth_test->flags |= ETH_TEST_FL_FAILED;
1797
1798 igb_reset(adapter);
1799 if (igb_intr_test(adapter, &data[2]))
1800 eth_test->flags |= ETH_TEST_FL_FAILED;
1801
1802 igb_reset(adapter);
1803
1804 /* power up link for loopback test */
1805 igb_power_up_link(adapter);
1806
1807 if (igb_loopback_test(adapter, &data[3]))
1808 eth_test->flags |= ETH_TEST_FL_FAILED;
1809
1810 /* restore speed, duplex, autoneg settings */
1811 adapter->hw.phy.autoneg_advertised = autoneg_advertised;
1812 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
1813 adapter->hw.mac.autoneg = autoneg;
1814
1815 /* force this routine to wait until autoneg complete/timeout */
1816 adapter->hw.phy.autoneg_wait_to_complete = TRUE;
1817 igb_reset(adapter);
1818 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
1819
1820 clear_bit(__IGB_TESTING, &adapter->state);
1821 if (if_running)
1822 dev_open(netdev);
1823 } else {
1824 dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n");
1825
1826 /* PHY is powered down when interface is down */
1827 if (if_running && igb_link_test(adapter, &data[4]))
1828 eth_test->flags |= ETH_TEST_FL_FAILED;
1829 else
1830 data[4] = 0;
1831
1832 /* Online tests aren't run; pass by default */
1833 data[0] = 0;
1834 data[1] = 0;
1835 data[2] = 0;
1836 data[3] = 0;
1837
1838 clear_bit(__IGB_TESTING, &adapter->state);
1839 }
1840 msleep_interruptible(4 * 1000);
1841}
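
/*
 * Example (illustrative, not part of the driver): the data[] array filled
 * in by igb_diag_test() pairs index-for-index with the ETH_SS_TEST string
 * set, in this fixed order:
 *
 *	data[0]  register test   (offline only)
 *	data[1]  eeprom test     (offline only)
 *	data[2]  interrupt test  (offline only)
 *	data[3]  loopback test   (offline only)
 *	data[4]  link test       (offline and online)
 *
 * A minimal userspace sketch of the ETHTOOL_TEST ioctl this handler
 * serves, assuming an interface named "eth0" and the usual
 * <linux/ethtool.h>, <linux/sockios.h>, <net/if.h> headers:
 *
 *	struct { struct ethtool_test cmd; __u64 data[5]; } test;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&test, 0, sizeof(test));
 *	test.cmd.cmd = ETHTOOL_TEST;
 *	test.cmd.flags = ETH_TEST_FL_OFFLINE;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)&test;
 *	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0 && test.data[4])
 *		fprintf(stderr, "link test failed\n");
 */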
1842
1843static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1844{
1845 struct igb_adapter *adapter = netdev_priv(netdev);
1846
1847 wol->supported = WAKE_UCAST | WAKE_MCAST |
1848 WAKE_BCAST | WAKE_MAGIC |
1849 WAKE_PHY;
1850 wol->wolopts = 0;
1851
1852 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
1853 return;
1854
1855 /* apply any specific unsupported masks here */
1856 switch (adapter->hw.device_id) {
1857 default:
1858 break;
1859 }
1860
1861 if (adapter->wol & E1000_WUFC_EX)
1862 wol->wolopts |= WAKE_UCAST;
1863 if (adapter->wol & E1000_WUFC_MC)
1864 wol->wolopts |= WAKE_MCAST;
1865 if (adapter->wol & E1000_WUFC_BC)
1866 wol->wolopts |= WAKE_BCAST;
1867 if (adapter->wol & E1000_WUFC_MAG)
1868 wol->wolopts |= WAKE_MAGIC;
1869 if (adapter->wol & E1000_WUFC_LNKC)
1870 wol->wolopts |= WAKE_PHY;
1871}
1872
1873static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1874{
1875 struct igb_adapter *adapter = netdev_priv(netdev);
1876
1877 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
1878 return -EOPNOTSUPP;
1879
1880 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
1881 return wol->wolopts ? -EOPNOTSUPP : 0;
1882
1883 /* these settings will always override what we currently have */
1884 adapter->wol = 0;
1885
1886 if (wol->wolopts & WAKE_UCAST)
1887 adapter->wol |= E1000_WUFC_EX;
1888 if (wol->wolopts & WAKE_MCAST)
1889 adapter->wol |= E1000_WUFC_MC;
1890 if (wol->wolopts & WAKE_BCAST)
1891 adapter->wol |= E1000_WUFC_BC;
1892 if (wol->wolopts & WAKE_MAGIC)
1893 adapter->wol |= E1000_WUFC_MAG;
1894 if (wol->wolopts & WAKE_PHY)
1895 adapter->wol |= E1000_WUFC_LNKC;
1896 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1897
1898 return 0;
1899}
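
/*
 * Example (illustrative only; interface name assumed): the WAKE_* to
 * E1000_WUFC_* mapping is symmetric between igb_get_wol() and
 * igb_set_wol(), so settings round-trip through standard ethtool:
 *
 *	ethtool -s eth0 wol g      WAKE_MAGIC -> E1000_WUFC_MAG
 *	ethtool -s eth0 wol ubg    WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC
 *	ethtool eth0               reports "Wake-on" via igb_get_wol()
 *
 * Requests containing WAKE_ARP or WAKE_MAGICSECURE fail with -EOPNOTSUPP.
 */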
1900
1901/* bit defines for adapter->led_status */
1902#ifdef HAVE_ETHTOOL_SET_PHYS_ID
1903static int igb_set_phys_id(struct net_device *netdev,
1904 enum ethtool_phys_id_state state)
1905{
1906 struct igb_adapter *adapter = netdev_priv(netdev);
1907 struct e1000_hw *hw = &adapter->hw;
1908
1909 switch (state) {
1910 case ETHTOOL_ID_ACTIVE:
1911 e1000_blink_led(hw);
1912 return 2;
1913 case ETHTOOL_ID_ON:
1914 e1000_led_on(hw);
1915 break;
1916 case ETHTOOL_ID_OFF:
1917 e1000_led_off(hw);
1918 break;
1919 case ETHTOOL_ID_INACTIVE:
1920 e1000_led_off(hw);
1921 e1000_cleanup_led(hw);
1922 break;
1923 }
1924
1925 return 0;
1926}
1927#else
1928static int igb_phys_id(struct net_device *netdev, u32 data)
1929{
1930 struct igb_adapter *adapter = netdev_priv(netdev);
1931 struct e1000_hw *hw = &adapter->hw;
1932 unsigned long timeout;
1933
1934 timeout = data * 1000;
1935
	/*
	 * msleep_interruptible() only accepts an unsigned int, so we are
	 * limited in how long a duration we can wait
	 */
1940 if (!timeout || timeout > UINT_MAX)
1941 timeout = UINT_MAX;
1942
1943 e1000_blink_led(hw);
1944 msleep_interruptible(timeout);
1945
1946 e1000_led_off(hw);
1947 e1000_cleanup_led(hw);
1948
1949 return 0;
1950}
1951#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
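
/*
 * Example (illustrative only; interface name assumed): both LED-identify
 * paths above answer "ethtool -p eth0 5", which blinks the port LED for
 * five seconds. With HAVE_ETHTOOL_SET_PHYS_ID the ethtool core runs the
 * on/off state machine and igb_set_phys_id() only switches the LED; on
 * older kernels igb_phys_id() sleeps for the whole duration itself.
 */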
1952
1953static int igb_set_coalesce(struct net_device *netdev,
1954 struct ethtool_coalesce *ec)
1955{
1956 struct igb_adapter *adapter = netdev_priv(netdev);
1957 int i;
1958
	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 3) &&
	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2)) {
		printk(KERN_ERR "igb: set_coalesce: invalid rx_coalesce_usecs\n");
		return -EINVAL;
	}
1967
1968 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1969 ((ec->tx_coalesce_usecs > 3) &&
1970 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1971 (ec->tx_coalesce_usecs == 2))
1972 return -EINVAL;
1973
1974 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
1975 return -EINVAL;
1976
1977 if (ec->tx_max_coalesced_frames_irq)
1978 adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
1979
1980 /* If ITR is disabled, disable DMAC */
1981 if (ec->rx_coalesce_usecs == 0) {
1982 adapter->dmac = IGB_DMAC_DISABLE;
1983 }
1984
	/* store the setting in hardware ITR units; the << 2 converts
	 * microseconds to the hardware's 0.25 usec granularity, while
	 * values <= 3 are special mode selectors kept as-is */
	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
1990
	/* same conversion for the Tx direction */
1992 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
1993 adapter->tx_itr_setting = adapter->rx_itr_setting;
1994 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
1995 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
1996 else
1997 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
1998
1999 for (i = 0; i < adapter->num_q_vectors; i++) {
2000 struct igb_q_vector *q_vector = adapter->q_vector[i];
2001 q_vector->tx.work_limit = adapter->tx_work_limit;
2002 if (q_vector->rx.ring)
2003 q_vector->itr_val = adapter->rx_itr_setting;
2004 else
2005 q_vector->itr_val = adapter->tx_itr_setting;
2006 if (q_vector->itr_val && q_vector->itr_val <= 3)
2007 q_vector->itr_val = IGB_START_ITR;
2008 q_vector->set_itr = 1;
2009 }
2010
2011 return 0;
2012}
2013
2014static int igb_get_coalesce(struct net_device *netdev,
2015 struct ethtool_coalesce *ec)
2016{
2017 struct igb_adapter *adapter = netdev_priv(netdev);
2018
2019 if (adapter->rx_itr_setting <= 3)
2020 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2021 else
2022 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2023
2024 ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
2025
2026 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
2027 if (adapter->tx_itr_setting <= 3)
2028 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2029 else
2030 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2031 }
2032
2033 return 0;
2034}
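
/*
 * Worked example of the conversion above (a sketch, assuming the
 * 0.25 usec hardware ITR granularity implied by the << 2 / >> 2 pair):
 *
 *	ethtool -C eth0 rx-usecs 100    rx_itr_setting = 100 << 2 = 400
 *	ethtool -c eth0                 rx-usecs reported as 400 >> 2 = 100
 *
 * Values 1-3 bypass the shift and select the driver's dynamic ITR modes,
 * and rx-usecs 0 disables moderation (which also disables DMA coalescing
 * in igb_set_coalesce()).
 */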
2035
2036static int igb_nway_reset(struct net_device *netdev)
2037{
2038 struct igb_adapter *adapter = netdev_priv(netdev);
2039 if (netif_running(netdev))
2040 igb_reinit_locked(adapter);
2041 return 0;
2042}
2043
2044#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
2045static int igb_get_sset_count(struct net_device *netdev, int sset)
2046{
2047 switch (sset) {
2048 case ETH_SS_STATS:
2049 return IGB_STATS_LEN;
2050 case ETH_SS_TEST:
2051 return IGB_TEST_LEN;
2052 default:
		return -EOPNOTSUPP;
2054 }
2055}
2056#else
2057static int igb_get_stats_count(struct net_device *netdev)
2058{
2059 return IGB_STATS_LEN;
2060}
2061
2062static int igb_diag_test_count(struct net_device *netdev)
2063{
2064 return IGB_TEST_LEN;
2065}
2066#endif
2067
2068static void igb_get_ethtool_stats(struct net_device *netdev,
2069 struct ethtool_stats *stats, u64 *data)
2070{
2071 struct igb_adapter *adapter = netdev_priv(netdev);
2072#ifdef HAVE_NETDEV_STATS_IN_NETDEV
2073 struct net_device_stats *net_stats = &netdev->stats;
2074#else
2075 struct net_device_stats *net_stats = &adapter->net_stats;
2076#endif
2077 u64 *queue_stat;
2078 int i, j, k;
2079 char *p;
2080
2081 igb_update_stats(adapter);
2082
2083 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2084 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
2085 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
2086 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2087 }
2088 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
2089 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
2090 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
2091 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2092 }
2093 for (j = 0; j < adapter->num_tx_queues; j++) {
2094 queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
2095 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
2096 data[i] = queue_stat[k];
2097 }
2098 for (j = 0; j < adapter->num_rx_queues; j++) {
2099 queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
2100 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
2101 data[i] = queue_stat[k];
2102 }
2103}
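
/*
 * Note on layout: the data[] array produced above is strictly ordered --
 * IGB_GLOBAL_STATS_LEN adapter counters, then IGB_NETDEV_STATS_LEN
 * net_device counters, then IGB_TX_QUEUE_STATS_LEN values per Tx queue,
 * then IGB_RX_QUEUE_STATS_LEN values per Rx queue. It must stay in
 * lock-step with igb_get_strings() below, because ethtool pairs names
 * and values purely by index.
 */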
2104
2105static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2106{
2107 struct igb_adapter *adapter = netdev_priv(netdev);
2108 u8 *p = data;
2109 int i;
2110
2111 switch (stringset) {
2112 case ETH_SS_TEST:
2113 memcpy(data, *igb_gstrings_test,
2114 IGB_TEST_LEN*ETH_GSTRING_LEN);
2115 break;
2116 case ETH_SS_STATS:
2117 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2118 memcpy(p, igb_gstrings_stats[i].stat_string,
2119 ETH_GSTRING_LEN);
2120 p += ETH_GSTRING_LEN;
2121 }
2122 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
2123 memcpy(p, igb_gstrings_net_stats[i].stat_string,
2124 ETH_GSTRING_LEN);
2125 p += ETH_GSTRING_LEN;
2126 }
2127 for (i = 0; i < adapter->num_tx_queues; i++) {
2128 sprintf(p, "tx_queue_%u_packets", i);
2129 p += ETH_GSTRING_LEN;
2130 sprintf(p, "tx_queue_%u_bytes", i);
2131 p += ETH_GSTRING_LEN;
2132 sprintf(p, "tx_queue_%u_restart", i);
2133 p += ETH_GSTRING_LEN;
2134 }
2135 for (i = 0; i < adapter->num_rx_queues; i++) {
2136 sprintf(p, "rx_queue_%u_packets", i);
2137 p += ETH_GSTRING_LEN;
2138 sprintf(p, "rx_queue_%u_bytes", i);
2139 p += ETH_GSTRING_LEN;
2140 sprintf(p, "rx_queue_%u_drops", i);
2141 p += ETH_GSTRING_LEN;
2142 sprintf(p, "rx_queue_%u_csum_err", i);
2143 p += ETH_GSTRING_LEN;
2144 sprintf(p, "rx_queue_%u_alloc_failed", i);
2145 p += ETH_GSTRING_LEN;
2146 sprintf(p, "rx_queue_%u_ipv4_packets", i);
2147 p += ETH_GSTRING_LEN;
2148 sprintf(p, "rx_queue_%u_ipv4e_packets", i);
2149 p += ETH_GSTRING_LEN;
2150 sprintf(p, "rx_queue_%u_ipv6_packets", i);
2151 p += ETH_GSTRING_LEN;
2152 sprintf(p, "rx_queue_%u_ipv6e_packets", i);
2153 p += ETH_GSTRING_LEN;
2154 sprintf(p, "rx_queue_%u_tcp_packets", i);
2155 p += ETH_GSTRING_LEN;
2156 sprintf(p, "rx_queue_%u_udp_packets", i);
2157 p += ETH_GSTRING_LEN;
2158 sprintf(p, "rx_queue_%u_sctp_packets", i);
2159 p += ETH_GSTRING_LEN;
2160 sprintf(p, "rx_queue_%u_nfs_packets", i);
2161 p += ETH_GSTRING_LEN;
2162 }
2163/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2164 break;
2165 }
2166}
2167
2168#ifdef HAVE_ETHTOOL_GET_TS_INFO
2169static int igb_get_ts_info(struct net_device *dev,
2170 struct ethtool_ts_info *info)
2171{
2172 struct igb_adapter *adapter = netdev_priv(dev);
2173
2174 switch (adapter->hw.mac.type) {
2175#ifdef HAVE_PTP_1588_CLOCK
2176 case e1000_82575:
2177 info->so_timestamping =
2178 SOF_TIMESTAMPING_TX_SOFTWARE |
2179 SOF_TIMESTAMPING_RX_SOFTWARE |
2180 SOF_TIMESTAMPING_SOFTWARE;
2181 return 0;
2182 case e1000_82576:
2183 case e1000_82580:
2184 case e1000_i350:
2185 case e1000_i354:
2186 case e1000_i210:
2187 case e1000_i211:
2188 info->so_timestamping =
2189 SOF_TIMESTAMPING_TX_SOFTWARE |
2190 SOF_TIMESTAMPING_RX_SOFTWARE |
2191 SOF_TIMESTAMPING_SOFTWARE |
2192 SOF_TIMESTAMPING_TX_HARDWARE |
2193 SOF_TIMESTAMPING_RX_HARDWARE |
2194 SOF_TIMESTAMPING_RAW_HARDWARE;
2195
2196 if (adapter->ptp_clock)
2197 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2198 else
2199 info->phc_index = -1;
2200
2201 info->tx_types =
2202 (1 << HWTSTAMP_TX_OFF) |
2203 (1 << HWTSTAMP_TX_ON);
2204
2205 info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
2206
2207 /* 82576 does not support timestamping all packets. */
2208 if (adapter->hw.mac.type >= e1000_82580)
2209 info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
2210 else
2211 info->rx_filters |=
2212 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2213 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2214 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
2215 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
2216 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
2217 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
2218 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2219
2220 return 0;
2221#endif /* HAVE_PTP_1588_CLOCK */
2222 default:
2223 return -EOPNOTSUPP;
2224 }
2225}
2226#endif /* HAVE_ETHTOOL_GET_TS_INFO */
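
/*
 * Example (illustrative only; interface name assumed): the capability
 * split above is what "ethtool -T eth0" reports -- software-only
 * timestamping on the 82575, and hardware Tx/Rx timestamping plus a PHC
 * index on 82576 and later, where parts older than the 82580 can only
 * timestamp PTP event packets rather than all packets.
 */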
2227
2228#ifdef CONFIG_PM_RUNTIME
2229static int igb_ethtool_begin(struct net_device *netdev)
2230{
2231 struct igb_adapter *adapter = netdev_priv(netdev);
2232
2233 pm_runtime_get_sync(&adapter->pdev->dev);
2234
2235 return 0;
2236}
2237
2238static void igb_ethtool_complete(struct net_device *netdev)
2239{
2240 struct igb_adapter *adapter = netdev_priv(netdev);
2241
2242 pm_runtime_put(&adapter->pdev->dev);
2243}
2244#endif /* CONFIG_PM_RUNTIME */
2245
2246#ifndef HAVE_NDO_SET_FEATURES
2247static u32 igb_get_rx_csum(struct net_device *netdev)
2248{
2249 return !!(netdev->features & NETIF_F_RXCSUM);
2250}
2251
2252static int igb_set_rx_csum(struct net_device *netdev, u32 data)
2253{
2254 const u32 feature_list = NETIF_F_RXCSUM;
2255
2256 if (data)
2257 netdev->features |= feature_list;
2258 else
2259 netdev->features &= ~feature_list;
2260
2261 return 0;
2262}
2263
2264static int igb_set_tx_csum(struct net_device *netdev, u32 data)
2265{
2266 struct igb_adapter *adapter = netdev_priv(netdev);
2267#ifdef NETIF_F_IPV6_CSUM
2268 u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2269#else
2270 u32 feature_list = NETIF_F_IP_CSUM;
2271#endif
2272
2273 if (adapter->hw.mac.type >= e1000_82576)
2274 feature_list |= NETIF_F_SCTP_CSUM;
2275
2276 if (data)
2277 netdev->features |= feature_list;
2278 else
2279 netdev->features &= ~feature_list;
2280
2281 return 0;
2282}
2283
2284#ifdef NETIF_F_TSO
2285static int igb_set_tso(struct net_device *netdev, u32 data)
2286{
2287#ifdef NETIF_F_TSO6
2288 const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
2289#else
2290 const u32 feature_list = NETIF_F_TSO;
2291#endif
2292
2293 if (data)
2294 netdev->features |= feature_list;
2295 else
2296 netdev->features &= ~feature_list;
2297
2298#ifndef HAVE_NETDEV_VLAN_FEATURES
2299 if (!data) {
2300 struct igb_adapter *adapter = netdev_priv(netdev);
2301 struct net_device *v_netdev;
2302 int i;
2303
2304 /* disable TSO on all VLANs if they're present */
2305 if (!adapter->vlgrp)
2306 goto tso_out;
2307
2308 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
2309 v_netdev = vlan_group_get_device(adapter->vlgrp, i);
2310 if (!v_netdev)
2311 continue;
2312
2313 v_netdev->features &= ~feature_list;
2314 vlan_group_set_device(adapter->vlgrp, i, v_netdev);
2315 }
2316 }
2317
2318tso_out:
2319
2320#endif /* HAVE_NETDEV_VLAN_FEATURES */
2321 return 0;
2322}
2323
2324#endif /* NETIF_F_TSO */
2325#ifdef ETHTOOL_GFLAGS
2326static int igb_set_flags(struct net_device *netdev, u32 data)
2327{
2328 u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
2329 ETH_FLAG_RXHASH;
2330#ifndef HAVE_VLAN_RX_REGISTER
2331 u32 changed = netdev->features ^ data;
2332#endif
2333 int rc;
2334#ifndef IGB_NO_LRO
2335
2336 supported_flags |= ETH_FLAG_LRO;
2337#endif
	/*
	 * Since there is no support for enabling Tx VLAN acceleration
	 * separately, make sure the Tx flag is cleared when the Rx
	 * flag is.
	 */
	if (!(data & ETH_FLAG_RXVLAN))
		data &= ~ETH_FLAG_TXVLAN;
2344
2345 rc = ethtool_op_set_flags(netdev, data, supported_flags);
2346 if (rc)
2347 return rc;
2348#ifndef HAVE_VLAN_RX_REGISTER
2349
2350 if (changed & ETH_FLAG_RXVLAN)
2351 igb_vlan_mode(netdev, data);
2352#endif
2353
2354 return 0;
2355}
2356
2357#endif /* ETHTOOL_GFLAGS */
2358#endif /* HAVE_NDO_SET_FEATURES */
2359#ifdef ETHTOOL_SADV_COAL
static int igb_set_adv_coal(struct net_device *netdev, struct ethtool_value *edata)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	switch (edata->data) {
	case IGB_DMAC_DISABLE:
	case IGB_DMAC_MIN:
	case IGB_DMAC_500:
	case IGB_DMAC_EN_DEFAULT:
	case IGB_DMAC_2000:
	case IGB_DMAC_3000:
	case IGB_DMAC_4000:
	case IGB_DMAC_5000:
	case IGB_DMAC_6000:
	case IGB_DMAC_7000:
	case IGB_DMAC_8000:
	case IGB_DMAC_9000:
	case IGB_DMAC_MAX:
		adapter->dmac = edata->data;
		break;
	default:
		adapter->dmac = IGB_DMAC_DISABLE;
		printk(KERN_WARNING
		       "set_dmac: invalid setting, setting DMAC to %d\n",
		       adapter->dmac);
	}
	printk(KERN_INFO "%s: setting DMAC to %d\n", netdev->name,
	       adapter->dmac);
	return 0;
}
2412#endif /* ETHTOOL_SADV_COAL */
2413#ifdef ETHTOOL_GADV_COAL
static void igb_get_dmac(struct net_device *netdev,
			 struct ethtool_value *edata)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	edata->data = adapter->dmac;
}
2422#endif
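
/*
 * Example (a sketch, not driver code): the advanced-coalescing pair
 * above round-trips the DMA coalescing watchdog through an
 * ethtool_value, e.g. for a hypothetical in-kernel caller:
 *
 *	struct ethtool_value ev = { .data = IGB_DMAC_3000 };
 *
 *	igb_set_adv_coal(netdev, &ev);      select the IGB_DMAC_3000 level
 *	igb_get_dmac(netdev, &ev);          ev.data reads back IGB_DMAC_3000
 *
 * These ops exist only when the out-of-tree ETHTOOL_[GS]ADV_COAL
 * extensions are defined; stock ethtool has no matching subcommand.
 */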
2423
2424#ifdef ETHTOOL_GEEE
2425static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2426{
2427 struct igb_adapter *adapter = netdev_priv(netdev);
2428 struct e1000_hw *hw = &adapter->hw;
	s32 ret_val;
2430 u16 phy_data;
2431
2432 if ((hw->mac.type < e1000_i350) ||
2433 (hw->phy.media_type != e1000_media_type_copper))
2434 return -EOPNOTSUPP;
2435
2436 edata->supported = (SUPPORTED_1000baseT_Full |
2437 SUPPORTED_100baseT_Full);
2438
2439 if (!hw->dev_spec._82575.eee_disable)
2440 edata->advertised =
2441 mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
2442
2443 /* The IPCNFG and EEER registers are not supported on I354. */
2444 if (hw->mac.type == e1000_i354) {
2445 e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
2446 } else {
2447 u32 eeer;
2448
2449 eeer = E1000_READ_REG(hw, E1000_EEER);
2450
2451 /* EEE status on negotiated link */
2452 if (eeer & E1000_EEER_EEE_NEG)
2453 edata->eee_active = true;
2454
2455 if (eeer & E1000_EEER_TX_LPI_EN)
2456 edata->tx_lpi_enabled = true;
2457 }
2458
2459 /* EEE Link Partner Advertised */
2460 switch (hw->mac.type) {
2461 case e1000_i350:
2462 ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
2463 &phy_data);
2464 if (ret_val)
2465 return -ENODATA;
2466
2467 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2468
2469 break;
2470 case e1000_i354:
2471 case e1000_i210:
2472 case e1000_i211:
2473 ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
2474 E1000_EEE_LP_ADV_DEV_I210,
2475 &phy_data);
2476 if (ret_val)
2477 return -ENODATA;
2478
2479 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2480
2481 break;
2482 default:
2483 break;
2484 }
2485
2486 edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
2487
2488 if ((hw->mac.type == e1000_i354) &&
2489 (edata->eee_enabled))
2490 edata->tx_lpi_enabled = true;
2491
2492 /*
2493 * report correct negotiated EEE status for devices that
2494 * wrongly report EEE at half-duplex
2495 */
2496 if (adapter->link_duplex == HALF_DUPLEX) {
2497 edata->eee_enabled = false;
2498 edata->eee_active = false;
2499 edata->tx_lpi_enabled = false;
		edata->advertised = 0;
2501 }
2502
2503 return 0;
2504}
2505#endif
2506
2507#ifdef ETHTOOL_SEEE
2508static int igb_set_eee(struct net_device *netdev,
2509 struct ethtool_eee *edata)
2510{
2511 struct igb_adapter *adapter = netdev_priv(netdev);
2512 struct e1000_hw *hw = &adapter->hw;
2513 struct ethtool_eee eee_curr;
2514 s32 ret_val;
2515
2516 if ((hw->mac.type < e1000_i350) ||
2517 (hw->phy.media_type != e1000_media_type_copper))
2518 return -EOPNOTSUPP;
2519
2520 ret_val = igb_get_eee(netdev, &eee_curr);
2521 if (ret_val)
2522 return ret_val;
2523
2524 if (eee_curr.eee_enabled) {
2525 if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
2526 dev_err(pci_dev_to_dev(adapter->pdev),
2527 "Setting EEE tx-lpi is not supported\n");
2528 return -EINVAL;
2529 }
2530
		/* Tx LPI timer is not currently implemented */
2532 if (edata->tx_lpi_timer) {
2533 dev_err(pci_dev_to_dev(adapter->pdev),
2534 "Setting EEE Tx LPI timer is not supported\n");
2535 return -EINVAL;
2536 }
2537
2538 if (edata->advertised &
2539 ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
2540 dev_err(pci_dev_to_dev(adapter->pdev),
				"EEE advertisement supports only 100baseTX and/or 1000baseT full duplex\n");
2542 return -EINVAL;
2543 }
2544
2545 } else if (!edata->eee_enabled) {
2546 dev_err(pci_dev_to_dev(adapter->pdev),
2547 "Setting EEE options is not supported with EEE disabled\n");
2548 return -EINVAL;
2549 }
2550
2551 adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
2552
2553 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
2554 hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
2555
2556 /* reset link */
2557 if (netif_running(netdev))
2558 igb_reinit_locked(adapter);
2559 else
2560 igb_reset(adapter);
2561 }
2562
2563 return 0;
2564}
2565#endif /* ETHTOOL_SEEE */
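
/*
 * Example (illustrative only; interface name assumed): given the
 * validation in igb_set_eee(), userspace can only toggle EEE and trim
 * the advertised modes to 100baseTX-Full and/or 1000baseT-Full:
 *
 *	ethtool --set-eee eth0 eee on
 *	ethtool --set-eee eth0 advertise 0x8     100baseTX-Full only
 *	ethtool --show-eee eth0
 *
 * Requests that touch tx-lpi or tx-timer are rejected with -EINVAL.
 */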
2566
2567#ifdef ETHTOOL_GRXRINGS
2568static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
2569 struct ethtool_rxnfc *cmd)
2570{
2571 cmd->data = 0;
2572
2573 /* Report default options for RSS on igb */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V4_FLOW:
		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V6_FLOW:
		if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
2599 default:
2600 return -EINVAL;
2601 }
2602
2603 return 0;
2604}
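
/*
 * Example (illustrative only; interface name assumed): decoded through
 * "ethtool -n eth0 rx-flow-hash tcp4", the defaults reported above are
 * src/dst IP plus src/dst L4 port for TCP, and src/dst IP only for UDP
 * unless the matching IGB_FLAG_RSS_FIELD_IPV*_UDP flag is set.
 */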
2605
2606static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2607#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
2608 void *rule_locs)
2609#else
2610 u32 *rule_locs)
2611#endif
2612{
2613 struct igb_adapter *adapter = netdev_priv(dev);
2614 int ret = -EOPNOTSUPP;
2615
2616 switch (cmd->cmd) {
2617 case ETHTOOL_GRXRINGS:
2618 cmd->data = adapter->num_rx_queues;
2619 ret = 0;
2620 break;
2621 case ETHTOOL_GRXFH:
2622 ret = igb_get_rss_hash_opts(adapter, cmd);
2623 break;
2624 default:
2625 break;
2626 }
2627
2628 return ret;
2629}
2630
2631#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
2632 IGB_FLAG_RSS_FIELD_IPV6_UDP)
2633static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
2634 struct ethtool_rxnfc *nfc)
2635{
2636 u32 flags = adapter->flags;
2637
2638 /*
2639 * RSS does not support anything other than hashing
2640 * to queues on src and dst IPs and ports
2641 */
2642 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2643 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2644 return -EINVAL;
2645
2646 switch (nfc->flow_type) {
2647 case TCP_V4_FLOW:
2648 case TCP_V6_FLOW:
2649 if (!(nfc->data & RXH_IP_SRC) ||
2650 !(nfc->data & RXH_IP_DST) ||
2651 !(nfc->data & RXH_L4_B_0_1) ||
2652 !(nfc->data & RXH_L4_B_2_3))
2653 return -EINVAL;
2654 break;
2655 case UDP_V4_FLOW:
2656 if (!(nfc->data & RXH_IP_SRC) ||
2657 !(nfc->data & RXH_IP_DST))
2658 return -EINVAL;
2659 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2660 case 0:
2661 flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
2662 break;
2663 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2664 flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
2665 break;
2666 default:
2667 return -EINVAL;
2668 }
2669 break;
2670 case UDP_V6_FLOW:
2671 if (!(nfc->data & RXH_IP_SRC) ||
2672 !(nfc->data & RXH_IP_DST))
2673 return -EINVAL;
2674 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2675 case 0:
2676 flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
2677 break;
2678 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2679 flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
2680 break;
2681 default:
2682 return -EINVAL;
2683 }
2684 break;
2685 case AH_ESP_V4_FLOW:
2686 case AH_V4_FLOW:
2687 case ESP_V4_FLOW:
2688 case SCTP_V4_FLOW:
2689 case AH_ESP_V6_FLOW:
2690 case AH_V6_FLOW:
2691 case ESP_V6_FLOW:
2692 case SCTP_V6_FLOW:
2693 if (!(nfc->data & RXH_IP_SRC) ||
2694 !(nfc->data & RXH_IP_DST) ||
2695 (nfc->data & RXH_L4_B_0_1) ||
2696 (nfc->data & RXH_L4_B_2_3))
2697 return -EINVAL;
2698 break;
2699 default:
2700 return -EINVAL;
2701 }
2702
2703 /* if we changed something we need to update flags */
2704 if (flags != adapter->flags) {
2705 struct e1000_hw *hw = &adapter->hw;
2706 u32 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2707
2708 if ((flags & UDP_RSS_FLAGS) &&
2709 !(adapter->flags & UDP_RSS_FLAGS))
2710 DPRINTK(DRV, WARNING,
2711 "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2712
2713 adapter->flags = flags;
2714
2715 /* Perform hash on these packet types */
2716 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2717 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2718 E1000_MRQC_RSS_FIELD_IPV6 |
2719 E1000_MRQC_RSS_FIELD_IPV6_TCP;
2720
2721 mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
2722 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2723
2724 if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
2725 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
2726
2727 if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
2728 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
2729
2730 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2731 }
2732
2733 return 0;
2734}
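
/*
 * Example (illustrative only; interface name assumed): toggling UDP
 * 4-tuple hashing maps directly onto the MRQC update above:
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn    sets IGB_FLAG_RSS_FIELD_IPV4_UDP
 *	                                          and E1000_MRQC_RSS_FIELD_IPV4_UDP
 *	ethtool -N eth0 rx-flow-hash udp4 sd      clears both again
 *
 * The fragmented-packet warning fires on the first transition from no
 * UDP hashing to any UDP hashing.
 */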
2735
2736static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2737{
2738 struct igb_adapter *adapter = netdev_priv(dev);
2739 int ret = -EOPNOTSUPP;
2740
2741 switch (cmd->cmd) {
2742 case ETHTOOL_SRXFH:
2743 ret = igb_set_rss_hash_opt(adapter, cmd);
2744 break;
2745 default:
2746 break;
2747 }
2748
2749 return ret;
2750}
2751#endif /* ETHTOOL_GRXRINGS */
2752
2753static const struct ethtool_ops igb_ethtool_ops = {
2754 .get_settings = igb_get_settings,
2755 .set_settings = igb_set_settings,
2756 .get_drvinfo = igb_get_drvinfo,
2757 .get_regs_len = igb_get_regs_len,
2758 .get_regs = igb_get_regs,
2759 .get_wol = igb_get_wol,
2760 .set_wol = igb_set_wol,
2761 .get_msglevel = igb_get_msglevel,
2762 .set_msglevel = igb_set_msglevel,
2763 .nway_reset = igb_nway_reset,
2764 .get_link = igb_get_link,
2765 .get_eeprom_len = igb_get_eeprom_len,
2766 .get_eeprom = igb_get_eeprom,
2767 .set_eeprom = igb_set_eeprom,
2768 .get_ringparam = igb_get_ringparam,
2769 .set_ringparam = igb_set_ringparam,
2770 .get_pauseparam = igb_get_pauseparam,
2771 .set_pauseparam = igb_set_pauseparam,
2772 .self_test = igb_diag_test,
2773 .get_strings = igb_get_strings,
2774#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
2775#ifdef HAVE_ETHTOOL_SET_PHYS_ID
2776 .set_phys_id = igb_set_phys_id,
2777#else
2778 .phys_id = igb_phys_id,
2779#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
2780#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
2781#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
2782 .get_sset_count = igb_get_sset_count,
2783#else
2784 .get_stats_count = igb_get_stats_count,
2785 .self_test_count = igb_diag_test_count,
2786#endif
2787 .get_ethtool_stats = igb_get_ethtool_stats,
2788#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
2789 .get_perm_addr = ethtool_op_get_perm_addr,
2790#endif
2791 .get_coalesce = igb_get_coalesce,
2792 .set_coalesce = igb_set_coalesce,
2793#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
2794#ifdef HAVE_ETHTOOL_GET_TS_INFO
2795 .get_ts_info = igb_get_ts_info,
2796#endif /* HAVE_ETHTOOL_GET_TS_INFO */
2797#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
2798#ifdef CONFIG_PM_RUNTIME
2799 .begin = igb_ethtool_begin,
2800 .complete = igb_ethtool_complete,
2801#endif /* CONFIG_PM_RUNTIME */
2802#ifndef HAVE_NDO_SET_FEATURES
2803 .get_rx_csum = igb_get_rx_csum,
2804 .set_rx_csum = igb_set_rx_csum,
2805 .get_tx_csum = ethtool_op_get_tx_csum,
2806 .set_tx_csum = igb_set_tx_csum,
2807 .get_sg = ethtool_op_get_sg,
2808 .set_sg = ethtool_op_set_sg,
2809#ifdef NETIF_F_TSO
2810 .get_tso = ethtool_op_get_tso,
2811 .set_tso = igb_set_tso,
2812#endif
2813#ifdef ETHTOOL_GFLAGS
2814 .get_flags = ethtool_op_get_flags,
2815 .set_flags = igb_set_flags,
2816#endif /* ETHTOOL_GFLAGS */
2817#endif /* HAVE_NDO_SET_FEATURES */
2818#ifdef ETHTOOL_GADV_COAL
	.get_advcoal = igb_get_dmac,
	.set_advcoal = igb_set_adv_coal,
2821#endif /* ETHTOOL_GADV_COAL */
2822#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
2823#ifdef ETHTOOL_GEEE
2824 .get_eee = igb_get_eee,
2825#endif
2826#ifdef ETHTOOL_SEEE
2827 .set_eee = igb_set_eee,
2828#endif
2829#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
2830#ifdef ETHTOOL_GRXRINGS
2831 .get_rxnfc = igb_get_rxnfc,
2832 .set_rxnfc = igb_set_rxnfc,
2833#endif
2834};
2835
2836#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
2837static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
2838 .size = sizeof(struct ethtool_ops_ext),
2839 .get_ts_info = igb_get_ts_info,
2840 .set_phys_id = igb_set_phys_id,
2841 .get_eee = igb_get_eee,
2842 .set_eee = igb_set_eee,
2843};
2844
2845void igb_set_ethtool_ops(struct net_device *netdev)
2846{
2847 SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
2848 set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
2849}
2850#else
2851void igb_set_ethtool_ops(struct net_device *netdev)
2852{
2853 /* have to "undeclare" const on this struct to remove warnings */
2854 SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
2855}
2856#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
2857#endif /* SIOCETHTOOL */